#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# November 2014
#
# ChaCha20 for x86_64.
#
# December 2016
#
# Add AVX512F code path.
#
# Performance in cycles per byte for a large buffer.
#
#		IALU/gcc 4.8(i)	1xSSSE3/SSE2	4xSSSE3	    NxAVX(v)
#
# P4		9.48/+99%	-/22.7(ii)	-
# Core2		7.83/+55%	7.90/8.08	4.35
# Westmere	7.19/+50%	5.60/6.70	3.00
# Sandy Bridge	8.31/+42%	5.45/6.76	2.72
# Ivy Bridge	6.71/+46%	5.40/6.49	2.41
# Haswell	5.92/+43%	5.20/6.45	2.42	    1.23
# Skylake[-X]	5.87/+39%	4.70/-		2.31	    1.19[0.57]
# Silvermont	12.0/+33%	7.75/7.40	7.03(iii)
# Knights L	11.7/-		-		9.60(iii)   0.80
# Goldmont	10.6/+17%	5.10/-		3.28
# Sledgehammer	7.28/+52%	-/14.2(ii)	-
# Bulldozer	9.66/+28%	9.85/11.1	3.06(iv)
# Ryzen		5.96/+50%	5.19/-		2.40        2.09
# VIA Nano	10.5/+46%	6.72/8.60	6.05
#
# (i)	compared to older gcc 3.x, one can observe a >2x improvement on
#	most platforms;
# (ii)	as can be seen, SSE2 performance is too low on legacy
#	processors; NxSSE2 results are naturally better, but not
#	impressively better than IALU ones, which is why you won't
#	find SSE2 code below;
# (iii)	this is not an optimal result for Atom because of MSROM
#	limitations; SSE2 can do better, but the gain is considered too
#	low to justify the [maintenance] effort;
# (iv)	Bulldozer actually executes the 4xXOP code path, which delivers
#	2.20;
#
# Modified from upstream OpenSSL to remove the XOP code.

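# This script emits x86_64 assembly via the perlasm framework. A sketch of
# the invocation (the exact flavour and output names come from the build
# system):
#   perl chacha-x86_64.pl <flavour> <output-file>
# The generated code is post-processed by the x86_64-xlate.pl translator
# located below.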
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

$avx = 2;

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

# input parameter block
($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");
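# In C terms, the generated entry points take arguments in the order above;
# a presumed prototype (not stated in this file) is:
#   void ChaCha20_ctr32_*(uint8_t *out, const uint8_t *in, size_t in_len,
#                         const uint32_t key[8], const uint32_t counter[4]);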

$code.=<<___;
.text

.section .rodata
.align	64
.Lzero:
.long	0,0,0,0
.Lone:
.long	1,0,0,0
.Linc:
.long	0,1,2,3
.Lfour:
.long	4,4,4,4
.Lincy:
.long	0,2,4,6,1,3,5,7
.Leight:
.long	8,8,8,8,8,8,8,8
.Lrot16:
.byte	0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
.Lrot24:
.byte	0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
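# .Lrot16 and .Lrot24 above are pshufb byte-shuffle masks: within each
# 32-bit lane they rotate left by 16 and by 8 bits respectively (the
# latter being the equivalent of a right-rotation by 24, hence the name).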
.Lsigma:
.asciz	"expand 32-byte k"
.align	64
.Lzeroz:
.long	0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
.Lfourz:
.long	4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
.Lincz:
.long	0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.Lsixteen:
.long	16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
.asciz	"ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.text
___

sub AUTOLOAD()          # thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
}
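# For example, &rol(@x[$d0],16) with @x[$d0] eq "%edx" appends
# "rol $16,%edx" to $code: the last argument is popped, prefixed with '$'
# when numeric, and the remaining operands are reversed into AT&T
# operand order.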

@x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
    "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
@t=("%esi","%edi");
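# The four "%nox" placeholders stand in for x[8..11], the 'c' words of the
# state, which live on the stack rather than in registers (see the note in
# ROUND below); @t holds the pair of 'c' values currently in flight.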

sub ROUND {			# critical path is 24 cycles per round
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
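# Each map above keeps the high bits of an index (its column: a, b, c or d)
# and increments the low two bits modulo 4, stepping to the next row of the
# table below; e.g. (0,4,8,12) becomes (1,5,9,13).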
my ($xc,$xc_)=map("\"$_\"",@t);
my @x=map("\"$_\"",@x);

	# Consider the order in which the variables are addressed, by
	# their index:
	#
	#	a   b   c   d
	#
	#	0   4   8  12 < even round
	#	1   5   9  13
	#	2   6  10  14
	#	3   7  11  15
	#	0   5  10  15 < odd round
	#	1   6  11  12
	#	2   7   8  13
	#	3   4   9  14
	#
	# The 'a', 'b' and 'd' values are permanently allocated in
	# registers, @x[0..7,12..15], while the 'c' values are maintained
	# in memory. If you observe the 'c' column, you'll notice that a
	# pair of 'c's stays invariant between rounds, so they only have
	# to be reloaded once per round, in the middle. This is why you'll
	# see a bunch of 'c' stores and loads in the middle, but none at
	# the beginning or end.

	# Normally the instructions would be interleaved to favour
	# in-order execution. Out-of-order cores generally manage it
	# gracefully, but not this time for some reason. As in-order
	# cores are a dying breed and old Atom is the only one still
	# around, the instructions are left uninterleaved. Besides, Atom
	# is better off executing the 1xSSSE3 code anyway...

	(
	"&add	(@x[$a0],@x[$b0])",	# Q1
	"&xor	(@x[$d0],@x[$a0])",
	"&rol	(@x[$d0],16)",
	 "&add	(@x[$a1],@x[$b1])",	# Q2
	 "&xor	(@x[$d1],@x[$a1])",
	 "&rol	(@x[$d1],16)",

	"&add	($xc,@x[$d0])",
	"&xor	(@x[$b0],$xc)",
	"&rol	(@x[$b0],12)",
	 "&add	($xc_,@x[$d1])",
	 "&xor	(@x[$b1],$xc_)",
	 "&rol	(@x[$b1],12)",

	"&add	(@x[$a0],@x[$b0])",
	"&xor	(@x[$d0],@x[$a0])",
	"&rol	(@x[$d0],8)",
	 "&add	(@x[$a1],@x[$b1])",
	 "&xor	(@x[$d1],@x[$a1])",
	 "&rol	(@x[$d1],8)",

	"&add	($xc,@x[$d0])",
	"&xor	(@x[$b0],$xc)",
	"&rol	(@x[$b0],7)",
	 "&add	($xc_,@x[$d1])",
	 "&xor	(@x[$b1],$xc_)",
	 "&rol	(@x[$b1],7)",

	"&mov	(\"4*$c0(%rsp)\",$xc)",	# reload pair of 'c's
	 "&mov	(\"4*$c1(%rsp)\",$xc_)",
	"&mov	($xc,\"4*$c2(%rsp)\")",
	 "&mov	($xc_,\"4*$c3(%rsp)\")",

	"&add	(@x[$a2],@x[$b2])",	# Q3
	"&xor	(@x[$d2],@x[$a2])",
	"&rol	(@x[$d2],16)",
	 "&add	(@x[$a3],@x[$b3])",	# Q4
	 "&xor	(@x[$d3],@x[$a3])",
	 "&rol	(@x[$d3],16)",

	"&add	($xc,@x[$d2])",
	"&xor	(@x[$b2],$xc)",
	"&rol	(@x[$b2],12)",
	 "&add	($xc_,@x[$d3])",
	 "&xor	(@x[$b3],$xc_)",
	 "&rol	(@x[$b3],12)",

	"&add	(@x[$a2],@x[$b2])",
	"&xor	(@x[$d2],@x[$a2])",
	"&rol	(@x[$d2],8)",
	 "&add	(@x[$a3],@x[$b3])",
	 "&xor	(@x[$d3],@x[$a3])",
	 "&rol	(@x[$d3],8)",

	"&add	($xc,@x[$d2])",
	"&xor	(@x[$b2],$xc)",
	"&rol	(@x[$b2],7)",
	 "&add	($xc_,@x[$d3])",
	 "&xor	(@x[$b3],$xc_)",
	 "&rol	(@x[$b3],7)"
	);
}

########################################################################
# Generic code path that handles all lengths on pre-SSSE3 processors.
$code.=<<___;
.globl	ChaCha20_ctr32_nohw
.type	ChaCha20_ctr32_nohw,\@function,5
.align	64
ChaCha20_ctr32_nohw:
.cfi_startproc
	_CET_ENDBR
	push	%rbx
.cfi_push	rbx
	push	%rbp
.cfi_push	rbp
	push	%r12
.cfi_push	r12
	push	%r13
.cfi_push	r13
	push	%r14
.cfi_push	r14
	push	%r15
.cfi_push	r15
	sub	\$64+24,%rsp
.cfi_adjust_cfa_offset	`64+24`
.Lctr32_body:

	#movdqa	.Lsigma(%rip),%xmm0
	movdqu	($key),%xmm1
	movdqu	16($key),%xmm2
	movdqu	($counter),%xmm3
	movdqa	.Lone(%rip),%xmm4

	#movdqa	%xmm0,4*0(%rsp)		# key[0]
	movdqa	%xmm1,4*4(%rsp)		# key[1]
	movdqa	%xmm2,4*8(%rsp)		# key[2]
	movdqa	%xmm3,4*12(%rsp)	# key[3]
	mov	$len,%rbp		# reassign $len
	jmp	.Loop_outer

.align	32
.Loop_outer:
	mov	\$0x61707865,@x[0]      # 'expa'
	mov	\$0x3320646e,@x[1]      # 'nd 3'
	mov	\$0x79622d32,@x[2]      # '2-by'
	mov	\$0x6b206574,@x[3]      # 'te k'
	mov	4*4(%rsp),@x[4]
	mov	4*5(%rsp),@x[5]
	mov	4*6(%rsp),@x[6]
	mov	4*7(%rsp),@x[7]
	movd	%xmm3,@x[12]
	mov	4*13(%rsp),@x[13]
	mov	4*14(%rsp),@x[14]
	mov	4*15(%rsp),@x[15]

	mov	%rbp,64+0(%rsp)		# save len
	mov	\$10,%ebp
	mov	$inp,64+8(%rsp)		# save inp
	movq	%xmm2,%rsi		# "@x[8]"
	mov	$out,64+16(%rsp)	# save out
	mov	%rsi,%rdi
	shr	\$32,%rdi		# "@x[9]"
	jmp	.Loop

.align	32
.Loop:
___
	foreach (&ROUND (0, 4, 8,12)) { eval; }
	foreach (&ROUND	(0, 5,10,15)) { eval; }
	&dec	("%ebp");
	&jnz	(".Loop");

$code.=<<___;
	mov	@t[1],4*9(%rsp)		# modulo-scheduled
	mov	@t[0],4*8(%rsp)
	mov	64(%rsp),%rbp		# load len
	movdqa	%xmm2,%xmm1
	mov	64+8(%rsp),$inp		# load inp
	paddd	%xmm4,%xmm3		# increment counter
	mov	64+16(%rsp),$out	# load out

	add	\$0x61707865,@x[0]      # 'expa'
	add	\$0x3320646e,@x[1]      # 'nd 3'
	add	\$0x79622d32,@x[2]      # '2-by'
	add	\$0x6b206574,@x[3]      # 'te k'
	add	4*4(%rsp),@x[4]
	add	4*5(%rsp),@x[5]
	add	4*6(%rsp),@x[6]
	add	4*7(%rsp),@x[7]
	add	4*12(%rsp),@x[12]
	add	4*13(%rsp),@x[13]
	add	4*14(%rsp),@x[14]
	add	4*15(%rsp),@x[15]
	paddd	4*8(%rsp),%xmm1

	cmp	\$64,%rbp
	jb	.Ltail

	xor	4*0($inp),@x[0]		# xor with input
	xor	4*1($inp),@x[1]
	xor	4*2($inp),@x[2]
	xor	4*3($inp),@x[3]
	xor	4*4($inp),@x[4]
	xor	4*5($inp),@x[5]
	xor	4*6($inp),@x[6]
	xor	4*7($inp),@x[7]
	movdqu	4*8($inp),%xmm0
	xor	4*12($inp),@x[12]
	xor	4*13($inp),@x[13]
	xor	4*14($inp),@x[14]
	xor	4*15($inp),@x[15]
	lea	4*16($inp),$inp		# inp+=64
	pxor	%xmm1,%xmm0

	movdqa	%xmm2,4*8(%rsp)
	movd	%xmm3,4*12(%rsp)

	mov	@x[0],4*0($out)		# write output
	mov	@x[1],4*1($out)
	mov	@x[2],4*2($out)
	mov	@x[3],4*3($out)
	mov	@x[4],4*4($out)
	mov	@x[5],4*5($out)
	mov	@x[6],4*6($out)
	mov	@x[7],4*7($out)
	movdqu	%xmm0,4*8($out)
	mov	@x[12],4*12($out)
	mov	@x[13],4*13($out)
	mov	@x[14],4*14($out)
	mov	@x[15],4*15($out)
	lea	4*16($out),$out		# out+=64

	sub	\$64,%rbp
	jnz	.Loop_outer

	jmp	.Ldone

.align	16
.Ltail:
	mov	@x[0],4*0(%rsp)
	mov	@x[1],4*1(%rsp)
	xor	%rbx,%rbx
	mov	@x[2],4*2(%rsp)
	mov	@x[3],4*3(%rsp)
	mov	@x[4],4*4(%rsp)
	mov	@x[5],4*5(%rsp)
	mov	@x[6],4*6(%rsp)
	mov	@x[7],4*7(%rsp)
	movdqa	%xmm1,4*8(%rsp)
	mov	@x[12],4*12(%rsp)
	mov	@x[13],4*13(%rsp)
	mov	@x[14],4*14(%rsp)
	mov	@x[15],4*15(%rsp)

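	# Partial final block: the last keystream block was spilled to the
	# stack above; XOR it with the input one byte at a time.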
.Loop_tail:
	movzb	($inp,%rbx),%eax
	movzb	(%rsp,%rbx),%edx
	lea	1(%rbx),%rbx
	xor	%edx,%eax
	mov	%al,-1($out,%rbx)
	dec	%rbp
	jnz	.Loop_tail

.Ldone:
	lea	64+24+48(%rsp),%rsi
	mov	-48(%rsi),%r15
.cfi_restore	r15
	mov	-40(%rsi),%r14
.cfi_restore	r14
	mov	-32(%rsi),%r13
.cfi_restore	r13
	mov	-24(%rsi),%r12
.cfi_restore	r12
	mov	-16(%rsi),%rbp
.cfi_restore	rbp
	mov	-8(%rsi),%rbx
.cfi_restore	rbx
	lea	(%rsi),%rsp
.cfi_adjust_cfa_offset	`-64-24-48`
.Lno_data:
	ret
.cfi_endproc
.size	ChaCha20_ctr32_nohw,.-ChaCha20_ctr32_nohw
___

########################################################################
# SSSE3 code path that handles shorter lengths.
{
my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));

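# SSE has no rotate instruction, so the 12- and 7-bit rotations below are
# emulated with a shift/shift/or triple, while the 16- and 8-bit rotations
# use the pshufb byte-shuffle masks .Lrot16 and .Lrot24.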
sub SSSE3ROUND {	# critical path is 20 "SIMD ticks" per round
	&paddd	($a,$b);
	&pxor	($d,$a);
	&pshufb	($d,$rot16);

	&paddd	($c,$d);
	&pxor	($b,$c);
	&movdqa	($t,$b);
	&psrld	($b,20);
	&pslld	($t,12);
	&por	($b,$t);

	&paddd	($a,$b);
	&pxor	($d,$a);
	&pshufb	($d,$rot24);

	&paddd	($c,$d);
	&pxor	($b,$c);
	&movdqa	($t,$b);
	&psrld	($b,25);
	&pslld	($t,7);
	&por	($b,$t);
}

my $xframe = $win64 ? 32+8 : 8;

$code.=<<___;
.globl	ChaCha20_ctr32_ssse3
.type	ChaCha20_ctr32_ssse3,\@function,5
.align	32
ChaCha20_ctr32_ssse3:
.cfi_startproc
	_CET_ENDBR
	mov	%rsp,%r9		# frame pointer
.cfi_def_cfa_register	r9
___
$code.=<<___;
	sub	\$64+$xframe,%rsp
___
$code.=<<___	if ($win64);
	movaps	%xmm6,-0x28(%r9)
	movaps	%xmm7,-0x18(%r9)
.Lssse3_body:
___
$code.=<<___;
	movdqa	.Lsigma(%rip),$a
	movdqu	($key),$b
	movdqu	16($key),$c
	movdqu	($counter),$d
	movdqa	.Lrot16(%rip),$rot16
	movdqa	.Lrot24(%rip),$rot24

	movdqa	$a,0x00(%rsp)
	movdqa	$b,0x10(%rsp)
	movdqa	$c,0x20(%rsp)
	movdqa	$d,0x30(%rsp)
	mov	\$10,$counter		# reuse $counter
	jmp	.Loop_ssse3

.align	32
.Loop_outer_ssse3:
	movdqa	.Lone(%rip),$d
	movdqa	0x00(%rsp),$a
	movdqa	0x10(%rsp),$b
	movdqa	0x20(%rsp),$c
	paddd	0x30(%rsp),$d
	mov	\$10,$counter
	movdqa	$d,0x30(%rsp)
	jmp	.Loop_ssse3

.align	32
.Loop_ssse3:
___
	&SSSE3ROUND();
	&pshufd	($c,$c,0b01001110);
	&pshufd	($b,$b,0b00111001);
	&pshufd	($d,$d,0b10010011);
	&nop	();

	&SSSE3ROUND();
	&pshufd	($c,$c,0b01001110);
	&pshufd	($b,$b,0b10010011);
	&pshufd	($d,$d,0b00111001);

	&dec	($counter);
	&jnz	(".Loop_ssse3");

$code.=<<___;
	paddd	0x00(%rsp),$a
	paddd	0x10(%rsp),$b
	paddd	0x20(%rsp),$c
	paddd	0x30(%rsp),$d

	cmp	\$64,$len
	jb	.Ltail_ssse3

	movdqu	0x00($inp),$t
	movdqu	0x10($inp),$t1
	pxor	$t,$a			# xor with input
	movdqu	0x20($inp),$t
	pxor	$t1,$b
	movdqu	0x30($inp),$t1
	lea	0x40($inp),$inp		# inp+=64
	pxor	$t,$c
	pxor	$t1,$d

	movdqu	$a,0x00($out)		# write output
	movdqu	$b,0x10($out)
	movdqu	$c,0x20($out)
	movdqu	$d,0x30($out)
	lea	0x40($out),$out		# out+=64

	sub	\$64,$len
	jnz	.Loop_outer_ssse3

	jmp	.Ldone_ssse3

.align	16
.Ltail_ssse3:
	movdqa	$a,0x00(%rsp)
	movdqa	$b,0x10(%rsp)
	movdqa	$c,0x20(%rsp)
	movdqa	$d,0x30(%rsp)
	xor	$counter,$counter

.Loop_tail_ssse3:
	movzb	($inp,$counter),%eax
	movzb	(%rsp,$counter),%ecx
	lea	1($counter),$counter
	xor	%ecx,%eax
	mov	%al,-1($out,$counter)
	dec	$len
	jnz	.Loop_tail_ssse3

.Ldone_ssse3:
___
$code.=<<___	if ($win64);
	movaps	-0x28(%r9),%xmm6
	movaps	-0x18(%r9),%xmm7
___
$code.=<<___;
	lea	(%r9),%rsp
.cfi_def_cfa_register	rsp
.Lssse3_epilogue:
	ret
.cfi_endproc
.size	ChaCha20_ctr32_ssse3,.-ChaCha20_ctr32_ssse3
___
}

########################################################################
# SSSE3 code path that handles longer messages.
{
# assign variables to favor Atom front-end
my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
    $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
	"%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);

sub SSSE3_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
my @x=map("\"$_\"",@xx);

	# Consider the order in which the variables are addressed, by
	# their index:
	#
	#	a   b   c   d
	#
	#	0   4   8  12 < even round
	#	1   5   9  13
	#	2   6  10  14
	#	3   7  11  15
	#	0   5  10  15 < odd round
	#	1   6  11  12
	#	2   7   8  13
	#	3   4   9  14
	#
	# The 'a', 'b' and 'd' values are permanently allocated in
	# registers, @x[0..7,12..15], while the 'c' values are maintained
	# in memory. If you observe the 'c' column, you'll notice that a
	# pair of 'c's stays invariant between rounds, so they only have
	# to be reloaded once per round, in the middle. This is why you'll
	# see a bunch of 'c' stores and loads in the middle, but none at
	# the beginning or end.

	(
	"&paddd		(@x[$a0],@x[$b0])",	# Q1
	 "&paddd	(@x[$a1],@x[$b1])",	# Q2
	"&pxor		(@x[$d0],@x[$a0])",
	 "&pxor		(@x[$d1],@x[$a1])",
	"&pshufb	(@x[$d0],$t1)",
	 "&pshufb	(@x[$d1],$t1)",

	"&paddd		($xc,@x[$d0])",
	 "&paddd	($xc_,@x[$d1])",
	"&pxor		(@x[$b0],$xc)",
	 "&pxor		(@x[$b1],$xc_)",
	"&movdqa	($t0,@x[$b0])",
	"&pslld		(@x[$b0],12)",
	"&psrld		($t0,20)",
	 "&movdqa	($t1,@x[$b1])",
	 "&pslld	(@x[$b1],12)",
	"&por		(@x[$b0],$t0)",
	 "&psrld	($t1,20)",
	"&movdqa	($t0,'(%r11)')",	# .Lrot24(%rip)
	 "&por		(@x[$b1],$t1)",

	"&paddd		(@x[$a0],@x[$b0])",
	 "&paddd	(@x[$a1],@x[$b1])",
	"&pxor		(@x[$d0],@x[$a0])",
	 "&pxor		(@x[$d1],@x[$a1])",
	"&pshufb	(@x[$d0],$t0)",
	 "&pshufb	(@x[$d1],$t0)",

	"&paddd		($xc,@x[$d0])",
	 "&paddd	($xc_,@x[$d1])",
	"&pxor		(@x[$b0],$xc)",
	 "&pxor		(@x[$b1],$xc_)",
	"&movdqa	($t1,@x[$b0])",
	"&pslld		(@x[$b0],7)",
	"&psrld		($t1,25)",
	 "&movdqa	($t0,@x[$b1])",
	 "&pslld	(@x[$b1],7)",
	"&por		(@x[$b0],$t1)",
	 "&psrld	($t0,25)",
	"&movdqa	($t1,'(%r10)')",	# .Lrot16(%rip)
	 "&por		(@x[$b1],$t0)",

	"&movdqa	(\"`16*($c0-8)`(%rsp)\",$xc)",	# reload pair of 'c's
	 "&movdqa	(\"`16*($c1-8)`(%rsp)\",$xc_)",
	"&movdqa	($xc,\"`16*($c2-8)`(%rsp)\")",
	 "&movdqa	($xc_,\"`16*($c3-8)`(%rsp)\")",

	"&paddd		(@x[$a2],@x[$b2])",	# Q3
	 "&paddd	(@x[$a3],@x[$b3])",	# Q4
	"&pxor		(@x[$d2],@x[$a2])",
	 "&pxor		(@x[$d3],@x[$a3])",
	"&pshufb	(@x[$d2],$t1)",
	 "&pshufb	(@x[$d3],$t1)",

	"&paddd		($xc,@x[$d2])",
	 "&paddd	($xc_,@x[$d3])",
	"&pxor		(@x[$b2],$xc)",
	 "&pxor		(@x[$b3],$xc_)",
	"&movdqa	($t0,@x[$b2])",
	"&pslld		(@x[$b2],12)",
	"&psrld		($t0,20)",
	 "&movdqa	($t1,@x[$b3])",
	 "&pslld	(@x[$b3],12)",
	"&por		(@x[$b2],$t0)",
	 "&psrld	($t1,20)",
	"&movdqa	($t0,'(%r11)')",	# .Lrot24(%rip)
	 "&por		(@x[$b3],$t1)",

	"&paddd		(@x[$a2],@x[$b2])",
	 "&paddd	(@x[$a3],@x[$b3])",
	"&pxor		(@x[$d2],@x[$a2])",
	 "&pxor		(@x[$d3],@x[$a3])",
	"&pshufb	(@x[$d2],$t0)",
	 "&pshufb	(@x[$d3],$t0)",

	"&paddd		($xc,@x[$d2])",
	 "&paddd	($xc_,@x[$d3])",
	"&pxor		(@x[$b2],$xc)",
	 "&pxor		(@x[$b3],$xc_)",
	"&movdqa	($t1,@x[$b2])",
	"&pslld		(@x[$b2],7)",
	"&psrld		($t1,25)",
	 "&movdqa	($t0,@x[$b3])",
	 "&pslld	(@x[$b3],7)",
	"&por		(@x[$b2],$t1)",
	 "&psrld	($t0,25)",
	"&movdqa	($t1,'(%r10)')",	# .Lrot16(%rip)
	 "&por		(@x[$b3],$t0)"
	);
}

my $xframe = $win64 ? 0xa8 : 8;

$code.=<<___;
.globl	ChaCha20_ctr32_ssse3_4x
.type	ChaCha20_ctr32_ssse3_4x,\@function,5
.align	32
ChaCha20_ctr32_ssse3_4x:
.cfi_startproc
	_CET_ENDBR
	mov		%rsp,%r9		# frame pointer
.cfi_def_cfa_register	r9
	mov		%r10,%r11
___
$code.=<<___;
	sub		\$0x140+$xframe,%rsp
___
	################ stack layout
	# +0x00		SIMD equivalent of @x[8-11]
	# ...
	# +0x40		constant copy of key[0-2] smashed by lanes
	# ...
	# +0x100	SIMD counters (with nonce smashed by lanes)
	# ...
	# +0x140
$code.=<<___	if ($win64);
	movaps		%xmm6,-0xa8(%r9)
	movaps		%xmm7,-0x98(%r9)
	movaps		%xmm8,-0x88(%r9)
	movaps		%xmm9,-0x78(%r9)
	movaps		%xmm10,-0x68(%r9)
	movaps		%xmm11,-0x58(%r9)
	movaps		%xmm12,-0x48(%r9)
	movaps		%xmm13,-0x38(%r9)
	movaps		%xmm14,-0x28(%r9)
	movaps		%xmm15,-0x18(%r9)
.L4x_body:
___
$code.=<<___;
	movdqa		.Lsigma(%rip),$xa3	# key[0]
	movdqu		($key),$xb3		# key[1]
	movdqu		16($key),$xt3		# key[2]
	movdqu		($counter),$xd3		# key[3]
	lea		0x100(%rsp),%rcx	# size optimization
	lea		.Lrot16(%rip),%r10
	lea		.Lrot24(%rip),%r11

	pshufd		\$0x00,$xa3,$xa0	# smash key by lanes...
	pshufd		\$0x55,$xa3,$xa1
	movdqa		$xa0,0x40(%rsp)		# ... and offload
	pshufd		\$0xaa,$xa3,$xa2
	movdqa		$xa1,0x50(%rsp)
	pshufd		\$0xff,$xa3,$xa3
	movdqa		$xa2,0x60(%rsp)
	movdqa		$xa3,0x70(%rsp)

	pshufd		\$0x00,$xb3,$xb0
	pshufd		\$0x55,$xb3,$xb1
	movdqa		$xb0,0x80-0x100(%rcx)
	pshufd		\$0xaa,$xb3,$xb2
	movdqa		$xb1,0x90-0x100(%rcx)
	pshufd		\$0xff,$xb3,$xb3
	movdqa		$xb2,0xa0-0x100(%rcx)
	movdqa		$xb3,0xb0-0x100(%rcx)

	pshufd		\$0x00,$xt3,$xt0	# "$xc0"
	pshufd		\$0x55,$xt3,$xt1	# "$xc1"
	movdqa		$xt0,0xc0-0x100(%rcx)
	pshufd		\$0xaa,$xt3,$xt2	# "$xc2"
	movdqa		$xt1,0xd0-0x100(%rcx)
	pshufd		\$0xff,$xt3,$xt3	# "$xc3"
	movdqa		$xt2,0xe0-0x100(%rcx)
	movdqa		$xt3,0xf0-0x100(%rcx)

	pshufd		\$0x00,$xd3,$xd0
	pshufd		\$0x55,$xd3,$xd1
	paddd		.Linc(%rip),$xd0	# don't save counters yet
	pshufd		\$0xaa,$xd3,$xd2
	movdqa		$xd1,0x110-0x100(%rcx)
	pshufd		\$0xff,$xd3,$xd3
	movdqa		$xd2,0x120-0x100(%rcx)
	movdqa		$xd3,0x130-0x100(%rcx)

	jmp		.Loop_enter4x

.align	32
.Loop_outer4x:
	movdqa		0x40(%rsp),$xa0		# re-load smashed key
	movdqa		0x50(%rsp),$xa1
	movdqa		0x60(%rsp),$xa2
	movdqa		0x70(%rsp),$xa3
	movdqa		0x80-0x100(%rcx),$xb0
	movdqa		0x90-0x100(%rcx),$xb1
	movdqa		0xa0-0x100(%rcx),$xb2
	movdqa		0xb0-0x100(%rcx),$xb3
	movdqa		0xc0-0x100(%rcx),$xt0	# "$xc0"
	movdqa		0xd0-0x100(%rcx),$xt1	# "$xc1"
	movdqa		0xe0-0x100(%rcx),$xt2	# "$xc2"
	movdqa		0xf0-0x100(%rcx),$xt3	# "$xc3"
	movdqa		0x100-0x100(%rcx),$xd0
	movdqa		0x110-0x100(%rcx),$xd1
	movdqa		0x120-0x100(%rcx),$xd2
	movdqa		0x130-0x100(%rcx),$xd3
	paddd		.Lfour(%rip),$xd0	# next SIMD counters

.Loop_enter4x:
	movdqa		$xt2,0x20(%rsp)		# SIMD equivalent of "@x[10]"
	movdqa		$xt3,0x30(%rsp)		# SIMD equivalent of "@x[11]"
	movdqa		(%r10),$xt3		# .Lrot16(%rip)
	mov		\$10,%eax
	movdqa		$xd0,0x100-0x100(%rcx)	# save SIMD counters
	jmp		.Loop4x

.align	32
.Loop4x:
___
	foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
	foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	dec		%eax
	jnz		.Loop4x

	paddd		0x40(%rsp),$xa0		# accumulate key material
	paddd		0x50(%rsp),$xa1
	paddd		0x60(%rsp),$xa2
	paddd		0x70(%rsp),$xa3

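	# Up to here each register holds one state word across all four
	# blocks (one block per dword lane). The punpck{l,h}dq and
	# punpck{l,h}qdq sequences below transpose each group of four
	# registers so that every result holds four consecutive words of
	# a single block, ready to be XORed against the input.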
	movdqa		$xa0,$xt2		# "de-interlace" data
	punpckldq	$xa1,$xa0
	movdqa		$xa2,$xt3
	punpckldq	$xa3,$xa2
	punpckhdq	$xa1,$xt2
	punpckhdq	$xa3,$xt3
	movdqa		$xa0,$xa1
	punpcklqdq	$xa2,$xa0		# "a0"
	movdqa		$xt2,$xa3
	punpcklqdq	$xt3,$xt2		# "a2"
	punpckhqdq	$xa2,$xa1		# "a1"
	punpckhqdq	$xt3,$xa3		# "a3"
___
	($xa2,$xt2)=($xt2,$xa2);
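	# Swaps like the one above only rebind Perl-level names; no register
	# move is emitted. The transposed word is simply tracked in whichever
	# register it happened to land in.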
$code.=<<___;
	paddd		0x80-0x100(%rcx),$xb0
	paddd		0x90-0x100(%rcx),$xb1
	paddd		0xa0-0x100(%rcx),$xb2
	paddd		0xb0-0x100(%rcx),$xb3

	movdqa		$xa0,0x00(%rsp)		# offload $xaN
	movdqa		$xa1,0x10(%rsp)
	movdqa		0x20(%rsp),$xa0		# "xc2"
	movdqa		0x30(%rsp),$xa1		# "xc3"

	movdqa		$xb0,$xt2
	punpckldq	$xb1,$xb0
	movdqa		$xb2,$xt3
	punpckldq	$xb3,$xb2
	punpckhdq	$xb1,$xt2
	punpckhdq	$xb3,$xt3
	movdqa		$xb0,$xb1
	punpcklqdq	$xb2,$xb0		# "b0"
	movdqa		$xt2,$xb3
	punpcklqdq	$xt3,$xt2		# "b2"
	punpckhqdq	$xb2,$xb1		# "b1"
	punpckhqdq	$xt3,$xb3		# "b3"
___
	($xb2,$xt2)=($xt2,$xb2);
	my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
$code.=<<___;
	paddd		0xc0-0x100(%rcx),$xc0
	paddd		0xd0-0x100(%rcx),$xc1
	paddd		0xe0-0x100(%rcx),$xc2
	paddd		0xf0-0x100(%rcx),$xc3

	movdqa		$xa2,0x20(%rsp)		# keep offloading $xaN
	movdqa		$xa3,0x30(%rsp)

	movdqa		$xc0,$xt2
	punpckldq	$xc1,$xc0
	movdqa		$xc2,$xt3
	punpckldq	$xc3,$xc2
	punpckhdq	$xc1,$xt2
	punpckhdq	$xc3,$xt3
	movdqa		$xc0,$xc1
	punpcklqdq	$xc2,$xc0		# "c0"
	movdqa		$xt2,$xc3
	punpcklqdq	$xt3,$xt2		# "c2"
	punpckhqdq	$xc2,$xc1		# "c1"
	punpckhqdq	$xt3,$xc3		# "c3"
___
	($xc2,$xt2)=($xt2,$xc2);
	($xt0,$xt1)=($xa2,$xa3);		# use $xaN as temporary
$code.=<<___;
	paddd		0x100-0x100(%rcx),$xd0
	paddd		0x110-0x100(%rcx),$xd1
	paddd		0x120-0x100(%rcx),$xd2
	paddd		0x130-0x100(%rcx),$xd3

	movdqa		$xd0,$xt2
	punpckldq	$xd1,$xd0
	movdqa		$xd2,$xt3
	punpckldq	$xd3,$xd2
	punpckhdq	$xd1,$xt2
	punpckhdq	$xd3,$xt3
	movdqa		$xd0,$xd1
	punpcklqdq	$xd2,$xd0		# "d0"
	movdqa		$xt2,$xd3
	punpcklqdq	$xt3,$xt2		# "d2"
	punpckhqdq	$xd2,$xd1		# "d1"
	punpckhqdq	$xt3,$xd3		# "d3"
___
	($xd2,$xt2)=($xt2,$xd2);
$code.=<<___;
	cmp		\$64*4,$len
	jb		.Ltail4x

	movdqu		0x00($inp),$xt0		# xor with input
	movdqu		0x10($inp),$xt1
	movdqu		0x20($inp),$xt2
	movdqu		0x30($inp),$xt3
	pxor		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	pxor		$xb0,$xt1
	pxor		$xc0,$xt2
	pxor		$xd0,$xt3

	 movdqu		$xt0,0x00($out)
	movdqu		0x40($inp),$xt0
	 movdqu		$xt1,0x10($out)
	movdqu		0x50($inp),$xt1
	 movdqu		$xt2,0x20($out)
	movdqu		0x60($inp),$xt2
	 movdqu		$xt3,0x30($out)
	movdqu		0x70($inp),$xt3
	lea		0x80($inp),$inp		# size optimization
	pxor		0x10(%rsp),$xt0
	pxor		$xb1,$xt1
	pxor		$xc1,$xt2
	pxor		$xd1,$xt3

	 movdqu		$xt0,0x40($out)
	movdqu		0x00($inp),$xt0
	 movdqu		$xt1,0x50($out)
	movdqu		0x10($inp),$xt1
	 movdqu		$xt2,0x60($out)
	movdqu		0x20($inp),$xt2
	 movdqu		$xt3,0x70($out)
	 lea		0x80($out),$out		# size optimization
	movdqu		0x30($inp),$xt3
	pxor		0x20(%rsp),$xt0
	pxor		$xb2,$xt1
	pxor		$xc2,$xt2
	pxor		$xd2,$xt3

	 movdqu		$xt0,0x00($out)
	movdqu		0x40($inp),$xt0
	 movdqu		$xt1,0x10($out)
	movdqu		0x50($inp),$xt1
	 movdqu		$xt2,0x20($out)
	movdqu		0x60($inp),$xt2
	 movdqu		$xt3,0x30($out)
	movdqu		0x70($inp),$xt3
	lea		0x80($inp),$inp		# inp+=64*4
	pxor		0x30(%rsp),$xt0
	pxor		$xb3,$xt1
	pxor		$xc3,$xt2
	pxor		$xd3,$xt3
	movdqu		$xt0,0x40($out)
	movdqu		$xt1,0x50($out)
	movdqu		$xt2,0x60($out)
	movdqu		$xt3,0x70($out)
	lea		0x80($out),$out		# out+=64*4

	sub		\$64*4,$len
	jnz		.Loop_outer4x

	jmp		.Ldone4x

.Ltail4x:
	cmp		\$192,$len
	jae		.L192_or_more4x
	cmp		\$128,$len
	jae		.L128_or_more4x
	cmp		\$64,$len
	jae		.L64_or_more4x

	#movdqa		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	xor		%r10,%r10
	#movdqa		$xt0,0x00(%rsp)
	movdqa		$xb0,0x10(%rsp)
	movdqa		$xc0,0x20(%rsp)
	movdqa		$xd0,0x30(%rsp)
	jmp		.Loop_tail4x

.align	32
.L64_or_more4x:
	movdqu		0x00($inp),$xt0		# xor with input
	movdqu		0x10($inp),$xt1
	movdqu		0x20($inp),$xt2
	movdqu		0x30($inp),$xt3
	pxor		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	pxor		$xb0,$xt1
	pxor		$xc0,$xt2
	pxor		$xd0,$xt3
	movdqu		$xt0,0x00($out)
	movdqu		$xt1,0x10($out)
	movdqu		$xt2,0x20($out)
	movdqu		$xt3,0x30($out)
	je		.Ldone4x

	movdqa		0x10(%rsp),$xt0		# $xaN is offloaded, remember?
	lea		0x40($inp),$inp		# inp+=64*1
	xor		%r10,%r10
	movdqa		$xt0,0x00(%rsp)
	movdqa		$xb1,0x10(%rsp)
	lea		0x40($out),$out		# out+=64*1
	movdqa		$xc1,0x20(%rsp)
	sub		\$64,$len		# len-=64*1
	movdqa		$xd1,0x30(%rsp)
	jmp		.Loop_tail4x

.align	32
.L128_or_more4x:
	movdqu		0x00($inp),$xt0		# xor with input
	movdqu		0x10($inp),$xt1
	movdqu		0x20($inp),$xt2
	movdqu		0x30($inp),$xt3
	pxor		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	pxor		$xb0,$xt1
	pxor		$xc0,$xt2
	pxor		$xd0,$xt3

	 movdqu		$xt0,0x00($out)
	movdqu		0x40($inp),$xt0
	 movdqu		$xt1,0x10($out)
	movdqu		0x50($inp),$xt1
	 movdqu		$xt2,0x20($out)
	movdqu		0x60($inp),$xt2
	 movdqu		$xt3,0x30($out)
	movdqu		0x70($inp),$xt3
	pxor		0x10(%rsp),$xt0
	pxor		$xb1,$xt1
	pxor		$xc1,$xt2
	pxor		$xd1,$xt3
	movdqu		$xt0,0x40($out)
	movdqu		$xt1,0x50($out)
	movdqu		$xt2,0x60($out)
	movdqu		$xt3,0x70($out)
	je		.Ldone4x

	movdqa		0x20(%rsp),$xt0		# $xaN is offloaded, remember?
	lea		0x80($inp),$inp		# inp+=64*2
	xor		%r10,%r10
	movdqa		$xt0,0x00(%rsp)
	movdqa		$xb2,0x10(%rsp)
	lea		0x80($out),$out		# out+=64*2
	movdqa		$xc2,0x20(%rsp)
	sub		\$128,$len		# len-=64*2
	movdqa		$xd2,0x30(%rsp)
	jmp		.Loop_tail4x

.align	32
.L192_or_more4x:
	movdqu		0x00($inp),$xt0		# xor with input
	movdqu		0x10($inp),$xt1
	movdqu		0x20($inp),$xt2
	movdqu		0x30($inp),$xt3
	pxor		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	pxor		$xb0,$xt1
	pxor		$xc0,$xt2
	pxor		$xd0,$xt3

	 movdqu		$xt0,0x00($out)
	movdqu		0x40($inp),$xt0
	 movdqu		$xt1,0x10($out)
	movdqu		0x50($inp),$xt1
	 movdqu		$xt2,0x20($out)
	movdqu		0x60($inp),$xt2
	 movdqu		$xt3,0x30($out)
	movdqu		0x70($inp),$xt3
	lea		0x80($inp),$inp		# size optimization
	pxor		0x10(%rsp),$xt0
	pxor		$xb1,$xt1
	pxor		$xc1,$xt2
	pxor		$xd1,$xt3

	 movdqu		$xt0,0x40($out)
	movdqu		0x00($inp),$xt0
	 movdqu		$xt1,0x50($out)
	movdqu		0x10($inp),$xt1
	 movdqu		$xt2,0x60($out)
	movdqu		0x20($inp),$xt2
	 movdqu		$xt3,0x70($out)
	 lea		0x80($out),$out		# size optimization
	movdqu		0x30($inp),$xt3
	pxor		0x20(%rsp),$xt0
	pxor		$xb2,$xt1
	pxor		$xc2,$xt2
	pxor		$xd2,$xt3
	movdqu		$xt0,0x00($out)
	movdqu		$xt1,0x10($out)
	movdqu		$xt2,0x20($out)
	movdqu		$xt3,0x30($out)
	je		.Ldone4x

	movdqa		0x30(%rsp),$xt0		# $xaN is offloaded, remember?
	lea		0x40($inp),$inp		# inp+=64*3
	xor		%r10,%r10
	movdqa		$xt0,0x00(%rsp)
	movdqa		$xb3,0x10(%rsp)
	lea		0x40($out),$out		# out+=64*3
	movdqa		$xc3,0x20(%rsp)
	sub		\$192,$len		# len-=64*3
	movdqa		$xd3,0x30(%rsp)

.Loop_tail4x:
	movzb		($inp,%r10),%eax
	movzb		(%rsp,%r10),%ecx
	lea		1(%r10),%r10
	xor		%ecx,%eax
	mov		%al,-1($out,%r10)
	dec		$len
	jnz		.Loop_tail4x

.Ldone4x:
___
$code.=<<___	if ($win64);
	movaps		-0xa8(%r9),%xmm6
	movaps		-0x98(%r9),%xmm7
	movaps		-0x88(%r9),%xmm8
	movaps		-0x78(%r9),%xmm9
	movaps		-0x68(%r9),%xmm10
	movaps		-0x58(%r9),%xmm11
	movaps		-0x48(%r9),%xmm12
	movaps		-0x38(%r9),%xmm13
	movaps		-0x28(%r9),%xmm14
	movaps		-0x18(%r9),%xmm15
___
$code.=<<___;
	lea		(%r9),%rsp
.cfi_def_cfa_register	rsp
.L4x_epilogue:
	ret
.cfi_endproc
.size	ChaCha20_ctr32_ssse3_4x,.-ChaCha20_ctr32_ssse3_4x
___
}

########################################################################
# AVX2 code path
if ($avx>1) {
my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
    $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
	"%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);

sub AVX2_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
my @x=map("\"$_\"",@xx);

	# Consider the order in which the variables are addressed, by
	# their index:
	#
	#	a   b   c   d
	#
	#	0   4   8  12 < even round
	#	1   5   9  13
	#	2   6  10  14
	#	3   7  11  15
	#	0   5  10  15 < odd round
	#	1   6  11  12
	#	2   7   8  13
	#	3   4   9  14
	#
	# The 'a', 'b' and 'd' values are permanently allocated in
	# registers, @x[0..7,12..15], while the 'c' values are maintained
	# in memory. If you observe the 'c' column, you'll notice that a
	# pair of 'c's stays invariant between rounds, so they only have
	# to be reloaded once per round, in the middle. This is why you'll
	# see a bunch of 'c' stores and loads in the middle, but none at
	# the beginning or end.

	(
	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",	# Q1
	"&vpxor		(@x[$d0],@x[$a0],@x[$d0])",
	"&vpshufb	(@x[$d0],@x[$d0],$t1)",
	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",	# Q2
	 "&vpxor	(@x[$d1],@x[$a1],@x[$d1])",
	 "&vpshufb	(@x[$d1],@x[$d1],$t1)",

	"&vpaddd	($xc,$xc,@x[$d0])",
	"&vpxor		(@x[$b0],$xc,@x[$b0])",
	"&vpslld	($t0,@x[$b0],12)",
	"&vpsrld	(@x[$b0],@x[$b0],20)",
	"&vpor		(@x[$b0],$t0,@x[$b0])",
	"&vbroadcasti128($t0,'(%r11)')",		# .Lrot24(%rip)
	 "&vpaddd	($xc_,$xc_,@x[$d1])",
	 "&vpxor	(@x[$b1],$xc_,@x[$b1])",
	 "&vpslld	($t1,@x[$b1],12)",
	 "&vpsrld	(@x[$b1],@x[$b1],20)",
	 "&vpor		(@x[$b1],$t1,@x[$b1])",

	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",
	"&vpxor		(@x[$d0],@x[$a0],@x[$d0])",
	"&vpshufb	(@x[$d0],@x[$d0],$t0)",
	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",
	 "&vpxor	(@x[$d1],@x[$a1],@x[$d1])",
	 "&vpshufb	(@x[$d1],@x[$d1],$t0)",

	"&vpaddd	($xc,$xc,@x[$d0])",
	"&vpxor		(@x[$b0],$xc,@x[$b0])",
	"&vpslld	($t1,@x[$b0],7)",
	"&vpsrld	(@x[$b0],@x[$b0],25)",
	"&vpor		(@x[$b0],$t1,@x[$b0])",
	"&vbroadcasti128($t1,'(%r10)')",		# .Lrot16(%rip)
	 "&vpaddd	($xc_,$xc_,@x[$d1])",
	 "&vpxor	(@x[$b1],$xc_,@x[$b1])",
	 "&vpslld	($t0,@x[$b1],7)",
	 "&vpsrld	(@x[$b1],@x[$b1],25)",
	 "&vpor		(@x[$b1],$t0,@x[$b1])",

	"&vmovdqa	(\"`32*($c0-8)`(%rsp)\",$xc)",	# reload pair of 'c's
	 "&vmovdqa	(\"`32*($c1-8)`(%rsp)\",$xc_)",
	"&vmovdqa	($xc,\"`32*($c2-8)`(%rsp)\")",
	 "&vmovdqa	($xc_,\"`32*($c3-8)`(%rsp)\")",

	"&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",	# Q3
	"&vpxor		(@x[$d2],@x[$a2],@x[$d2])",
	"&vpshufb	(@x[$d2],@x[$d2],$t1)",
	 "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",	# Q4
	 "&vpxor	(@x[$d3],@x[$a3],@x[$d3])",
	 "&vpshufb	(@x[$d3],@x[$d3],$t1)",

	"&vpaddd	($xc,$xc,@x[$d2])",
	"&vpxor		(@x[$b2],$xc,@x[$b2])",
	"&vpslld	($t0,@x[$b2],12)",
	"&vpsrld	(@x[$b2],@x[$b2],20)",
	"&vpor		(@x[$b2],$t0,@x[$b2])",
	"&vbroadcasti128($t0,'(%r11)')",		# .Lrot24(%rip)
	 "&vpaddd	($xc_,$xc_,@x[$d3])",
	 "&vpxor	(@x[$b3],$xc_,@x[$b3])",
	 "&vpslld	($t1,@x[$b3],12)",
	 "&vpsrld	(@x[$b3],@x[$b3],20)",
	 "&vpor		(@x[$b3],$t1,@x[$b3])",

	"&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",
	"&vpxor		(@x[$d2],@x[$a2],@x[$d2])",
	"&vpshufb	(@x[$d2],@x[$d2],$t0)",
	 "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",
	 "&vpxor	(@x[$d3],@x[$a3],@x[$d3])",
	 "&vpshufb	(@x[$d3],@x[$d3],$t0)",

	"&vpaddd	($xc,$xc,@x[$d2])",
	"&vpxor		(@x[$b2],$xc,@x[$b2])",
	"&vpslld	($t1,@x[$b2],7)",
	"&vpsrld	(@x[$b2],@x[$b2],25)",
	"&vpor		(@x[$b2],$t1,@x[$b2])",
	"&vbroadcasti128($t1,'(%r10)')",		# .Lrot16(%rip)
	 "&vpaddd	($xc_,$xc_,@x[$d3])",
	 "&vpxor	(@x[$b3],$xc_,@x[$b3])",
	 "&vpslld	($t0,@x[$b3],7)",
	 "&vpsrld	(@x[$b3],@x[$b3],25)",
	 "&vpor		(@x[$b3],$t0,@x[$b3])"
	);
}

my $xframe = $win64 ? 0xa8 : 8;

$code.=<<___;
.globl	ChaCha20_ctr32_avx2
.type	ChaCha20_ctr32_avx2,\@function,5
.align	32
ChaCha20_ctr32_avx2:
.cfi_startproc
	_CET_ENDBR
	mov		%rsp,%r9		# frame register
.cfi_def_cfa_register	r9
	sub		\$0x280+$xframe,%rsp
	and		\$-32,%rsp
___
$code.=<<___	if ($win64);
	movaps		%xmm6,-0xa8(%r9)
	movaps		%xmm7,-0x98(%r9)
	movaps		%xmm8,-0x88(%r9)
	movaps		%xmm9,-0x78(%r9)
	movaps		%xmm10,-0x68(%r9)
	movaps		%xmm11,-0x58(%r9)
	movaps		%xmm12,-0x48(%r9)
	movaps		%xmm13,-0x38(%r9)
	movaps		%xmm14,-0x28(%r9)
	movaps		%xmm15,-0x18(%r9)
.L8x_body:
___
$code.=<<___;
	vzeroupper

	################ stack layout
	# +0x00		SIMD equivalent of @x[8-11]
	# ...
	# +0x80		constant copy of key[0-2] smashed by lanes
	# ...
	# +0x200	SIMD counters (with nonce smashed by lanes)
	# ...
	# +0x280

	vbroadcasti128	.Lsigma(%rip),$xa3	# key[0]
	vbroadcasti128	($key),$xb3		# key[1]
	vbroadcasti128	16($key),$xt3		# key[2]
	vbroadcasti128	($counter),$xd3		# key[3]
	lea		0x100(%rsp),%rcx	# size optimization
	lea		0x200(%rsp),%rax	# size optimization
	lea		.Lrot16(%rip),%r10
	lea		.Lrot24(%rip),%r11

	vpshufd		\$0x00,$xa3,$xa0	# smash key by lanes...
	vpshufd		\$0x55,$xa3,$xa1
	vmovdqa		$xa0,0x80-0x100(%rcx)	# ... and offload
	vpshufd		\$0xaa,$xa3,$xa2
	vmovdqa		$xa1,0xa0-0x100(%rcx)
	vpshufd		\$0xff,$xa3,$xa3
	vmovdqa		$xa2,0xc0-0x100(%rcx)
	vmovdqa		$xa3,0xe0-0x100(%rcx)

	vpshufd		\$0x00,$xb3,$xb0
	vpshufd		\$0x55,$xb3,$xb1
	vmovdqa		$xb0,0x100-0x100(%rcx)
	vpshufd		\$0xaa,$xb3,$xb2
	vmovdqa		$xb1,0x120-0x100(%rcx)
	vpshufd		\$0xff,$xb3,$xb3
	vmovdqa		$xb2,0x140-0x100(%rcx)
	vmovdqa		$xb3,0x160-0x100(%rcx)

	vpshufd		\$0x00,$xt3,$xt0	# "xc0"
	vpshufd		\$0x55,$xt3,$xt1	# "xc1"
	vmovdqa		$xt0,0x180-0x200(%rax)
	vpshufd		\$0xaa,$xt3,$xt2	# "xc2"
	vmovdqa		$xt1,0x1a0-0x200(%rax)
	vpshufd		\$0xff,$xt3,$xt3	# "xc3"
	vmovdqa		$xt2,0x1c0-0x200(%rax)
	vmovdqa		$xt3,0x1e0-0x200(%rax)

	vpshufd		\$0x00,$xd3,$xd0
	vpshufd		\$0x55,$xd3,$xd1
	vpaddd		.Lincy(%rip),$xd0,$xd0	# don't save counters yet
	vpshufd		\$0xaa,$xd3,$xd2
	vmovdqa		$xd1,0x220-0x200(%rax)
	vpshufd		\$0xff,$xd3,$xd3
	vmovdqa		$xd2,0x240-0x200(%rax)
	vmovdqa		$xd3,0x260-0x200(%rax)

	jmp		.Loop_enter8x

.align	32
.Loop_outer8x:
	vmovdqa		0x80-0x100(%rcx),$xa0	# re-load smashed key
	vmovdqa		0xa0-0x100(%rcx),$xa1
	vmovdqa		0xc0-0x100(%rcx),$xa2
	vmovdqa		0xe0-0x100(%rcx),$xa3
	vmovdqa		0x100-0x100(%rcx),$xb0
	vmovdqa		0x120-0x100(%rcx),$xb1
	vmovdqa		0x140-0x100(%rcx),$xb2
	vmovdqa		0x160-0x100(%rcx),$xb3
	vmovdqa		0x180-0x200(%rax),$xt0	# "xc0"
	vmovdqa		0x1a0-0x200(%rax),$xt1	# "xc1"
	vmovdqa		0x1c0-0x200(%rax),$xt2	# "xc2"
	vmovdqa		0x1e0-0x200(%rax),$xt3	# "xc3"
	vmovdqa		0x200-0x200(%rax),$xd0
	vmovdqa		0x220-0x200(%rax),$xd1
	vmovdqa		0x240-0x200(%rax),$xd2
	vmovdqa		0x260-0x200(%rax),$xd3
	vpaddd		.Leight(%rip),$xd0,$xd0	# next SIMD counters

.Loop_enter8x:
	vmovdqa		$xt2,0x40(%rsp)		# SIMD equivalent of "@x[10]"
	vmovdqa		$xt3,0x60(%rsp)		# SIMD equivalent of "@x[11]"
	vbroadcasti128	(%r10),$xt3
	vmovdqa		$xd0,0x200-0x200(%rax)	# save SIMD counters
	mov		\$10,%eax
	jmp		.Loop8x

.align	32
.Loop8x:
___
	foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
	foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	dec		%eax
	jnz		.Loop8x

	lea		0x200(%rsp),%rax	# size optimization
	vpaddd		0x80-0x100(%rcx),$xa0,$xa0	# accumulate key
	vpaddd		0xa0-0x100(%rcx),$xa1,$xa1
	vpaddd		0xc0-0x100(%rcx),$xa2,$xa2
	vpaddd		0xe0-0x100(%rcx),$xa3,$xa3

	vpunpckldq	$xa1,$xa0,$xt2		# "de-interlace" data
	vpunpckldq	$xa3,$xa2,$xt3
	vpunpckhdq	$xa1,$xa0,$xa0
	vpunpckhdq	$xa3,$xa2,$xa2
	vpunpcklqdq	$xt3,$xt2,$xa1		# "a0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "a1"
	vpunpcklqdq	$xa2,$xa0,$xa3		# "a2"
	vpunpckhqdq	$xa2,$xa0,$xa0		# "a3"
___
	($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
$code.=<<___;
	vpaddd		0x100-0x100(%rcx),$xb0,$xb0
	vpaddd		0x120-0x100(%rcx),$xb1,$xb1
	vpaddd		0x140-0x100(%rcx),$xb2,$xb2
	vpaddd		0x160-0x100(%rcx),$xb3,$xb3

	vpunpckldq	$xb1,$xb0,$xt2
	vpunpckldq	$xb3,$xb2,$xt3
	vpunpckhdq	$xb1,$xb0,$xb0
	vpunpckhdq	$xb3,$xb2,$xb2
	vpunpcklqdq	$xt3,$xt2,$xb1		# "b0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "b1"
	vpunpcklqdq	$xb2,$xb0,$xb3		# "b2"
	vpunpckhqdq	$xb2,$xb0,$xb0		# "b3"
___
	($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
$code.=<<___;
	vperm2i128	\$0x20,$xb0,$xa0,$xt3	# "de-interlace" further
	vperm2i128	\$0x31,$xb0,$xa0,$xb0
	vperm2i128	\$0x20,$xb1,$xa1,$xa0
	vperm2i128	\$0x31,$xb1,$xa1,$xb1
	vperm2i128	\$0x20,$xb2,$xa2,$xa1
	vperm2i128	\$0x31,$xb2,$xa2,$xb2
	vperm2i128	\$0x20,$xb3,$xa3,$xa2
	vperm2i128	\$0x31,$xb3,$xa3,$xb3
___
	($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
	my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
$code.=<<___;
	vmovdqa		$xa0,0x00(%rsp)		# offload $xaN
	vmovdqa		$xa1,0x20(%rsp)
	vmovdqa		0x40(%rsp),$xc2		# $xa0
	vmovdqa		0x60(%rsp),$xc3		# $xa1

	vpaddd		0x180-0x200(%rax),$xc0,$xc0
	vpaddd		0x1a0-0x200(%rax),$xc1,$xc1
	vpaddd		0x1c0-0x200(%rax),$xc2,$xc2
	vpaddd		0x1e0-0x200(%rax),$xc3,$xc3

	vpunpckldq	$xc1,$xc0,$xt2
	vpunpckldq	$xc3,$xc2,$xt3
	vpunpckhdq	$xc1,$xc0,$xc0
	vpunpckhdq	$xc3,$xc2,$xc2
	vpunpcklqdq	$xt3,$xt2,$xc1		# "c0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "c1"
	vpunpcklqdq	$xc2,$xc0,$xc3		# "c2"
	vpunpckhqdq	$xc2,$xc0,$xc0		# "c3"
___
	($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
$code.=<<___;
	vpaddd		0x200-0x200(%rax),$xd0,$xd0
	vpaddd		0x220-0x200(%rax),$xd1,$xd1
	vpaddd		0x240-0x200(%rax),$xd2,$xd2
	vpaddd		0x260-0x200(%rax),$xd3,$xd3

	vpunpckldq	$xd1,$xd0,$xt2
	vpunpckldq	$xd3,$xd2,$xt3
	vpunpckhdq	$xd1,$xd0,$xd0
	vpunpckhdq	$xd3,$xd2,$xd2
	vpunpcklqdq	$xt3,$xt2,$xd1		# "d0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "d1"
	vpunpcklqdq	$xd2,$xd0,$xd3		# "d2"
	vpunpckhqdq	$xd2,$xd0,$xd0		# "d3"
___
	($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
$code.=<<___;
	vperm2i128	\$0x20,$xd0,$xc0,$xt3	# "de-interlace" further
	vperm2i128	\$0x31,$xd0,$xc0,$xd0
	vperm2i128	\$0x20,$xd1,$xc1,$xc0
	vperm2i128	\$0x31,$xd1,$xc1,$xd1
	vperm2i128	\$0x20,$xd2,$xc2,$xc1
	vperm2i128	\$0x31,$xd2,$xc2,$xd2
	vperm2i128	\$0x20,$xd3,$xc3,$xc2
	vperm2i128	\$0x31,$xd3,$xc3,$xd3
___
	($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
	($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
	($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
	($xa0,$xa1)=($xt2,$xt3);
$code.=<<___;
	vmovdqa		0x00(%rsp),$xa0		# $xaN was offloaded, remember?
	vmovdqa		0x20(%rsp),$xa1

	cmp		\$64*8,$len
	jb		.Ltail8x

	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vpxor		0x40($inp),$xc0,$xc0
	vpxor		0x60($inp),$xd0,$xd0
	lea		0x80($inp),$inp		# size optimization
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	vmovdqu		$xc0,0x40($out)
	vmovdqu		$xd0,0x60($out)
	lea		0x80($out),$out		# size optimization

	vpxor		0x00($inp),$xa1,$xa1
	vpxor		0x20($inp),$xb1,$xb1
	vpxor		0x40($inp),$xc1,$xc1
	vpxor		0x60($inp),$xd1,$xd1
	lea		0x80($inp),$inp		# size optimization
	vmovdqu		$xa1,0x00($out)
	vmovdqu		$xb1,0x20($out)
	vmovdqu		$xc1,0x40($out)
	vmovdqu		$xd1,0x60($out)
	lea		0x80($out),$out		# size optimization

	vpxor		0x00($inp),$xa2,$xa2
	vpxor		0x20($inp),$xb2,$xb2
	vpxor		0x40($inp),$xc2,$xc2
	vpxor		0x60($inp),$xd2,$xd2
	lea		0x80($inp),$inp		# size optimization
	vmovdqu		$xa2,0x00($out)
	vmovdqu		$xb2,0x20($out)
	vmovdqu		$xc2,0x40($out)
	vmovdqu		$xd2,0x60($out)
	lea		0x80($out),$out		# size optimization

	vpxor		0x00($inp),$xa3,$xa3
	vpxor		0x20($inp),$xb3,$xb3
	vpxor		0x40($inp),$xc3,$xc3
	vpxor		0x60($inp),$xd3,$xd3
	lea		0x80($inp),$inp		# size optimization
	vmovdqu		$xa3,0x00($out)
	vmovdqu		$xb3,0x20($out)
	vmovdqu		$xc3,0x40($out)
	vmovdqu		$xd3,0x60($out)
	lea		0x80($out),$out		# size optimization

	sub		\$64*8,$len
	jnz		.Loop_outer8x

	jmp		.Ldone8x

.Ltail8x:
	cmp		\$448,$len
	jae		.L448_or_more8x
	cmp		\$384,$len
	jae		.L384_or_more8x
	cmp		\$320,$len
	jae		.L320_or_more8x
	cmp		\$256,$len
	jae		.L256_or_more8x
	cmp		\$192,$len
	jae		.L192_or_more8x
	cmp		\$128,$len
	jae		.L128_or_more8x
	cmp		\$64,$len
	jae		.L64_or_more8x

	xor		%r10,%r10
	vmovdqa		$xa0,0x00(%rsp)
	vmovdqa		$xb0,0x20(%rsp)
	jmp		.Loop_tail8x

.align	32
.L64_or_more8x:
	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	je		.Ldone8x

	lea		0x40($inp),$inp		# inp+=64*1
	xor		%r10,%r10
	vmovdqa		$xc0,0x00(%rsp)
	lea		0x40($out),$out		# out+=64*1
	sub		\$64,$len		# len-=64*1
	vmovdqa		$xd0,0x20(%rsp)
	jmp		.Loop_tail8x

.align	32
.L128_or_more8x:
	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vpxor		0x40($inp),$xc0,$xc0
	vpxor		0x60($inp),$xd0,$xd0
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	vmovdqu		$xc0,0x40($out)
	vmovdqu		$xd0,0x60($out)
	je		.Ldone8x

	lea		0x80($inp),$inp		# inp+=64*2
	xor		%r10,%r10
	vmovdqa		$xa1,0x00(%rsp)
	lea		0x80($out),$out		# out+=64*2
	sub		\$128,$len		# len-=64*2
	vmovdqa		$xb1,0x20(%rsp)
	jmp		.Loop_tail8x

.align	32
.L192_or_more8x:
	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vpxor		0x40($inp),$xc0,$xc0
	vpxor		0x60($inp),$xd0,$xd0
	vpxor		0x80($inp),$xa1,$xa1
	vpxor		0xa0($inp),$xb1,$xb1
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	vmovdqu		$xc0,0x40($out)
	vmovdqu		$xd0,0x60($out)
	vmovdqu		$xa1,0x80($out)
	vmovdqu		$xb1,0xa0($out)
	je		.Ldone8x

	lea		0xc0($inp),$inp		# inp+=64*3
	xor		%r10,%r10
	vmovdqa		$xc1,0x00(%rsp)
	lea		0xc0($out),$out		# out+=64*3
	sub		\$192,$len		# len-=64*3
	vmovdqa		$xd1,0x20(%rsp)
	jmp		.Loop_tail8x

.align	32
.L256_or_more8x:
	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vpxor		0x40($inp),$xc0,$xc0
	vpxor		0x60($inp),$xd0,$xd0
	vpxor		0x80($inp),$xa1,$xa1
	vpxor		0xa0($inp),$xb1,$xb1
	vpxor		0xc0($inp),$xc1,$xc1
	vpxor		0xe0($inp),$xd1,$xd1
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	vmovdqu		$xc0,0x40($out)
	vmovdqu		$xd0,0x60($out)
	vmovdqu		$xa1,0x80($out)
	vmovdqu		$xb1,0xa0($out)
	vmovdqu		$xc1,0xc0($out)
	vmovdqu		$xd1,0xe0($out)
	je		.Ldone8x

	lea		0x100($inp),$inp	# inp+=64*4
	xor		%r10,%r10
	vmovdqa		$xa2,0x00(%rsp)
	lea		0x100($out),$out	# out+=64*4
	sub		\$256,$len		# len-=64*4
	vmovdqa		$xb2,0x20(%rsp)
	jmp		.Loop_tail8x

.align	32
.L320_or_more8x:
	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vpxor		0x40($inp),$xc0,$xc0
	vpxor		0x60($inp),$xd0,$xd0
	vpxor		0x80($inp),$xa1,$xa1
	vpxor		0xa0($inp),$xb1,$xb1
	vpxor		0xc0($inp),$xc1,$xc1
	vpxor		0xe0($inp),$xd1,$xd1
	vpxor		0x100($inp),$xa2,$xa2
	vpxor		0x120($inp),$xb2,$xb2
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	vmovdqu		$xc0,0x40($out)
	vmovdqu		$xd0,0x60($out)
	vmovdqu		$xa1,0x80($out)
	vmovdqu		$xb1,0xa0($out)
	vmovdqu		$xc1,0xc0($out)
	vmovdqu		$xd1,0xe0($out)
	vmovdqu		$xa2,0x100($out)
	vmovdqu		$xb2,0x120($out)
	je		.Ldone8x

	lea		0x140($inp),$inp	# inp+=64*5
	xor		%r10,%r10
	vmovdqa		$xc2,0x00(%rsp)
	lea		0x140($out),$out	# out+=64*5
	sub		\$320,$len		# len-=64*5
	vmovdqa		$xd2,0x20(%rsp)
	jmp		.Loop_tail8x

.align	32
.L384_or_more8x:
	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vpxor		0x40($inp),$xc0,$xc0
	vpxor		0x60($inp),$xd0,$xd0
	vpxor		0x80($inp),$xa1,$xa1
	vpxor		0xa0($inp),$xb1,$xb1
	vpxor		0xc0($inp),$xc1,$xc1
	vpxor		0xe0($inp),$xd1,$xd1
	vpxor		0x100($inp),$xa2,$xa2
	vpxor		0x120($inp),$xb2,$xb2
	vpxor		0x140($inp),$xc2,$xc2
	vpxor		0x160($inp),$xd2,$xd2
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	vmovdqu		$xc0,0x40($out)
	vmovdqu		$xd0,0x60($out)
	vmovdqu		$xa1,0x80($out)
	vmovdqu		$xb1,0xa0($out)
	vmovdqu		$xc1,0xc0($out)
	vmovdqu		$xd1,0xe0($out)
	vmovdqu		$xa2,0x100($out)
	vmovdqu		$xb2,0x120($out)
	vmovdqu		$xc2,0x140($out)
	vmovdqu		$xd2,0x160($out)
	je		.Ldone8x

	lea		0x180($inp),$inp	# inp+=64*6
	xor		%r10,%r10
	vmovdqa		$xa3,0x00(%rsp)
	lea		0x180($out),$out	# out+=64*6
	sub		\$384,$len		# len-=64*6
	vmovdqa		$xb3,0x20(%rsp)
	jmp		.Loop_tail8x

.align	32
.L448_or_more8x:
	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vpxor		0x40($inp),$xc0,$xc0
	vpxor		0x60($inp),$xd0,$xd0
	vpxor		0x80($inp),$xa1,$xa1
	vpxor		0xa0($inp),$xb1,$xb1
	vpxor		0xc0($inp),$xc1,$xc1
	vpxor		0xe0($inp),$xd1,$xd1
	vpxor		0x100($inp),$xa2,$xa2
	vpxor		0x120($inp),$xb2,$xb2
	vpxor		0x140($inp),$xc2,$xc2
	vpxor		0x160($inp),$xd2,$xd2
	vpxor		0x180($inp),$xa3,$xa3
	vpxor		0x1a0($inp),$xb3,$xb3
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	vmovdqu		$xc0,0x40($out)
	vmovdqu		$xd0,0x60($out)
	vmovdqu		$xa1,0x80($out)
	vmovdqu		$xb1,0xa0($out)
	vmovdqu		$xc1,0xc0($out)
	vmovdqu		$xd1,0xe0($out)
	vmovdqu		$xa2,0x100($out)
	vmovdqu		$xb2,0x120($out)
	vmovdqu		$xc2,0x140($out)
	vmovdqu		$xd2,0x160($out)
	vmovdqu		$xa3,0x180($out)
	vmovdqu		$xb3,0x1a0($out)
	je		.Ldone8x

	lea		0x1c0($inp),$inp	# inp+=64*7
	xor		%r10,%r10
	vmovdqa		$xc3,0x00(%rsp)
	lea		0x1c0($out),$out	# out+=64*7
	sub		\$448,$len		# len-=64*7
	vmovdqa		$xd3,0x20(%rsp)

.Loop_tail8x:
	movzb		($inp,%r10),%eax
	movzb		(%rsp,%r10),%ecx
	lea		1(%r10),%r10
	xor		%ecx,%eax
	mov		%al,-1($out,%r10)
	dec		$len
	jnz		.Loop_tail8x

.Ldone8x:
	vzeroall
___
$code.=<<___	if ($win64);
	movaps		-0xa8(%r9),%xmm6
	movaps		-0x98(%r9),%xmm7
	movaps		-0x88(%r9),%xmm8
	movaps		-0x78(%r9),%xmm9
	movaps		-0x68(%r9),%xmm10
	movaps		-0x58(%r9),%xmm11
	movaps		-0x48(%r9),%xmm12
	movaps		-0x38(%r9),%xmm13
	movaps		-0x28(%r9),%xmm14
	movaps		-0x18(%r9),%xmm15
___
$code.=<<___;
	lea		(%r9),%rsp
.cfi_def_cfa_register	rsp
.L8x_epilogue:
	ret
.cfi_endproc
.size	ChaCha20_ctr32_avx2,.-ChaCha20_ctr32_avx2
___
}

########################################################################
# AVX512 code paths
if ($avx>2) {
# This one handles shorter inputs...

my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20));
my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));

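# AVX-512F provides a native per-lane rotate (vprold), so neither the
# shift/or pair nor the pshufb masks used in the paths above are needed,
# which is what shortens the critical path here.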
sub AVX512ROUND {	# critical path is 14 "SIMD ticks" per round
	&vpaddd	($a,$a,$b);
	&vpxord	($d,$d,$a);
	&vprold	($d,$d,16);

	&vpaddd	($c,$c,$d);
	&vpxord	($b,$b,$c);
	&vprold	($b,$b,12);

	&vpaddd	($a,$a,$b);
	&vpxord	($d,$d,$a);
	&vprold	($d,$d,8);

	&vpaddd	($c,$c,$d);
	&vpxord	($b,$b,$c);
	&vprold	($b,$b,7);
}

my $xframe = $win64 ? 32+8 : 8;

$code.=<<___;
.type	ChaCha20_avx512,\@function,5
.align	32
ChaCha20_avx512:
.LChaCha20_avx512:
.cfi_startproc
	mov	%rsp,%r9		# frame pointer
.cfi_def_cfa_register	r9
	cmp	\$512,$len
	ja	.LChaCha20_16x

	sub	\$64+$xframe,%rsp
___
$code.=<<___	if ($win64);
	movaps	%xmm6,-0x28(%r9)
	movaps	%xmm7,-0x18(%r9)
.Lavx512_body:
___
$code.=<<___;
	vbroadcasti32x4	.Lsigma(%rip),$a
	vbroadcasti32x4	($key),$b
	vbroadcasti32x4	16($key),$c
	vbroadcasti32x4	($counter),$d

	vmovdqa32	$a,$a_
	vmovdqa32	$b,$b_
	vmovdqa32	$c,$c_
	vpaddd		.Lzeroz(%rip),$d,$d
	vmovdqa32	.Lfourz(%rip),$fourz
	mov		\$10,$counter	# reuse $counter
	vmovdqa32	$d,$d_
	jmp		.Loop_avx512

.align	16
.Loop_outer_avx512:
	vmovdqa32	$a_,$a
	vmovdqa32	$b_,$b
	vmovdqa32	$c_,$c
	vpaddd		$fourz,$d_,$d
	mov		\$10,$counter
	vmovdqa32	$d,$d_
	jmp		.Loop_avx512

.align	32
.Loop_avx512:
___
	&AVX512ROUND();
	&vpshufd	($c,$c,0b01001110);
	&vpshufd	($b,$b,0b00111001);
	&vpshufd	($d,$d,0b10010011);

	&AVX512ROUND();
	&vpshufd	($c,$c,0b01001110);
	&vpshufd	($b,$b,0b10010011);
	&vpshufd	($d,$d,0b00111001);

	&dec		($counter);
	&jnz		(".Loop_avx512");

$code.=<<___;
	vpaddd		$a_,$a,$a
	vpaddd		$b_,$b,$b
	vpaddd		$c_,$c,$c
	vpaddd		$d_,$d,$d

	sub		\$64,$len
	jb		.Ltail64_avx512

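	# %x#$a is xlate notation for the low 128 bits (the xmm form) of
	# the register held in $a.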
1880	vpxor		0x00($inp),%x#$a,$t0	# xor with input
1881	vpxor		0x10($inp),%x#$b,$t1
1882	vpxor		0x20($inp),%x#$c,$t2
1883	vpxor		0x30($inp),%x#$d,$t3
1884	lea		0x40($inp),$inp		# inp+=64
1885
1886	vmovdqu		$t0,0x00($out)		# write output
1887	vmovdqu		$t1,0x10($out)
1888	vmovdqu		$t2,0x20($out)
1889	vmovdqu		$t3,0x30($out)
1890	lea		0x40($out),$out		# out+=64
1891
1892	jz		.Ldone_avx512
1893
1894	vextracti32x4	\$1,$a,$t0
1895	vextracti32x4	\$1,$b,$t1
1896	vextracti32x4	\$1,$c,$t2
1897	vextracti32x4	\$1,$d,$t3
1898
1899	sub		\$64,$len
1900	jb		.Ltail_avx512
1901
1902	vpxor		0x00($inp),$t0,$t0	# xor with input
1903	vpxor		0x10($inp),$t1,$t1
1904	vpxor		0x20($inp),$t2,$t2
1905	vpxor		0x30($inp),$t3,$t3
1906	lea		0x40($inp),$inp		# inp+=64
1907
1908	vmovdqu		$t0,0x00($out)		# write output
1909	vmovdqu		$t1,0x10($out)
1910	vmovdqu		$t2,0x20($out)
1911	vmovdqu		$t3,0x30($out)
1912	lea		0x40($out),$out		# out+=64
1913
1914	jz		.Ldone_avx512
1915
1916	vextracti32x4	\$2,$a,$t0
1917	vextracti32x4	\$2,$b,$t1
1918	vextracti32x4	\$2,$c,$t2
1919	vextracti32x4	\$2,$d,$t3
1920
1921	sub		\$64,$len
1922	jb		.Ltail_avx512
1923
1924	vpxor		0x00($inp),$t0,$t0	# xor with input
1925	vpxor		0x10($inp),$t1,$t1
1926	vpxor		0x20($inp),$t2,$t2
1927	vpxor		0x30($inp),$t3,$t3
1928	lea		0x40($inp),$inp		# inp+=64
1929
1930	vmovdqu		$t0,0x00($out)		# write output
1931	vmovdqu		$t1,0x10($out)
1932	vmovdqu		$t2,0x20($out)
1933	vmovdqu		$t3,0x30($out)
1934	lea		0x40($out),$out		# out+=64
1935
1936	jz		.Ldone_avx512
1937
1938	vextracti32x4	\$3,$a,$t0
1939	vextracti32x4	\$3,$b,$t1
1940	vextracti32x4	\$3,$c,$t2
1941	vextracti32x4	\$3,$d,$t3
1942
1943	sub		\$64,$len
1944	jb		.Ltail_avx512
1945
1946	vpxor		0x00($inp),$t0,$t0	# xor with input
1947	vpxor		0x10($inp),$t1,$t1
1948	vpxor		0x20($inp),$t2,$t2
1949	vpxor		0x30($inp),$t3,$t3
1950	lea		0x40($inp),$inp		# inp+=64
1951
1952	vmovdqu		$t0,0x00($out)		# write output
1953	vmovdqu		$t1,0x10($out)
1954	vmovdqu		$t2,0x20($out)
1955	vmovdqu		$t3,0x30($out)
1956	lea		0x40($out),$out		# out+=64
1957
1958	jnz		.Loop_outer_avx512
1959
1960	jmp		.Ldone_avx512
1961
1962.align	16
1963.Ltail64_avx512:
1964	vmovdqa		%x#$a,0x00(%rsp)
1965	vmovdqa		%x#$b,0x10(%rsp)
1966	vmovdqa		%x#$c,0x20(%rsp)
1967	vmovdqa		%x#$d,0x30(%rsp)
1968	add		\$64,$len
1969	jmp		.Loop_tail_avx512
1970
1971.align	16
1972.Ltail_avx512:
1973	vmovdqa		$t0,0x00(%rsp)
1974	vmovdqa		$t1,0x10(%rsp)
1975	vmovdqa		$t2,0x20(%rsp)
1976	vmovdqa		$t3,0x30(%rsp)
1977	add		\$64,$len
1978
1979.Loop_tail_avx512:
1980	movzb		($inp,$counter),%eax
1981	movzb		(%rsp,$counter),%ecx
1982	lea		1($counter),$counter
1983	xor		%ecx,%eax
1984	mov		%al,-1($out,$counter)
1985	dec		$len
1986	jnz		.Loop_tail_avx512
1987
1988	vmovdqa32	$a_,0x00(%rsp)
1989
1990.Ldone_avx512:
1991	vzeroall
1992___
1993$code.=<<___	if ($win64);
1994	movaps	-0x28(%r9),%xmm6
1995	movaps	-0x18(%r9),%xmm7
1996___
1997$code.=<<___;
1998	lea	(%r9),%rsp
1999.cfi_def_cfa_register	rsp
2000.Lavx512_epilogue:
2001	ret
2002.cfi_endproc
2003.size	ChaCha20_avx512,.-ChaCha20_avx512
2004___
2005}
2006if ($avx>2) {
2007# This one handles longer inputs...

my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
    $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15));
my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
	 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
my @key=map("%zmm$_",(16..31));
my ($xt0,$xt1,$xt2,$xt3)=@key[0..3];

sub AVX512_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my @x=map("\"$_\"",@xx);

	(
	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",	# Q1
	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",	# Q2
	  "&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",	# Q3
	   "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",	# Q4
	"&vpxord	(@x[$d0],@x[$d0],@x[$a0])",
	 "&vpxord	(@x[$d1],@x[$d1],@x[$a1])",
	  "&vpxord	(@x[$d2],@x[$d2],@x[$a2])",
	   "&vpxord	(@x[$d3],@x[$d3],@x[$a3])",
	"&vprold	(@x[$d0],@x[$d0],16)",
	 "&vprold	(@x[$d1],@x[$d1],16)",
	  "&vprold	(@x[$d2],@x[$d2],16)",
	   "&vprold	(@x[$d3],@x[$d3],16)",

	"&vpaddd	(@x[$c0],@x[$c0],@x[$d0])",
	 "&vpaddd	(@x[$c1],@x[$c1],@x[$d1])",
	  "&vpaddd	(@x[$c2],@x[$c2],@x[$d2])",
	   "&vpaddd	(@x[$c3],@x[$c3],@x[$d3])",
	"&vpxord	(@x[$b0],@x[$b0],@x[$c0])",
	 "&vpxord	(@x[$b1],@x[$b1],@x[$c1])",
	  "&vpxord	(@x[$b2],@x[$b2],@x[$c2])",
	   "&vpxord	(@x[$b3],@x[$b3],@x[$c3])",
	"&vprold	(@x[$b0],@x[$b0],12)",
	 "&vprold	(@x[$b1],@x[$b1],12)",
	  "&vprold	(@x[$b2],@x[$b2],12)",
	   "&vprold	(@x[$b3],@x[$b3],12)",

	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",
	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",
	  "&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",
	   "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",
	"&vpxord	(@x[$d0],@x[$d0],@x[$a0])",
	 "&vpxord	(@x[$d1],@x[$d1],@x[$a1])",
	  "&vpxord	(@x[$d2],@x[$d2],@x[$a2])",
	   "&vpxord	(@x[$d3],@x[$d3],@x[$a3])",
	"&vprold	(@x[$d0],@x[$d0],8)",
	 "&vprold	(@x[$d1],@x[$d1],8)",
	  "&vprold	(@x[$d2],@x[$d2],8)",
	   "&vprold	(@x[$d3],@x[$d3],8)",

	"&vpaddd	(@x[$c0],@x[$c0],@x[$d0])",
	 "&vpaddd	(@x[$c1],@x[$c1],@x[$d1])",
	  "&vpaddd	(@x[$c2],@x[$c2],@x[$d2])",
	   "&vpaddd	(@x[$c3],@x[$c3],@x[$d3])",
	"&vpxord	(@x[$b0],@x[$b0],@x[$c0])",
	 "&vpxord	(@x[$b1],@x[$b1],@x[$c1])",
	  "&vpxord	(@x[$b2],@x[$b2],@x[$c2])",
	   "&vpxord	(@x[$b3],@x[$b3],@x[$c3])",
	"&vprold	(@x[$b0],@x[$b0],7)",
	 "&vprold	(@x[$b1],@x[$b1],7)",
	  "&vprold	(@x[$b2],@x[$b2],7)",
	   "&vprold	(@x[$b3],@x[$b3],7)"
	);
}
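# For reference, a plain-Perl sketch (not executed by this generator) of the
# scalar quarter-round that each column of four vector instructions above
# performs on all 16 lanes at once:
#
#	sub rotl32 { my ($v,$n)=@_; (($v<<$n)|($v>>(32-$n))) & 0xffffffff }
#
#	sub quarter_round {			# one ChaCha quarter-round
#		my ($a,$b,$c,$d)=@_;		# 32-bit state words
#		$a=($a+$b)&0xffffffff; $d=rotl32($d^$a,16);
#		$c=($c+$d)&0xffffffff; $b=rotl32($b^$c,12);
#		$a=($a+$b)&0xffffffff; $d=rotl32($d^$a, 8);
#		$c=($c+$d)&0xffffffff; $b=rotl32($b^$c, 7);
#		return ($a,$b,$c,$d);
#	}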

my $xframe = $win64 ? 0xa8 : 8;

$code.=<<___;
.type	ChaCha20_16x,\@function,5
.align	32
ChaCha20_16x:
.LChaCha20_16x:
.cfi_startproc
	mov		%rsp,%r9		# frame register
.cfi_def_cfa_register	r9
	sub		\$64+$xframe,%rsp
	and		\$-64,%rsp
___
$code.=<<___	if ($win64);
	movaps		%xmm6,-0xa8(%r9)
	movaps		%xmm7,-0x98(%r9)
	movaps		%xmm8,-0x88(%r9)
	movaps		%xmm9,-0x78(%r9)
	movaps		%xmm10,-0x68(%r9)
	movaps		%xmm11,-0x58(%r9)
	movaps		%xmm12,-0x48(%r9)
	movaps		%xmm13,-0x38(%r9)
	movaps		%xmm14,-0x28(%r9)
	movaps		%xmm15,-0x18(%r9)
.L16x_body:
___
$code.=<<___;
	vzeroupper

	lea		.Lsigma(%rip),%r10
	vbroadcasti32x4	(%r10),$xa3		# key[0]
	vbroadcasti32x4	($key),$xb3		# key[1]
	vbroadcasti32x4	16($key),$xc3		# key[2]
	vbroadcasti32x4	($counter),$xd3		# key[3]

	vpshufd		\$0x00,$xa3,$xa0	# smash key by lanes...
	vpshufd		\$0x55,$xa3,$xa1
	vpshufd		\$0xaa,$xa3,$xa2
	vpshufd		\$0xff,$xa3,$xa3
	vmovdqa64	$xa0,@key[0]
	vmovdqa64	$xa1,@key[1]
	vmovdqa64	$xa2,@key[2]
	vmovdqa64	$xa3,@key[3]

	vpshufd		\$0x00,$xb3,$xb0
	vpshufd		\$0x55,$xb3,$xb1
	vpshufd		\$0xaa,$xb3,$xb2
	vpshufd		\$0xff,$xb3,$xb3
	vmovdqa64	$xb0,@key[4]
	vmovdqa64	$xb1,@key[5]
	vmovdqa64	$xb2,@key[6]
	vmovdqa64	$xb3,@key[7]

	vpshufd		\$0x00,$xc3,$xc0
	vpshufd		\$0x55,$xc3,$xc1
	vpshufd		\$0xaa,$xc3,$xc2
	vpshufd		\$0xff,$xc3,$xc3
	vmovdqa64	$xc0,@key[8]
	vmovdqa64	$xc1,@key[9]
	vmovdqa64	$xc2,@key[10]
	vmovdqa64	$xc3,@key[11]

	vpshufd		\$0x00,$xd3,$xd0
	vpshufd		\$0x55,$xd3,$xd1
	vpshufd		\$0xaa,$xd3,$xd2
	vpshufd		\$0xff,$xd3,$xd3
	vpaddd		.Lincz(%rip),$xd0,$xd0	# don't save counters yet
	vmovdqa64	$xd0,@key[12]
	vmovdqa64	$xd1,@key[13]
	vmovdqa64	$xd2,@key[14]
	vmovdqa64	$xd3,@key[15]
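	# at this point each of the sixteen %zmm registers holds one ChaCha
	# state word replicated across all 16 lanes; only the block counters
	# in key[12] (offset 0..15 via .Lincz) differ per lane, so the lanes
	# compute 16 consecutive 64-byte blocks in parallel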

	mov		\$10,%eax
	jmp		.Loop16x

.align	32
.Loop_outer16x:
	vpbroadcastd	0(%r10),$xa0		# reload key
	vpbroadcastd	4(%r10),$xa1
	vpbroadcastd	8(%r10),$xa2
	vpbroadcastd	12(%r10),$xa3
	vpaddd		.Lsixteen(%rip),@key[12],@key[12]	# next SIMD counters
	vmovdqa64	@key[4],$xb0
	vmovdqa64	@key[5],$xb1
	vmovdqa64	@key[6],$xb2
	vmovdqa64	@key[7],$xb3
	vmovdqa64	@key[8],$xc0
	vmovdqa64	@key[9],$xc1
	vmovdqa64	@key[10],$xc2
	vmovdqa64	@key[11],$xc3
	vmovdqa64	@key[12],$xd0
	vmovdqa64	@key[13],$xd1
	vmovdqa64	@key[14],$xd2
	vmovdqa64	@key[15],$xd3

	vmovdqa64	$xa0,@key[0]
	vmovdqa64	$xa1,@key[1]
	vmovdqa64	$xa2,@key[2]
	vmovdqa64	$xa3,@key[3]

	mov		\$10,%eax
	jmp		.Loop16x

.align	32
.Loop16x:
___
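	# one pass of .Loop16x is a column round followed by a diagonal round,
	# and %eax counts 10 passes = 20 ChaCha rounds.  AVX512_lane_ROUND
	# derives the other three quarter-rounds by rotating within each
	# 4-register group: ($_&~3)+(($_+1)&3) maps (0,4,8,12) -> (1,5,9,13)
	# -> (2,6,10,14) -> (3,7,11,15), while (0,5,10,15) walks the diagonals.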
	foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
	foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	dec		%eax
	jnz		.Loop16x

	vpaddd		@key[0],$xa0,$xa0	# accumulate key
	vpaddd		@key[1],$xa1,$xa1
	vpaddd		@key[2],$xa2,$xa2
	vpaddd		@key[3],$xa3,$xa3

	vpunpckldq	$xa1,$xa0,$xt2		# "de-interlace" data
	vpunpckldq	$xa3,$xa2,$xt3
	vpunpckhdq	$xa1,$xa0,$xa0
	vpunpckhdq	$xa3,$xa2,$xa2
	vpunpcklqdq	$xt3,$xt2,$xa1		# "a0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "a1"
	vpunpcklqdq	$xa2,$xa0,$xa3		# "a2"
	vpunpckhqdq	$xa2,$xa0,$xa0		# "a3"
___
	($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
$code.=<<___;
	vpaddd		@key[4],$xb0,$xb0
	vpaddd		@key[5],$xb1,$xb1
	vpaddd		@key[6],$xb2,$xb2
	vpaddd		@key[7],$xb3,$xb3

	vpunpckldq	$xb1,$xb0,$xt2
	vpunpckldq	$xb3,$xb2,$xt3
	vpunpckhdq	$xb1,$xb0,$xb0
	vpunpckhdq	$xb3,$xb2,$xb2
	vpunpcklqdq	$xt3,$xt2,$xb1		# "b0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "b1"
	vpunpcklqdq	$xb2,$xb0,$xb3		# "b2"
	vpunpckhqdq	$xb2,$xb0,$xb0		# "b3"
___
	($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
$code.=<<___;
	vshufi32x4	\$0x44,$xb0,$xa0,$xt3	# "de-interlace" further
	vshufi32x4	\$0xee,$xb0,$xa0,$xb0
	vshufi32x4	\$0x44,$xb1,$xa1,$xa0
	vshufi32x4	\$0xee,$xb1,$xa1,$xb1
	vshufi32x4	\$0x44,$xb2,$xa2,$xa1
	vshufi32x4	\$0xee,$xb2,$xa2,$xb2
	vshufi32x4	\$0x44,$xb3,$xa3,$xa2
	vshufi32x4	\$0xee,$xb3,$xa3,$xb3
___
	($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
$code.=<<___;
	vpaddd		@key[8],$xc0,$xc0
	vpaddd		@key[9],$xc1,$xc1
	vpaddd		@key[10],$xc2,$xc2
	vpaddd		@key[11],$xc3,$xc3

	vpunpckldq	$xc1,$xc0,$xt2
	vpunpckldq	$xc3,$xc2,$xt3
	vpunpckhdq	$xc1,$xc0,$xc0
	vpunpckhdq	$xc3,$xc2,$xc2
	vpunpcklqdq	$xt3,$xt2,$xc1		# "c0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "c1"
	vpunpcklqdq	$xc2,$xc0,$xc3		# "c2"
	vpunpckhqdq	$xc2,$xc0,$xc0		# "c3"
___
	($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
$code.=<<___;
	vpaddd		@key[12],$xd0,$xd0
	vpaddd		@key[13],$xd1,$xd1
	vpaddd		@key[14],$xd2,$xd2
	vpaddd		@key[15],$xd3,$xd3

	vpunpckldq	$xd1,$xd0,$xt2
	vpunpckldq	$xd3,$xd2,$xt3
	vpunpckhdq	$xd1,$xd0,$xd0
	vpunpckhdq	$xd3,$xd2,$xd2
	vpunpcklqdq	$xt3,$xt2,$xd1		# "d0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "d1"
	vpunpcklqdq	$xd2,$xd0,$xd3		# "d2"
	vpunpckhqdq	$xd2,$xd0,$xd0		# "d3"
___
	($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
$code.=<<___;
	vshufi32x4	\$0x44,$xd0,$xc0,$xt3	# "de-interlace" further
	vshufi32x4	\$0xee,$xd0,$xc0,$xd0
	vshufi32x4	\$0x44,$xd1,$xc1,$xc0
	vshufi32x4	\$0xee,$xd1,$xc1,$xd1
	vshufi32x4	\$0x44,$xd2,$xc2,$xc1
	vshufi32x4	\$0xee,$xd2,$xc2,$xd2
	vshufi32x4	\$0x44,$xd3,$xc3,$xc2
	vshufi32x4	\$0xee,$xd3,$xc3,$xd3
___
	($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
$code.=<<___;
	vshufi32x4	\$0x88,$xc0,$xa0,$xt0	# "de-interlace" further
	vshufi32x4	\$0xdd,$xc0,$xa0,$xa0
	 vshufi32x4	\$0x88,$xd0,$xb0,$xc0
	 vshufi32x4	\$0xdd,$xd0,$xb0,$xd0
	vshufi32x4	\$0x88,$xc1,$xa1,$xt1
	vshufi32x4	\$0xdd,$xc1,$xa1,$xa1
	 vshufi32x4	\$0x88,$xd1,$xb1,$xc1
	 vshufi32x4	\$0xdd,$xd1,$xb1,$xd1
	vshufi32x4	\$0x88,$xc2,$xa2,$xt2
	vshufi32x4	\$0xdd,$xc2,$xa2,$xa2
	 vshufi32x4	\$0x88,$xd2,$xb2,$xc2
	 vshufi32x4	\$0xdd,$xd2,$xb2,$xd2
	vshufi32x4	\$0x88,$xc3,$xa3,$xt3
	vshufi32x4	\$0xdd,$xc3,$xa3,$xa3
	 vshufi32x4	\$0x88,$xd3,$xb3,$xc3
	 vshufi32x4	\$0xdd,$xd3,$xb3,$xd3
___
	($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)=
	($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3);

	($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1,
	 $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) =
	($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
	 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
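	# after the transposition each register holds one contiguous 64-byte
	# output block: $xa0 is block 0, $xb0 block 1, ..., $xd3 block 15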
$code.=<<___;
	cmp		\$64*16,$len
	jb		.Ltail16x

	vpxord		0x00($inp),$xa0,$xa0	# xor with input
	vpxord		0x40($inp),$xb0,$xb0
	vpxord		0x80($inp),$xc0,$xc0
	vpxord		0xc0($inp),$xd0,$xd0
	vmovdqu32	$xa0,0x00($out)
	vmovdqu32	$xb0,0x40($out)
	vmovdqu32	$xc0,0x80($out)
	vmovdqu32	$xd0,0xc0($out)

	vpxord		0x100($inp),$xa1,$xa1
	vpxord		0x140($inp),$xb1,$xb1
	vpxord		0x180($inp),$xc1,$xc1
	vpxord		0x1c0($inp),$xd1,$xd1
	vmovdqu32	$xa1,0x100($out)
	vmovdqu32	$xb1,0x140($out)
	vmovdqu32	$xc1,0x180($out)
	vmovdqu32	$xd1,0x1c0($out)

	vpxord		0x200($inp),$xa2,$xa2
	vpxord		0x240($inp),$xb2,$xb2
	vpxord		0x280($inp),$xc2,$xc2
	vpxord		0x2c0($inp),$xd2,$xd2
	vmovdqu32	$xa2,0x200($out)
	vmovdqu32	$xb2,0x240($out)
	vmovdqu32	$xc2,0x280($out)
	vmovdqu32	$xd2,0x2c0($out)

	vpxord		0x300($inp),$xa3,$xa3
	vpxord		0x340($inp),$xb3,$xb3
	vpxord		0x380($inp),$xc3,$xc3
	vpxord		0x3c0($inp),$xd3,$xd3
	lea		0x400($inp),$inp
	vmovdqu32	$xa3,0x300($out)
	vmovdqu32	$xb3,0x340($out)
	vmovdqu32	$xc3,0x380($out)
	vmovdqu32	$xd3,0x3c0($out)
	lea		0x400($out),$out

	sub		\$64*16,$len
	jnz		.Loop_outer16x

	jmp		.Ldone16x

.align	32
.Ltail16x:
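	# fewer than 16 blocks remain: xor whole 64-byte blocks one at a
	# time, keeping the next unconsumed keystream block in $xa0 for
	# the sub-64-byte tail below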
	xor		%r10,%r10
	sub		$inp,$out
	cmp		\$64*1,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa0,$xa0	# xor with input
	vmovdqu32	$xa0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb0,$xa0
	lea		64($inp),$inp

	cmp		\$64*2,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb0,$xb0
	vmovdqu32	$xb0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc0,$xa0
	lea		64($inp),$inp

	cmp		\$64*3,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc0,$xc0
	vmovdqu32	$xc0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd0,$xa0
	lea		64($inp),$inp

	cmp		\$64*4,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd0,$xd0
	vmovdqu32	$xd0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa1,$xa0
	lea		64($inp),$inp

	cmp		\$64*5,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa1,$xa1
	vmovdqu32	$xa1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb1,$xa0
	lea		64($inp),$inp

	cmp		\$64*6,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb1,$xb1
	vmovdqu32	$xb1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc1,$xa0
	lea		64($inp),$inp

	cmp		\$64*7,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc1,$xc1
	vmovdqu32	$xc1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd1,$xa0
	lea		64($inp),$inp

	cmp		\$64*8,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd1,$xd1
	vmovdqu32	$xd1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa2,$xa0
	lea		64($inp),$inp

	cmp		\$64*9,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa2,$xa2
	vmovdqu32	$xa2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb2,$xa0
	lea		64($inp),$inp

	cmp		\$64*10,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb2,$xb2
	vmovdqu32	$xb2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc2,$xa0
	lea		64($inp),$inp

	cmp		\$64*11,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc2,$xc2
	vmovdqu32	$xc2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd2,$xa0
	lea		64($inp),$inp

	cmp		\$64*12,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd2,$xd2
	vmovdqu32	$xd2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa3,$xa0
	lea		64($inp),$inp

	cmp		\$64*13,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa3,$xa3
	vmovdqu32	$xa3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb3,$xa0
	lea		64($inp),$inp

	cmp		\$64*14,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb3,$xb3
	vmovdqu32	$xb3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc3,$xa0
	lea		64($inp),$inp

	cmp		\$64*15,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc3,$xc3
	vmovdqu32	$xc3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd3,$xa0
	lea		64($inp),$inp

.Less_than_64_16x:
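	# spill the current keystream block and xor the remaining 1..63
	# bytes byte-by-byte via the stack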
	vmovdqa32	$xa0,0x00(%rsp)
	lea		($out,$inp),$out
	and		\$63,$len

.Loop_tail16x:
	movzb		($inp,%r10),%eax
	movzb		(%rsp,%r10),%ecx
	lea		1(%r10),%r10
	xor		%ecx,%eax
	mov		%al,-1($out,%r10)
	dec		$len
	jnz		.Loop_tail16x

	vpxord		$xa0,$xa0,$xa0
	vmovdqa32	$xa0,0(%rsp)		# wipe spilled keystream

.Ldone16x:
	vzeroall
___
$code.=<<___	if ($win64);
	movaps		-0xa8(%r9),%xmm6
	movaps		-0x98(%r9),%xmm7
	movaps		-0x88(%r9),%xmm8
	movaps		-0x78(%r9),%xmm9
	movaps		-0x68(%r9),%xmm10
	movaps		-0x58(%r9),%xmm11
	movaps		-0x48(%r9),%xmm12
	movaps		-0x38(%r9),%xmm13
	movaps		-0x28(%r9),%xmm14
	movaps		-0x18(%r9),%xmm15
___
$code.=<<___;
	lea		(%r9),%rsp
.cfi_def_cfa_register	rsp
.L16x_epilogue:
	ret
.cfi_endproc
.size	ChaCha20_16x,.-ChaCha20_16x
___
}

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
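# On Win64 each handler below locates the frame pointer and XMM spill area
# saved by the corresponding prologue and restores that nonvolatile state
# into the CONTEXT record, so the unwinder can continue past this assembly.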
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	lea	.Lctr32_body(%rip),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<.Lctr32_body
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lno_data(%rip),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lno_data
	jae	.Lcommon_seh_tail

	lea	64+24+48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT) in quadwords
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

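	# ssse3_handler copies the two XMM registers spilled at -0x28 off the
	# saved frame register back into the CONTEXT (4 quadwords);
	# full_handler below does the same for all ten, xmm6..xmm15, spilled
	# at -0xa8 (20 quadwords)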
.type	ssse3_handler,\@abi-omnipotent
.align	16
ssse3_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	-0x28(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$4,%ecx
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	ssse3_handler,.-ssse3_handler

.type	full_handler,\@abi-omnipotent
.align	16
full_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	-0xa8(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$20,%ecx
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	full_handler,.-full_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_ChaCha20_ctr32_nohw
	.rva	.LSEH_end_ChaCha20_ctr32_nohw
	.rva	.LSEH_info_ChaCha20_ctr32_nohw

	.rva	.LSEH_begin_ChaCha20_ctr32_ssse3
	.rva	.LSEH_end_ChaCha20_ctr32_ssse3
	.rva	.LSEH_info_ChaCha20_ctr32_ssse3

	.rva	.LSEH_begin_ChaCha20_ctr32_ssse3_4x
	.rva	.LSEH_end_ChaCha20_ctr32_ssse3_4x
	.rva	.LSEH_info_ChaCha20_ctr32_ssse3_4x
___
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_ChaCha20_ctr32_avx2
	.rva	.LSEH_end_ChaCha20_ctr32_avx2
	.rva	.LSEH_info_ChaCha20_ctr32_avx2
___
$code.=<<___ if ($avx>2);
	.rva	.LSEH_begin_ChaCha20_avx512
	.rva	.LSEH_end_ChaCha20_avx512
	.rva	.LSEH_info_ChaCha20_avx512

	.rva	.LSEH_begin_ChaCha20_16x
	.rva	.LSEH_end_ChaCha20_16x
	.rva	.LSEH_info_ChaCha20_16x
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_ChaCha20_ctr32_nohw:
	.byte	9,0,0,0
	.rva	se_handler

.LSEH_info_ChaCha20_ctr32_ssse3:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lssse3_body,.Lssse3_epilogue		# HandlerData[]

.LSEH_info_ChaCha20_ctr32_ssse3_4x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L4x_body,.L4x_epilogue			# HandlerData[]
___
$code.=<<___ if ($avx>1);
.LSEH_info_ChaCha20_ctr32_avx2:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L8x_body,.L8x_epilogue			# HandlerData[]
___
$code.=<<___ if ($avx>2);
.LSEH_info_ChaCha20_avx512:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lavx512_body,.Lavx512_epilogue		# HandlerData[]

.LSEH_info_ChaCha20_16x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L16x_body,.L16x_epilogue		# HandlerData[]
___
}

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

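	# turn "%x#%ymmN"/"%x#%zmmN" (written as "%x#$reg" above) into "%xmmN"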
	s/%x#%[yz]/%x/g;	# "down-shift"

	print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";