#! /usr/bin/env perl
# Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
#
# Permission to use under GPLv2 terms is granted.
# ====================================================================
#
# SHA256/512 for ARMv8.
#
# Performance in cycles per processed byte and improvement coefficient
# over code generated with "default" compiler:
#
#		SHA256-hw	SHA256(*)	SHA512
# Apple A7	1.97		10.5 (+33%)	6.73 (-1%(**))
# Cortex-A53	2.38		15.5 (+115%)	10.0 (+150%(***))
# Cortex-A57	2.31		11.6 (+86%)	7.51 (+260%(***))
# Denver	2.01		10.5 (+26%)	6.70 (+8%)
# X-Gene			20.0 (+100%)	12.8 (+300%(***))
# Mongoose	2.36		13.0 (+50%)	8.36 (+33%)
# Kryo		1.92		17.4 (+30%)	11.2 (+8%)
#
# (*)	Software SHA256 results are of lesser relevance, presented
#	mostly for informational purposes.
# (**)	The result is a trade-off: it's possible to improve it by
#	10% (or by 1 cycle per round), but at the cost of 20% loss
#	on Cortex-A53 (or by 4 cycles per round).
# (***)	Super-impressive coefficients over gcc-generated code are an
#	indication of some compiler "pathology"; most notably, code
#	generated with -mgeneral-regs-only is significantly faster,
#	and the gap is then only 40-90%.

$output=pop;
$flavour=pop;

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
    *STDOUT=*OUT;
} else {
    open OUT,">$output";
    *STDOUT=*OUT;
}
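
# Usage sketch (comment added for clarity; "linux64" is just an example
# flavour): the last two arguments are the flavour and the output file,
#	perl sha512-armv8.pl linux64 sha512-armv8.S
# With a non-"void" flavour the generated source is piped through
# arm-xlate.pl; otherwise it is written to the output file directly.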

if ($output =~ /512/) {
	$BITS=512;
	$SZ=8;
	@Sigma0=(28,34,39);
	@Sigma1=(14,18,41);
	@sigma0=(1,  8, 7);
	@sigma1=(19,61, 6);
	$rounds=80;
	$reg_t="x";
} else {
	$BITS=256;
	$SZ=4;
	@Sigma0=( 2,13,22);
	@Sigma1=( 6,11,25);
	@sigma0=( 7,18, 3);
	@sigma1=(17,19,10);
	$rounds=64;
	$reg_t="w";
}
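
# A minimal reference sketch (assumption: added for illustration only,
# with made-up names; nothing below is called by the generator). The
# triples above are the FIPS 180-4 rotate/shift amounts:
#	Sigma(x) = ROTR^a(x) ^ ROTR^b(x) ^ ROTR^c(x)	(a,b,c) = @Sigma0/@Sigma1
#	sigma(x) = ROTR^a(x) ^ ROTR^b(x) ^ SHR^c(x)	(a,b,c) = @sigma0/@sigma1
# written out for the 32-bit SHA-256 case:
sub _ref_ror32	{ my ($x,$n)=@_; (($x>>$n)|($x<<(32-$n)))&0xffffffff; }
sub _ref_Sigma0	{ my $x=shift; _ref_ror32($x,2)^_ref_ror32($x,13)^_ref_ror32($x,22); }
sub _ref_sigma0	{ my $x=shift; _ref_ror32($x,7)^_ref_ror32($x,18)^($x>>3); }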

$func="sha${BITS}_block_data_order";

# x0..x2 carry the (ctx,inp,num) arguments; x30 (the link register) is
# reused as the K-table pointer once the prologue has saved it.
($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));

@X=map("$reg_t$_",(3..15,0..2));
@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("$reg_t$_",(20..27));
($t0,$t1,$t2,$t3)=map("$reg_t$_",(16,17,19,28));	# x18 (platform register) is avoided

sub BODY_00_xx {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
my $j=($i+1)&15;
my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
   $T0=@X[$i+3] if ($i<11);

$code.=<<___	if ($i<16);
#ifndef	__AARCH64EB__
	rev	@X[$i],@X[$i]			// $i
#endif
___
$code.=<<___	if ($i<13 && ($i&1));
	ldp	@X[$i+1],@X[$i+2],[$inp],#2*$SZ
___
$code.=<<___	if ($i==13);
	ldp	@X[14],@X[15],[$inp]
___
$code.=<<___	if ($i>=14);
	ldr	@X[($i-11)&15],[sp,#`$SZ*(($i-11)%4)`]
___
$code.=<<___	if ($i>0 && $i<16);
	add	$a,$a,$t1			// h+=Sigma0(a)
___
$code.=<<___	if ($i>=11);
	str	@X[($i-8)&15],[sp,#`$SZ*(($i-8)%4)`]
___
# While ARMv8 specifies a merged rotate-and-logical operation such as
# 'eor x,y,z,ror#n', it was found to negatively affect performance
# on Apple A7. The reason seems to be that it requires even 'y' to
# be available earlier. This means that such a merged instruction is
# not necessarily the best choice on the critical path... On the other
# hand Cortex-A5x handles merged instructions much better than disjoint
# rotate and logical... See (**) footnote above.
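# Note on the Sigma computation below (added remark; it relies only on
# rotation distributing over XOR):
#	ror(e,a) ^ ror(e ^ ror(e,c-b), b) == ror(e,a) ^ ror(e,b) ^ ror(e,c)
# so all three rotations of Sigma1(e) cost one standalone ror plus two
# eor-with-ror instructions, and Sigma0(a) is assembled the same way.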
$code.=<<___	if ($i<15);
	ror	$t0,$e,#$Sigma1[0]
	add	$h,$h,$t2			// h+=K[i]
	eor	$T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]`
	and	$t1,$f,$e
	bic	$t2,$g,$e
	add	$h,$h,@X[$i&15]			// h+=X[i]
	orr	$t1,$t1,$t2			// Ch(e,f,g)
	eor	$t2,$a,$b			// a^b, b^c in next round
	eor	$t0,$t0,$T0,ror#$Sigma1[1]	// Sigma1(e)
	ror	$T0,$a,#$Sigma0[0]
	add	$h,$h,$t1			// h+=Ch(e,f,g)
	eor	$t1,$a,$a,ror#`$Sigma0[2]-$Sigma0[1]`
	add	$h,$h,$t0			// h+=Sigma1(e)
	and	$t3,$t3,$t2			// (b^c)&=(a^b)
	add	$d,$d,$h			// d+=h
	eor	$t3,$t3,$b			// Maj(a,b,c)
	eor	$t1,$T0,$t1,ror#$Sigma0[1]	// Sigma0(a)
	add	$h,$h,$t3			// h+=Maj(a,b,c)
	ldr	$t3,[$Ktbl],#$SZ		// *K++, $t2 in next round
	//add	$h,$h,$t1			// h+=Sigma0(a)
___
$code.=<<___	if ($i>=15);
	ror	$t0,$e,#$Sigma1[0]
	add	$h,$h,$t2			// h+=K[i]
	ror	$T1,@X[($j+1)&15],#$sigma0[0]
	and	$t1,$f,$e
	ror	$T2,@X[($j+14)&15],#$sigma1[0]
	bic	$t2,$g,$e
	ror	$T0,$a,#$Sigma0[0]
	add	$h,$h,@X[$i&15]			// h+=X[i]
	eor	$t0,$t0,$e,ror#$Sigma1[1]
	eor	$T1,$T1,@X[($j+1)&15],ror#$sigma0[1]
	orr	$t1,$t1,$t2			// Ch(e,f,g)
	eor	$t2,$a,$b			// a^b, b^c in next round
	eor	$t0,$t0,$e,ror#$Sigma1[2]	// Sigma1(e)
	eor	$T0,$T0,$a,ror#$Sigma0[1]
	add	$h,$h,$t1			// h+=Ch(e,f,g)
	and	$t3,$t3,$t2			// (b^c)&=(a^b)
	eor	$T2,$T2,@X[($j+14)&15],ror#$sigma1[1]
	eor	$T1,$T1,@X[($j+1)&15],lsr#$sigma0[2]	// sigma0(X[i+1])
	add	$h,$h,$t0			// h+=Sigma1(e)
	eor	$t3,$t3,$b			// Maj(a,b,c)
	eor	$t1,$T0,$a,ror#$Sigma0[2]	// Sigma0(a)
	eor	$T2,$T2,@X[($j+14)&15],lsr#$sigma1[2]	// sigma1(X[i+14])
	add	@X[$j],@X[$j],@X[($j+9)&15]
	add	$d,$d,$h			// d+=h
	add	$h,$h,$t3			// h+=Maj(a,b,c)
	ldr	$t3,[$Ktbl],#$SZ		// *K++, $t2 in next round
	add	@X[$j],@X[$j],$T1
	add	$h,$h,$t1			// h+=Sigma0(a)
	add	@X[$j],@X[$j],$T2
___
	($t2,$t3)=($t3,$t2);
}
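
# For reference (added sketch of what one scheduled round computes, per
# FIPS 180-4; BODY_00_xx interleaves these steps across iterations):
#	T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + X[i]
#	T2 = Sigma0(a) + Maj(a,b,c)
#	d += T1; h = T1 + T2
# Ch(e,f,g) = (e&f)|(~e&g) appears as and+bic+orr above, and Maj(a,b,c)
# is folded to ((a^b)&(b^c))^b, with b^c carried over from the previous
# round in $t3 (the "magic seed" set up before round 0).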

$code.=<<___;
#ifndef	__KERNEL__
# include <openssl/arm_arch.h>
#endif

.text

.extern	OPENSSL_armcap_P
.hidden	OPENSSL_armcap_P
.globl	$func
.type	$func,%function
.align	6
$func:
	AARCH64_VALID_CALL_TARGET
#ifndef	__KERNEL__
#if __has_feature(hwaddress_sanitizer) && __clang_major__ >= 10
	adrp	x16,:pg_hi21_nc:OPENSSL_armcap_P
#else
	adrp	x16,:pg_hi21:OPENSSL_armcap_P
#endif
	ldr	w16,[x16,:lo12:OPENSSL_armcap_P]
___
$code.=<<___	if ($SZ==4);
	tst	w16,#ARMV8_SHA256
	b.ne	.Lv8_entry
___
$code.=<<___	if ($SZ==8);
	tst	w16,#ARMV8_SHA512
	b.ne	.Lv8_entry
___
$code.=<<___;
#endif
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-128]!
	add	x29,sp,#0

	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]
	sub	sp,sp,#4*$SZ

	ldp	$A,$B,[$ctx]				// load context
	ldp	$C,$D,[$ctx,#2*$SZ]
	ldp	$E,$F,[$ctx,#4*$SZ]
	add	$num,$inp,$num,lsl#`log(16*$SZ)/log(2)`	// end of input
	ldp	$G,$H,[$ctx,#6*$SZ]
	adrp	$Ktbl,:pg_hi21:.LK$BITS
	add	$Ktbl,$Ktbl,:lo12:.LK$BITS
	stp	$ctx,$num,[x29,#96]

.Loop:
	ldp	@X[0],@X[1],[$inp],#2*$SZ
	ldr	$t2,[$Ktbl],#$SZ			// *K++
	eor	$t3,$B,$C				// magic seed
	str	$inp,[x29,#112]
___
for ($i=0;$i<16;$i++)	{ &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
$code.=".Loop_16_xx:\n";
for (;$i<32;$i++)	{ &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
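# Added note: the K table below ends in an all-zero terminator. The
# trailing "ldr" of the 16th unrolled round loads it, so the following
# "cbnz" falls out of .Loop_16_xx after exactly $rounds rounds
# (16 up front plus 16 per backedge).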
$code.=<<___;
	cbnz	$t2,.Loop_16_xx

	ldp	$ctx,$num,[x29,#96]
	ldr	$inp,[x29,#112]
	sub	$Ktbl,$Ktbl,#`$SZ*($rounds+1)`		// rewind

	ldp	@X[0],@X[1],[$ctx]
	ldp	@X[2],@X[3],[$ctx,#2*$SZ]
	add	$inp,$inp,#14*$SZ			// advance input pointer
	ldp	@X[4],@X[5],[$ctx,#4*$SZ]
	add	$A,$A,@X[0]
	ldp	@X[6],@X[7],[$ctx,#6*$SZ]
	add	$B,$B,@X[1]
	add	$C,$C,@X[2]
	add	$D,$D,@X[3]
	stp	$A,$B,[$ctx]
	add	$E,$E,@X[4]
	add	$F,$F,@X[5]
	stp	$C,$D,[$ctx,#2*$SZ]
	add	$G,$G,@X[6]
	add	$H,$H,@X[7]
	cmp	$inp,$num
	stp	$E,$F,[$ctx,#4*$SZ]
	stp	$G,$H,[$ctx,#6*$SZ]
	b.ne	.Loop

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#4*$SZ
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#128
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	$func,.-$func

.section .rodata
.align	6
.type	.LK$BITS,%object
.LK$BITS:
___
$code.=<<___ if ($SZ==8);
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
	.quad	0	// terminator
___
$code.=<<___ if ($SZ==4);
	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
	.long	0	// terminator
___
$code.=<<___;
.size	.LK$BITS,.-.LK$BITS
.asciz	"SHA$BITS block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___

if ($SZ==4) {
my $Ktbl="x3";

my ($ABCD,$EFGH,$abcd)=map("v$_.16b",(0..2));
my @MSG=map("v$_.16b",(4..7));
my ($W0,$W1)=("v16.4s","v17.4s");
my ($ABCD_SAVE,$EFGH_SAVE)=("v18.16b","v19.16b");

$code.=<<___;
.text
#ifndef	__KERNEL__
.type	sha256_block_armv8,%function
.align	6
sha256_block_armv8:
.Lv8_entry:
	// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
	stp		x29,x30,[sp,#-16]!
	add		x29,sp,#0

	ld1.32		{$ABCD,$EFGH},[$ctx]
	adrp		$Ktbl,:pg_hi21:.LK256
	add		$Ktbl,$Ktbl,:lo12:.LK256

.Loop_hw:
	ld1		{@MSG[0]-@MSG[3]},[$inp],#64
	sub		$num,$num,#1
	ld1.32		{$W0},[$Ktbl],#16
	rev32		@MSG[0],@MSG[0]
	rev32		@MSG[1],@MSG[1]
	rev32		@MSG[2],@MSG[2]
	rev32		@MSG[3],@MSG[3]
	orr		$ABCD_SAVE,$ABCD,$ABCD		// offload
	orr		$EFGH_SAVE,$EFGH,$EFGH
___
for($i=0;$i<12;$i++) {
$code.=<<___;
	ld1.32		{$W1},[$Ktbl],#16
	add.i32		$W0,$W0,@MSG[0]
	sha256su0	@MSG[0],@MSG[1]
	orr		$abcd,$ABCD,$ABCD
	sha256h		$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0
	sha256su1	@MSG[0],@MSG[2],@MSG[3]
___
	($W0,$W1)=($W1,$W0);	push(@MSG,shift(@MSG));
}
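# Added note: each iteration above retires four rounds. add.i32 folds
# K[4i..4i+3] into the message quad, one sha256h/sha256h2 pair consumes
# it, and sha256su0/sha256su1 extend the schedule. Twelve scheduled
# groups plus the four final groups below cover all 64 rounds.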
$code.=<<___;
	ld1.32		{$W1},[$Ktbl],#16
	add.i32		$W0,$W0,@MSG[0]
	orr		$abcd,$ABCD,$ABCD
	sha256h		$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0

	ld1.32		{$W0},[$Ktbl],#16
	add.i32		$W1,$W1,@MSG[1]
	orr		$abcd,$ABCD,$ABCD
	sha256h		$ABCD,$EFGH,$W1
	sha256h2	$EFGH,$abcd,$W1

	ld1.32		{$W1},[$Ktbl]
	add.i32		$W0,$W0,@MSG[2]
	sub		$Ktbl,$Ktbl,#$rounds*$SZ-16	// rewind
	orr		$abcd,$ABCD,$ABCD
	sha256h		$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0

	add.i32		$W1,$W1,@MSG[3]
	orr		$abcd,$ABCD,$ABCD
	sha256h		$ABCD,$EFGH,$W1
	sha256h2	$EFGH,$abcd,$W1

	add.i32		$ABCD,$ABCD,$ABCD_SAVE
	add.i32		$EFGH,$EFGH,$EFGH_SAVE

	cbnz		$num,.Loop_hw

	st1.32		{$ABCD,$EFGH},[$ctx]

	ldr		x29,[sp],#16
	ret
.size	sha256_block_armv8,.-sha256_block_armv8
#endif
___
}

if ($SZ==8) {
my $Ktbl="x3";

my @H = map("v$_.16b",(0..4));
my ($fg,$de,$m9_10)=map("v$_.16b",(5..7));
my @MSG=map("v$_.16b",(16..23));
my ($W0,$W1)=("v24.2d","v25.2d");
my ($AB,$CD,$EF,$GH)=map("v$_.16b",(26..29));

$code.=<<___;
.text
#ifndef	__KERNEL__
.type	sha512_block_armv8,%function
.align	6
sha512_block_armv8:
.Lv8_entry:
	stp		x29,x30,[sp,#-16]!
	add		x29,sp,#0

	ld1		{@MSG[0]-@MSG[3]},[$inp],#64	// load input
	ld1		{@MSG[4]-@MSG[7]},[$inp],#64

	ld1.64		{@H[0]-@H[3]},[$ctx]		// load context
	adrp		$Ktbl,:pg_hi21:.LK512
	add		$Ktbl,$Ktbl,:lo12:.LK512

	rev64		@MSG[0],@MSG[0]
	rev64		@MSG[1],@MSG[1]
	rev64		@MSG[2],@MSG[2]
	rev64		@MSG[3],@MSG[3]
	rev64		@MSG[4],@MSG[4]
	rev64		@MSG[5],@MSG[5]
	rev64		@MSG[6],@MSG[6]
	rev64		@MSG[7],@MSG[7]
	b		.Loop_hw

.align	4
.Loop_hw:
	ld1.64		{$W0},[$Ktbl],#16
	subs		$num,$num,#1
	sub		x4,$inp,#128
	orr		$AB,@H[0],@H[0]			// offload
	orr		$CD,@H[1],@H[1]
	orr		$EF,@H[2],@H[2]
	orr		$GH,@H[3],@H[3]
	csel		$inp,$inp,x4,ne			// conditional rewind
___
for($i=0;$i<32;$i++) {
$code.=<<___;
	add.i64		$W0,$W0,@MSG[0]
	ld1.64		{$W1},[$Ktbl],#16
	ext		$W0,$W0,$W0,#8
	ext		$fg,@H[2],@H[3],#8
	ext		$de,@H[1],@H[2],#8
	add.i64		@H[3],@H[3],$W0			// "T1 + H + K512[i]"
	 sha512su0	@MSG[0],@MSG[1]
	 ext		$m9_10,@MSG[4],@MSG[5],#8
	sha512h		@H[3],$fg,$de
	 sha512su1	@MSG[0],@MSG[7],$m9_10
	add.i64		@H[4],@H[1],@H[3]		// "D + T1"
	sha512h2	@H[3],$H[1],@H[0]
___
	($W0,$W1)=($W1,$W0);	push(@MSG,shift(@MSG));
	@H = (@H[3],@H[0],@H[4],@H[2],@H[1]);
}
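# Added note: the five-entry relabeling of @H above stands in for the
# usual variable rotation. Each sha512h/sha512h2 pair retires two rounds
# on a register pair, @H[4] receives the "D + T1" half, and renaming the
# Perl array instead of moving data keeps the dependency chain short.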
for(;$i<40;$i++) {
$code.=<<___	if ($i<39);
	ld1.64		{$W1},[$Ktbl],#16
___
$code.=<<___	if ($i==39);
	sub		$Ktbl,$Ktbl,#$rounds*$SZ	// rewind
___
$code.=<<___;
	add.i64		$W0,$W0,@MSG[0]
	 ld1		{@MSG[0]},[$inp],#16		// load next input
	ext		$W0,$W0,$W0,#8
	ext		$fg,@H[2],@H[3],#8
	ext		$de,@H[1],@H[2],#8
	add.i64		@H[3],@H[3],$W0			// "T1 + H + K512[i]"
	sha512h		@H[3],$fg,$de
	 rev64		@MSG[0],@MSG[0]
	add.i64		@H[4],@H[1],@H[3]		// "D + T1"
	sha512h2	@H[3],$H[1],@H[0]
___
	($W0,$W1)=($W1,$W0);	push(@MSG,shift(@MSG));
	@H = (@H[3],@H[0],@H[4],@H[2],@H[1]);
}
$code.=<<___;
	add.i64		@H[0],@H[0],$AB			// accumulate
	add.i64		@H[1],@H[1],$CD
	add.i64		@H[2],@H[2],$EF
	add.i64		@H[3],@H[3],$GH

	cbnz		$num,.Loop_hw

	st1.64		{@H[0]-@H[3]},[$ctx]		// store context

	ldr		x29,[sp],#16
	ret
.size	sha512_block_armv8,.-sha512_block_armv8
#endif
___
}

# Encoders for the SHA-2 intrinsics, for assemblers that do not know the
# mnemonics: emit the raw instruction word via .inst instead.
{   my  %opcode = (
	"sha256h"	=> 0x5e004000,	"sha256h2"	=> 0x5e005000,
	"sha256su0"	=> 0x5e282800,	"sha256su1"	=> 0x5e006000	);

    sub unsha256 {
	my ($mnemonic,$arg)=@_;

	$arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
	&&
	sprintf ".inst\t0x%08x\t//%s %s",
			$opcode{$mnemonic}|$1|($2<<5)|($3<<16),
			$mnemonic,$arg;
    }
}
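
# Worked example of the encoding above: "sha256h v0,v1,v2" packs Rd=0,
# Rn=1 and Rm=2 into bits [4:0], [9:5] and [20:16] of the base opcode,
# yielding ".inst 0x5e024020 //sha256h v0,v1,v2".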

{   my  %opcode = (
	"sha512h"	=> 0xce608000,	"sha512h2"	=> 0xce608400,
	"sha512su0"	=> 0xcec08000,	"sha512su1"	=> 0xce608800	);

    sub unsha512 {
	my ($mnemonic,$arg)=@_;

	$arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
	&&
	sprintf ".inst\t0x%08x\t//%s %s",
			$opcode{$mnemonic}|$1|($2<<5)|($3<<16),
			$mnemonic,$arg;
    }
}

# Copy this script's own comment header into the output, turning '#'
# comments into '//' and stopping at the first non-comment, non-blank line.
open SELF,$0;
while(<SELF>) {
        next if (/^#!/);
        last if (!s/^#/\/\// and !/^$/);
        print;
}
close SELF;

# Post-process $code: evaluate back-quoted expressions, encode the SHA
# intrinsics, and fix up NEON arrangement suffixes.
foreach(split("\n",$code)) {

	s/\`([^\`]*)\`/eval($1)/ge;

	s/\b(sha512\w+)\s+([qv].*)/unsha512($1,$2)/ge	or
	s/\b(sha256\w+)\s+([qv].*)/unsha256($1,$2)/ge;

	s/\bq([0-9]+)\b/v$1.16b/g;		# old->new registers

	s/\.[ui]?8(\s)/$1/;
	s/\.\w?64\b//		and s/\.16b/\.2d/g	or
	s/\.\w?32\b//		and s/\.16b/\.4s/g;
	m/\bext\b/		and s/\.2d/\.16b/g	or
	m/(ld|st)1[^\[]+\[0\]/	and s/\.4s/\.s/g;

	print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";