#! /usr/bin/env perl
# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# sha1_block procedure for ARMv4.
#
# January 2007.

# Size/performance trade-off
# ====================================================================
# impl		size in bytes	comp cycles[*]	measured performance
# ====================================================================
# thumb		304		3212		4420
# armv4-small	392/+29%	1958/+64%	2250/+96%
# armv4-compact	740/+89%	1552/+26%	1840/+22%
# armv4-large	1420/+92%	1307/+19%	1370/+34%[***]
# full unroll	~5100/+260%	~1260/+4%	~1300/+5%
# ====================================================================
# thumb		= same as 'small' but in Thumb instructions[**] and
#		  with recurring code in two private functions;
# small		= detached Xload/update, loops are folded;
# compact	= detached Xload/update, 5x unroll;
# large		= interleaved Xload/update, 5x unroll;
# full unroll	= interleaved Xload/update, full unroll, estimated[!];
#
# [*]	Manually counted instructions in the "grand" loop body. Measured
#	performance is affected by prologue and epilogue overhead,
#	i-cache availability, branch penalties, etc.
# [**]	While each Thumb instruction is half the size, Thumb
#	instructions are not as versatile as ARM ones: e.g., there are
#	only two 3-operand arithmetic instructions, no [fixed] rotate,
#	and addressing modes are limited. As a result it takes more
#	instructions to do the same job in Thumb, so the code is never
#	half the size and is always slower.
# [***]	This is also ~35% better than compiler-generated code. A dual-
#	issue Cortex-A8 core was measured to process an input block in
#	~990 cycles.

# August 2010.
#
# Rescheduling for the dual-issue pipeline resulted in a 13%
# improvement on the Cortex-A8 core, or in absolute terms ~870 cycles
# per input block [or 13.6 cycles per byte].

# February 2011.
#
# Profiler-assisted and platform-specific optimization resulted in a
# 10% improvement on the Cortex-A8 core, or 12.2 cycles per byte.

# September 2013.
#
# Add NEON implementation (see sha1-586.pl for background info). On
# Cortex-A8 it was measured to process one byte in 6.7 cycles, or >80%
# faster than the integer-only code. Because the [fully unrolled] NEON
# code is ~2.5x larger and some redundant instructions are executed
# when processing the last block, the improvement is smaller for the
# smallest blocks, only ~30%. Snapdragon S4 is a tad faster at 6.4
# cycles per byte, which is also >80% faster than the integer-only
# code. Cortex-A15 is even faster, spending 5.6 cycles per byte and
# outperforming the integer-only code by a factor of 2.

# May 2014.
#
# Add ARMv8 code path performing at 2.35 cycles per byte on Apple A7.

$flavour = shift;
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
    *STDOUT=*OUT;
} else {
    open OUT,">$output";
    *STDOUT=*OUT;
}

$ctx="r0";
$inp="r1";
$len="r2";
$a="r3";
$b="r4";
$c="r5";
$d="r6";
$e="r7";
$K="r8";
$t0="r9";
$t1="r10";
$t2="r11";
$t3="r12";
$Xi="r14";
@V=($a,$b,$c,$d,$e);

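# Xupdate computes one word of the SHA-1 message schedule,
#	X[i] = ROL(X[i-3]^X[i-8]^X[i-14]^X[i-16],1)
# in a 16-word circular buffer on the stack that grows downwards:
# [$Xi,#15*4] holds X[i-16], [$Xi,#13*4] X[i-14], [$Xi,#7*4] X[i-8],
# [$Xi,#2*4] X[i-3], and the pre-decrementing "str $t0,[$Xi,#-4]!"
# deposits the new word. ROL by 1 distributes over XOR, hence the two
# ror#31 operands. $opt1/$opt2 let the per-round callers splice their
# F_xx_xx logic in between the schedule update and the E accumulation.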
sub Xupdate {
my ($a,$b,$c,$d,$e,$opt1,$opt2)=@_;
$code.=<<___;
	ldr	$t0,[$Xi,#15*4]
	ldr	$t1,[$Xi,#13*4]
	ldr	$t2,[$Xi,#7*4]
	add	$e,$K,$e,ror#2			@ E+=K_xx_xx
	ldr	$t3,[$Xi,#2*4]
	eor	$t0,$t0,$t1
	eor	$t2,$t2,$t3			@ 1 cycle stall
	eor	$t1,$c,$d			@ F_xx_xx
	mov	$t0,$t0,ror#31
	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
	eor	$t0,$t0,$t2,ror#31
	str	$t0,[$Xi,#-4]!
	$opt1					@ F_xx_xx
	$opt2					@ F_xx_xx
	add	$e,$e,$t0			@ E+=X[i]
___
}

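# BODY_00_15 handles rounds 0-15, which consume input directly: on
# pre-ARMv7 cores the big-endian word is assembled byte by byte (ldrb)
# to cope with unaligned input, while ARMv7+ uses a word load plus rev
# on little-endian. Note also that working values are kept pre-rotated
# by two bits, so the per-round B=ROL(B,30) costs nothing: it is folded
# into later operands (the ror#2 shifts) by ARM's free barrel shifter
# and only undone once per block in the .L_done epilogue.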
sub BODY_00_15 {
my ($a,$b,$c,$d,$e)=@_;
$code.=<<___;
#if __ARM_ARCH__<7
	ldrb	$t1,[$inp,#2]
	ldrb	$t0,[$inp,#3]
	ldrb	$t2,[$inp,#1]
	add	$e,$K,$e,ror#2			@ E+=K_00_19
	ldrb	$t3,[$inp],#4
	orr	$t0,$t0,$t1,lsl#8
	eor	$t1,$c,$d			@ F_xx_xx
	orr	$t0,$t0,$t2,lsl#16
	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
	orr	$t0,$t0,$t3,lsl#24
#else
	ldr	$t0,[$inp],#4			@ handles unaligned
	add	$e,$K,$e,ror#2			@ E+=K_00_19
	eor	$t1,$c,$d			@ F_xx_xx
	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	$t0,$t0				@ byte swap
#endif
#endif
	and	$t1,$b,$t1,ror#2
	add	$e,$e,$t0			@ E+=X[i]
	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
	str	$t0,[$Xi,#-4]!
	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
___
}

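# The three round classes differ only in F, spliced into Xupdate via
# $opt1/$opt2:
#   F_00_19 (Ch)     = D^(B&(C^D)),      one AND spliced in;
#   F_20_39 (Parity) = B^C^D,            one EOR;
#   F_40_59 (Maj)    = (B&(C^D))+(C&D),  the two terms are bit-wise
#                      disjoint, so they can be added to E separately.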
sub BODY_16_19 {
my ($a,$b,$c,$d,$e)=@_;
	&Xupdate(@_,"and $t1,$b,$t1,ror#2");
$code.=<<___;
	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
___
}

sub BODY_20_39 {
my ($a,$b,$c,$d,$e)=@_;
	&Xupdate(@_,"eor $t1,$b,$t1,ror#2");
$code.=<<___;
	add	$e,$e,$t1			@ E+=F_20_39(B,C,D)
___
}

sub BODY_40_59 {
my ($a,$b,$c,$d,$e)=@_;
	&Xupdate(@_,"and $t1,$b,$t1,ror#2","and $t2,$c,$d");
$code.=<<___;
	add	$e,$e,$t1			@ E+=F_40_59(B,C,D)
	add	$e,$e,$t2,ror#2
___
}

$code=<<___;
#include <openssl/arm_arch.h>

.text
#if defined(__thumb2__)
.syntax	unified
.thumb
#else
.code	32
#endif

.global	sha1_block_data_order
.type	sha1_block_data_order,%function

.align	5
sha1_block_data_order:
#if __ARM_MAX_ARCH__>=7
.Lsha1_block:
	adr	r3,.Lsha1_block
	ldr	r12,.LOPENSSL_armcap
	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
#ifdef	__APPLE__
	ldr	r12,[r12]
#endif
	tst	r12,#ARMV8_SHA1
	bne	.LARMv8
	tst	r12,#ARMV7_NEON
	bne	.LNEON
#endif
	stmdb	sp!,{r4-r12,lr}
	add	$len,$inp,$len,lsl#6	@ $len to point at the end of $inp
	ldmia	$ctx,{$a,$b,$c,$d,$e}
.Lloop:
	ldr	$K,.LK_00_19
	mov	$Xi,sp
	sub	sp,sp,#15*4
	mov	$c,$c,ror#30
	mov	$d,$d,ror#30
	mov	$e,$e,ror#30		@ [6]
.L_00_15:
___
for($i=0;$i<5;$i++) {
	&BODY_00_15(@V);	unshift(@V,pop(@V));
}
$code.=<<___;
#if defined(__thumb2__)
	mov	$t3,sp
	teq	$Xi,$t3
#else
	teq	$Xi,sp
#endif
	bne	.L_00_15		@ [((11+4)*5+2)*3]
	sub	sp,sp,#25*4
___
	&BODY_00_15(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
$code.=<<___;

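@ Rounds 20-39 and 60-79 share the same F (parity) and hence the same
@ loop body below; the carry flag tells them apart. "cmn sp,#0" clears
@ carry (sp+0 cannot carry out), "cmp sp,#0" before the second entry
@ sets it (no borrow), and teq leaves carry untouched, so "bcs .L_done"
@ fires only after the 60-79 pass.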
	ldr	$K,.LK_20_39		@ [+15+16*4]
	cmn	sp,#0			@ [+3], clear carry to denote 20_39
.L_20_39_or_60_79:
___
for($i=0;$i<5;$i++) {
	&BODY_20_39(@V);	unshift(@V,pop(@V));
}
$code.=<<___;
#if defined(__thumb2__)
	mov	$t3,sp
	teq	$Xi,$t3
#else
	teq	$Xi,sp			@ preserve carry
#endif
	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes

	ldr	$K,.LK_40_59
	sub	sp,sp,#20*4		@ [+2]
.L_40_59:
___
for($i=0;$i<5;$i++) {
	&BODY_40_59(@V);	unshift(@V,pop(@V));
}
$code.=<<___;
#if defined(__thumb2__)
	mov	$t3,sp
	teq	$Xi,$t3
#else
	teq	$Xi,sp
#endif
	bne	.L_40_59		@ [+((12+5)*5+2)*4]

	ldr	$K,.LK_60_79
	sub	sp,sp,#20*4
	cmp	sp,#0			@ set carry to denote 60_79
	b	.L_20_39_or_60_79	@ [+4], spare 300 bytes
.L_done:
	add	sp,sp,#80*4		@ "deallocate" stack frame
	ldmia	$ctx,{$K,$t0,$t1,$t2,$t3}
	add	$a,$K,$a
	add	$b,$t0,$b
	add	$c,$t1,$c,ror#2
	add	$d,$t2,$d,ror#2
	add	$e,$t3,$e,ror#2
	stmia	$ctx,{$a,$b,$c,$d,$e}
	teq	$inp,$len
	bne	.Lloop			@ [+18], total 1307

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	sha1_block_data_order,.-sha1_block_data_order

.align	5
.LK_00_19:	.word	0x5a827999
.LK_20_39:	.word	0x6ed9eba1
.LK_40_59:	.word	0x8f1bbcdc
.LK_60_79:	.word	0xca62c1d6
#if __ARM_MAX_ARCH__>=7
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-.Lsha1_block
#endif
.asciz	"SHA1 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align	5
___
#####################################################################
# NEON stuff
#
{{{
my @V=($a,$b,$c,$d,$e);
my ($K_XX_XX,$Ki,$t0,$t1,$Xfer,$saved_sp)=map("r$_",(8..12,14));
my $Xi=4;
my @X=map("q$_",(8..11,0..3));
my @Tx=("q12","q13");
my ($K,$zero)=("q14","q15");
my $j=0;

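# AUTOLOAD catches every NEON mnemonic used below that has no Perl sub
# of its own: the sub name becomes the opcode (its first "_" turned
# into "."), the last argument gets a "#" prefix if it is numeric, and
# the result is appended to $code. E.g. &vadd_i32($a,$b,$c) emits
# "vadd.i32 a,b,c" and &vshr_u32($d,$s,30) emits "vshr.u32 d,s,#30".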
sub AUTOLOAD()          # thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
  my $arg = pop;
    $arg = "#$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
}

sub body_00_19 () {
	(
	'($a,$b,$c,$d,$e)=@V;'.		# '$code.="@ $j\n";'.
	'&bic	($t0,$d,$b)',
	'&add	($e,$e,$Ki)',		# e+=X[i]+K
	'&and	($t1,$c,$b)',
	'&ldr	($Ki,sprintf "[sp,#%d]",4*(($j+1)&15))',
	'&add	($e,$e,$a,"ror#27")',	# e+=ROR(A,27)
	'&eor	($t1,$t1,$t0)',		# F_00_19
	'&mov	($b,$b,"ror#2")',	# b=ROR(b,2)
	'&add	($e,$e,$t1);'.		# e+=F_00_19
	'$j++;	unshift(@V,pop(@V));'
	)
}
sub body_20_39 () {
	(
	'($a,$b,$c,$d,$e)=@V;'.		# '$code.="@ $j\n";'.
	'&eor	($t0,$b,$d)',
	'&add	($e,$e,$Ki)',		# e+=X[i]+K
	'&ldr	($Ki,sprintf "[sp,#%d]",4*(($j+1)&15)) if ($j<79)',
	'&eor	($t1,$t0,$c)',		# F_20_39
	'&add	($e,$e,$a,"ror#27")',	# e+=ROR(A,27)
	'&mov	($b,$b,"ror#2")',	# b=ROR(b,2)
	'&add	($e,$e,$t1);'.		# e+=F_20_39
	'$j++;	unshift(@V,pop(@V));'
	)
}
sub body_40_59 () {
	(
	'($a,$b,$c,$d,$e)=@V;'.		# '$code.="@ $j\n";'.
	'&add	($e,$e,$Ki)',		# e+=X[i]+K
	'&and	($t0,$c,$d)',
	'&ldr	($Ki,sprintf "[sp,#%d]",4*(($j+1)&15))',
	'&add	($e,$e,$a,"ror#27")',	# e+=ROR(A,27)
	'&eor	($t1,$c,$d)',
	'&add	($e,$e,$t0)',
	'&and	($t1,$t1,$b)',
	'&mov	($b,$b,"ror#2")',	# b=ROR(b,2)
	'&add	($e,$e,$t1);'.		# e+=F_40_59
	'$j++;	unshift(@V,pop(@V));'
	)
}

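# The NEON schedule helpers compute four X[] words per call and store
# X[]+K to the stack for the scalar ALU, while the @insns stream (four
# unrolled scalar round bodies) is interleaved instruction by
# instruction between the vector ops to keep both pipelines busy.
# Xupdate_16_31 still uses the ROL1 recurrence; since X[i-3] of the
# last lane falls inside the very quad being computed, that lane is
# first computed with $zero substituted and then patched up (the
# '"X[0]"^=("X[0]">>96)<<<2' step below).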
sub Xupdate_16_31 ()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);
  my ($a,$b,$c,$d,$e);

	&vext_8		(@X[0],@X[-4&7],@X[-3&7],8);	# compose "X[-14]" in "X[0]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &vadd_i32	(@Tx[1],@X[-1&7],$K);
	 eval(shift(@insns));
	  &vld1_32	("{$K\[]}","[$K_XX_XX,:32]!")	if ($Xi%5==0);
	 eval(shift(@insns));
	&vext_8		(@Tx[0],@X[-1&7],$zero,4);	# "X[-3]", 3 words
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&veor		(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	&veor		(@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	&veor		(@Tx[0],@Tx[0],@X[0]);		# "X[0]"^="X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &vst1_32	("{@Tx[1]}","[$Xfer,:128]!");	# X[]+K xfer
	  &sub		($Xfer,$Xfer,64)		if ($Xi%4==0);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vext_8		(@Tx[1],$zero,@Tx[0],4);	# "X[0]"<<96, extract one dword
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vadd_i32	(@X[0],@Tx[0],@Tx[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vsri_32	(@X[0],@Tx[0],31);		# "X[0]"<<<=1
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vshr_u32	(@Tx[0],@Tx[1],30);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vshl_u32	(@Tx[1],@Tx[1],2);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&veor		(@X[0],@X[0],@Tx[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&veor		(@X[0],@X[0],@Tx[1]);		# "X[0]"^=("X[0]">>96)<<<2

	foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

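# Xupdate_32_79 relies on the equivalent recurrence
#	X[i] = ROL(X[i-6]^X[i-16]^X[i-28]^X[i-32],2)
# (the ROL1 rule applied to itself, as in sha1-586.pl), whose smallest
# offset, 6, stays outside the four lanes being computed, so a full
# quad can be produced without a fix-up pass.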
sub Xupdate_32_79 ()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);
  my ($a,$b,$c,$d,$e);

	&vext_8		(@Tx[0],@X[-2&7],@X[-1&7],8);	# compose "X[-6]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&veor		(@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	&veor		(@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &vadd_i32	(@Tx[1],@X[-1&7],$K);
	 eval(shift(@insns));
	  &vld1_32	("{$K\[]}","[$K_XX_XX,:32]!")	if ($Xi%5==0);
	 eval(shift(@insns));
	&veor		(@Tx[0],@Tx[0],@X[0]);		# "X[-6]"^="X[0]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vshr_u32	(@X[0],@Tx[0],30);
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &vst1_32	("{@Tx[1]}","[$Xfer,:128]!");	# X[]+K xfer
	  &sub		($Xfer,$Xfer,64)		if ($Xi%4==0);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vsli_32	(@X[0],@Tx[0],2);		# "X[0]"="X[-6]"<<<2

	foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

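# Xuplast_80 schedules the last X[]+K transfer and, instead of running
# dry, already begins the next iteration: it rewinds $K_XX_XX, adjusts
# the input pointer (re-reading the last block rather than reading past
# the end of the input), and preloads and byte-swaps the next 64 bytes
# while the scalar rounds of the current block are still draining.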
sub Xuplast_80 ()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);
  my ($a,$b,$c,$d,$e);

	&vadd_i32	(@Tx[1],@X[-1&7],$K);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vst1_32	("{@Tx[1]}","[$Xfer,:128]!");
	&sub		($Xfer,$Xfer,64);

	&teq		($inp,$len);
	&sub		($K_XX_XX,$K_XX_XX,16);	# rewind $K_XX_XX
	&it		("eq");
	&subeq		($inp,$inp,64);		# reload last block to avoid SEGV
	&vld1_8		("{@X[-4&7]-@X[-3&7]}","[$inp]!");
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vld1_8		("{@X[-2&7]-@X[-1&7]}","[$inp]!");
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vld1_32	("{$K\[]}","[$K_XX_XX,:32]!");	# load K_00_19
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vrev32_8	(@X[-4&7],@X[-4&7]);

	foreach (@insns) { eval; }		# remaining instructions

   $Xi=0;
}

sub Xloop()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);
  my ($a,$b,$c,$d,$e);

	&vrev32_8	(@X[($Xi-3)&7],@X[($Xi-3)&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vadd_i32	(@X[$Xi&7],@X[($Xi-4)&7],$K);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vst1_32	("{@X[$Xi&7]}","[$Xfer,:128]!");# X[]+K xfer to IALU

	foreach (@insns) { eval; }

  $Xi++;
}

$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

.type	sha1_block_data_order_neon,%function
.align	4
sha1_block_data_order_neon:
.LNEON:
	stmdb	sp!,{r4-r12,lr}
	add	$len,$inp,$len,lsl#6	@ $len to point at the end of $inp
	@ dmb				@ errata #451034 on early Cortex A8
	@ vstmdb	sp!,{d8-d15}	@ ABI specification says so
	mov	$saved_sp,sp
	sub	$Xfer,sp,#64
	adr	$K_XX_XX,.LK_00_19
	bic	$Xfer,$Xfer,#15		@ align for 128-bit stores

	ldmia	$ctx,{$a,$b,$c,$d,$e}	@ load context
	mov	sp,$Xfer		@ alloca

	vld1.8		{@X[-4&7]-@X[-3&7]},[$inp]!	@ handles unaligned
	veor		$zero,$zero,$zero
	vld1.8		{@X[-2&7]-@X[-1&7]},[$inp]!
	vld1.32		{${K}\[]},[$K_XX_XX,:32]!	@ load K_00_19
	vrev32.8	@X[-4&7],@X[-4&7]		@ yes, even on
	vrev32.8	@X[-3&7],@X[-3&7]		@ big-endian...
	vrev32.8	@X[-2&7],@X[-2&7]
	vadd.i32	@X[0],@X[-4&7],$K
	vrev32.8	@X[-1&7],@X[-1&7]
	vadd.i32	@X[1],@X[-3&7],$K
	vst1.32		{@X[0]},[$Xfer,:128]!
	vadd.i32	@X[2],@X[-2&7],$K
	vst1.32		{@X[1]},[$Xfer,:128]!
	vst1.32		{@X[2]},[$Xfer,:128]!
	ldr		$Ki,[sp]			@ big RAW stall

.Loop_neon:
___
	&Xupdate_16_31(\&body_00_19);
	&Xupdate_16_31(\&body_00_19);
	&Xupdate_16_31(\&body_00_19);
	&Xupdate_16_31(\&body_00_19);
	&Xupdate_32_79(\&body_00_19);
	&Xupdate_32_79(\&body_20_39);
	&Xupdate_32_79(\&body_20_39);
	&Xupdate_32_79(\&body_20_39);
	&Xupdate_32_79(\&body_20_39);
	&Xupdate_32_79(\&body_20_39);
	&Xupdate_32_79(\&body_40_59);
	&Xupdate_32_79(\&body_40_59);
	&Xupdate_32_79(\&body_40_59);
	&Xupdate_32_79(\&body_40_59);
	&Xupdate_32_79(\&body_40_59);
	&Xupdate_32_79(\&body_20_39);
	&Xuplast_80(\&body_20_39);
	&Xloop(\&body_20_39);
	&Xloop(\&body_20_39);
	&Xloop(\&body_20_39);
$code.=<<___;
	ldmia	$ctx,{$Ki,$t0,$t1,$Xfer}	@ accumulate context
	add	$a,$a,$Ki
	ldr	$Ki,[$ctx,#16]
	add	$b,$b,$t0
	add	$c,$c,$t1
	add	$d,$d,$Xfer
	it	eq
	moveq	sp,$saved_sp
	add	$e,$e,$Ki
	it	ne
	ldrne	$Ki,[sp]
	stmia	$ctx,{$a,$b,$c,$d,$e}
	itt	ne
	addne	$Xfer,sp,#3*16
	bne	.Loop_neon

	@ vldmia	sp!,{d8-d15}
	ldmia	sp!,{r4-r12,pc}
.size	sha1_block_data_order_neon,.-sha1_block_data_order_neon
#endif
___
}}}
#####################################################################
# ARMv8 stuff
#
{{{
my ($ABCD,$E,$E0,$E1)=map("q$_",(0..3));
my @MSG=map("q$_",(4..7));
my @Kxx=map("q$_",(8..11));
my ($W0,$W1,$ABCD_SAVE)=map("q$_",(12..14));

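# The ARMv8 path uses the SHA-1 crypto extensions: sha1c/sha1p/sha1m
# each execute four rounds with the Ch/Parity/Maj function on the
# packed state in $ABCD, sha1h derives the next-round E from the
# current A (a fixed rotate), and sha1su0/sha1su1 update four
# message-schedule words at a time. The ("c","p","m","p") table below
# picks the round flavour for each group of five four-round steps.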
$code.=<<___;
#if __ARM_MAX_ARCH__>=7

# if defined(__thumb2__)
#  define INST(a,b,c,d)	.byte	c,d|0xf,a,b
# else
#  define INST(a,b,c,d)	.byte	a,b,c,d|0x10
# endif

.type	sha1_block_data_order_armv8,%function
.align	5
sha1_block_data_order_armv8:
.LARMv8:
	vstmdb	sp!,{d8-d15}		@ ABI specification says so

	veor	$E,$E,$E
	adr	r3,.LK_00_19
	vld1.32	{$ABCD},[$ctx]!
	vld1.32	{$E\[0]},[$ctx]
	sub	$ctx,$ctx,#16
	vld1.32	{@Kxx[0]\[]},[r3,:32]!
	vld1.32	{@Kxx[1]\[]},[r3,:32]!
	vld1.32	{@Kxx[2]\[]},[r3,:32]!
	vld1.32	{@Kxx[3]\[]},[r3,:32]

.Loop_v8:
	vld1.8		{@MSG[0]-@MSG[1]},[$inp]!
	vld1.8		{@MSG[2]-@MSG[3]},[$inp]!
	vrev32.8	@MSG[0],@MSG[0]
	vrev32.8	@MSG[1],@MSG[1]

	vadd.i32	$W0,@Kxx[0],@MSG[0]
	vrev32.8	@MSG[2],@MSG[2]
	vmov		$ABCD_SAVE,$ABCD	@ offload
	subs		$len,$len,#1

	vadd.i32	$W1,@Kxx[0],@MSG[1]
	vrev32.8	@MSG[3],@MSG[3]
	sha1h		$E1,$ABCD		@ 0
	sha1c		$ABCD,$E,$W0
	vadd.i32	$W0,@Kxx[$j],@MSG[2]
	sha1su0		@MSG[0],@MSG[1],@MSG[2]
___
for ($j=0,$i=1;$i<20-3;$i++) {
my $f=("c","p","m","p")[$i/5];
$code.=<<___;
	sha1h		$E0,$ABCD		@ $i
	sha1$f		$ABCD,$E1,$W1
	vadd.i32	$W1,@Kxx[$j],@MSG[3]
	sha1su1		@MSG[0],@MSG[3]
___
$code.=<<___ if ($i<20-4);
	sha1su0		@MSG[1],@MSG[2],@MSG[3]
___
	($E0,$E1)=($E1,$E0);	($W0,$W1)=($W1,$W0);
	push(@MSG,shift(@MSG));	$j++ if ((($i+3)%5)==0);
}
$code.=<<___;
	sha1h		$E0,$ABCD		@ $i
	sha1p		$ABCD,$E1,$W1
	vadd.i32	$W1,@Kxx[$j],@MSG[3]

	sha1h		$E1,$ABCD		@ 18
	sha1p		$ABCD,$E0,$W0

	sha1h		$E0,$ABCD		@ 19
	sha1p		$ABCD,$E1,$W1

	vadd.i32	$E,$E,$E0
	vadd.i32	$ABCD,$ABCD,$ABCD_SAVE
	bne		.Loop_v8

	vst1.32		{$ABCD},[$ctx]!
	vst1.32		{$E\[0]},[$ctx]

	vldmia	sp!,{d8-d15}
	ret					@ bx lr
.size	sha1_block_data_order_armv8,.-sha1_block_data_order_armv8
#endif
___
}}}
$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.comm	OPENSSL_armcap_P,4,4
.hidden	OPENSSL_armcap_P
#endif
___

{   my  %opcode = (
	"sha1c"		=> 0xf2000c40,	"sha1p"		=> 0xf2100c40,
	"sha1m"		=> 0xf2200c40,	"sha1su0"	=> 0xf2300c40,
	"sha1h"		=> 0xf3b902c0,	"sha1su1"	=> 0xf3ba0380	);

    sub unsha1 {
	my ($mnemonic,$arg)=@_;

	if ($arg =~ m/q([0-9]+)(?:,\s*q([0-9]+))?,\s*q([0-9]+)/o) {
	    my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
					 |(($2&7)<<17)|(($2&8)<<4)
					 |(($3&7)<<1) |(($3&8)<<2);
	    # ARMv7 instructions are always encoded little-endian.
	    # The correct solution would be to use the .inst directive,
	    # but older assemblers don't implement it:-(

	    # this fix-up provides Thumb encoding in conjunction with INST
	    $word &= ~0x10000000 if (($word & 0x0f000000) == 0x02000000);
	    sprintf "INST(0x%02x,0x%02x,0x%02x,0x%02x)\t@ %s %s",
			$word&0xff,($word>>8)&0xff,
			($word>>16)&0xff,($word>>24)&0xff,
			$mnemonic,$arg;
	}
    }
}

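# Final pass over the generated code: scalar {qN[]}/{qN[0]} operands
# are rewritten in terms of d registers, sha1* mnemonics are emitted as
# raw bytes through INST for the benefit of assemblers that predate the
# crypto extensions, and ret/bx lr are degraded as needed so the file
# still assembles with -march=armv4.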
foreach (split($/,$code)) {
	s/{q([0-9]+)\[\]}/sprintf "{d%d[],d%d[]}",2*$1,2*$1+1/eo	or
	s/{q([0-9]+)\[0\]}/sprintf "{d%d[0]}",2*$1/eo;

	s/\b(sha1\w+)\s+(q.*)/unsha1($1,$2)/geo;

	s/\bret\b/bx	lr/o		or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/o;	# make it possible to compile with -march=armv4

	print $_,$/;
}

close STDOUT or die "error closing STDOUT: $!"; # enforce flush