#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# sha1_block procedure for ARMv4.
#
# January 2007.

# Size/performance trade-off
# ====================================================================
# impl		size in bytes	comp cycles[*]	measured performance
# ====================================================================
# thumb		304		3212		4420
# armv4-small	392/+29%	1958/+64%	2250/+96%
# armv4-compact	740/+89%	1552/+26%	1840/+22%
# armv4-large	1420/+92%	1307/+19%	1370/+34%[***]
# full unroll	~5100/+260%	~1260/+4%	~1300/+5%
# ====================================================================
# (size/cycle percentages are relative to the previous row)
# thumb		= same as 'small' but in Thumb instructions[**] and
#		  with recurring code in two private functions;
# small		= detached Xload/update, loops are folded;
# compact	= detached Xload/update, 5x unroll;
# large		= interleaved Xload/update, 5x unroll;
# full unroll	= interleaved Xload/update, full unroll, estimated[!];
#
# [*]	Manually counted instructions in "grand" loop body. Measured
#	performance is affected by prologue and epilogue overhead,
#	i-cache availability, branch penalties, etc.
# [**]	While each Thumb instruction is half the size, Thumb
#	instructions are not as diverse as ARM ones: e.g., there are
#	only two arithmetic instructions with 3 arguments, no [fixed]
#	rotate, and addressing modes are limited. As a result it takes
#	more instructions to do the same job in Thumb, so the code is
#	never half the size and is always slower.
# [***]	which is also ~35% better than compiler-generated code. A dual-
#	issue Cortex A8 core was measured to process an input block in
#	~990 cycles.

# August 2010.
#
# Rescheduling for dual-issue pipeline resulted in 13% improvement on
# Cortex A8 core and in absolute terms ~870 cycles per input block
# [or 13.6 cycles per byte].

# February 2011.
#
# Profiler-assisted and platform-specific optimization resulted in 10%
# improvement on Cortex A8 core and 12.2 cycles per byte.

# Pick the output file: skip any leading flags and take the first
# argument that looks like a file name.
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output" or die "can't open $output: $!";

$ctx="r0";	# context (five-word A..E state), 1st argument
$inp="r1";	# input data, 2nd argument
$len="r2";	# number of 64-byte blocks, 3rd argument
$a="r3";
$b="r4";
$c="r5";
$d="r6";
$e="r7";
$K="r8";	# round constant
$t0="r9";
$t1="r10";
$t2="r11";
$t3="r12";
$Xi="r14";	# X[i] pointer; lr is free once saved in the prologue
@V=($a,$b,$c,$d,$e);	# working variables, renamed after every round

sub Xupdate {	# X[i] update and shared round skeleton, rounds 16..79
my ($a,$b,$c,$d,$e,$opt1,$opt2)=@_;
$code.=<<___;
	ldr	$t0,[$Xi,#15*4]
	ldr	$t1,[$Xi,#13*4]
	ldr	$t2,[$Xi,#7*4]
	add	$e,$K,$e,ror#2			@ E+=K_xx_xx
	ldr	$t3,[$Xi,#2*4]
	eor	$t0,$t0,$t1
	eor	$t2,$t2,$t3			@ 1 cycle stall
	eor	$t1,$c,$d			@ F_xx_xx
	mov	$t0,$t0,ror#31			@ ror#31 is rol#1
	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
	eor	$t0,$t0,$t2,ror#31
	str	$t0,[$Xi,#-4]!
	$opt1					@ F_xx_xx
	$opt2					@ F_xx_xx
	add	$e,$e,$t0			@ E+=X[i]
___
}
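
# For reference, a plain-Perl sketch of the recurrence Xupdate implements:
# X[i] = ROL(X[i-3]^X[i-8]^X[i-14]^X[i-16],1), kept in a 16-word circular
# buffer on the stack (hence the $Xi-relative loads above; the two ror#31
# operations supply the ROL by 1). Illustrative only, not called by the
# generator:
sub Xupdate_ref {
my ($X,$i)=@_;		# $X = reference to the 16-word buffer
my $x = $X->[($i+13)&15] ^ $X->[($i+8)&15] ^ $X->[($i+2)&15] ^ $X->[$i&15];
return $X->[$i&15] = (($x<<1)|($x>>31)) & 0xffffffff;
}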

sub BODY_00_15 {
my ($a,$b,$c,$d,$e)=@_;
$code.=<<___;
#if __ARM_ARCH__<7
	ldrb	$t1,[$inp,#2]			@ load input byte by byte,
	ldrb	$t0,[$inp,#3]			@ assembling a big-endian word
	ldrb	$t2,[$inp,#1]
	add	$e,$K,$e,ror#2			@ E+=K_00_19
	ldrb	$t3,[$inp],#4
	orr	$t0,$t0,$t1,lsl#8
	eor	$t1,$c,$d			@ F_xx_xx
	orr	$t0,$t0,$t2,lsl#16
	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
	orr	$t0,$t0,$t3,lsl#24
#else
	ldr	$t0,[$inp],#4			@ handles unaligned
	add	$e,$K,$e,ror#2			@ E+=K_00_19
	eor	$t1,$c,$d			@ F_xx_xx
	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	$t0,$t0				@ byte swap
#endif
#endif
	and	$t1,$b,$t1,ror#2
	add	$e,$e,$t0			@ E+=X[i]
	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
	str	$t0,[$Xi,#-4]!
	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
___
}
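
# The and/eor pair above computes F_00_19 via the standard identity
# Ch(B,C,D) = (B&C)|(~B&D) = D^(B&(C^D)), saving an instruction and a
# register; the ror#2 operands account for the working variables being
# kept pre-rotated. Illustrative Perl equivalent, not called by the
# generator:
sub F_00_19_ref {
my ($b,$c,$d)=@_;
return $d ^ ($b & ($c ^ $d));
}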

sub BODY_16_19 {
my ($a,$b,$c,$d,$e)=@_;
	&Xupdate(@_,"and $t1,$b,$t1,ror#2");
$code.=<<___;
	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
___
}

sub BODY_20_39 {
my ($a,$b,$c,$d,$e)=@_;
	&Xupdate(@_,"eor $t1,$b,$t1,ror#2");
$code.=<<___;
	add	$e,$e,$t1			@ E+=F_20_39(B,C,D)
___
}
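
# F_20_39 is plain parity, so a single eor on top of the $t1=C^D already
# computed inside Xupdate yields B^C^D. Illustrative Perl equivalent,
# not called by the generator:
sub F_20_39_ref {
my ($b,$c,$d)=@_;
return $b ^ $c ^ $d;
}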

sub BODY_40_59 {
my ($a,$b,$c,$d,$e)=@_;
	&Xupdate(@_,"and $t1,$b,$t1,ror#2","and $t2,$c,$d");
$code.=<<___;
	add	$e,$e,$t1			@ E+=F_40_59(B,C,D)
	add	$e,$e,$t2,ror#2
___
}
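
# F_40_59 relies on Maj(B,C,D) = (B&C)|(B&D)|(C&D) = (B&(C^D)) + (C&D):
# the two terms are bitwise disjoint, so E can accumulate them with two
# separate adds, as above. Illustrative Perl equivalent, not called by
# the generator:
sub F_40_59_ref {
my ($b,$c,$d)=@_;
return ($b & ($c ^ $d)) + ($c & $d);
}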

$code=<<___;
#include "arm_arch.h"

.text

.global	sha1_block_data_order
.type	sha1_block_data_order,%function

.align	2
sha1_block_data_order:
	stmdb	sp!,{r4-r12,lr}
	add	$len,$inp,$len,lsl#6	@ $len to point at the end of $inp
	ldmia	$ctx,{$a,$b,$c,$d,$e}
.Lloop:
	ldr	$K,.LK_00_19
	mov	$Xi,sp
	sub	sp,sp,#15*4
	mov	$c,$c,ror#30
	mov	$d,$d,ror#30
	mov	$e,$e,ror#30		@ [6], pre-rotate C,D,E so that the
					@ per-round ROL(B,30) is folded into
					@ the ror#2 operands of later rounds
.L_00_15:
___
for($i=0;$i<5;$i++) {
	&BODY_00_15(@V);	unshift(@V,pop(@V));
}
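
# Note the renaming idiom used by all four round loops: unshift(@V,pop(@V))
# turns ($a,$b,$c,$d,$e) into ($e,$a,$b,$c,$d), so each BODY_* call is
# emitted with the registers in shifted roles and no data ever moves
# between registers. For example:
#	@V = qw(r3 r4 r5 r6 r7);
#	unshift(@V,pop(@V));	# @V is now qw(r7 r3 r4 r5 r6)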
$code.=<<___;
	teq	$Xi,sp
	bne	.L_00_15		@ [((11+4)*5+2)*3]
	sub	sp,sp,#5*4
___
	&BODY_00_15(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
$code.=<<___;

	ldr	$K,.LK_20_39		@ [+15+16*4]
	sub	sp,sp,#20*4
	cmn	sp,#0			@ [+3], clear carry to denote 20_39
@ rounds 20-39 and 60-79 share the loop below; the carry flag, clear
@ for 20_39 and set for 60_79, tells the exit branch which pass this is
.L_20_39_or_60_79:
___
for($i=0;$i<5;$i++) {
	&BODY_20_39(@V);	unshift(@V,pop(@V));
}
$code.=<<___;
	teq	$Xi,sp			@ preserve carry
	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes

	ldr	$K,.LK_40_59
	sub	sp,sp,#20*4		@ [+2]
.L_40_59:
___
for($i=0;$i<5;$i++) {
	&BODY_40_59(@V);	unshift(@V,pop(@V));
}
$code.=<<___;
	teq	$Xi,sp
	bne	.L_40_59		@ [+((12+5)*5+2)*4]

	ldr	$K,.LK_60_79
	sub	sp,sp,#20*4
	cmp	sp,#0			@ set carry to denote 60_79
	b	.L_20_39_or_60_79	@ [+4], spare 300 bytes
.L_done:
	add	sp,sp,#80*4		@ "deallocate" stack frame
	ldmia	$ctx,{$K,$t0,$t1,$t2,$t3}
	add	$a,$K,$a
	add	$b,$t0,$b
	add	$c,$t1,$c,ror#2		@ ror#2 undoes the pre-rotation
	add	$d,$t2,$d,ror#2		@ of C,D,E from the .Lloop prologue
	add	$e,$t3,$e,ror#2
	stmia	$ctx,{$a,$b,$c,$d,$e}
	teq	$inp,$len
	bne	.Lloop			@ [+18], total 1307

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.align	2
.LK_00_19:	.word	0x5a827999
.LK_20_39:	.word	0x6ed9eba1
.LK_40_59:	.word	0x8f1bbcdc
.LK_60_79:	.word	0xca62c1d6
.size	sha1_block_data_order,.-sha1_block_data_order
.asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___

# 0xe12fff1e is the instruction encoding of "bx lr"; emitting it as a
# literal word makes it possible to compile with -march=armv4, where the
# assembler rejects the "bx" mnemonic.
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;
print $code;
close STDOUT; # enforce flush