#! /usr/bin/env perl
# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# January 2007.

# Montgomery multiplication for ARMv4.
#
# Performance improvement naturally varies among CPU implementations
# and compilers. The code was observed to provide +65-35% improvement
# [depending on key length, less for longer keys] on ARM920T, and
# +115-80% on Intel IXP425. This is compared to the pre-bn_mul_mont code
# base and compiler-generated code with in-lined umull and even umlal
# instructions. The latter means that this code didn't really have an
# "advantage" of utilizing some "secret" instruction.
#
# The code is interoperable with Thumb ISA and is rather compact, less
# than 1/2KB. A Windows CE port would be trivial, as it's exclusively
# about decorations; ABI and instruction syntax are identical.
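
# For reference, bn_mul_mont(rp,ap,bp,np,n0,num) computes
#
#	rp[] = ap[]*bp[]*R^-1 mod np[],		R = 2^(32*num),
#
# one word of bp[] at a time. A minimal sketch of the word-by-word
# recurrence implemented below (illustrative only, assuming
# n0 = -np[0]^-1 mod 2^32 as prepared by the caller):
#
#	tp = 0;
#	for (i=0; i<num; i++) {
#		m  = (tp[0] + ap[0]*bp[i]) * n0 mod 2^32;
#		tp = (tp + ap[]*bp[i] + np[]*m) >> 32;	# low word is 0
#	}
#	if (tp >= np) tp -= np;
#	rp[] = tp;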

# November 2013
#
# Add NEON code path, which handles lengths divisible by 8. RSA/DSA
# performance improvement on Cortex-A8 is ~45-100% depending on key
# length, more for longer keys. On Cortex-A15 the span is ~10-105%.
# On Snapdragon S4 the improvement was measured to vary from ~70% to
# an incredible ~380%, yes, 4.8x faster, for RSA4096 sign. But this is
# rather because the original integer-only code seems to perform
# suboptimally on S4. The situation on Cortex-A9 is unfortunately
# different. It's being looked into, but the trouble is that
# performance for vectors longer than 256 bits is actually a couple
# of percent worse than for integer-only code. The code is chosen
# for execution on all NEON-capable processors, because the gain on
# others outweighs the marginal loss on Cortex-A9.

# September 2015
#
# Align Cortex-A9 performance with the November 2013 improvements, i.e.
# NEON code is now ~20-105% faster than the integer-only one on this
# processor. But this optimization further improved performance even
# on other processors: the NEON code path is ~45-180% faster than the
# original integer-only code on Cortex-A8, ~10-210% on Cortex-A15, and
# ~70-450% on Snapdragon S4.

$flavour = shift;
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}

$num="r0";	# starts as num argument, but holds &tp[num-1]
$ap="r1";
$bp="r2"; $bi="r2"; $rp="r2";
$np="r3";
$tp="r4";
$aj="r5";
$nj="r6";
$tj="r7";
$n0="r8";
###########	# r9 is reserved by ELF as platform specific, e.g. TLS pointer
$alo="r10";	# sl, gcc uses it to keep @GOT
$ahi="r11";	# fp
$nlo="r12";	# ip
###########	# r13 is stack pointer
$nhi="r14";	# lr
###########	# r15 is program counter

#### argument block layout relative to &tp[num-1], a.k.a. $num
$_rp="$num,#12*4";
# ap permanently resides in r1
$_bp="$num,#13*4";
# np permanently resides in r3
$_n0="$num,#14*4";
$_num="$num,#15*4";	$_bpend=$_num;
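
# A sketch of the stack frame as laid out by the integer-only path
# below, relative to $num==&tp[num-1] (offsets in words; illustrative):
#
#	+15		num			(caller's 6th argument)
#	+14		&n0, later n0 value	(caller's 5th argument)
#	+13		bp			(saved r2)
#	+12		rp			(saved r0)
#	+2..+11		r4-r12,lr		(10 saved registers)
#	+1		tp[num]			(top carry word)
#	-(num-1)..0	tp[0..num-1]		(temporary result, sp==&tp[0])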

$code=<<___;
#include <openssl/arm_arch.h>

@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
.arch  armv7-a

.text
#if defined(__thumb2__)
.syntax	unified
.thumb
#else
.code	32
#endif

#if __ARM_MAX_ARCH__>=7
.align	5
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-.Lbn_mul_mont
#endif

.global	bn_mul_mont
.type	bn_mul_mont,%function

.align	5
bn_mul_mont:
.Lbn_mul_mont:
	ldr	ip,[sp,#4]		@ load num
	stmdb	sp!,{r0,r2}		@ sp points at argument block
#if __ARM_MAX_ARCH__>=7
	tst	ip,#7
	bne	.Lialu
	adr	r0,.Lbn_mul_mont
	ldr	r2,.LOPENSSL_armcap
	ldr	r0,[r0,r2]
#ifdef	__APPLE__
	ldr	r0,[r0]
#endif
	tst	r0,#ARMV7_NEON		@ NEON available?
	ldmia	sp, {r0,r2}
	beq	.Lialu
	add	sp,sp,#8
	b	bn_mul8x_mont_neon
.align	4
.Lialu:
#endif
	cmp	ip,#2
	mov	$num,ip			@ load num
#ifdef	__thumb2__
	ittt	lt
#endif
	movlt	r0,#0
	addlt	sp,sp,#2*4
	blt	.Labrt

	stmdb	sp!,{r4-r12,lr}		@ save 10 registers

	mov	$num,$num,lsl#2		@ rescale $num for byte count
	sub	sp,sp,$num		@ alloca(4*num)
	sub	sp,sp,#4		@ +extra dword
	sub	$num,$num,#4		@ "num=num-1"
	add	$tp,$bp,$num		@ &bp[num-1]

	add	$num,sp,$num		@ $num to point at &tp[num-1]
	ldr	$n0,[$_n0]		@ &n0
	ldr	$bi,[$bp]		@ bp[0]
	ldr	$aj,[$ap],#4		@ ap[0],ap++
	ldr	$nj,[$np],#4		@ np[0],np++
	ldr	$n0,[$n0]		@ *n0
	str	$tp,[$_bpend]		@ save &bp[num]

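@ First outer iteration: tp[] is set to ap[]*bp[0] plus the multiple of
@ np[] that makes the least significant word of the sum come out zero,
@ i.e. np[]*((ap[0]*bp[0] mod 2^32)*n0 mod 2^32); the zero word itself
@ is never stored, only its carry propagates into tp[0].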
	umull	$alo,$ahi,$aj,$bi	@ ap[0]*bp[0]
	str	$n0,[$_n0]		@ save n0 value
	mul	$n0,$alo,$n0		@ "tp[0]"*n0
	mov	$nlo,#0
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"t[0]"
	mov	$tp,sp

.L1st:
	ldr	$aj,[$ap],#4		@ ap[j],ap++
	mov	$alo,$ahi
	ldr	$nj,[$np],#4		@ np[j],np++
	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[0]
	mov	$nhi,#0
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	adds	$nlo,$nlo,$alo
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
	adc	$nlo,$nhi,#0
	cmp	$tp,$num
	bne	.L1st

	adds	$nlo,$nlo,$ahi
	ldr	$tp,[$_bp]		@ restore bp
	mov	$nhi,#0
	ldr	$n0,[$_n0]		@ restore n0
	adc	$nhi,$nhi,#0
	str	$nlo,[$num]		@ tp[num-1]=
	mov	$tj,sp
	str	$nhi,[$num,#4]		@ tp[num]=

.Louter:
	sub	$tj,$num,$tj		@ "original" $num-1 value
	sub	$ap,$ap,$tj		@ "rewind" ap to &ap[1]
	ldr	$bi,[$tp,#4]!		@ *(++bp)
	sub	$np,$np,$tj		@ "rewind" np to &np[1]
	ldr	$aj,[$ap,#-4]		@ ap[0]
	ldr	$alo,[sp]		@ tp[0]
	ldr	$nj,[$np,#-4]		@ np[0]
	ldr	$tj,[sp,#4]		@ tp[1]

	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[0]*bp[i]+tp[0]
	str	$tp,[$_bp]		@ save bp
	mul	$n0,$alo,$n0
	mov	$nlo,#0
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"tp[0]"
	mov	$tp,sp

.Linner:
	ldr	$aj,[$ap],#4		@ ap[j],ap++
	adds	$alo,$ahi,$tj		@ +=tp[j]
	ldr	$nj,[$np],#4		@ np[j],np++
	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[i]
	mov	$nhi,#0
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	adc	$ahi,$ahi,#0
	ldr	$tj,[$tp,#8]		@ tp[j+1]
	adds	$nlo,$nlo,$alo
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
	adc	$nlo,$nhi,#0
	cmp	$tp,$num
	bne	.Linner

	adds	$nlo,$nlo,$ahi
	mov	$nhi,#0
	ldr	$tp,[$_bp]		@ restore bp
	adc	$nhi,$nhi,#0
	ldr	$n0,[$_n0]		@ restore n0
	adds	$nlo,$nlo,$tj
	ldr	$tj,[$_bpend]		@ restore &bp[num]
	adc	$nhi,$nhi,#0
	str	$nlo,[$num]		@ tp[num-1]=
	str	$nhi,[$num,#4]		@ tp[num]=

	cmp	$tp,$tj
#ifdef	__thumb2__
	itt	ne
#endif
	movne	$tj,sp
	bne	.Louter

	ldr	$rp,[$_rp]		@ pull rp
	mov	$aj,sp
	add	$num,$num,#4		@ $num to point at &tp[num]
	sub	$aj,$num,$aj		@ "original" num value
	mov	$tp,sp			@ "rewind" $tp
	mov	$ap,$tp			@ "borrow" $ap
	sub	$np,$np,$aj		@ "rewind" $np to &np[0]

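@ At this point tp[] holds the reduced product, which is known to be
@ less than 2*np[]. Subtract np[] and, depending on the resulting
@ borrow, copy either the difference or tp[] itself to rp[], zapping
@ tp[] on the way out.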
	subs	$tj,$tj,$tj		@ "clear" carry flag
.Lsub:	ldr	$tj,[$tp],#4
	ldr	$nj,[$np],#4
	sbcs	$tj,$tj,$nj		@ tp[j]-np[j]
	str	$tj,[$rp],#4		@ rp[j]=
	teq	$tp,$num		@ preserve carry
	bne	.Lsub
	sbcs	$nhi,$nhi,#0		@ upmost carry
	mov	$tp,sp			@ "rewind" $tp
	sub	$rp,$rp,$aj		@ "rewind" $rp

.Lcopy:	ldr	$tj,[$tp]		@ conditional copy
	ldr	$aj,[$rp]
	str	sp,[$tp],#4		@ zap tp
#ifdef	__thumb2__
	it	cc
#endif
	movcc	$aj,$tj
	str	$aj,[$rp],#4
	teq	$tp,$num		@ preserve carry
	bne	.Lcopy

	mov	sp,$num
	add	sp,sp,#4		@ skip over tp[num+1]
	ldmia	sp!,{r4-r12,lr}		@ restore registers
	add	sp,sp,#2*4		@ skip over {r0,r2}
	mov	r0,#1
.Labrt:
#if __ARM_ARCH__>=5
	ret				@ bx lr
#else
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	bn_mul_mont,.-bn_mul_mont
___
{
my ($A0,$A1,$A2,$A3)=map("d$_",(0..3));
my ($N0,$N1,$N2,$N3)=map("d$_",(4..7));
my ($Z,$Temp)=("q4","q5");
my @ACC=map("q$_",(6..13));
my ($Bi,$Ni,$M0)=map("d$_",(28..31));
my $zero="$Z#lo";
my $temp="$Temp#lo";

my ($rptr,$aptr,$bptr,$nptr,$n0,$num)=map("r$_",(0..5));
my ($tinptr,$toutptr,$inner,$outer,$bnptr)=map("r$_",(6..11));
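
# An interpretive note on the NEON path below: b[i] words and the
# Montgomery factors are zipped with zero (vzip.16) into pairs of
# 16-bit digits, so each vmlal.u32 accumulates 32x16-bit products into
# 64-bit lanes. Partial sums therefore stay well below 2^64 and carries
# only have to be propagated 16 bits at a time, which is what the
# .LNEON_tail loop does at the end.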

$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

.type	bn_mul8x_mont_neon,%function
.align	5
bn_mul8x_mont_neon:
	mov	ip,sp
	stmdb	sp!,{r4-r11}
	vstmdb	sp!,{d8-d15}		@ ABI specification says so
	ldmia	ip,{r4-r5}		@ load rest of parameter block
	mov	ip,sp

	cmp	$num,#8
	bhi	.LNEON_8n

	@ special case for $num==8, everything is in register bank...

	vld1.32		{${Bi}[0]}, [$bptr,:32]!
	veor		$zero,$zero,$zero
	sub		$toutptr,sp,$num,lsl#4
	vld1.32		{$A0-$A3},  [$aptr]!		@ can't specify :32 :-(
	and		$toutptr,$toutptr,#-64
	vld1.32		{${M0}[0]}, [$n0,:32]
	mov		sp,$toutptr			@ alloca
	vzip.16		$Bi,$zero

	vmull.u32	@ACC[0],$Bi,${A0}[0]
	vmull.u32	@ACC[1],$Bi,${A0}[1]
	vmull.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmull.u32	@ACC[3],$Bi,${A1}[1]

	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	veor		$zero,$zero,$zero
	vmul.u32	$Ni,$Ni,$M0

	vmull.u32	@ACC[4],$Bi,${A2}[0]
	 vld1.32	{$N0-$N3}, [$nptr]!
	vmull.u32	@ACC[5],$Bi,${A2}[1]
	vmull.u32	@ACC[6],$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmull.u32	@ACC[7],$Bi,${A3}[1]

	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	sub		$outer,$num,#1
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vmlal.u32	@ACC[3],$Ni,${N1}[1]

	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmov		$Temp,@ACC[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmov		@ACC[0],@ACC[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmov		@ACC[1],@ACC[2]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vmov		@ACC[2],@ACC[3]
	vmov		@ACC[3],@ACC[4]
	vshr.u64	$temp,$temp,#16
	vmov		@ACC[4],@ACC[5]
	vmov		@ACC[5],@ACC[6]
	vadd.u64	$temp,$temp,$Temp#hi
	vmov		@ACC[6],@ACC[7]
	veor		@ACC[7],@ACC[7]
	vshr.u64	$temp,$temp,#16

	b	.LNEON_outer8

.align	4
.LNEON_outer8:
	vld1.32		{${Bi}[0]}, [$bptr,:32]!
	veor		$zero,$zero,$zero
	vzip.16		$Bi,$zero
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,$temp

	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmlal.u32	@ACC[3],$Bi,${A1}[1]

	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	veor		$zero,$zero,$zero
	subs		$outer,$outer,#1
	vmul.u32	$Ni,$Ni,$M0

	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmlal.u32	@ACC[7],$Bi,${A3}[1]

	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vmlal.u32	@ACC[3],$Ni,${N1}[1]

	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmov		$Temp,@ACC[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmov		@ACC[0],@ACC[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmov		@ACC[1],@ACC[2]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vmov		@ACC[2],@ACC[3]
	vmov		@ACC[3],@ACC[4]
	vshr.u64	$temp,$temp,#16
	vmov		@ACC[4],@ACC[5]
	vmov		@ACC[5],@ACC[6]
	vadd.u64	$temp,$temp,$Temp#hi
	vmov		@ACC[6],@ACC[7]
	veor		@ACC[7],@ACC[7]
	vshr.u64	$temp,$temp,#16

	bne	.LNEON_outer8

	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,$temp
	mov		$toutptr,sp
	vshr.u64	$temp,@ACC[0]#lo,#16
	mov		$inner,$num
	vadd.u64	@ACC[0]#hi,@ACC[0]#hi,$temp
	add		$tinptr,sp,#96
	vshr.u64	$temp,@ACC[0]#hi,#16
	vzip.16		@ACC[0]#lo,@ACC[0]#hi

	b	.LNEON_tail_entry

.align	4
.LNEON_8n:
	veor		@ACC[0],@ACC[0],@ACC[0]
	 sub		$toutptr,sp,#128
	veor		@ACC[1],@ACC[1],@ACC[1]
	 sub		$toutptr,$toutptr,$num,lsl#4
	veor		@ACC[2],@ACC[2],@ACC[2]
	 and		$toutptr,$toutptr,#-64
	veor		@ACC[3],@ACC[3],@ACC[3]
	 mov		sp,$toutptr			@ alloca
	veor		@ACC[4],@ACC[4],@ACC[4]
	 add		$toutptr,$toutptr,#256
	veor		@ACC[5],@ACC[5],@ACC[5]
	 sub		$inner,$num,#8
	veor		@ACC[6],@ACC[6],@ACC[6]
	veor		@ACC[7],@ACC[7],@ACC[7]

.LNEON_8n_init:
	vst1.64		{@ACC[0]-@ACC[1]},[$toutptr,:256]!
	subs		$inner,$inner,#8
	vst1.64		{@ACC[2]-@ACC[3]},[$toutptr,:256]!
	vst1.64		{@ACC[4]-@ACC[5]},[$toutptr,:256]!
	vst1.64		{@ACC[6]-@ACC[7]},[$toutptr,:256]!
	bne		.LNEON_8n_init

	add		$tinptr,sp,#256
	vld1.32		{$A0-$A3},[$aptr]!
	add		$bnptr,sp,#8
	vld1.32		{${M0}[0]},[$n0,:32]
	mov		$outer,$num
	b		.LNEON_8n_outer

.align	4
.LNEON_8n_outer:
	vld1.32		{${Bi}[0]},[$bptr,:32]!	@ *b++
	veor		$zero,$zero,$zero
	vzip.16		$Bi,$zero
	add		$toutptr,sp,#128
	vld1.32		{$N0-$N3},[$nptr]!

	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	 veor		$zero,$zero,$zero
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	 vshl.i64	$Ni,@ACC[0]#hi,#16
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	 vadd.u64	$Ni,$Ni,@ACC[0]#lo
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	 vmul.u32	$Ni,$Ni,$M0
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vst1.32		{$Bi},[sp,:64]		@ put aside smashed b[8*i+0]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	 vzip.16	$Ni,$zero
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
___
for ($i=0; $i<7;) {
$code.=<<___;
	vld1.32		{${Bi}[0]},[$bptr,:32]!	@ *b++
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	veor		$temp,$temp,$temp
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vzip.16		$Bi,$temp
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	 vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	 vadd.u64	@ACC[0]#lo,@ACC[0]#lo,@ACC[0]#hi
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	 vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	 vadd.u64	@ACC[1]#lo,@ACC[1]#lo,@ACC[0]#lo
	vst1.32		{$Ni},[$bnptr,:64]!	@ put aside smashed m[8*i+$i]
___
	push(@ACC,shift(@ACC));	$i++;
$code.=<<___;
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vld1.64		{@ACC[7]},[$tinptr,:128]!
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	 veor		$zero,$zero,$zero
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	 vshl.i64	$Ni,@ACC[0]#hi,#16
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	 vadd.u64	$Ni,$Ni,@ACC[0]#lo
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	 vmul.u32	$Ni,$Ni,$M0
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vst1.32		{$Bi},[$bnptr,:64]!	@ put aside smashed b[8*i+$i]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	 vzip.16	$Ni,$zero
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
___
}
$code.=<<___;
	vld1.32		{$Bi},[sp,:64]		@ pull smashed b[8*i+0]
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vld1.32		{$A0-$A3},[$aptr]!
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	 vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	 vadd.u64	@ACC[0]#lo,@ACC[0]#lo,@ACC[0]#hi
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	 vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	 vadd.u64	@ACC[1]#lo,@ACC[1]#lo,@ACC[0]#lo
	vst1.32		{$Ni},[$bnptr,:64]	@ put aside smashed m[8*i+$i]
	add		$bnptr,sp,#8		@ rewind
___
	push(@ACC,shift(@ACC));
$code.=<<___;
	sub		$inner,$num,#8
	b		.LNEON_8n_inner

.align	4
.LNEON_8n_inner:
	subs		$inner,$inner,#8
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vld1.64		{@ACC[7]},[$tinptr,:128]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	vld1.32		{$Ni},[$bnptr,:64]!	@ pull smashed m[8*i+0]
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vld1.32		{$N0-$N3},[$nptr]!
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	it		ne
	addne		$tinptr,$tinptr,#16	@ don't advance in last iteration
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
___
for ($i=1; $i<8; $i++) {
$code.=<<___;
	vld1.32		{$Bi},[$bnptr,:64]!	@ pull smashed b[8*i+$i]
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vst1.64		{@ACC[0]},[$toutptr,:128]!
___
	push(@ACC,shift(@ACC));
$code.=<<___;
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vld1.64		{@ACC[7]},[$tinptr,:128]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	vld1.32		{$Ni},[$bnptr,:64]!	@ pull smashed m[8*i+$i]
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	it		ne
	addne		$tinptr,$tinptr,#16	@ don't advance in last iteration
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
___
}
$code.=<<___;
	it		eq
	subeq		$aptr,$aptr,$num,lsl#2	@ rewind
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vld1.32		{$Bi},[sp,:64]		@ pull smashed b[8*i+0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vld1.32		{$A0-$A3},[$aptr]!
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	add		$bnptr,sp,#8		@ rewind
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vst1.64		{@ACC[0]},[$toutptr,:128]!
	vmlal.u32	@ACC[7],$Ni,${N3}[1]

	bne		.LNEON_8n_inner
___
	push(@ACC,shift(@ACC));
$code.=<<___;
	add		$tinptr,sp,#128
	vst1.64		{@ACC[0]-@ACC[1]},[$toutptr,:256]!
	veor		q2,q2,q2		@ $N0-$N1
	vst1.64		{@ACC[2]-@ACC[3]},[$toutptr,:256]!
	veor		q3,q3,q3		@ $N2-$N3
	vst1.64		{@ACC[4]-@ACC[5]},[$toutptr,:256]!
	vst1.64		{@ACC[6]},[$toutptr,:128]

	subs		$outer,$outer,#8
	vld1.64		{@ACC[0]-@ACC[1]},[$tinptr,:256]!
	vld1.64		{@ACC[2]-@ACC[3]},[$tinptr,:256]!
	vld1.64		{@ACC[4]-@ACC[5]},[$tinptr,:256]!
	vld1.64		{@ACC[6]-@ACC[7]},[$tinptr,:256]!

	itt		ne
	subne		$nptr,$nptr,$num,lsl#2	@ rewind
	bne		.LNEON_8n_outer

	add		$toutptr,sp,#128
	vst1.64		{q2-q3}, [sp,:256]!	@ start wiping stack frame
	vshr.u64	$temp,@ACC[0]#lo,#16
	vst1.64		{q2-q3},[sp,:256]!
	vadd.u64	@ACC[0]#hi,@ACC[0]#hi,$temp
	vst1.64		{q2-q3}, [sp,:256]!
	vshr.u64	$temp,@ACC[0]#hi,#16
	vst1.64		{q2-q3}, [sp,:256]!
	vzip.16		@ACC[0]#lo,@ACC[0]#hi

	mov		$inner,$num
	b		.LNEON_tail_entry

.align	4
.LNEON_tail:
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,$temp
	vshr.u64	$temp,@ACC[0]#lo,#16
	vld1.64		{@ACC[2]-@ACC[3]}, [$tinptr, :256]!
	vadd.u64	@ACC[0]#hi,@ACC[0]#hi,$temp
	vld1.64		{@ACC[4]-@ACC[5]}, [$tinptr, :256]!
	vshr.u64	$temp,@ACC[0]#hi,#16
	vld1.64		{@ACC[6]-@ACC[7]}, [$tinptr, :256]!
	vzip.16		@ACC[0]#lo,@ACC[0]#hi

.LNEON_tail_entry:
___
for ($i=1; $i<8; $i++) {
$code.=<<___;
	vadd.u64	@ACC[1]#lo,@ACC[1]#lo,$temp
	vst1.32		{@ACC[0]#lo[0]}, [$toutptr, :32]!
	vshr.u64	$temp,@ACC[1]#lo,#16
	vadd.u64	@ACC[1]#hi,@ACC[1]#hi,$temp
	vshr.u64	$temp,@ACC[1]#hi,#16
	vzip.16		@ACC[1]#lo,@ACC[1]#hi
___
	push(@ACC,shift(@ACC));
}
	push(@ACC,shift(@ACC));
$code.=<<___;
	vld1.64		{@ACC[0]-@ACC[1]}, [$tinptr, :256]!
	subs		$inner,$inner,#8
	vst1.32		{@ACC[7]#lo[0]},   [$toutptr, :32]!
	bne	.LNEON_tail

	vst1.32	{${temp}[0]}, [$toutptr, :32]		@ top-most bit
	sub	$nptr,$nptr,$num,lsl#2			@ rewind $nptr
	subs	$aptr,sp,#0				@ clear carry flag
	add	$bptr,sp,$num,lsl#2

.LNEON_sub:
	ldmia	$aptr!, {r4-r7}
	ldmia	$nptr!, {r8-r11}
	sbcs	r8, r4,r8
	sbcs	r9, r5,r9
	sbcs	r10,r6,r10
	sbcs	r11,r7,r11
	teq	$aptr,$bptr				@ preserves carry
	stmia	$rptr!, {r8-r11}
	bne	.LNEON_sub

	ldr	r10, [$aptr]				@ load top-most bit
	mov	r11,sp
	veor	q0,q0,q0
	sub	r11,$bptr,r11				@ this is num*4
	veor	q1,q1,q1
	mov	$aptr,sp
	sub	$rptr,$rptr,r11				@ rewind $rptr
	mov	$nptr,$bptr				@ second 3/4th of frame
	sbcs	r10,r10,#0				@ result is carry flag

.LNEON_copy_n_zap:
	ldmia	$aptr!, {r4-r7}
	ldmia	$rptr,  {r8-r11}
	it	cc
	movcc	r8, r4
	vst1.64	{q0-q1}, [$nptr,:256]!			@ wipe
	itt	cc
	movcc	r9, r5
	movcc	r10,r6
	vst1.64	{q0-q1}, [$nptr,:256]!			@ wipe
	it	cc
	movcc	r11,r7
	ldmia	$aptr, {r4-r7}
	stmia	$rptr!, {r8-r11}
	sub	$aptr,$aptr,#16
	ldmia	$rptr, {r8-r11}
	it	cc
	movcc	r8, r4
	vst1.64	{q0-q1}, [$aptr,:256]!			@ wipe
	itt	cc
	movcc	r9, r5
	movcc	r10,r6
	vst1.64	{q0-q1}, [$nptr,:256]!			@ wipe
	it	cc
	movcc	r11,r7
	teq	$aptr,$bptr				@ preserves carry
	stmia	$rptr!, {r8-r11}
	bne	.LNEON_copy_n_zap

	mov	sp,ip
	vldmia	sp!,{d8-d15}
	ldmia	sp!,{r4-r11}
	ret						@ bx lr
.size	bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
#endif
___
}
$code.=<<___;
.asciz	"Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
#if __ARM_MAX_ARCH__>=7
.comm	OPENSSL_armcap_P,4,4
.hidden	OPENSSL_armcap_P
#endif
___

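# Post-process the generated code: expand `...` snippets with eval; map
# the q<N>#lo/#hi shorthand used above to the underlying d-registers,
# d(2*N) and d(2*N+1); rewrite "ret" as "bx lr"; and encode pre-existing
# "bx lr" as a raw .word so the ARMv4 branch still assembles with
# -march=armv4. The "or" chain applies at most one substitution per line.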
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/ge	or
	s/\bret\b/bx    lr/g						or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/g;	# make it possible to compile with -march=armv4

	print $_,"\n";
}

close STDOUT;