#include "arm_arch.h"

.text
#if defined(__thumb2__)
.syntax	unified
.thumb
#else
.code	32
#endif
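@ mul_1x1_ialu: multiply two 32-bit polynomials over GF(2) using only
@ integer instructions, consuming b three bits at a time via an 8-entry
@ table of multiples of a&0x3fffffff built on the stack.  The caller
@ points sp at tab[8] and preloads r12 with the index mask 7<<2.
@ input:	r1=a, r0=b
@ output:	r5:r4 = a*b (r5 = low 32 bits, r4 = high 32 bits)
@ clobbers:	r4-r9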
.type	mul_1x1_ialu,%function
.align	5
mul_1x1_ialu:
	mov	r4,#0
	bic	r5,r1,#3<<30		@ a1=a&0x3fffffff
	str	r4,[sp,#0]		@ tab[0]=0
	add	r6,r5,r5		@ a2=a1<<1
	str	r5,[sp,#4]		@ tab[1]=a1
	eor	r7,r5,r6		@ a1^a2
	str	r6,[sp,#8]		@ tab[2]=a2
	mov	r8,r5,lsl#2		@ a4=a1<<2
	str	r7,[sp,#12]		@ tab[3]=a1^a2
	eor	r9,r5,r8		@ a1^a4
	str	r8,[sp,#16]		@ tab[4]=a4
	eor	r4,r6,r8		@ a2^a4
	str	r9,[sp,#20]		@ tab[5]=a1^a4
	eor	r7,r7,r8		@ a1^a2^a4
	str	r4,[sp,#24]		@ tab[6]=a2^a4
	and	r8,r12,r0,lsl#2
	str	r7,[sp,#28]		@ tab[7]=a1^a2^a4

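@ walk b in 3-bit windows: each window indexes tab[], and the selected
@ multiple of a1 is shifted into place and xored into the 64-bit
@ accumulator r5:r4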
	and	r9,r12,r0,lsr#1
	ldr	r5,[sp,r8]		@ tab[b       & 0x7]
	and	r8,r12,r0,lsr#4
	ldr	r7,[sp,r9]		@ tab[b >>  3 & 0x7]
	and	r9,r12,r0,lsr#7
	ldr	r6,[sp,r8]		@ tab[b >>  6 & 0x7]
	eor	r5,r5,r7,lsl#3	@ stall
	mov	r4,r7,lsr#29
	ldr	r7,[sp,r9]		@ tab[b >>  9 & 0x7]

	and	r8,r12,r0,lsr#10
	eor	r5,r5,r6,lsl#6
	eor	r4,r4,r6,lsr#26
	ldr	r6,[sp,r8]		@ tab[b >> 12 & 0x7]

	and	r9,r12,r0,lsr#13
	eor	r5,r5,r7,lsl#9
	eor	r4,r4,r7,lsr#23
	ldr	r7,[sp,r9]		@ tab[b >> 15 & 0x7]

	and	r8,r12,r0,lsr#16
	eor	r5,r5,r6,lsl#12
	eor	r4,r4,r6,lsr#20
	ldr	r6,[sp,r8]		@ tab[b >> 18 & 0x7]

	and	r9,r12,r0,lsr#19
	eor	r5,r5,r7,lsl#15
	eor	r4,r4,r7,lsr#17
	ldr	r7,[sp,r9]		@ tab[b >> 21 & 0x7]

	and	r8,r12,r0,lsr#22
	eor	r5,r5,r6,lsl#18
	eor	r4,r4,r6,lsr#14
	ldr	r6,[sp,r8]		@ tab[b >> 24 & 0x7]

	and	r9,r12,r0,lsr#25
	eor	r5,r5,r7,lsl#21
	eor	r4,r4,r7,lsr#11
	ldr	r7,[sp,r9]		@ tab[b >> 27 & 0x7]

	tst	r1,#1<<30
	and	r8,r12,r0,lsr#28
	eor	r5,r5,r6,lsl#24
	eor	r4,r4,r6,lsr#8
	ldr	r6,[sp,r8]		@ tab[b >> 30      ]

#ifdef	__thumb2__
	itt	ne
#endif
	eorne	r5,r5,r0,lsl#30
	eorne	r4,r4,r0,lsr#2
	tst	r1,#1<<31
	eor	r5,r5,r7,lsl#27
	eor	r4,r4,r7,lsr#5
#ifdef	__thumb2__
	itt	ne
#endif
	eorne	r5,r5,r0,lsl#31
	eorne	r4,r4,r0,lsr#1
	eor	r5,r5,r6,lsl#30
	eor	r4,r4,r6,lsr#2

	mov	pc,lr
.size	mul_1x1_ialu,.-mul_1x1_ialu
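@ void	bn_GF2m_mul_2x2(BN_ULONG *r,
@			BN_ULONG a1, BN_ULONG a0,
@			BN_ULONG b1, BN_ULONG b0);	@ r[3:0]=a1a0·b1b0
@ The integer-only path assembles the 128-bit product from three
@ 32x32-bit multiplications, a1·b1, a0·b0 and (a1+a0)·(b1+b0), i.e.
@ one level of Karatsuba.  When OPENSSL_armcap_P advertises NEON,
@ execution branches to the vmull.p8-based code at .LNEON instead.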
.global	bn_GF2m_mul_2x2
.type	bn_GF2m_mul_2x2,%function
.align	5
bn_GF2m_mul_2x2:
#if __ARM_MAX_ARCH__>=7
	stmdb	sp!,{r10,lr}
	ldr	r12,.LOPENSSL_armcap
	adr	r10,.LOPENSSL_armcap
	ldr	r12,[r12,r10]
#ifdef	__APPLE__
	ldr	r12,[r12]
#endif
	tst	r12,#ARMV7_NEON
	itt	ne
	ldrne	r10,[sp],#8
	bne	.LNEON
	stmdb	sp!,{r4-r9}
#else
	stmdb	sp!,{r4-r10,lr}
#endif
	mov	r10,r0			@ reassign 1st argument
	mov	r0,r3			@ r0=b1
	sub	r7,sp,#36
	mov	r8,sp
	and	r7,r7,#-32
	ldr	r3,[sp,#32]		@ load b0
	mov	r12,#7<<2
	mov	sp,r7			@ allocate tab[8]
	str	r8,[r7,#32]

	bl	mul_1x1_ialu		@ a1·b1
	str	r5,[r10,#8]
	str	r4,[r10,#12]

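@ the three eor pairs below XOR-swap r0<->r3 (b1<->b0) and r1<->r2
@ (a1<->a0), so the next call to mul_1x1_ialu computes a0·b0 without
@ touching any extra registers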
	eor	r0,r0,r3		@ flip b0 and b1
	 eor	r1,r1,r2		@ flip a0 and a1
	eor	r3,r3,r0
	 eor	r2,r2,r1
	eor	r0,r0,r3
	 eor	r1,r1,r2
	bl	mul_1x1_ialu		@ a0·b0
	str	r5,[r10]
	str	r4,[r10,#4]

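@ one more eor of each swapped pair leaves r1=a1^a0 and r0=b1^b0 for the
@ Karatsuba middle product; the ldmia/eor sequence after it folds all
@ three partial products into the middle 64 bits of the result at r10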
	eor	r1,r1,r2
	eor	r0,r0,r3
	bl	mul_1x1_ialu		@ (a1+a0)·(b1+b0)
	ldmia	r10,{r6-r9}
	eor	r5,r5,r4
	ldr	sp,[sp,#32]		@ destroy tab[8]
	eor	r4,r4,r7
	eor	r5,r5,r6
	eor	r4,r4,r8
	eor	r5,r5,r9
	eor	r4,r4,r9
	str	r4,[r10,#8]
	eor	r5,r5,r4
	str	r5,[r10,#4]

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r10,pc}
#else
	ldmia	sp!,{r4-r10,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

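@ NEON path: the two 64-bit inputs are treated as polynomials over GF(2)
@ and the 128-bit carry-less product is built from vmull.p8, the 8x8-bit
@ polynomial multiply.  B is multiplied against byte-rotated copies of A
@ (A1-A3) and A against byte-rotated copies of B (B1-B4); the partial
@ products are then masked, shifted via vext.8 and xored together so
@ that the unwanted cross terms cancel, leaving D = A*B in q0.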
.align	5
.LNEON:
	ldr		r12, [sp]		@ 5th argument
	vmov		d26, r2, r1
	vmov		d27, r12, r3
	vmov.i64	d28, #0x0000ffffffffffff
	vmov.i64	d29, #0x00000000ffffffff
	vmov.i64	d30, #0x000000000000ffff

	vext.8		d2, d26, d26, #1	@ A1
	vmull.p8	q1, d2, d27		@ F = A1*B
	vext.8		d0, d27, d27, #1	@ B1
	vmull.p8	q0, d26, d0		@ E = A*B1
	vext.8		d4, d26, d26, #2	@ A2
	vmull.p8	q2, d4, d27		@ H = A2*B
	vext.8		d16, d27, d27, #2	@ B2
	vmull.p8	q8, d26, d16		@ G = A*B2
	vext.8		d6, d26, d26, #3	@ A3
	veor		q1, q1, q0		@ L = E + F
	vmull.p8	q3, d6, d27		@ J = A3*B
	vext.8		d0, d27, d27, #3	@ B3
	veor		q2, q2, q8		@ M = G + H
	vmull.p8	q0, d26, d0		@ I = A*B3
	veor		d2, d2, d3	@ t0 = (L) (P0 + P1) << 8
	vand		d3, d3, d28
	vext.8		d16, d27, d27, #4	@ B4
	veor		d4, d4, d5	@ t1 = (M) (P2 + P3) << 16
	vand		d5, d5, d29
	vmull.p8	q8, d26, d16		@ K = A*B4
	veor		q3, q3, q0		@ N = I + J
	veor		d2, d2, d3
	veor		d4, d4, d5
	veor		d6, d6, d7	@ t2 = (N) (P4 + P5) << 24
	vand		d7, d7, d30
	vext.8		q1, q1, q1, #15
	veor		d16, d16, d17	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d17, #0
	vext.8		q2, q2, q2, #14
	veor		d6, d6, d7
	vmull.p8	q0, d26, d27		@ D = A*B
	vext.8		q8, q8, q8, #12
	vext.8		q3, q3, q3, #13
	veor		q1, q1, q2
	veor		q3, q3, q8
	veor		q0, q0, q1
	veor		q0, q0, q3

	vst1.32		{q0}, [r0]
	bx	lr		@ bx lr
#endif
.size	bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
#if __ARM_MAX_ARCH__>=7
.align	5
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-.
#endif
.asciz	"GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
.align	5

#if __ARM_MAX_ARCH__>=7
.comm	OPENSSL_armcap_P,4,4
#endif
