#if defined(__arm__)
#include <openssl/arm_arch.h>

.text
.fpu	neon
.code	32
#undef	__thumb2__
.globl	gcm_init_v8
.hidden	gcm_init_v8
.type	gcm_init_v8,%function
.align	4
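@ void gcm_init_v8(u128 Htable[], const u64 H[2])
@
@ Precomputes the hash-key powers used below: loads H from [r1],
@ converts it to the "twisted" representation, squares it to obtain
@ H^2, and stores Htable[0..2] (twisted H, packed Karatsuba values,
@ H^2) at [r0]. The prototype above is a sketch inferred from the
@ register usage; consult the C caller for the authoritative
@ declaration.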
gcm_init_v8:
	vld1.64	{q9},[r1]		@ load input H
	vmov.i8	q11,#0xe1
	vshl.i64	q11,q11,#57		@ 0xc2.0
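	@ each 64-bit lane of q11 is now 0xc200000000000000: the GHASH
	@ reduction constant (bit-reflected form of the field polynomial
	@ x^128+x^7+x^2+x+1) used by both reduction phases below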
	vext.8	q3,q9,q9,#8
	vshr.u64	q10,q11,#63
	vdup.32	q9,d18[1]
	vext.8	q8,q10,q11,#8		@ t0=0xc2....01
	vshr.u64	q10,q3,#63
	vshr.s32	q9,q9,#31		@ broadcast carry bit
	vand	q10,q10,q8
	vshl.i64	q3,q3,#1
	vext.8	q10,q10,q10,#8
	vand	q8,q8,q9
	vorr	q3,q3,q10		@ H<<<=1
	veor	q12,q3,q8		@ twisted H
	vst1.64	{q12},[r0]!		@ store Htable[0]
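	@ the "twist" multiplies H by x (the H<<<=1 rotation) and, when the
	@ rotated-out carry bit was set, folds in the 0xc2....01 constant;
	@ keeping H in this form lets the multiplications below use the
	@ cheaper bit-reflected reduction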

	@ calculate H^2
	vext.8	q8,q12,q12,#8		@ Karatsuba pre-processing
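	@ the .byte sequences below are hand-encoded pmull/pmull2
	@ (vmull.p64) instructions, emitted as raw opcodes so the file
	@ still assembles with toolchains that lack the ARMv8 Crypto
	@ extension mnemonics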
.byte	0xa8,0x0e,0xa8,0xf2	@ pmull q0,q12,q12
	veor	q8,q8,q12
.byte	0xa9,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q12
.byte	0xa0,0x2e,0xa0,0xf2	@ pmull q1,q8,q8

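	@ Karatsuba: the 128x128-bit carry-less multiply is built from
	@ three 64x64 pmulls - lo·lo (q0), hi·hi (q2) and
	@ (lo^hi)·(lo^hi) (q1); the post-processing below xors the outer
	@ products back out of q1 to recover the middle term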
	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q14,q0,q10
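	@ the two "phase" steps fold the 256-bit product back into 128
	@ bits, each multiplying the low half by the 0xc2.. constant (a
	@ carry-less reduction for the bit-reflected polynomial); q14 now
	@ holds H^2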

	vext.8	q9,q14,q14,#8		@ Karatsuba pre-processing
	veor	q9,q9,q14
	vext.8	q13,q8,q9,#8		@ pack Karatsuba pre-processed
	vst1.64	{q13,q14},[r0]		@ store Htable[1..2]

	bx	lr
.size	gcm_init_v8,.-gcm_init_v8
.globl	gcm_gmult_v8
.hidden	gcm_gmult_v8
.type	gcm_gmult_v8,%function
.align	4
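@ void gcm_gmult_v8(u64 Xi[2], const u128 Htable[])
@
@ Multiplies the current hash value Xi (at [r0]) by H in GF(2^128),
@ using the twisted H and packed Karatsuba values precomputed by
@ gcm_init_v8 (at [r1]), and writes the product back to [r0].
@ The prototype is a sketch inferred from register usage.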
gcm_gmult_v8:
	vld1.64	{q9},[r0]		@ load Xi
	vmov.i8	q11,#0xe1
	vld1.64	{q12,q13},[r1]	@ load twisted H, ...
	vshl.u64	q11,q11,#57
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
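	@ GHASH state is kept in big-endian byte order, so on
	@ little-endian builds (no __ARMEB__) the bytes of each 64-bit
	@ half are reversed on load and again on store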
	vext.8	q3,q9,q9,#8

.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	bx	lr
.size	gcm_gmult_v8,.-gcm_gmult_v8
.globl	gcm_ghash_v8
.hidden	gcm_ghash_v8
.type	gcm_ghash_v8,%function
.align	4
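@ void gcm_ghash_v8(u64 Xi[2], const u128 Htable[], const u8 *inp,
@                   size_t len)
@
@ Folds len bytes of input at [r2] (len a multiple of the 16-byte
@ block size) into the hash value Xi at [r0]. The main loop absorbs
@ two blocks per iteration, multiplying the older block by H^2 and
@ the newer by H so both products share a single reduction; an odd
@ trailing block falls through to the single-block tail. The
@ prototype is a sketch inferred from register usage.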
gcm_ghash_v8:
	vstmdb	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	vld1.64	{q0},[r0]		@ load [rotated] Xi
						@ "[rotated]" means that
						@ the loaded value would have
						@ to be rotated in order to
						@ appear as in the
						@ algorithm specification
	subs	r3,r3,#32		@ see if r3 is 32 or larger
	mov	r12,#16		@ r12 is used as post-
						@ increment for the input pointer;
						@ as the loop is modulo-scheduled,
						@ r12 is zeroed just in time
						@ to preclude overstepping
						@ inp[len], which means that
						@ the last block[s] are actually
						@ loaded twice, but the last
						@ copy is not processed
	vld1.64	{q12,q13},[r1]!	@ load twisted H, ..., H^2
	vmov.i8	q11,#0xe1
	vld1.64	{q14},[r1]
	moveq	r12,#0			@ is it time to zero r12?
	vext.8	q0,q0,q0,#8		@ rotate Xi
	vld1.64	{q8},[r2]!	@ load [rotated] I[0]
	vshl.u64	q11,q11,#57		@ compose 0xc2.0 constant
#ifndef __ARMEB__
	vrev64.8	q8,q8
	vrev64.8	q0,q0
#endif
	vext.8	q3,q8,q8,#8		@ rotate I[0]
	blo	.Lodd_tail_v8		@ r3 was less than 32
	vld1.64	{q9},[r2],r12	@ load [rotated] I[1]
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8	q7,q9,q9,#8
	veor	q3,q3,q0		@ I[i]^=Xi
.byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q9,q9,q7		@ Karatsuba pre-processing
.byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	b	.Loop_mod2x_v8

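	@ modulo-scheduled 2x loop: each iteration computes
	@ (Xi^I[i])·H^2 ^ I[i+1]·H, absorbing two blocks per pass, while
	@ the partial products for I[i+1] (q4/q6, started above and at the
	@ bottom of the loop) overlap with the reduction of the current pair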
.align	4
.Loop_mod2x_v8:
	vext.8	q10,q3,q3,#8
	subs	r3,r3,#32		@ is there more data?
.byte	0x86,0x0e,0xac,0xf2	@ pmull q0,q14,q3		@ H^2.lo·Xi.lo
	movlo	r12,#0			@ is it time to zero r12?

.byte	0xa2,0xae,0xaa,0xf2	@ pmull q5,q13,q9
	veor	q10,q10,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xad,0xf2	@ pmull2 q2,q14,q3		@ H^2.hi·Xi.hi
	veor	q0,q0,q4		@ accumulate
.byte	0xa5,0x2e,0xab,0xf2	@ pmull2 q1,q13,q10		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	vld1.64	{q8},[r2],r12	@ load [rotated] I[i+2]

	veor	q2,q2,q6
	moveq	r12,#0			@ is it time to zero r12?
	veor	q1,q1,q5

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	vld1.64	{q9},[r2],r12	@ load [rotated] I[i+3]
#ifndef __ARMEB__
	vrev64.8	q8,q8
#endif
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	vext.8	q7,q9,q9,#8
	vext.8	q3,q8,q8,#8
	veor	q0,q1,q10
.byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q3,q3,q2		@ accumulate q3 early

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q3,q3,q10
	veor	q9,q9,q7		@ Karatsuba pre-processing
	veor	q3,q3,q0
.byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	bhs	.Loop_mod2x_v8		@ there were at least 32 more bytes

	veor	q2,q2,q10
	vext.8	q3,q8,q8,#8		@ reconstruct q3
	adds	r3,r3,#32		@ reconstruct r3
	veor	q0,q0,q2		@ reconstruct q0
	beq	.Ldone_v8		@ is r3 zero?
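	@ odd tail: exactly one 16-byte block remains, so it is folded in
	@ with a single multiplication by H, mirroring gcm_gmult_v8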
.Lodd_tail_v8:
	vext.8	q10,q0,q0,#8
	veor	q3,q3,q0		@ inp^=Xi
	veor	q9,q8,q10		@ q9 is rotated inp^Xi

.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

.Ldone_v8:
#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	vldmia	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	bx	lr
.size	gcm_ghash_v8,.-gcm_ghash_v8
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
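@ the .byte string above encodes the ASCII tag
@ "GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>"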
.align	2
.align	2
#endif