#include "arm_arch.h"

#if __ARM_MAX_ARCH__>=7
.fpu	neon
#ifdef __thumb2__
.syntax	unified
.thumb
# define INST(a,b,c,d)	.byte	c,0xef,a,b
#else
.code	32
# define INST(a,b,c,d)	.byte	a,b,c,0xf2
#endif
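@ INST() emits each PMULL/PMULL2 instruction as raw opcode bytes,
@ presumably so the file still assembles with older toolchains that
@ lack the ARMv8 Crypto Extension mnemonics. The operand bytes are
@ ordered differently for the Thumb-2 (0xef group) and ARM (0xf2
@ group) encodings, hence the two definitions above.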

.text
.globl	gcm_init_v8
.type	gcm_init_v8,%function
.align	4
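@ gcm_init_v8 builds the per-key table consumed by gcm_gmult_v8 and
@ gcm_ghash_v8: Htable[0] is the "twisted" hash key (H shifted left by
@ one bit modulo the field polynomial, which spares the multiply
@ routines a bit-reflection step), Htable[2] is the twisted H^2, and
@ Htable[1] packs the Karatsuba pre-processed halves of both. The C
@ prototype is assumed to match OpenSSL's:
@	void gcm_init_v8(u128 Htable[16], const u64 H[2]);
@ with r0 = Htable and r1 = H on entry.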
gcm_init_v8:
	vld1.64	{q9},[r1]		@ load input H
	vmov.i8	q11,#0xe1
	vshl.i64	q11,q11,#57		@ 0xc2.0
	vext.8	q3,q9,q9,#8
	vshr.u64	q10,q11,#63
	vdup.32	q9,d18[1]
	vext.8	q8,q10,q11,#8		@ t0=0xc2....01
	vshr.u64	q10,q3,#63
	vshr.s32	q9,q9,#31		@ broadcast carry bit
	vand	q10,q10,q8
	vshl.i64	q3,q3,#1
	vext.8	q10,q10,q10,#8
	vand	q8,q8,q9
	vorr	q3,q3,q10		@ H<<<=1
	veor	q12,q3,q8		@ twisted H
	vst1.64	{q12},[r0]!		@ store Htable[0]

	@ calculate H^2
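	@ Each 128x128-bit carry-less multiplication below is split into
	@ three 64x64-bit PMULLs via Karatsuba: lo*lo, hi*hi, and
	@ (lo^hi)*(lo^hi), with the middle term recovered as the XOR of
	@ all three partial products.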
	vext.8	q8,q12,q12,#8		@ Karatsuba pre-processing
	INST(0xa8,0x0e,0xa8,0xf2)	@ pmull q0,q12,q12
	veor	q8,q8,q12
	INST(0xa9,0x4e,0xa9,0xf2)	@ pmull2 q2,q12,q12
	INST(0xa0,0x2e,0xa0,0xf2)	@ pmull q1,q8,q8

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
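	@ The 256-bit product in q2:q0 is now folded back to 128 bits in
	@ the two "phases" below; the 0xc2 constant in q11 corresponds to
	@ the GHASH reduction polynomial x^128+x^7+x^2+x+1 in the
	@ bit-reflected, shifted representation used here.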
	INST(0x26,0x4e,0xe0,0xf2)	@ pmull q10,q0,q11		@ 1st phase

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase
	INST(0x26,0x0e,0xa0,0xf2)	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q14,q0,q10

	vext.8	q9,q14,q14,#8		@ Karatsuba pre-processing
	veor	q9,q9,q14
	vext.8	q13,q8,q9,#8		@ pack Karatsuba pre-processed
	vst1.64	{q13,q14},[r0]!	@ store Htable[1..2]
	bx	lr
.size	gcm_init_v8,.-gcm_init_v8
.globl	gcm_gmult_v8
.type	gcm_gmult_v8,%function
.align	4
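@ gcm_gmult_v8 multiplies the current hash value Xi by H in GF(2^128),
@ i.e. Xi = Xi·H, using the table prepared by gcm_init_v8. The C
@ prototype is assumed to match OpenSSL's:
@	void gcm_gmult_v8(u64 Xi[2], const u128 Htable[16]);
@ with r0 = Xi (updated in place) and r1 = Htable on entry.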
gcm_gmult_v8:
	vld1.64	{q9},[r0]		@ load Xi
	vmov.i8	q11,#0xe1
	vld1.64	{q12,q13},[r1]	@ load twisted H, ...
	vshl.u64	q11,q11,#57
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8	q3,q9,q9,#8

	INST(0x86,0x0e,0xa8,0xf2)	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
	INST(0x87,0x4e,0xa9,0xf2)	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
	INST(0xa2,0x2e,0xaa,0xf2)	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
	INST(0x26,0x4e,0xe0,0xf2)	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
	INST(0x26,0x0e,0xa0,0xf2)	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	bx	lr
.size	gcm_gmult_v8,.-gcm_gmult_v8
.globl	gcm_ghash_v8
.type	gcm_ghash_v8,%function
.align	4
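@ gcm_ghash_v8 absorbs len bytes of input into Xi, two 16-byte blocks
@ per loop iteration (see .Loop_mod2x_v8 below). Assuming the usual
@ OpenSSL prototype
@	void gcm_ghash_v8(u64 Xi[2], const u128 Htable[16],
@			  const u8 *inp, size_t len);
@ it computes, for i = 0, 16, ..., len-16:
@	Xi = (Xi ^ inp[i..i+15])·H	in GF(2^128)
@ with r0 = Xi, r1 = Htable, r2 = inp, r3 = len (a multiple of 16).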
gcm_ghash_v8:
	vstmdb	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	vld1.64	{q0},[r0]		@ load [rotated] Xi
						@ "[rotated]" means that
						@ the loaded value would
						@ have to be rotated in
						@ order to appear as in
						@ the algorithm
						@ specification
	subs	r3,r3,#32		@ see if r3 is 32 or larger
	mov	r12,#16		@ r12 is used as post-
						@ increment for the input
						@ pointer; as the loop is
						@ modulo-scheduled, r12 is
						@ zeroed just in time to
						@ preclude overstepping
						@ inp[len], which means that
						@ the last block[s] are
						@ actually loaded twice, but
						@ the last copy is not
						@ processed
	vld1.64	{q12,q13},[r1]!	@ load twisted H, ..., H^2
	vmov.i8	q11,#0xe1
	vld1.64	{q14},[r1]
	it	eq
	moveq	r12,#0			@ is it time to zero r12?
	vext.8	q0,q0,q0,#8		@ rotate Xi
	vld1.64	{q8},[r2]!	@ load [rotated] I[0]
	vshl.u64	q11,q11,#57		@ compose 0xc2.0 constant
#ifndef __ARMEB__
	vrev64.8	q8,q8
	vrev64.8	q0,q0
#endif
	vext.8	q3,q8,q8,#8		@ rotate I[0]
	blo	.Lodd_tail_v8		@ r3 was less than 32
	vld1.64	{q9},[r2],r12	@ load [rotated] I[1]
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8	q7,q9,q9,#8
	veor	q3,q3,q0		@ I[i]^=Xi
	INST(0x8e,0x8e,0xa8,0xf2)	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q9,q9,q7		@ Karatsuba pre-processing
	INST(0x8f,0xce,0xa9,0xf2)	@ pmull2 q6,q12,q7
	b	.Loop_mod2x_v8

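@ Main loop: two blocks per iteration. Xi ^ I[i] is multiplied by H^2
@ while I[i+1] is multiplied by H, and the two 256-bit products are
@ XORed together before a single reduction, i.e. two steps of Horner's
@ rule per pass:
@	Xi = ((Xi ^ I[i])·H ^ I[i+1])·H
@	   = (Xi ^ I[i])·H^2 ^ I[i+1]·H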
.align	4
.Loop_mod2x_v8:
	vext.8	q10,q3,q3,#8
	subs	r3,r3,#32		@ is there more data?
	INST(0x86,0x0e,0xac,0xf2)	@ pmull q0,q14,q3		@ H^2.lo·Xi.lo
	it	lo
	movlo	r12,#0			@ is it time to zero r12?

	INST(0xa2,0xae,0xaa,0xf2)	@ pmull q5,q13,q9
	veor	q10,q10,q3		@ Karatsuba pre-processing
	INST(0x87,0x4e,0xad,0xf2)	@ pmull2 q2,q14,q3		@ H^2.hi·Xi.hi
	veor	q0,q0,q4		@ accumulate
	INST(0xa5,0x2e,0xab,0xf2)	@ pmull2 q1,q13,q10		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	vld1.64	{q8},[r2],r12	@ load [rotated] I[i+2]

	veor	q2,q2,q6
	it	eq
	moveq	r12,#0			@ is it time to zero r12?
	veor	q1,q1,q5

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	vld1.64	{q9},[r2],r12	@ load [rotated] I[i+3]
#ifndef __ARMEB__
	vrev64.8	q8,q8
#endif
	veor	q1,q1,q10
	INST(0x26,0x4e,0xe0,0xf2)	@ pmull q10,q0,q11		@ 1st phase of reduction

#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	vext.8	q7,q9,q9,#8
	vext.8	q3,q8,q8,#8
	veor	q0,q1,q10
	INST(0x8e,0x8e,0xa8,0xf2)	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q3,q3,q2		@ accumulate q3 early

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
	INST(0x26,0x0e,0xa0,0xf2)	@ pmull q0,q0,q11
	veor	q3,q3,q10
	veor	q9,q9,q7		@ Karatsuba pre-processing
	veor	q3,q3,q0
	INST(0x8f,0xce,0xa9,0xf2)	@ pmull2 q6,q12,q7
	bhs	.Loop_mod2x_v8		@ there were at least 32 more bytes

	veor	q2,q2,q10
	vext.8	q3,q8,q8,#8		@ re-construct q3
	adds	r3,r3,#32		@ re-construct r3
	veor	q0,q0,q2		@ re-construct q0
	beq	.Ldone_v8		@ is r3 zero?
.Lodd_tail_v8:
	vext.8	q10,q0,q0,#8
	veor	q3,q3,q0		@ inp^=Xi
	veor	q9,q8,q10		@ q9 is rotated inp^Xi

	INST(0x86,0x0e,0xa8,0xf2)	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
	INST(0x87,0x4e,0xa9,0xf2)	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
	INST(0xa2,0x2e,0xaa,0xf2)	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
	INST(0x26,0x4e,0xe0,0xf2)	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
	INST(0x26,0x0e,0xa0,0xf2)	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

.Ldone_v8:
#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	vldmia	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	bx	lr
.size	gcm_ghash_v8,.-gcm_ghash_v8
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align	2
.align	2
#endif