#include "arm_arch.h"
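
@ GHASH for ARMv8 Crypto Extensions, 32-bit (AArch32) mode; the .byte string
@ at the end of the file decodes to the CRYPTOGAMS attribution
@ "GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>".
@ The routines below implement the GF(2^128) multiplication used by AES-GCM
@ with the polynomial-multiply instruction.  Assumed C prototypes, inferred
@ from the usual OpenSSL GCM interface rather than stated in this file:
@	void gcm_init_v8(u128 Htable[16], const u64 H[2]);
@	void gcm_gmult_v8(u64 Xi[2], const u128 Htable[16]);
@	void gcm_ghash_v8(u64 Xi[2], const u128 Htable[16],
@	                  const u8 *inp, size_t len);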

.text
.fpu	neon
.code	32
.globl	gcm_init_v8
.type	gcm_init_v8,%function
.align	4
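@ gcm_init_v8: expand the hash key.  Per the AAPCS, r0 = Htable and r1 = H
@ (the code loads the input H from [r1] and stores through [r0]).  H is
@ converted to the "twisted" form (shifted left by one bit with a conditional
@ reduction, see the H<<<=1 sequence below) so the later reduction can use the
@ 0xc2..0 constant, then H^2 is derived from it so that gcm_ghash_v8 can
@ process two blocks per iteration.  Htable[0] = twisted H, Htable[1] = the
@ packed Karatsuba pre-processed halves, Htable[2] = twisted H^2 (see the
@ stores at the end of the routine).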
gcm_init_v8:
	vld1.64	{q9},[r1]		@ load input H
	vmov.i8	q11,#0xe1
	vshl.i64	q11,q11,#57		@ 0xc2.0
	vext.8	q3,q9,q9,#8
	vshr.u64	q10,q11,#63
	vdup.32	q9,d18[1]
	vext.8	q8,q10,q11,#8		@ t0=0xc2....01
	vshr.u64	q10,q3,#63
	vshr.s32	q9,q9,#31		@ broadcast carry bit
	vand	q10,q10,q8
	vshl.i64	q3,q3,#1
	vext.8	q10,q10,q10,#8
	vand	q8,q8,q9
	vorr	q3,q3,q10		@ H<<<=1
	veor	q12,q3,q8		@ twisted H
	vst1.64	{q12},[r0]!		@ store Htable[0]

	@ calculate H^2
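	@ Note: the .byte sequences in this file are hand-encoded
	@ polynomial-multiply instructions (vmull.p64, shown as pmull/pmull2
	@ in the comments), presumably emitted as raw opcodes so the file
	@ still assembles with toolchains that lack the Crypto Extension
	@ mnemonics; the intended instruction is given in each comment.
	@ Each 128x128-bit carry-less product is built Karatsuba-style from
	@ three 64x64 multiplications: lo*lo, hi*hi and (lo^hi)*(lo^hi), with
	@ the middle word recovered afterwards by XOR-ing the outer products
	@ back in ("post-processing").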
	vext.8	q8,q12,q12,#8		@ Karatsuba pre-processing
.byte	0xa8,0x0e,0xa8,0xf2	@ pmull q0,q12,q12
	veor	q8,q8,q12
.byte	0xa9,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q12
.byte	0xa0,0x2e,0xa0,0xf2	@ pmull q1,q8,q8

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
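	@ Two-phase reduction of the 256-bit product Xh:Xm:Xl modulo the
	@ GHASH polynomial x^128+x^7+x^2+x+1: each phase is one pmull by the
	@ 0xc2..0 constant held in q11 followed by XOR folds, rather than a
	@ general polynomial division.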
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q14,q0,q10

	vext.8	q9,q14,q14,#8		@ Karatsuba pre-processing
	veor	q9,q9,q14
	vext.8	q13,q8,q9,#8		@ pack Karatsuba pre-processed
	vst1.64	{q13,q14},[r0]		@ store Htable[1..2]

	bx	lr
.size	gcm_init_v8,.-gcm_init_v8
.globl	gcm_gmult_v8
.type	gcm_gmult_v8,%function
.align	4
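@ gcm_gmult_v8: Xi = Xi * H in GF(2^128).  r0 = Xi, r1 = Htable as filled in
@ by gcm_init_v8 (twisted H plus its Karatsuba pre-processed halves).  Xi is
@ byte-reversed on little-endian targets, multiplied and reduced with the
@ same Karatsuba multiply and two-phase reduction as the single-block tail of
@ gcm_ghash_v8, then written back to [r0].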
gcm_gmult_v8:
	vld1.64	{q9},[r0]		@ load Xi
	vmov.i8	q11,#0xe1
	vld1.64	{q12,q13},[r1]	@ load twisted H, ...
	vshl.u64	q11,q11,#57
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8	q3,q9,q9,#8

.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	bx	lr
.size	gcm_gmult_v8,.-gcm_gmult_v8
.globl	gcm_ghash_v8
.type	gcm_ghash_v8,%function
.align	4
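@ gcm_ghash_v8: hash a stream of input blocks into Xi.  r0 = Xi, r1 = Htable,
@ r2 = inp, r3 = len in bytes; nothing below handles a partial block, so the
@ caller is expected to pass a multiple of 16.  The main loop consumes two
@ blocks per iteration, multiplying (Xi^I[i]) by H^2 and I[i+1] by H and
@ accumulating both products before a single reduction; a trailing odd block
@ is handled at .Lodd_tail_v8.  d8-d15 are callee-saved, hence the
@ vstmdb/vldmia pair.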
gcm_ghash_v8:
	vstmdb	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	vld1.64	{q0},[r0]		@ load [rotated] Xi
						@ "[rotated]" means that
						@ loaded value would have
						@ to be rotated in order to
						@ make it appear as in
						@ algorithm specification
	subs	r3,r3,#32		@ see if r3 is 32 or larger
	mov	r12,#16		@ r12 is used as post-
						@ increment for input pointer;
						@ as loop is modulo-scheduled
						@ r12 is zeroed just in time
						@ to preclude overstepping
						@ inp[len], which means that
						@ last block[s] are actually
						@ loaded twice, but last
						@ copy is not processed
	vld1.64	{q12,q13},[r1]!	@ load twisted H, ..., H^2
	vmov.i8	q11,#0xe1
	vld1.64	{q14},[r1]
	moveq	r12,#0			@ is it time to zero r12?
	vext.8	q0,q0,q0,#8		@ rotate Xi
	vld1.64	{q8},[r2]!	@ load [rotated] I[0]
	vshl.u64	q11,q11,#57		@ compose 0xc2.0 constant
#ifndef __ARMEB__
	vrev64.8	q8,q8
	vrev64.8	q0,q0
#endif
	vext.8	q3,q8,q8,#8		@ rotate I[0]
	blo	.Lodd_tail_v8		@ r3 was less than 32
	vld1.64	{q9},[r2],r12	@ load [rotated] I[1]
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8	q7,q9,q9,#8
	veor	q3,q3,q0		@ I[i]^=Xi
.byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q9,q9,q7		@ Karatsuba pre-processing
.byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	b	.Loop_mod2x_v8

.align	4
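@ Modulo-scheduled main loop: while the current pair of blocks is being
@ multiplied and reduced, the loads of I[i+2]/I[i+3] and the H·I[i+3]
@ products for the next iteration are already issued; r12 drops to 0 just
@ before the final loads so the last blocks are re-read instead of reading
@ past inp[len] (see the note at the top of the function).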
.Loop_mod2x_v8:
	vext.8	q10,q3,q3,#8
	subs	r3,r3,#32		@ is there more data?
.byte	0x86,0x0e,0xac,0xf2	@ pmull q0,q14,q3		@ H^2.lo·Xi.lo
	movlo	r12,#0			@ is it time to zero r12?

.byte	0xa2,0xae,0xaa,0xf2	@ pmull q5,q13,q9
	veor	q10,q10,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xad,0xf2	@ pmull2 q2,q14,q3		@ H^2.hi·Xi.hi
	veor	q0,q0,q4		@ accumulate
.byte	0xa5,0x2e,0xab,0xf2	@ pmull2 q1,q13,q10		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	vld1.64	{q8},[r2],r12	@ load [rotated] I[i+2]

	veor	q2,q2,q6
	moveq	r12,#0			@ is it time to zero r12?
	veor	q1,q1,q5

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	vld1.64	{q9},[r2],r12	@ load [rotated] I[i+3]
#ifndef __ARMEB__
	vrev64.8	q8,q8
#endif
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	vext.8	q7,q9,q9,#8
	vext.8	q3,q8,q8,#8
	veor	q0,q1,q10
.byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q3,q3,q2		@ accumulate q3 early

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q3,q3,q10
	veor	q9,q9,q7		@ Karatsuba pre-processing
	veor	q3,q3,q0
.byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	bhs	.Loop_mod2x_v8		@ there were at least 32 more bytes

	veor	q2,q2,q10
	vext.8	q3,q8,q8,#8		@ re-construct q3
	adds	r3,r3,#32		@ re-construct r3
	veor	q0,q0,q2		@ re-construct q0
	beq	.Ldone_v8		@ is r3 zero?
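@ Single trailing block: XOR it into Xi and multiply by H alone, using the
@ same Karatsuba multiply and two-phase reduction as gcm_gmult_v8.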
.Lodd_tail_v8:
	vext.8	q10,q0,q0,#8
	veor	q3,q3,q0		@ inp^=Xi
	veor	q9,q8,q10		@ q9 is rotated inp^Xi

.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

.Ldone_v8:
#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	vldmia	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	bx	lr
.size	gcm_ghash_v8,.-gcm_ghash_v8
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align	2
.align	2