// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.

#if defined(__has_feature)
#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM)
#define OPENSSL_NO_ASM
#endif
#endif

#if !defined(OPENSSL_NO_ASM)
#if defined(__arm__)
#if defined(BORINGSSL_PREFIX)
#include <boringssl_prefix_symbols_asm.h>
#endif
#include <openssl/arm_arch.h>

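@ GHASH for 32-bit ARM using the ARMv8 Crypto Extensions polynomial
@ multiplier (PMULL/PMULL2), accessed through NEON registers. As a rough
@ sketch of the C-side interface (argument names here are illustrative,
@ not taken from this file):
@
@   void gcm_init_v8(u128 Htable[16], const uint64_t H[2]);
@   void gcm_gmult_v8(uint8_t Xi[16], const u128 Htable[16]);
@   void gcm_ghash_v8(uint8_t Xi[16], const u128 Htable[16],
@                     const uint8_t *inp, size_t len);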
.text
.fpu	neon
.code	32
#undef	__thumb2__
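@ gcm_init_v8(Htable, H) precomputes the per-key GHASH tables: r0
@ points to the Htable to fill, r1 to the raw hash key H. It stores
@ the "twisted" H (H<<1 with the carry folded back via the 0xc2
@ constant) in Htable[0], the packed Karatsuba pre-processed halves
@ in Htable[1], and twisted H^2 in Htable[2].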
.globl	gcm_init_v8
.hidden	gcm_init_v8
.type	gcm_init_v8,%function
.align	4
gcm_init_v8:
	vld1.64	{q9},[r1]		@ load input H
	vmov.i8	q11,#0xe1
	vshl.i64	q11,q11,#57		@ 0xc2.0
	vext.8	q3,q9,q9,#8
	vshr.u64	q10,q11,#63
	vdup.32	q9,d18[1]
	vext.8	q8,q10,q11,#8		@ t0=0xc2....01
	vshr.u64	q10,q3,#63
	vshr.s32	q9,q9,#31		@ broadcast carry bit
	vand	q10,q10,q8
	vshl.i64	q3,q3,#1
	vext.8	q10,q10,q10,#8
	vand	q8,q8,q9
	vorr	q3,q3,q10		@ H<<<=1
	veor	q12,q3,q8		@ twisted H
	vst1.64	{q12},[r0]!		@ store Htable[0]

	@ calculate H^2
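	@ Each 128x128-bit carry-less product below is built from three
	@ 64x64-bit PMULLs (Karatsuba over GF(2)): with a = a1:a0 and
	@ b = b1:b0,
	@   a*b = a1*b1*x^128 ^ a0*b0
	@         ^ ((a0^a1)*(b0^b1) ^ a1*b1 ^ a0*b0)*x^64
	@ so the middle term is recovered with XORs only.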
	vext.8	q8,q12,q12,#8		@ Karatsuba pre-processing
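@ The .byte sequences below hand-assemble the PMULL/PMULL2 (vmull.p64)
@ instructions so that the file also builds with assemblers that lack
@ ARMv8 crypto-extension support; the intended mnemonic is shown in the
@ trailing comment of each line.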
.byte	0xa8,0x0e,0xa8,0xf2	@ pmull q0,q12,q12
	veor	q8,q8,q12
.byte	0xa9,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q12
.byte	0xa0,0x2e,0xa0,0xf2	@ pmull q1,q8,q8

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
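	@ Reduce the 256-bit product (q2:q1:q0 after the Karatsuba fixups)
	@ modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1. In this
	@ bit-reflected representation the reduction is two folds by the
	@ 0xc2 constant (0xe1<<57) held in q11: each phase multiplies the
	@ current low 64 bits by the constant and XORs the result back in.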
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q14,q0,q10

	vext.8	q9,q14,q14,#8		@ Karatsuba pre-processing
	veor	q9,q9,q14
	vext.8	q13,q8,q9,#8		@ pack Karatsuba pre-processed
	vst1.64	{q13,q14},[r0]		@ store Htable[1..2]

	bx	lr
.size	gcm_init_v8,.-gcm_init_v8
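@ gcm_gmult_v8(Xi, Htable) multiplies the current hash value by H:
@ Xi = Xi*H in GF(2^128). r0 points to Xi (updated in place), r1 to
@ the Htable written by gcm_init_v8; only Htable[0..1] (twisted H and
@ the packed Karatsuba halves) are loaded here.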
.globl	gcm_gmult_v8
.hidden	gcm_gmult_v8
.type	gcm_gmult_v8,%function
.align	4
gcm_gmult_v8:
	vld1.64	{q9},[r0]		@ load Xi
	vmov.i8	q11,#0xe1
	vld1.64	{q12,q13},[r1]	@ load twisted H, ...
	vshl.u64	q11,q11,#57
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8	q3,q9,q9,#8

.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	bx	lr
.size	gcm_gmult_v8,.-gcm_gmult_v8
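@ gcm_ghash_v8(Xi, Htable, inp, len) folds len bytes of input into the
@ hash value: Xi = (...((Xi ^ inp[0])*H ^ inp[1])*H ...)*H. r0 points
@ to Xi (updated in place), r1 to Htable, r2 to the input, and r3
@ holds len, a multiple of 16. The main loop is modulo-scheduled and
@ consumes two blocks per iteration: the older block is multiplied by
@ H^2 and the newer one by H, so both products are accumulated before
@ a single reduction.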
.globl	gcm_ghash_v8
.hidden	gcm_ghash_v8
.type	gcm_ghash_v8,%function
.align	4
gcm_ghash_v8:
	vstmdb	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	vld1.64	{q0},[r0]		@ load [rotated] Xi
						@ "[rotated]" means that
						@ loaded value would have
						@ to be rotated in order to
						@ make it appear as in
						@ algorithm specification
	subs	r3,r3,#32		@ see if r3 is 32 or larger
	mov	r12,#16		@ r12 is used as post-
						@ increment for input pointer;
						@ as loop is modulo-scheduled
						@ r12 is zeroed just in time
						@ to preclude overstepping
						@ inp[len], which means that
						@ last block[s] are actually
						@ loaded twice, but last
						@ copy is not processed
	vld1.64	{q12,q13},[r1]!	@ load twisted H, ..., H^2
	vmov.i8	q11,#0xe1
	vld1.64	{q14},[r1]
	moveq	r12,#0			@ is it time to zero r12?
	vext.8	q0,q0,q0,#8		@ rotate Xi
	vld1.64	{q8},[r2]!	@ load [rotated] I[0]
	vshl.u64	q11,q11,#57		@ compose 0xc2.0 constant
#ifndef __ARMEB__
	vrev64.8	q8,q8
	vrev64.8	q0,q0
#endif
	vext.8	q3,q8,q8,#8		@ rotate I[0]
	blo	.Lodd_tail_v8		@ r3 was less than 32
	vld1.64	{q9},[r2],r12	@ load [rotated] I[1]
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8	q7,q9,q9,#8
	veor	q3,q3,q0		@ I[i]^=Xi
.byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q9,q9,q7		@ Karatsuba pre-processing
.byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	b	.Loop_mod2x_v8

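@ Two blocks per iteration: q3 holds [rotated] (Xi ^ I[i]), q7 holds
@ [rotated] I[i+1], and q4/q6 already carry the H*I[i+1] partial
@ products started above, so each pass multiplies q3 by H^2,
@ accumulates both products, and performs a single reduction.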
.align	4
.Loop_mod2x_v8:
	vext.8	q10,q3,q3,#8
	subs	r3,r3,#32		@ is there more data?
.byte	0x86,0x0e,0xac,0xf2	@ pmull q0,q14,q3		@ H^2.lo·Xi.lo
	movlo	r12,#0			@ is it time to zero r12?

.byte	0xa2,0xae,0xaa,0xf2	@ pmull q5,q13,q9
	veor	q10,q10,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xad,0xf2	@ pmull2 q2,q14,q3		@ H^2.hi·Xi.hi
	veor	q0,q0,q4		@ accumulate
.byte	0xa5,0x2e,0xab,0xf2	@ pmull2 q1,q13,q10		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	vld1.64	{q8},[r2],r12	@ load [rotated] I[i+2]

	veor	q2,q2,q6
	moveq	r12,#0			@ is it time to zero r12?
	veor	q1,q1,q5

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	vld1.64	{q9},[r2],r12	@ load [rotated] I[i+3]
#ifndef __ARMEB__
	vrev64.8	q8,q8
#endif
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	vext.8	q7,q9,q9,#8
	vext.8	q3,q8,q8,#8
	veor	q0,q1,q10
.byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q3,q3,q2		@ accumulate q3 early

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q3,q3,q10
	veor	q9,q9,q7		@ Karatsuba pre-processing
	veor	q3,q3,q0
.byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	bhs	.Loop_mod2x_v8		@ there were at least 32 more bytes

	veor	q2,q2,q10
	vext.8	q3,q8,q8,#8		@ re-construct q3
	adds	r3,r3,#32		@ re-construct r3
	veor	q0,q0,q2		@ re-construct q0
	beq	.Ldone_v8		@ is r3 zero?
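@ Odd tail: exactly one 16-byte block remains, so finish with a single
@ (Xi ^ inp)*H multiplication, mirroring gcm_gmult_v8.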
.Lodd_tail_v8:
	vext.8	q10,q0,q0,#8
	veor	q3,q3,q0		@ inp^=Xi
	veor	q9,q8,q10		@ q9 is rotated inp^Xi

.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

.Ldone_v8:
#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	vldmia	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	bx	lr
.size	gcm_ghash_v8,.-gcm_ghash_v8
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0	@ "GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
.align	2
.align	2
#endif
#endif  // !OPENSSL_NO_ASM