// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.

#if !defined(__has_feature)
#define __has_feature(x) 0
#endif
#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM)
#define OPENSSL_NO_ASM
#endif

#if !defined(OPENSSL_NO_ASM)
#if defined(BORINGSSL_PREFIX)
#include <boringssl_prefix_symbols_asm.h>
#endif
#include <openssl/arm_arch.h>

.text

.code	32
#undef	__thumb2__
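@ gcm_init_v8: build the pmull GHASH key schedule.
@ Assumed prototype (per the usual OpenSSL/BoringSSL convention, not stated
@ in this file): void gcm_init_v8(u128 Htable[], const u64 H[2]).
@ Loads the hash key H from [r1], converts it to the "twisted" form used by
@ the pmull code below, computes H^2, and stores the table entries at [r0].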
.globl	_gcm_init_v8
.private_extern	_gcm_init_v8
#ifdef __thumb2__
.thumb_func	_gcm_init_v8
#endif
.align	4
_gcm_init_v8:
	vld1.64	{q9},[r1]		@ load input H
	vmov.i8	q11,#0xe1
	vshl.i64	q11,q11,#57		@ 0xc2.0
	vext.8	q3,q9,q9,#8
	vshr.u64	q10,q11,#63
	vdup.32	q9,d18[1]
	vext.8	q8,q10,q11,#8		@ t0=0xc2....01
	vshr.u64	q10,q3,#63
	vshr.s32	q9,q9,#31		@ broadcast carry bit
	vand	q10,q10,q8
	vshl.i64	q3,q3,#1
	vext.8	q10,q10,q10,#8
	vand	q8,q8,q9
	vorr	q3,q3,q10		@ H<<<=1
	veor	q12,q3,q8		@ twisted H
	vst1.64	{q12},[r0]!		@ store Htable[0]

	@ calculate H^2
	vext.8	q8,q12,q12,#8		@ Karatsuba pre-processing
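	@ Note: the .byte directives below are raw encodings of the ARMv8
	@ Crypto Extension polynomial-multiply instructions (pmull/pmull2);
	@ each trailing comment names the instruction encoded.  Presumably
	@ they are emitted as raw opcodes so that assemblers lacking these
	@ mnemonics in 32-bit mode can still build this file (assumption,
	@ not stated in this file).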
.byte	0xa8,0x0e,0xa8,0xf2	@ pmull q0,q12,q12
	veor	q8,q8,q12
.byte	0xa9,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q12
.byte	0xa0,0x2e,0xa0,0xf2	@ pmull q1,q8,q8

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase
	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q14,q0,q10

	vext.8	q9,q14,q14,#8		@ Karatsuba pre-processing
	veor	q9,q9,q14
	vext.8	q13,q8,q9,#8		@ pack Karatsuba pre-processed
	vst1.64	{q13,q14},[r0]		@ store Htable[1..2]

	bx	lr

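@ gcm_gmult_v8: multiply the current hash value Xi by the key H in GF(2^128).
@ Assumed prototype (usual OpenSSL/BoringSSL convention, not stated in this
@ file): void gcm_gmult_v8(u64 Xi[2], const u128 Htable[]).
@ Xi is loaded from and written back to [r0]; the twisted H values are read
@ from the table at [r1].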
.globl	_gcm_gmult_v8
.private_extern	_gcm_gmult_v8
#ifdef __thumb2__
.thumb_func	_gcm_gmult_v8
#endif
.align	4
_gcm_gmult_v8:
	vld1.64	{q9},[r0]		@ load Xi
	vmov.i8	q11,#0xe1
	vld1.64	{q12,q13},[r1]	@ load twisted H, ...
	vshl.u64	q11,q11,#57
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8	q3,q9,q9,#8

.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	bx	lr

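@ gcm_ghash_v8: fold len bytes of input into the hash value Xi.
@ Assumed prototype (usual OpenSSL/BoringSSL convention, not stated in this
@ file): void gcm_ghash_v8(u64 Xi[2], const u128 Htable[], const u8 *inp,
@ size_t len), i.e. r0=Xi, r1=Htable, r2=inp, r3=len, with len a multiple
@ of the 16-byte block size.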
.globl	_gcm_ghash_v8
.private_extern	_gcm_ghash_v8
#ifdef __thumb2__
.thumb_func	_gcm_ghash_v8
#endif
.align	4
_gcm_ghash_v8:
	vstmdb	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	vld1.64	{q0},[r0]		@ load [rotated] Xi
						@ "[rotated]" means that
						@ loaded value would have
						@ to be rotated in order to
						@ make it appear as in
						@ algorithm specification
	subs	r3,r3,#32		@ see if r3 is 32 or larger
	mov	r12,#16		@ r12 is used as post-
						@ increment for input pointer;
						@ as loop is modulo-scheduled
						@ r12 is zeroed just in time
						@ to preclude overstepping
						@ inp[len], which means that
						@ last block[s] are actually
						@ loaded twice, but last
						@ copy is not processed
	vld1.64	{q12,q13},[r1]!	@ load twisted H, ..., H^2
	vmov.i8	q11,#0xe1
	vld1.64	{q14},[r1]
	moveq	r12,#0			@ is it time to zero r12?
	vext.8	q0,q0,q0,#8		@ rotate Xi
	vld1.64	{q8},[r2]!	@ load [rotated] I[0]
	vshl.u64	q11,q11,#57		@ compose 0xc2.0 constant
#ifndef __ARMEB__
	vrev64.8	q8,q8
	vrev64.8	q0,q0
#endif
	vext.8	q3,q8,q8,#8		@ rotate I[0]
	blo	Lodd_tail_v8		@ r3 was less than 32
	vld1.64	{q9},[r2],r12	@ load [rotated] I[1]
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8	q7,q9,q9,#8
	veor	q3,q3,q0		@ I[i]^=Xi
.byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q9,q9,q7		@ Karatsuba pre-processing
.byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	b	Loop_mod2x_v8

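@ Main loop: each iteration consumes two 16-byte blocks, combining them with
@ H and H^2 so that a single reduction covers both (the "mod2x" in the label).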
.align	4
Loop_mod2x_v8:
	vext.8	q10,q3,q3,#8
	subs	r3,r3,#32		@ is there more data?
.byte	0x86,0x0e,0xac,0xf2	@ pmull q0,q14,q3		@ H^2.lo·Xi.lo
	movlo	r12,#0			@ is it time to zero r12?

.byte	0xa2,0xae,0xaa,0xf2	@ pmull q5,q13,q9
	veor	q10,q10,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xad,0xf2	@ pmull2 q2,q14,q3		@ H^2.hi·Xi.hi
	veor	q0,q0,q4		@ accumulate
.byte	0xa5,0x2e,0xab,0xf2	@ pmull2 q1,q13,q10		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	vld1.64	{q8},[r2],r12	@ load [rotated] I[i+2]

	veor	q2,q2,q6
	moveq	r12,#0			@ is it time to zero r12?
	veor	q1,q1,q5

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	vld1.64	{q9},[r2],r12	@ load [rotated] I[i+3]
#ifndef __ARMEB__
	vrev64.8	q8,q8
#endif
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	vext.8	q7,q9,q9,#8
	vext.8	q3,q8,q8,#8
	veor	q0,q1,q10
.byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q3,q3,q2		@ accumulate q3 early

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q3,q3,q10
	veor	q9,q9,q7		@ Karatsuba pre-processing
	veor	q3,q3,q0
.byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	bhs	Loop_mod2x_v8		@ there was at least 32 more bytes

	veor	q2,q2,q10
	vext.8	q3,q8,q8,#8		@ re-construct q3
	adds	r3,r3,#32		@ re-construct r3
	veor	q0,q0,q2		@ re-construct q0
	beq	Ldone_v8		@ is r3 zero?
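@ Odd tail: a single 16-byte block remains (len was not a multiple of 32),
@ so process it with one multiply by H.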
Lodd_tail_v8:
	vext.8	q10,q0,q0,#8
	veor	q3,q3,q0		@ inp^=Xi
	veor	q9,q8,q10		@ q9 is rotated inp^Xi

.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

Ldone_v8:
#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	vldmia	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	bx	lr

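@ The .byte string below is the NUL-terminated ASCII identification string
@ "GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>".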
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align	2
.align	2
#endif  // !OPENSSL_NO_ASM