// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.

#include <openssl/asm_base.h>

#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__)
#include <openssl/arm_arch.h>

#if __ARM_MAX_ARCH__>=7
.text

.code	32
#undef	__thumb2__
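@ _gcm_init_v8 precomputes the hash-key powers used by the other two
@ entry points. Judging by the register use below, the apparent C
@ prototype (an assumption; it is not stated in this file) is
@   void gcm_init_v8(u128 Htable[16], const uint64_t H[2]);
@ with r0 = Htable (output) and r1 = H. It stores Htable[0] = twisted H,
@ Htable[1] = packed Karatsuba helper values, and Htable[2] = twisted H^2.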
.globl	_gcm_init_v8
.private_extern	_gcm_init_v8
#ifdef __thumb2__
.thumb_func	_gcm_init_v8
#endif
.align	4
_gcm_init_v8:
	AARCH64_VALID_CALL_TARGET
	vld1.64	{q9},[r1]		@ load input H
	vmov.i8	q11,#0xe1
	vshl.i64	q11,q11,#57		@ 0xc2.0
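@ Why 0xe1 shifted by 57 yields the reduction constant: vmov.i8 fills
@ every byte with 0xe1, and shifting each 64-bit lane left by 57 keeps
@ only bits of the lowest byte:
@   0xe1 = 1110_0001b, bits {0,5,6,7}; +57 -> bits {57,62,63} (64 drops)
@   => each lane = 0xC200000000000000
@ i.e. the GHASH polynomial x^128 + x^7 + x^2 + x + 1 in the
@ bit-reflected representation that GCM uses.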
	vext.8	q3,q9,q9,#8
	vshr.u64	q10,q11,#63
	vdup.32	q9,d18[1]
	vext.8	q8,q10,q11,#8		@ t0=0xc2....01
	vshr.u64	q10,q3,#63
	vshr.s32	q9,q9,#31		@ broadcast carry bit
	vand	q10,q10,q8
	vshl.i64	q3,q3,#1
	vext.8	q10,q10,q10,#8
	vand	q8,q8,q9
	vorr	q3,q3,q10		@ H<<<=1
	veor	q12,q3,q8		@ twisted H
	vst1.64	{q12},[r0]!		@ store Htable[0]
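@ Roughly (an illustrative sketch, not the exact bit layout), the code
@ above computes
@   Htable[0] = (H <<< 1) ^ (0xc2..01 & carry_mask)
@ where carry_mask broadcasts H's shifted-out top bit. In the reflected
@ domain this is H multiplied by x with a conditional reduction;
@ premultiplying H once here is what lets every later product be
@ reduced with the cheap 0xc2-constant folds.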

	@ calculate H^2
	vext.8	q8,q12,q12,#8		@ Karatsuba pre-processing
.byte	0xa8,0x0e,0xa8,0xf2	@ pmull q0,q12,q12
	veor	q8,q8,q12
.byte	0xa9,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q12
.byte	0xa0,0x2e,0xa0,0xf2	@ pmull q1,q8,q8
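@ Karatsuba over GF(2): a 128x128-bit carry-less product takes three
@ 64x64 PMULLs instead of four because, with XOR as addition,
@   (a1 ^ a0)*(b1 ^ b0) = a1*b1 ^ a1*b0 ^ a0*b1 ^ a0*b0
@ so the middle term falls out of the third product after XORing back
@ lo = a0*b0 and hi = a1*b1. The PMULL/PMULL2 (vmull.p64) instructions
@ are emitted as raw .byte encodings, presumably so the file assembles
@ even with toolchains whose assemblers lack the crypto extensions.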

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q14,q0,q10
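@ The two pmull-by-q11 steps above are the two-phase reduction: each
@ phase multiplies the low half of the product by the 0xc2 constant and
@ XOR-folds it into the upper half, roughly
@   X = X_hi ^ (X_lo * 0xc2..00)   (applied twice)
@ collapsing the 256-bit product back to 128 bits. q14 now holds the
@ twisted H^2.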

	vext.8	q9,q14,q14,#8		@ Karatsuba pre-processing
	veor	q9,q9,q14
	vext.8	q13,q8,q9,#8		@ pack Karatsuba pre-processed
	vst1.64	{q13,q14},[r0]!	@ store Htable[1..2]
	bx	lr

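@ _gcm_gmult_v8 multiplies the running hash value by H once. The
@ apparent C prototype (again an assumption from register use) is
@   void gcm_gmult_v8(uint64_t Xi[2], const u128 Htable[16]);
@ with r0 = Xi, updated in place, and r1 = Htable. The vrev64.8 below
@ converts GHASH's big-endian byte order into lane order on
@ little-endian builds; big-endian (__ARMEB__) builds skip it.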
.globl	_gcm_gmult_v8
.private_extern	_gcm_gmult_v8
#ifdef __thumb2__
.thumb_func	_gcm_gmult_v8
#endif
.align	4
_gcm_gmult_v8:
	AARCH64_VALID_CALL_TARGET
	vld1.64	{q9},[r0]		@ load Xi
	vmov.i8	q11,#0xe1
	vld1.64	{q12,q13},[r1]	@ load twisted H, ...
	vshl.u64	q11,q11,#57
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8	q3,q9,q9,#8

.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	bx	lr

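@ _gcm_ghash_v8 folds a whole buffer into Xi. The apparent C prototype
@ (an assumption from register use) is
@   void gcm_ghash_v8(uint64_t Xi[2], const u128 Htable[16],
@                     const uint8_t *inp, size_t len);
@ with r0 = Xi, r1 = Htable, r2 = inp and r3 = len in bytes; the code
@ consumes 16-byte blocks, two per iteration of the main loop.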
.globl	_gcm_ghash_v8
.private_extern	_gcm_ghash_v8
#ifdef __thumb2__
.thumb_func	_gcm_ghash_v8
#endif
.align	4
_gcm_ghash_v8:
	AARCH64_VALID_CALL_TARGET
	vstmdb	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	vld1.64	{q0},[r0]		@ load [rotated] Xi
						@ "[rotated]" means the
						@ loaded value must be
						@ rotated to match the
						@ algorithm specification
	subs	r3,r3,#32		@ see if r3 is 32 or larger
	mov	r12,#16		@ r12 is used as a post-
						@ increment for the input pointer;
						@ as the loop is modulo-scheduled,
						@ r12 is zeroed just in time
						@ to avoid overstepping
						@ inp[len]; the last block[s]
						@ are therefore loaded twice,
						@ but the extra copy is never
						@ processed
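@ In C-like terms, the post-increment trick is roughly (a sketch of the
@ idea only, not the real schedule):
@   size_t step = 16;
@   while (blocks remain) {
@     block = load(inp); inp += step;
@     if (that was the final load) step = 0;  /* stop advancing inp */
@     /* ... multiply and accumulate ... */
@   }
@ Re-loading the last block with step == 0 is harmless because the
@ duplicate copy is never fed into the hash.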
	vld1.64	{q12,q13},[r1]!	@ load twisted H, ..., H^2
	vmov.i8	q11,#0xe1
	vld1.64	{q14},[r1]
	moveq	r12,#0			@ is it time to zero r12?
	vext.8	q0,q0,q0,#8		@ rotate Xi
	vld1.64	{q8},[r2]!	@ load [rotated] I[0]
	vshl.u64	q11,q11,#57		@ compose 0xc2.0 constant
#ifndef __ARMEB__
	vrev64.8	q8,q8
	vrev64.8	q0,q0
#endif
	vext.8	q3,q8,q8,#8		@ rotate I[0]
	blo	Lodd_tail_v8		@ r3 was less than 32
	vld1.64	{q9},[r2],r12	@ load [rotated] I[1]
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8	q7,q9,q9,#8
	veor	q3,q3,q0		@ I[i]^=Xi
.byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q9,q9,q7		@ Karatsuba pre-processing
.byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	b	Loop_mod2x_v8

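@ Two-block aggregation: each pass through the loop computes
@   Xi = ((Xi ^ I[i]) * H^2) ^ (I[i+1] * H)
@ absorbing two input blocks with a single reduction. This equals two
@ sequential (Xi ^ I) * H steps, with the first product simply deferred
@ by one extra power of H.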
.align	4
Loop_mod2x_v8:
	vext.8	q10,q3,q3,#8
	subs	r3,r3,#32		@ is there more data?
.byte	0x86,0x0e,0xac,0xf2	@ pmull q0,q14,q3		@ H^2.lo·Xi.lo
	movlo	r12,#0			@ is it time to zero r12?

.byte	0xa2,0xae,0xaa,0xf2	@ pmull q5,q13,q9
	veor	q10,q10,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xad,0xf2	@ pmull2 q2,q14,q3		@ H^2.hi·Xi.hi
	veor	q0,q0,q4		@ accumulate
.byte	0xa5,0x2e,0xab,0xf2	@ pmull2 q1,q13,q10		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	vld1.64	{q8},[r2],r12	@ load [rotated] I[i+2]

	veor	q2,q2,q6
	moveq	r12,#0			@ is it time to zero r12?
	veor	q1,q1,q5

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	vld1.64	{q9},[r2],r12	@ load [rotated] I[i+3]
#ifndef __ARMEB__
	vrev64.8	q8,q8
#endif
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	vext.8	q7,q9,q9,#8
	vext.8	q3,q8,q8,#8
	veor	q0,q1,q10
.byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q3,q3,q2		@ accumulate q3 early

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q3,q3,q10
	veor	q9,q9,q7		@ Karatsuba pre-processing
	veor	q3,q3,q0
.byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	bhs	Loop_mod2x_v8		@ there were at least 32 more bytes

	veor	q2,q2,q10
	vext.8	q3,q8,q8,#8		@ re-construct q3
	adds	r3,r3,#32		@ re-construct r3
	veor	q0,q0,q2		@ re-construct q0
	beq	Ldone_v8		@ is r3 zero?
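@ Lodd_tail_v8 absorbs one final block when len is an odd number of
@ 16-byte blocks: Xi = (Xi ^ I[last]) * H, using the same Karatsuba
@ multiply and two-phase reduction as the loop above.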
Lodd_tail_v8:
	vext.8	q10,q0,q0,#8
	veor	q3,q3,q0		@ inp^=Xi
	veor	q9,q8,q10		@ q9 is rotated inp^Xi

.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

Ldone_v8:
#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	vldmia	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	bx	lr

.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
@ the bytes above decode to the NUL-terminated identification string
@ "GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
.align	2
.align	2
#endif
#endif  // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)