// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.

#if !defined(__has_feature)
#define __has_feature(x) 0
#endif
#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM)
#define OPENSSL_NO_ASM
#endif
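// MemorySanitizer cannot see initialization performed inside assembly, so
// the assembly code paths are disabled under MSan by defining OPENSSL_NO_ASM.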

#if !defined(OPENSSL_NO_ASM)
#include <GFp/arm_arch.h>

.text

.globl	_GFp_gcm_init_clmul
.private_extern	_GFp_gcm_init_clmul

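// GFp_gcm_init_clmul(Htable, H): judging from the loads and stores below,
// x1 points to the raw 128-bit hash key H and x0 to the Htable being
// filled in (Htable[0] = twisted H, Htable[1] = packed Karatsuba halves,
// Htable[2] = twisted H^2).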
.align	4
_GFp_gcm_init_clmul:
	AARCH64_VALID_CALL_TARGET
	ld1	{v17.2d},[x1]		//load input H
	movi	v19.16b,#0xe1
	shl	v19.2d,v19.2d,#57		//0xc2.0
	ext	v3.16b,v17.16b,v17.16b,#8
	ushr	v18.2d,v19.2d,#63
	dup	v17.4s,v17.s[1]
	ext	v16.16b,v18.16b,v19.16b,#8		//t0=0xc2....01
	ushr	v18.2d,v3.2d,#63
	sshr	v17.4s,v17.4s,#31		//broadcast carry bit
	and	v18.16b,v18.16b,v16.16b
	shl	v3.2d,v3.2d,#1
	ext	v18.16b,v18.16b,v18.16b,#8
	and	v16.16b,v16.16b,v17.16b
	orr	v3.16b,v3.16b,v18.16b		//H<<<=1
	eor	v20.16b,v3.16b,v16.16b		//twisted H
	st1	{v20.2d},[x0],#16		//store Htable[0]
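	// The "twisted" H appears to be H·x mod P(x): premultiplying the key
	// by x lets the bit-reflected GHASH product be formed with plain
	// carry-less pmull/pmull2, and 0xc2...01 is the reduction polynomial
	// in this reflected representation.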

	//calculate H^2
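	// One level of Karatsuba: over GF(2), where addition is XOR,
	// a.hi·b.lo + a.lo·b.hi = (a.hi+a.lo)·(b.hi+b.lo) + a.hi·b.hi + a.lo·b.lo,
	// so each 128x128-bit product costs three pmulls instead of four.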
	ext	v16.16b,v20.16b,v20.16b,#8		//Karatsuba pre-processing
	pmull	v0.1q,v20.1d,v20.1d
	eor	v16.16b,v16.16b,v20.16b
	pmull2	v2.1q,v20.2d,v20.2d
	pmull	v1.1q,v16.1d,v16.1d

	ext	v17.16b,v0.16b,v2.16b,#8		//Karatsuba post-processing
	eor	v18.16b,v0.16b,v2.16b
	eor	v1.16b,v1.16b,v17.16b
	eor	v1.16b,v1.16b,v18.16b
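	// Two-phase reduction: the 256-bit product (high half in v2, low half
	// in v0, middle in v1) is folded back to 128 bits modulo the GHASH
	// polynomial x^128 + x^7 + x^2 + x + 1, which the 0xc2... constant in
	// v19 encodes in this implementation's bit-reflected form.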
	pmull	v18.1q,v0.1d,v19.1d		//1st phase

	ins	v2.d[0],v1.d[1]
	ins	v1.d[1],v0.d[0]
	eor	v0.16b,v1.16b,v18.16b

	ext	v18.16b,v0.16b,v0.16b,#8		//2nd phase
	pmull	v0.1q,v0.1d,v19.1d
	eor	v18.16b,v18.16b,v2.16b
	eor	v22.16b,v0.16b,v18.16b

	ext	v17.16b,v22.16b,v22.16b,#8		//Karatsuba pre-processing
	eor	v17.16b,v17.16b,v22.16b
	ext	v21.16b,v16.16b,v17.16b,#8		//pack Karatsuba pre-processed
	st1	{v21.2d,v22.2d},[x0]		//store Htable[1..2]

	ret

.globl	_GFp_gcm_gmult_clmul
.private_extern	_GFp_gcm_gmult_clmul

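// GFp_gcm_gmult_clmul(Xi, Htable): from the code below, x0 points to the
// 16-byte hash value Xi, which is multiplied by H (via the precomputed
// Htable at x1) and written back, i.e. Xi = Xi·H in GF(2^128).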
.align	4
_GFp_gcm_gmult_clmul:
	AARCH64_VALID_CALL_TARGET
	ld1	{v17.2d},[x0]		//load Xi
	movi	v19.16b,#0xe1
	ld1	{v20.2d,v21.2d},[x1]	//load twisted H, ...
	shl	v19.2d,v19.2d,#57
#ifndef __ARMEB__
	rev64	v17.16b,v17.16b
#endif
	ext	v3.16b,v17.16b,v17.16b,#8

	pmull	v0.1q,v20.1d,v3.1d		//H.lo·Xi.lo
	eor	v17.16b,v17.16b,v3.16b		//Karatsuba pre-processing
	pmull2	v2.1q,v20.2d,v3.2d		//H.hi·Xi.hi
	pmull	v1.1q,v21.1d,v17.1d		//(H.lo+H.hi)·(Xi.lo+Xi.hi)

	ext	v17.16b,v0.16b,v2.16b,#8		//Karatsuba post-processing
	eor	v18.16b,v0.16b,v2.16b
	eor	v1.16b,v1.16b,v17.16b
	eor	v1.16b,v1.16b,v18.16b
	pmull	v18.1q,v0.1d,v19.1d		//1st phase of reduction

	ins	v2.d[0],v1.d[1]
	ins	v1.d[1],v0.d[0]
	eor	v0.16b,v1.16b,v18.16b

	ext	v18.16b,v0.16b,v0.16b,#8		//2nd phase of reduction
	pmull	v0.1q,v0.1d,v19.1d
	eor	v18.16b,v18.16b,v2.16b
	eor	v0.16b,v0.16b,v18.16b

#ifndef __ARMEB__
	rev64	v0.16b,v0.16b
#endif
	ext	v0.16b,v0.16b,v0.16b,#8
	st1	{v0.2d},[x0]		//write out Xi

	ret

.globl	_GFp_gcm_ghash_clmul
.private_extern	_GFp_gcm_ghash_clmul

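// GFp_gcm_ghash_clmul(Xi, Htable, inp, len): from the code below, x0 points
// to Xi, x1 to the Htable (twisted H, Karatsuba halves, H^2), x2 to the
// input blocks, and x3 holds the input length in bytes (assumed to be a
// multiple of 16).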
.align	4
_GFp_gcm_ghash_clmul:
	AARCH64_VALID_CALL_TARGET
	ld1	{v0.2d},[x0]		//load [rotated] Xi
						//"[rotated]" means that
						//loaded value would have
						//to be rotated in order to
						//make it appear as in
						//algorithm specification
	subs	x3,x3,#32		//see if x3 is 32 or larger
	mov	x12,#16		//x12 is used as post-
						//increment for input pointer;
						//as loop is modulo-scheduled
						//x12 is zeroed just in time
						//to preclude overstepping
						//inp[len], which means that
						//last block[s] are actually
						//loaded twice, but last
						//copy is not processed
	ld1	{v20.2d,v21.2d},[x1],#32	//load twisted H, ..., H^2
	movi	v19.16b,#0xe1
	ld1	{v22.2d},[x1]
	csel	x12,xzr,x12,eq			//is it time to zero x12?
	ext	v0.16b,v0.16b,v0.16b,#8		//rotate Xi
	ld1	{v16.2d},[x2],#16	//load [rotated] I[0]
	shl	v19.2d,v19.2d,#57		//compose 0xc2.0 constant
#ifndef __ARMEB__
	rev64	v16.16b,v16.16b
	rev64	v0.16b,v0.16b
#endif
	ext	v3.16b,v16.16b,v16.16b,#8		//rotate I[0]
	b.lo	Lodd_tail_v8		//x3 was less than 32
	ld1	{v17.2d},[x2],x12	//load [rotated] I[1]
#ifndef __ARMEB__
	rev64	v17.16b,v17.16b
#endif
	ext	v7.16b,v17.16b,v17.16b,#8
	eor	v3.16b,v3.16b,v0.16b		//I[i]^=Xi
	pmull	v4.1q,v20.1d,v7.1d		//H·Ii+1
	eor	v17.16b,v17.16b,v7.16b		//Karatsuba pre-processing
	pmull2	v6.1q,v20.2d,v7.2d
	b	Loop_mod2x_v8

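	// Modulo-scheduled main loop: each iteration folds two blocks at
	// once, Xi = (Xi + I[i])·H^2 + I[i+1]·H, so a single reduction
	// serves 32 bytes of input.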
.align	4
Loop_mod2x_v8:
	ext	v18.16b,v3.16b,v3.16b,#8
	subs	x3,x3,#32		//is there more data?
	pmull	v0.1q,v22.1d,v3.1d		//H^2.lo·Xi.lo
	csel	x12,xzr,x12,lo			//is it time to zero x12?

	pmull	v5.1q,v21.1d,v17.1d
	eor	v18.16b,v18.16b,v3.16b		//Karatsuba pre-processing
	pmull2	v2.1q,v22.2d,v3.2d		//H^2.hi·Xi.hi
	eor	v0.16b,v0.16b,v4.16b		//accumulate
	pmull2	v1.1q,v21.2d,v18.2d		//(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	ld1	{v16.2d},[x2],x12	//load [rotated] I[i+2]

	eor	v2.16b,v2.16b,v6.16b
	csel	x12,xzr,x12,eq			//is it time to zero x12?
	eor	v1.16b,v1.16b,v5.16b

	ext	v17.16b,v0.16b,v2.16b,#8		//Karatsuba post-processing
	eor	v18.16b,v0.16b,v2.16b
	eor	v1.16b,v1.16b,v17.16b
	ld1	{v17.2d},[x2],x12	//load [rotated] I[i+3]
#ifndef __ARMEB__
	rev64	v16.16b,v16.16b
#endif
	eor	v1.16b,v1.16b,v18.16b
	pmull	v18.1q,v0.1d,v19.1d		//1st phase of reduction

#ifndef __ARMEB__
	rev64	v17.16b,v17.16b
#endif
	ins	v2.d[0],v1.d[1]
	ins	v1.d[1],v0.d[0]
	ext	v7.16b,v17.16b,v17.16b,#8
	ext	v3.16b,v16.16b,v16.16b,#8
	eor	v0.16b,v1.16b,v18.16b
	pmull	v4.1q,v20.1d,v7.1d		//H·Ii+1
	eor	v3.16b,v3.16b,v2.16b		//accumulate v3.16b early

	ext	v18.16b,v0.16b,v0.16b,#8		//2nd phase of reduction
	pmull	v0.1q,v0.1d,v19.1d
	eor	v3.16b,v3.16b,v18.16b
	eor	v17.16b,v17.16b,v7.16b		//Karatsuba pre-processing
	eor	v3.16b,v3.16b,v0.16b
	pmull2	v6.1q,v20.2d,v7.2d
	b.hs	Loop_mod2x_v8		//there were at least 32 more bytes

	eor	v2.16b,v2.16b,v18.16b
	ext	v3.16b,v16.16b,v16.16b,#8		//re-construct v3.16b
	adds	x3,x3,#32		//re-construct x3
	eor	v0.16b,v0.16b,v2.16b		//re-construct v0.16b
	b.eq	Ldone_v8		//is x3 zero?
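	// Odd tail: a single 16-byte block remains; fold it with one
	// multiplication by H, using the same Karatsuba and two-phase
	// reduction sequence as GFp_gcm_gmult_clmul.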
Lodd_tail_v8:
	ext	v18.16b,v0.16b,v0.16b,#8
	eor	v3.16b,v3.16b,v0.16b		//inp^=Xi
	eor	v17.16b,v16.16b,v18.16b		//v17.16b is rotated inp^Xi

	pmull	v0.1q,v20.1d,v3.1d		//H.lo·Xi.lo
	eor	v17.16b,v17.16b,v3.16b		//Karatsuba pre-processing
	pmull2	v2.1q,v20.2d,v3.2d		//H.hi·Xi.hi
	pmull	v1.1q,v21.1d,v17.1d		//(H.lo+H.hi)·(Xi.lo+Xi.hi)

	ext	v17.16b,v0.16b,v2.16b,#8		//Karatsuba post-processing
	eor	v18.16b,v0.16b,v2.16b
	eor	v1.16b,v1.16b,v17.16b
	eor	v1.16b,v1.16b,v18.16b
	pmull	v18.1q,v0.1d,v19.1d		//1st phase of reduction

	ins	v2.d[0],v1.d[1]
	ins	v1.d[1],v0.d[0]
	eor	v0.16b,v1.16b,v18.16b

	ext	v18.16b,v0.16b,v0.16b,#8		//2nd phase of reduction
	pmull	v0.1q,v0.1d,v19.1d
	eor	v18.16b,v18.16b,v2.16b
	eor	v0.16b,v0.16b,v18.16b

Ldone_v8:
#ifndef __ARMEB__
	rev64	v0.16b,v0.16b
#endif
	ext	v0.16b,v0.16b,v0.16b,#8
	st1	{v0.2d},[x0]		//write out Xi

	ret

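// The .byte directive below spells out the NUL-terminated credit string
// "GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>".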
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align	2
.align	2
#endif  // !OPENSSL_NO_ASM