// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.

#if !defined(__has_feature)
#define __has_feature(x) 0
#endif
#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM)
#define OPENSSL_NO_ASM
#endif

#if !defined(OPENSSL_NO_ASM)
#if defined(BORINGSSL_PREFIX)
#include <boringssl_prefix_symbols_asm.h>
#endif
.syntax	unified




#if defined(__thumb2__)
.thumb
#else
.code	32
#endif

.text


.align	7	@ totally strategic alignment
_vpaes_consts:
Lk_mc_forward:@ mc_forward
.quad	0x0407060500030201, 0x0C0F0E0D080B0A09
.quad	0x080B0A0904070605, 0x000302010C0F0E0D
.quad	0x0C0F0E0D080B0A09, 0x0407060500030201
.quad	0x000302010C0F0E0D, 0x080B0A0904070605
Lk_mc_backward:@ mc_backward
.quad	0x0605040702010003, 0x0E0D0C0F0A09080B
.quad	0x020100030E0D0C0F, 0x0A09080B06050407
.quad	0x0E0D0C0F0A09080B, 0x0605040702010003
.quad	0x0A09080B06050407, 0x020100030E0D0C0F
Lk_sr:@ sr
.quad	0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad	0x030E09040F0A0500, 0x0B06010C07020D08
.quad	0x0F060D040B020900, 0x070E050C030A0108
.quad	0x0B0E0104070A0D00, 0x0306090C0F020508

@
@ "Hot" constants
@
Lk_inv:@ inv, inva
.quad	0x0E05060F0D080180, 0x040703090A0B0C02
.quad	0x01040A060F0B0780, 0x030D0E0C02050809
Lk_ipt:@ input transform (lo, hi)
.quad	0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad	0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
Lk_sbo:@ sbou, sbot
.quad	0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad	0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
Lk_sb1:@ sb1u, sb1t
.quad	0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad	0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
Lk_sb2:@ sb2u, sb2t
.quad	0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad	0xE27A93C60B712400, 0x5EB7E955BC982FCD

.byte	86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,55,32,78,69,79,78,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align	2

.align	6
@@
@@  _aes_preheat
@@
@@  Fills q9-q15 as specified below.
@@
#ifdef __thumb2__
.thumb_func	_vpaes_preheat
#endif
.align	4
_vpaes_preheat:
	adr	r10, Lk_inv
	vmov.i8	q9, #0x0f		@ Lk_s0F
	vld1.64	{q10,q11}, [r10]!	@ Lk_inv
	add	r10, r10, #64		@ Skip Lk_ipt, Lk_sbo
	vld1.64	{q12,q13}, [r10]!	@ Lk_sb1
	vld1.64	{q14,q15}, [r10]	@ Lk_sb2
	bx	lr

@@
@@  _aes_encrypt_core
@@
@@  AES-encrypt q0.
@@
@@  Inputs:
@@     q0 = input
@@     q9-q15 as in _vpaes_preheat
@@    [r2] = scheduled keys
@@
@@  Output in q0
@@  Clobbers  q1-q5, r8-r11
@@  Preserves q6-q8 so you get some local vectors
@@
@@
#ifdef __thumb2__
.thumb_func	_vpaes_encrypt_core
#endif
.align	4
_vpaes_encrypt_core:
	mov	r9, r2
	ldr	r8, [r2,#240]		@ pull rounds
	adr	r11, Lk_ipt
	@ vmovdqa	.Lk_ipt(%rip),	%xmm2	# iptlo
	@ vmovdqa	.Lk_ipt+16(%rip), %xmm3	# ipthi
	vld1.64	{q2, q3}, [r11]
	adr	r11, Lk_mc_forward+16
	vld1.64	{q5}, [r9]!		@ vmovdqu	(%r9),	%xmm5		# round0 key
	vand	q1, q0, q9		@ vpand	%xmm9,	%xmm0,	%xmm1
	vshr.u8	q0, q0, #4		@ vpsrlb	$4,	%xmm0,	%xmm0
	vtbl.8	d2, {q2}, d2	@ vpshufb	%xmm1,	%xmm2,	%xmm1
	vtbl.8	d3, {q2}, d3
	vtbl.8	d4, {q3}, d0	@ vpshufb	%xmm0,	%xmm3,	%xmm2
	vtbl.8	d5, {q3}, d1
	veor	q0, q1, q5		@ vpxor	%xmm5,	%xmm1,	%xmm0
	veor	q0, q0, q2		@ vpxor	%xmm2,	%xmm0,	%xmm0

	@ .Lenc_entry ends with a bne instruction which is normally paired with
	@ subs in .Lenc_loop.
	tst	r8, r8
	b	Lenc_entry

.align	4
Lenc_loop:
	@ middle of middle round
	add	r10, r11, #0x40
	vtbl.8	d8, {q13}, d4	@ vpshufb	%xmm2,	%xmm13,	%xmm4	# 4 = sb1u
	vtbl.8	d9, {q13}, d5
	vld1.64	{q1}, [r11]!		@ vmovdqa	-0x40(%r11,%r10), %xmm1	# Lk_mc_forward[]
	vtbl.8	d0, {q12}, d6	@ vpshufb	%xmm3,	%xmm12,	%xmm0	# 0 = sb1t
	vtbl.8	d1, {q12}, d7
	veor	q4, q4, q5		@ vpxor		%xmm5,	%xmm4,	%xmm4	# 4 = sb1u + k
	vtbl.8	d10, {q15}, d4	@ vpshufb	%xmm2,	%xmm15,	%xmm5	# 4 = sb2u
	vtbl.8	d11, {q15}, d5
	veor	q0, q0, q4		@ vpxor		%xmm4,	%xmm0,	%xmm0	# 0 = A
	vtbl.8	d4, {q14}, d6	@ vpshufb	%xmm3,	%xmm14,	%xmm2	# 2 = sb2t
	vtbl.8	d5, {q14}, d7
	vld1.64	{q4}, [r10]		@ vmovdqa	(%r11,%r10), %xmm4	# Lk_mc_backward[]
	vtbl.8	d6, {q0}, d2	@ vpshufb	%xmm1,	%xmm0,	%xmm3	# 0 = B
	vtbl.8	d7, {q0}, d3
	veor	q2, q2, q5		@ vpxor		%xmm5,	%xmm2,	%xmm2	# 2 = 2A
	@ Write to q5 instead of q0, so the table and destination registers do
	@ not overlap.
	vtbl.8	d10, {q0}, d8	@ vpshufb	%xmm4,	%xmm0,	%xmm0	# 3 = D
	vtbl.8	d11, {q0}, d9
	veor	q3, q3, q2		@ vpxor		%xmm2,	%xmm3,	%xmm3	# 0 = 2A+B
	vtbl.8	d8, {q3}, d2	@ vpshufb	%xmm1,	%xmm3,	%xmm4	# 0 = 2B+C
	vtbl.8	d9, {q3}, d3
	@ Here we restore the original q0/q5 usage.
	veor	q0, q5, q3		@ vpxor		%xmm3,	%xmm0,	%xmm0	# 3 = 2A+B+D
	and	r11, r11, #~(1<<6)	@ and		$0x30,	%r11		# ... mod 4
	veor	q0, q0, q4		@ vpxor		%xmm4,	%xmm0, %xmm0	# 0 = 2A+3B+C+D
	subs	r8, r8, #1		@ nr--

Lenc_entry:
	@ top of round
	vand	q1, q0, q9		@ vpand		%xmm0,	%xmm9,	%xmm1   # 0 = k
	vshr.u8	q0, q0, #4		@ vpsrlb	$4,	%xmm0,	%xmm0	# 1 = i
	vtbl.8	d10, {q11}, d2	@ vpshufb	%xmm1,	%xmm11,	%xmm5	# 2 = a/k
	vtbl.8	d11, {q11}, d3
	veor	q1, q1, q0		@ vpxor		%xmm0,	%xmm1,	%xmm1	# 0 = j
	vtbl.8	d6, {q10}, d0	@ vpshufb	%xmm0, 	%xmm10,	%xmm3  	# 3 = 1/i
	vtbl.8	d7, {q10}, d1
	vtbl.8	d8, {q10}, d2	@ vpshufb	%xmm1, 	%xmm10,	%xmm4  	# 4 = 1/j
	vtbl.8	d9, {q10}, d3
	veor	q3, q3, q5		@ vpxor		%xmm5,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
	veor	q4, q4, q5		@ vpxor		%xmm5,	%xmm4,	%xmm4  	# 4 = jak = 1/j + a/k
	vtbl.8	d4, {q10}, d6	@ vpshufb	%xmm3,	%xmm10,	%xmm2  	# 2 = 1/iak
	vtbl.8	d5, {q10}, d7
	vtbl.8	d6, {q10}, d8	@ vpshufb	%xmm4,	%xmm10,	%xmm3	# 3 = 1/jak
	vtbl.8	d7, {q10}, d9
	veor	q2, q2, q1		@ vpxor		%xmm1,	%xmm2,	%xmm2  	# 2 = io
	veor	q3, q3, q0		@ vpxor		%xmm0,	%xmm3,	%xmm3	# 3 = jo
	vld1.64	{q5}, [r9]!		@ vmovdqu	(%r9),	%xmm5
	bne	Lenc_loop

	@ middle of last round
	add	r10, r11, #0x80

	adr	r11, Lk_sbo
	@ Read to q1 instead of q4, so the vtbl.8 instruction below does not
	@ overlap table and destination registers.
	vld1.64	{q1}, [r11]!		@ vmovdqa	-0x60(%r10), %xmm4	# 3 : sbou
	vld1.64	{q0}, [r11]		@ vmovdqa	-0x50(%r10), %xmm0	# 0 : sbot	Lk_sbo+16
	vtbl.8	d8, {q1}, d4	@ vpshufb	%xmm2,	%xmm4,	%xmm4	# 4 = sbou
	vtbl.8	d9, {q1}, d5
	vld1.64	{q1}, [r10]		@ vmovdqa	0x40(%r11,%r10), %xmm1	# Lk_sr[]
	@ Write to q2 instead of q0 below, to avoid overlapping table and
	@ destination registers.
	vtbl.8	d4, {q0}, d6	@ vpshufb	%xmm3,	%xmm0,	%xmm0	# 0 = sb1t
	vtbl.8	d5, {q0}, d7
	veor	q4, q4, q5		@ vpxor	%xmm5,	%xmm4,	%xmm4	# 4 = sb1u + k
	veor	q2, q2, q4		@ vpxor	%xmm4,	%xmm0,	%xmm0	# 0 = A
	@ Here we restore the original q0/q2 usage.
	vtbl.8	d0, {q2}, d2	@ vpshufb	%xmm1,	%xmm0,	%xmm0
	vtbl.8	d1, {q2}, d3
	bx	lr

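@ For reference, a minimal C-level sketch of how these one-block entry points
@ are presumably called. The prototypes are inferred from the register usage in
@ this file (r0 = in, r1 = out, r2 = key schedule; rounds stored at offset 240)
@ and mirror the other BoringSSL vpaes ports; these are internal functions, so
@ treat the declarations below as an assumption rather than the public API.
@
@   #include <stdint.h>
@   #include <openssl/aes.h>   /* AES_KEY */
@
@   int vpaes_set_encrypt_key(const uint8_t *user_key, int bits, AES_KEY *key);
@   void vpaes_encrypt(const uint8_t in[16], uint8_t out[16], const AES_KEY *key);
@
@   static void encrypt_block(const uint8_t k[16], const uint8_t pt[16],
@                             uint8_t ct[16]) {
@     AES_KEY key;
@     vpaes_set_encrypt_key(k, 128, &key);   /* schedule a 128-bit key */
@     vpaes_encrypt(pt, ct, &key);           /* encrypt one 16-byte block */
@   }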
.globl	_vpaes_encrypt
.private_extern	_vpaes_encrypt
#ifdef __thumb2__
.thumb_func	_vpaes_encrypt
#endif
.align	4
_vpaes_encrypt:
	@ _vpaes_encrypt_core uses r8-r11. Round up to r7-r11 to maintain stack
	@ alignment.
	stmdb	sp!, {r7,r8,r9,r10,r11,lr}
	@ _vpaes_encrypt_core uses q4-q5 (d8-d11), which are callee-saved.
	vstmdb	sp!, {d8,d9,d10,d11}

	vld1.64	{q0}, [r0]
	bl	_vpaes_preheat
	bl	_vpaes_encrypt_core
	vst1.64	{q0}, [r1]

	vldmia	sp!, {d8,d9,d10,d11}
	ldmia	sp!, {r7,r8,r9,r10,r11, pc}	@ return


@
@  Decryption stuff
@

.align	4
_vpaes_decrypt_consts:
Lk_dipt:@ decryption input transform
.quad	0x0F505B040B545F00, 0x154A411E114E451A
.quad	0x86E383E660056500, 0x12771772F491F194
Lk_dsbo:@ decryption sbox final output
.quad	0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
.quad	0x12D7560F93441D00, 0xCA4B8159D8C58E9C
Lk_dsb9:@ decryption sbox output *9*u, *9*t
.quad	0x851C03539A86D600, 0xCAD51F504F994CC9
.quad	0xC03B1789ECD74900, 0x725E2C9EB2FBA565
Lk_dsbd:@ decryption sbox output *D*u, *D*t
.quad	0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
.quad	0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
Lk_dsbb:@ decryption sbox output *B*u, *B*t
.quad	0xD022649296B44200, 0x602646F6B0F2D404
.quad	0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
Lk_dsbe:@ decryption sbox output *E*u, *E*t
.quad	0x46F2929626D4D000, 0x2242600464B4F6B0
.quad	0x0C55A6CDFFAAC100, 0x9467F36B98593E32


@@
@@  Decryption core
@@
@@  Same API as encryption core, except it clobbers q12-q15 rather than using
@@  the values from _vpaes_preheat. q9-q11 must still be set from
@@  _vpaes_preheat.
@@
#ifdef __thumb2__
.thumb_func	_vpaes_decrypt_core
#endif
.align	4
_vpaes_decrypt_core:
	mov	r9, r2
	ldr	r8, [r2,#240]		@ pull rounds

	@ This function performs shuffles with various constants. The x86_64
	@ version loads them on-demand into %xmm0-%xmm5. This does not work well
	@ for ARMv7 because those registers are shuffle destinations. The ARMv8
	@ version preloads those constants into registers, but ARMv7 has half
	@ the registers to work with. Instead, we load them on-demand into
	@ q12-q15, registers normally used for preloaded constants. This is fine
	@ because decryption doesn't use those constants. The values are
	@ constant, so this does not interfere with potential 2x optimizations.
	adr	r7, Lk_dipt

	vld1.64	{q12,q13}, [r7]		@ vmovdqa	Lk_dipt(%rip), %xmm2	# iptlo
	lsl	r11, r8, #4		@ mov		%rax,	%r11;	shl	$4, %r11
	eor	r11, r11, #0x30		@ xor		$0x30,	%r11
	adr	r10, Lk_sr
	and	r11, r11, #0x30		@ and		$0x30,	%r11
	add	r11, r11, r10
	adr	r10, Lk_mc_forward+48

	vld1.64	{q4}, [r9]!		@ vmovdqu	(%r9),	%xmm4		# round0 key
	vand	q1, q0, q9		@ vpand		%xmm9,	%xmm0,	%xmm1
	vshr.u8	q0, q0, #4		@ vpsrlb	$4,	%xmm0,	%xmm0
	vtbl.8	d4, {q12}, d2	@ vpshufb	%xmm1,	%xmm2,	%xmm2
	vtbl.8	d5, {q12}, d3
	vld1.64	{q5}, [r10]		@ vmovdqa	Lk_mc_forward+48(%rip), %xmm5
					@ vmovdqa	.Lk_dipt+16(%rip), %xmm1 # ipthi
	vtbl.8	d0, {q13}, d0	@ vpshufb	%xmm0,	%xmm1,	%xmm0
	vtbl.8	d1, {q13}, d1
	veor	q2, q2, q4		@ vpxor		%xmm4,	%xmm2,	%xmm2
	veor	q0, q0, q2		@ vpxor		%xmm2,	%xmm0,	%xmm0

	@ .Ldec_entry ends with a bne instruction which is normally paired with
	@ subs in .Ldec_loop.
	tst	r8, r8
	b	Ldec_entry

.align	4
Ldec_loop:
@
@  Inverse mix columns
@

	@ We load .Lk_dsb* into q12-q15 on-demand. See the comment at the top of
	@ the function.
	adr	r10, Lk_dsb9
	vld1.64	{q12,q13}, [r10]!	@ vmovdqa	-0x20(%r10),%xmm4		# 4 : sb9u
					@ vmovdqa	-0x10(%r10),%xmm1		# 0 : sb9t
	@ Load sbd* ahead of time.
	vld1.64	{q14,q15}, [r10]!	@ vmovdqa	0x00(%r10),%xmm4		# 4 : sbdu
					@ vmovdqa	0x10(%r10),%xmm1		# 0 : sbdt
	vtbl.8	d8, {q12}, d4	@ vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sb9u
	vtbl.8	d9, {q12}, d5
	vtbl.8	d2, {q13}, d6	@ vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sb9t
	vtbl.8	d3, {q13}, d7
	veor	q0, q4, q0		@ vpxor		%xmm4,	%xmm0,	%xmm0

	veor	q0, q0, q1		@ vpxor		%xmm1,	%xmm0,	%xmm0		# 0 = ch

	@ Load sbb* ahead of time.
	vld1.64	{q12,q13}, [r10]!	@ vmovdqa	0x20(%r10),%xmm4		# 4 : sbbu
					@ vmovdqa	0x30(%r10),%xmm1		# 0 : sbbt

	vtbl.8	d8, {q14}, d4	@ vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbdu
	vtbl.8	d9, {q14}, d5
	@ Write to q1 instead of q0, so the table and destination registers do
	@ not overlap.
	vtbl.8	d2, {q0}, d10	@ vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
	vtbl.8	d3, {q0}, d11
	@ Here we restore the original q0/q1 usage. This instruction is
	@ reordered from the ARMv8 version so we do not clobber the vtbl.8
	@ below.
	veor	q0, q1, q4		@ vpxor		%xmm4,	%xmm0,	%xmm0		# 4 = ch
	vtbl.8	d2, {q15}, d6	@ vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbdt
	vtbl.8	d3, {q15}, d7
					@ vmovdqa	0x20(%r10),	%xmm4		# 4 : sbbu
	veor	q0, q0, q1		@ vpxor		%xmm1,	%xmm0,	%xmm0		# 0 = ch
					@ vmovdqa	0x30(%r10),	%xmm1		# 0 : sbbt

	@ Load sbe* ahead of time.
	vld1.64	{q14,q15}, [r10]!	@ vmovdqa	0x40(%r10),%xmm4		# 4 : sbeu
					@ vmovdqa	0x50(%r10),%xmm1		# 0 : sbet

	vtbl.8	d8, {q12}, d4	@ vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbbu
	vtbl.8	d9, {q12}, d5
	@ Write to q1 instead of q0, so the table and destination registers do
	@ not overlap.
	vtbl.8	d2, {q0}, d10	@ vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
	vtbl.8	d3, {q0}, d11
	@ Here we restore the original q0/q1 usage. This instruction is
	@ reordered from the ARMv8 version so we do not clobber the vtbl.8
	@ below.
	veor	q0, q1, q4		@ vpxor		%xmm4,	%xmm0,	%xmm0		# 4 = ch
	vtbl.8	d2, {q13}, d6	@ vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbbt
	vtbl.8	d3, {q13}, d7
	veor	q0, q0, q1		@ vpxor		%xmm1,	%xmm0,	%xmm0		# 0 = ch

	vtbl.8	d8, {q14}, d4	@ vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbeu
	vtbl.8	d9, {q14}, d5
	@ Write to q1 instead of q0, so the table and destination registers do
	@ not overlap.
	vtbl.8	d2, {q0}, d10	@ vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
	vtbl.8	d3, {q0}, d11
	@ Here we restore the original q0/q1 usage. This instruction is
	@ reordered from the ARMv8 version so we do not clobber the vtbl.8
	@ below.
	veor	q0, q1, q4		@ vpxor		%xmm4,	%xmm0,	%xmm0		# 4 = ch
	vtbl.8	d2, {q15}, d6	@ vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbet
	vtbl.8	d3, {q15}, d7
	vext.8	q5, q5, q5, #12		@ vpalignr 	$12,	%xmm5,	%xmm5,	%xmm5
	veor	q0, q0, q1		@ vpxor		%xmm1,	%xmm0,	%xmm0		# 0 = ch
	subs	r8, r8, #1		@ sub		$1,%rax			# nr--

Ldec_entry:
	@ top of round
	vand	q1, q0, q9		@ vpand		%xmm9,	%xmm0,	%xmm1	# 0 = k
	vshr.u8	q0, q0, #4		@ vpsrlb	$4,	%xmm0,	%xmm0	# 1 = i
	vtbl.8	d4, {q11}, d2	@ vpshufb	%xmm1,	%xmm11,	%xmm2	# 2 = a/k
	vtbl.8	d5, {q11}, d3
	veor	q1, q1, q0		@ vpxor		%xmm0,	%xmm1,	%xmm1	# 0 = j
	vtbl.8	d6, {q10}, d0	@ vpshufb	%xmm0, 	%xmm10,	%xmm3	# 3 = 1/i
	vtbl.8	d7, {q10}, d1
	vtbl.8	d8, {q10}, d2	@ vpshufb	%xmm1,	%xmm10,	%xmm4	# 4 = 1/j
	vtbl.8	d9, {q10}, d3
	veor	q3, q3, q2		@ vpxor		%xmm2,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
	veor	q4, q4, q2		@ vpxor		%xmm2, 	%xmm4,	%xmm4	# 4 = jak = 1/j + a/k
	vtbl.8	d4, {q10}, d6	@ vpshufb	%xmm3,	%xmm10,	%xmm2	# 2 = 1/iak
	vtbl.8	d5, {q10}, d7
	vtbl.8	d6, {q10}, d8	@ vpshufb	%xmm4,  %xmm10,	%xmm3	# 3 = 1/jak
	vtbl.8	d7, {q10}, d9
	veor	q2, q2, q1		@ vpxor		%xmm1,	%xmm2,	%xmm2	# 2 = io
	veor	q3, q3, q0		@ vpxor		%xmm0,  %xmm3,	%xmm3	# 3 = jo
	vld1.64	{q0}, [r9]!		@ vmovdqu	(%r9),	%xmm0
	bne	Ldec_loop

	@ middle of last round

	adr	r10, Lk_dsbo

	@ Write to q1 rather than q4 to avoid overlapping table and destination.
	vld1.64	{q1}, [r10]!		@ vmovdqa	0x60(%r10),	%xmm4	# 3 : sbou
	vtbl.8	d8, {q1}, d4	@ vpshufb	%xmm2,	%xmm4,	%xmm4	# 4 = sbou
	vtbl.8	d9, {q1}, d5
	@ Write to q2 rather than q1 to avoid overlapping table and destination.
	vld1.64	{q2}, [r10]		@ vmovdqa	0x70(%r10),	%xmm1	# 0 : sbot
	vtbl.8	d2, {q2}, d6	@ vpshufb	%xmm3,	%xmm1,	%xmm1	# 0 = sb1t
	vtbl.8	d3, {q2}, d7
	vld1.64	{q2}, [r11]		@ vmovdqa	-0x160(%r11),	%xmm2	# Lk_sr-Lk_dsbd=-0x160
	veor	q4, q4, q0		@ vpxor		%xmm0,	%xmm4,	%xmm4	# 4 = sb1u + k
	@ Write to q1 rather than q0 so the table and destination registers
	@ below do not overlap.
	veor	q1, q1, q4		@ vpxor		%xmm4,	%xmm1,	%xmm0	# 0 = A
	vtbl.8	d0, {q1}, d4	@ vpshufb	%xmm2,	%xmm0,	%xmm0
	vtbl.8	d1, {q1}, d5
	bx	lr

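@ Presumed companion prototype, mirroring vpaes_encrypt above (r0 = in,
@ r1 = out, r2 = key schedule produced by vpaes_set_decrypt_key); an
@ assumption inferred from the register usage, not the authoritative header:
@ void vpaes_decrypt(const uint8_t in[16], uint8_t out[16], const AES_KEY *key);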
.globl	_vpaes_decrypt
.private_extern	_vpaes_decrypt
#ifdef __thumb2__
.thumb_func	_vpaes_decrypt
#endif
.align	4
_vpaes_decrypt:
	@ _vpaes_decrypt_core uses r7-r11.
	stmdb	sp!, {r7,r8,r9,r10,r11,lr}
	@ _vpaes_decrypt_core uses q4-q5 (d8-d11), which are callee-saved.
	vstmdb	sp!, {d8,d9,d10,d11}

	vld1.64	{q0}, [r0]
	bl	_vpaes_preheat
	bl	_vpaes_decrypt_core
	vst1.64	{q0}, [r1]

	vldmia	sp!, {d8,d9,d10,d11}
	ldmia	sp!, {r7,r8,r9,r10,r11, pc}	@ return

@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@                                                    @@
@@                  AES key schedule                  @@
@@                                                    @@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

@ This function diverges from both x86_64 and aarch64 in which constants are
@ pinned. x86_64 has a common preheat function for all operations. aarch64
@ separates them because it has enough registers to pin nearly all constants.
@ armv7 does not have enough registers, but needing explicit loads and stores
@ also complicates using x86_64's register allocation directly.
@
@ We pin some constants for convenience and leave q14 and q15 free to load
@ others on demand.

@
@  Key schedule constants
@

.align	4
_vpaes_key_consts:
Lk_dksd:@ decryption key schedule: invskew x*D
.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
Lk_dksb:@ decryption key schedule: invskew x*B
.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
Lk_dkse:@ decryption key schedule: invskew x*E + 0x63
.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
Lk_dks9:@ decryption key schedule: invskew x*9
.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE

Lk_rcon:@ rcon
.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81

Lk_opt:@ output transform
.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
Lk_deskew:@ deskew tables: inverts the sbox's "skew"
.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77


#ifdef __thumb2__
.thumb_func	_vpaes_key_preheat
#endif
.align	4
_vpaes_key_preheat:
	adr	r11, Lk_rcon
	vmov.i8	q12, #0x5b			@ Lk_s63
	adr	r10, Lk_inv			@ Must be aligned to 8 mod 16.
	vmov.i8	q9, #0x0f			@ Lk_s0F
	vld1.64	{q10,q11}, [r10]		@ Lk_inv
	vld1.64	{q8}, [r11]			@ Lk_rcon
	bx	lr


#ifdef __thumb2__
.thumb_func	_vpaes_schedule_core
#endif
.align	4
_vpaes_schedule_core:
	@ We only need to save lr, but ARM requires an 8-byte stack alignment,
	@ so save an extra register.
	stmdb	sp!, {r3,lr}

	bl	_vpaes_key_preheat	@ load the tables

	adr	r11, Lk_ipt		@ Must be aligned to 8 mod 16.
	vld1.64	{q0}, [r0]!		@ vmovdqu	(%rdi),	%xmm0		# load key (unaligned)

	@ input transform
	@ Use q4 here rather than q3 so .Lschedule_am_decrypting does not
	@ overlap table and destination.
	vmov	q4, q0			@ vmovdqa	%xmm0,	%xmm3
	bl	_vpaes_schedule_transform
	adr	r10, Lk_sr		@ Must be aligned to 8 mod 16.
	vmov	q7, q0			@ vmovdqa	%xmm0,	%xmm7

	add	r8, r8, r10
	tst	r3, r3
	bne	Lschedule_am_decrypting

	@ encrypting, output zeroth round key after transform
	vst1.64	{q0}, [r2]		@ vmovdqu	%xmm0,	(%rdx)
	b	Lschedule_go

Lschedule_am_decrypting:
	@ decrypting, output zeroth round key after shiftrows
	vld1.64	{q1}, [r8]		@ vmovdqa	(%r8,%r10),	%xmm1
	vtbl.8	d6, {q4}, d2	@ vpshufb  	%xmm1,	%xmm3,	%xmm3
	vtbl.8	d7, {q4}, d3
	vst1.64	{q3}, [r2]		@ vmovdqu	%xmm3,	(%rdx)
	eor	r8, r8, #0x30		@ xor	$0x30, %r8

Lschedule_go:
	cmp	r1, #192		@ cmp	$192,	%esi
	bhi	Lschedule_256
	beq	Lschedule_192
	@ 128: fall through

@@
@@  .schedule_128
@@
@@  128-bit specific part of key schedule.
@@
@@  This schedule is really simple, because all its parts
@@  are accomplished by the subroutines.
@@
Lschedule_128:
	mov	r0, #10		@ mov	$10, %esi

Loop_schedule_128:
	bl	_vpaes_schedule_round
	subs	r0, r0, #1		@ dec	%esi
	beq	Lschedule_mangle_last
	bl	_vpaes_schedule_mangle	@ write output
	b	Loop_schedule_128

@@
@@  .aes_schedule_192
@@
@@  192-bit specific part of key schedule.
@@
@@  The main body of this schedule is the same as the 128-bit
@@  schedule, but with more smearing.  The long, high side is
@@  stored in q7 as before, and the short, low side is in
@@  the high bits of q6.
@@
@@  This schedule is somewhat nastier, however, because each
@@  round produces 192 bits of key material, or 1.5 round keys.
@@  Therefore, on each cycle we do 2 rounds and produce 3 round
@@  keys.
@@
.align	4
Lschedule_192:
	sub	r0, r0, #8
	vld1.64	{q0}, [r0]			@ vmovdqu	8(%rdi),%xmm0		# load key part 2 (very unaligned)
	bl	_vpaes_schedule_transform	@ input transform
	vmov	q6, q0				@ vmovdqa	%xmm0,	%xmm6		# save short part
	vmov.i8	d12, #0			@ vpxor	%xmm4,	%xmm4, %xmm4	# clear 4
						@ vmovhlps	%xmm4,	%xmm6,	%xmm6		# clobber low side with zeros
	mov	r0, #4			@ mov	$4,	%esi

Loop_schedule_192:
	bl	_vpaes_schedule_round
	vext.8	q0, q6, q0, #8			@ vpalignr	$8,%xmm6,%xmm0,%xmm0
	bl	_vpaes_schedule_mangle		@ save key n
	bl	_vpaes_schedule_192_smear
	bl	_vpaes_schedule_mangle		@ save key n+1
	bl	_vpaes_schedule_round
	subs	r0, r0, #1			@ dec	%esi
	beq	Lschedule_mangle_last
	bl	_vpaes_schedule_mangle		@ save key n+2
	bl	_vpaes_schedule_192_smear
	b	Loop_schedule_192

@@
@@  .aes_schedule_256
@@
@@  256-bit specific part of key schedule.
@@
@@  The structure here is very similar to the 128-bit
@@  schedule, but with an additional "low side" in
@@  q6.  The low side's rounds are the same as the
@@  high side's, except no rcon and no rotation.
@@
.align	4
Lschedule_256:
	vld1.64	{q0}, [r0]			@ vmovdqu	16(%rdi),%xmm0		# load key part 2 (unaligned)
	bl	_vpaes_schedule_transform	@ input transform
	mov	r0, #7			@ mov	$7, %esi

Loop_schedule_256:
	bl	_vpaes_schedule_mangle		@ output low result
	vmov	q6, q0				@ vmovdqa	%xmm0,	%xmm6		# save cur_lo in xmm6

	@ high round
	bl	_vpaes_schedule_round
	subs	r0, r0, #1			@ dec	%esi
	beq	Lschedule_mangle_last
	bl	_vpaes_schedule_mangle

	@ low round. swap xmm7 and xmm6
	vdup.32	q0, d1[1]		@ vpshufd	$0xFF,	%xmm0,	%xmm0
	vmov.i8	q4, #0
	vmov	q5, q7			@ vmovdqa	%xmm7,	%xmm5
	vmov	q7, q6			@ vmovdqa	%xmm6,	%xmm7
	bl	_vpaes_schedule_low_round
	vmov	q7, q5			@ vmovdqa	%xmm5,	%xmm7

	b	Loop_schedule_256

@@
@@  .aes_schedule_mangle_last
@@
@@  Mangler for last round of key schedule
@@  Mangles q0
@@    when encrypting, outputs out(q0) ^ 63
@@    when decrypting, outputs unskew(q0)
@@
@@  Always called right before return... jumps to cleanup and exits
@@
.align	4
Lschedule_mangle_last:
	@ schedule last round key from xmm0
	adr	r11, Lk_deskew			@ lea	Lk_deskew(%rip),%r11	# prepare to deskew
	tst	r3, r3
	bne	Lschedule_mangle_last_dec

	@ encrypting
	vld1.64	{q1}, [r8]		@ vmovdqa	(%r8,%r10),%xmm1
	adr	r11, Lk_opt		@ lea		Lk_opt(%rip),	%r11		# prepare to output transform
	add	r2, r2, #32		@ add		$32,	%rdx
	vmov	q2, q0
	vtbl.8	d0, {q2}, d2	@ vpshufb	%xmm1,	%xmm0,	%xmm0		# output permute
	vtbl.8	d1, {q2}, d3

Lschedule_mangle_last_dec:
	sub	r2, r2, #16			@ add	$-16,	%rdx
	veor	q0, q0, q12			@ vpxor	Lk_s63(%rip),	%xmm0,	%xmm0
	bl	_vpaes_schedule_transform	@ output transform
	vst1.64	{q0}, [r2]			@ vmovdqu	%xmm0,	(%rdx)		# save last key

	@ cleanup
	veor	q0, q0, q0		@ vpxor	%xmm0,	%xmm0,	%xmm0
	veor	q1, q1, q1		@ vpxor	%xmm1,	%xmm1,	%xmm1
	veor	q2, q2, q2		@ vpxor	%xmm2,	%xmm2,	%xmm2
	veor	q3, q3, q3		@ vpxor	%xmm3,	%xmm3,	%xmm3
	veor	q4, q4, q4		@ vpxor	%xmm4,	%xmm4,	%xmm4
	veor	q5, q5, q5		@ vpxor	%xmm5,	%xmm5,	%xmm5
	veor	q6, q6, q6		@ vpxor	%xmm6,	%xmm6,	%xmm6
	veor	q7, q7, q7		@ vpxor	%xmm7,	%xmm7,	%xmm7
	ldmia	sp!, {r3,pc}		@ return


@@
@@  .aes_schedule_192_smear
@@
@@  Smear the short, low side in the 192-bit key schedule.
@@
@@  Inputs:
@@    q7: high side, b  a  x  y
@@    q6:  low side, d  c  0  0
@@
@@  Outputs:
@@    q6: b+c+d  b+c  0  0
@@    q0: b+c+d  b+c  b  a
@@
#ifdef __thumb2__
.thumb_func	_vpaes_schedule_192_smear
#endif
.align	4
_vpaes_schedule_192_smear:
	vmov.i8	q1, #0
	vdup.32	q0, d15[1]
	vshl.i64	q1, q6, #32		@ vpshufd	$0x80,	%xmm6,	%xmm1	# d c 0 0 -> c 0 0 0
	vmov	d0, d15		@ vpshufd	$0xFE,	%xmm7,	%xmm0	# b a _ _ -> b b b a
	veor	q6, q6, q1		@ vpxor	%xmm1,	%xmm6,	%xmm6	# -> c+d c 0 0
	veor	q1, q1, q1		@ vpxor	%xmm1,	%xmm1,	%xmm1
	veor	q6, q6, q0		@ vpxor	%xmm0,	%xmm6,	%xmm6	# -> b+c+d b+c b a
	vmov	q0, q6			@ vmovdqa	%xmm6,	%xmm0
	vmov	d12, d2		@ vmovhlps	%xmm1,	%xmm6,	%xmm6	# clobber low side with zeros
	bx	lr


@@
@@  .aes_schedule_round
@@
@@  Runs one main round of the key schedule on q0, q7
@@
@@  Specifically, runs subbytes on the high dword of q0
@@  then rotates it by one byte and xors into the low dword of
@@  q7.
@@
@@  Adds rcon from low byte of q8, then rotates q8 for
@@  next rcon.
@@
@@  Smears the dwords of q7 by xoring the low into the
@@  second low, result into third, result into highest.
@@
@@  Returns results in q7 = q0.
@@  Clobbers q1-q4, r11.
@@
#ifdef __thumb2__
.thumb_func	_vpaes_schedule_round
#endif
.align	4
_vpaes_schedule_round:
	@ extract rcon from xmm8
	vmov.i8	q4, #0				@ vpxor		%xmm4,	%xmm4,	%xmm4
	vext.8	q1, q8, q4, #15		@ vpalignr	$15,	%xmm8,	%xmm4,	%xmm1
	vext.8	q8, q8, q8, #15	@ vpalignr	$15,	%xmm8,	%xmm8,	%xmm8
	veor	q7, q7, q1			@ vpxor		%xmm1,	%xmm7,	%xmm7

	@ rotate
	vdup.32	q0, d1[1]			@ vpshufd	$0xFF,	%xmm0,	%xmm0
	vext.8	q0, q0, q0, #1			@ vpalignr	$1,	%xmm0,	%xmm0,	%xmm0

	@ fall through...

	@ low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
	@ The x86_64 version pins .Lk_sb1 in %xmm13 and .Lk_sb1+16 in %xmm12.
	@ We pin other values in _vpaes_key_preheat, so load them now.
	adr	r11, Lk_sb1
	vld1.64	{q14,q15}, [r11]

	@ smear xmm7
	vext.8	q1, q4, q7, #12			@ vpslldq	$4,	%xmm7,	%xmm1
	veor	q7, q7, q1			@ vpxor	%xmm1,	%xmm7,	%xmm7
	vext.8	q4, q4, q7, #8			@ vpslldq	$8,	%xmm7,	%xmm4

	@ subbytes
	vand	q1, q0, q9			@ vpand		%xmm9,	%xmm0,	%xmm1		# 0 = k
	vshr.u8	q0, q0, #4			@ vpsrlb	$4,	%xmm0,	%xmm0		# 1 = i
	veor	q7, q7, q4			@ vpxor		%xmm4,	%xmm7,	%xmm7
	vtbl.8	d4, {q11}, d2		@ vpshufb	%xmm1,	%xmm11,	%xmm2		# 2 = a/k
	vtbl.8	d5, {q11}, d3
	veor	q1, q1, q0			@ vpxor		%xmm0,	%xmm1,	%xmm1		# 0 = j
	vtbl.8	d6, {q10}, d0		@ vpshufb	%xmm0, 	%xmm10,	%xmm3		# 3 = 1/i
	vtbl.8	d7, {q10}, d1
	veor	q3, q3, q2			@ vpxor		%xmm2,	%xmm3,	%xmm3		# 3 = iak = 1/i + a/k
	vtbl.8	d8, {q10}, d2		@ vpshufb	%xmm1,	%xmm10,	%xmm4		# 4 = 1/j
	vtbl.8	d9, {q10}, d3
	veor	q7, q7, q12			@ vpxor		Lk_s63(%rip),	%xmm7,	%xmm7
	vtbl.8	d6, {q10}, d6		@ vpshufb	%xmm3,	%xmm10,	%xmm3		# 2 = 1/iak
	vtbl.8	d7, {q10}, d7
	veor	q4, q4, q2			@ vpxor		%xmm2,	%xmm4,	%xmm4		# 4 = jak = 1/j + a/k
	vtbl.8	d4, {q10}, d8		@ vpshufb	%xmm4,	%xmm10,	%xmm2		# 3 = 1/jak
	vtbl.8	d5, {q10}, d9
	veor	q3, q3, q1			@ vpxor		%xmm1,	%xmm3,	%xmm3		# 2 = io
	veor	q2, q2, q0			@ vpxor		%xmm0,	%xmm2,	%xmm2		# 3 = jo
	vtbl.8	d8, {q15}, d6		@ vpshufb	%xmm3,	%xmm13,	%xmm4		# 4 = sbou
	vtbl.8	d9, {q15}, d7
	vtbl.8	d2, {q14}, d4		@ vpshufb	%xmm2,	%xmm12,	%xmm1		# 0 = sb1t
	vtbl.8	d3, {q14}, d5
	veor	q1, q1, q4			@ vpxor		%xmm4,	%xmm1,	%xmm1		# 0 = sbox output

	@ add in smeared stuff
	veor	q0, q1, q7			@ vpxor	%xmm7,	%xmm1,	%xmm0
	veor	q7, q1, q7			@ vmovdqa	%xmm0,	%xmm7
	bx	lr


@@
@@  .aes_schedule_transform
@@
@@  Linear-transform q0 according to tables at [r11]
@@
@@  Requires that q9 = 0x0F0F... as in preheat
@@  Output in q0
@@  Clobbers q1, q2, q14, q15
@@
#ifdef __thumb2__
.thumb_func	_vpaes_schedule_transform
#endif
.align	4
_vpaes_schedule_transform:
	vld1.64	{q14,q15}, [r11]	@ vmovdqa	(%r11),	%xmm2 	# lo
					@ vmovdqa	16(%r11),	%xmm1 # hi
	vand	q1, q0, q9		@ vpand	%xmm9,	%xmm0,	%xmm1
	vshr.u8	q0, q0, #4		@ vpsrlb	$4,	%xmm0,	%xmm0
	vtbl.8	d4, {q14}, d2	@ vpshufb	%xmm1,	%xmm2,	%xmm2
	vtbl.8	d5, {q14}, d3
	vtbl.8	d0, {q15}, d0	@ vpshufb	%xmm0,	%xmm1,	%xmm0
	vtbl.8	d1, {q15}, d1
	veor	q0, q0, q2		@ vpxor	%xmm2,	%xmm0,	%xmm0
	bx	lr


@@
@@  .aes_schedule_mangle
@@
@@  Mangles q0 from (basis-transformed) standard version
@@  to our version.
@@
@@  On encrypt,
@@    xor with 0x63
@@    multiply by circulant 0,1,1,1
@@    apply shiftrows transform
@@
@@  On decrypt,
@@    xor with 0x63
@@    multiply by "inverse mixcolumns" circulant E,B,D,9
@@    deskew
@@    apply shiftrows transform
@@
@@
@@  Writes out to [r2], and increments or decrements it
@@  Keeps track of round number mod 4 in r8
@@  Preserves q0
@@  Clobbers q1-q5
@@
#ifdef __thumb2__
.thumb_func	_vpaes_schedule_mangle
#endif
.align	4
_vpaes_schedule_mangle:
	tst	r3, r3
	vmov	q4, q0			@ vmovdqa	%xmm0,	%xmm4	# save xmm0 for later
	adr	r11, Lk_mc_forward	@ Must be aligned to 8 mod 16.
	vld1.64	{q5}, [r11]		@ vmovdqa	Lk_mc_forward(%rip),%xmm5
	bne	Lschedule_mangle_dec

	@ encrypting
	@ Write to q2 so we do not overlap table and destination below.
	veor	q2, q0, q12		@ vpxor		Lk_s63(%rip),	%xmm0,	%xmm4
	add	r2, r2, #16		@ add		$16,	%rdx
	vtbl.8	d8, {q2}, d10	@ vpshufb	%xmm5,	%xmm4,	%xmm4
	vtbl.8	d9, {q2}, d11
	vtbl.8	d2, {q4}, d10	@ vpshufb	%xmm5,	%xmm4,	%xmm1
	vtbl.8	d3, {q4}, d11
	vtbl.8	d6, {q1}, d10	@ vpshufb	%xmm5,	%xmm1,	%xmm3
	vtbl.8	d7, {q1}, d11
	veor	q4, q4, q1		@ vpxor		%xmm1,	%xmm4,	%xmm4
	vld1.64	{q1}, [r8]		@ vmovdqa	(%r8,%r10),	%xmm1
	veor	q3, q3, q4		@ vpxor		%xmm4,	%xmm3,	%xmm3

	b	Lschedule_mangle_both
.align	4
Lschedule_mangle_dec:
	@ inverse mix columns
	adr	r11, Lk_dksd 		@ lea		Lk_dksd(%rip),%r11
	vshr.u8	q1, q4, #4		@ vpsrlb	$4,	%xmm4,	%xmm1	# 1 = hi
	vand	q4, q4, q9		@ vpand		%xmm9,	%xmm4,	%xmm4	# 4 = lo

	vld1.64	{q14,q15}, [r11]! 	@ vmovdqa	0x00(%r11),	%xmm2
					@ vmovdqa	0x10(%r11),	%xmm3
	vtbl.8	d4, {q14}, d8	@ vpshufb	%xmm4,	%xmm2,	%xmm2
	vtbl.8	d5, {q14}, d9
	vtbl.8	d6, {q15}, d2	@ vpshufb	%xmm1,	%xmm3,	%xmm3
	vtbl.8	d7, {q15}, d3
	@ Load .Lk_dksb ahead of time.
	vld1.64	{q14,q15}, [r11]! 	@ vmovdqa	0x20(%r11),	%xmm2
					@ vmovdqa	0x30(%r11),	%xmm3
	@ Write to q13 so we do not overlap table and destination.
	veor	q13, q3, q2		@ vpxor		%xmm2,	%xmm3,	%xmm3
	vtbl.8	d6, {q13}, d10	@ vpshufb	%xmm5,	%xmm3,	%xmm3
	vtbl.8	d7, {q13}, d11

	vtbl.8	d4, {q14}, d8	@ vpshufb	%xmm4,	%xmm2,	%xmm2
	vtbl.8	d5, {q14}, d9
	veor	q2, q2, q3		@ vpxor		%xmm3,	%xmm2,	%xmm2
	vtbl.8	d6, {q15}, d2	@ vpshufb	%xmm1,	%xmm3,	%xmm3
	vtbl.8	d7, {q15}, d3
	@ Load .Lk_dkse ahead of time.
	vld1.64	{q14,q15}, [r11]! 	@ vmovdqa	0x40(%r11),	%xmm2
					@ vmovdqa	0x50(%r11),	%xmm3
	@ Write to q13 so we do not overlap table and destination.
	veor	q13, q3, q2		@ vpxor		%xmm2,	%xmm3,	%xmm3
	vtbl.8	d6, {q13}, d10	@ vpshufb	%xmm5,	%xmm3,	%xmm3
	vtbl.8	d7, {q13}, d11

	vtbl.8	d4, {q14}, d8	@ vpshufb	%xmm4,	%xmm2,	%xmm2
	vtbl.8	d5, {q14}, d9
	veor	q2, q2, q3		@ vpxor		%xmm3,	%xmm2,	%xmm2
	vtbl.8	d6, {q15}, d2	@ vpshufb	%xmm1,	%xmm3,	%xmm3
	vtbl.8	d7, {q15}, d3
	@ Load .Lk_dks9 ahead of time.
	vld1.64	{q14,q15}, [r11]! 	@ vmovdqa	0x60(%r11),	%xmm2
					@ vmovdqa	0x70(%r11),	%xmm4
	@ Write to q13 so we do not overlap table and destination.
	veor	q13, q3, q2		@ vpxor		%xmm2,	%xmm3,	%xmm3

	vtbl.8	d4, {q14}, d8	@ vpshufb	%xmm4,	%xmm2,	%xmm2
	vtbl.8	d5, {q14}, d9
	vtbl.8	d6, {q13}, d10	@ vpshufb	%xmm5,	%xmm3,	%xmm3
	vtbl.8	d7, {q13}, d11
	vtbl.8	d8, {q15}, d2	@ vpshufb	%xmm1,	%xmm4,	%xmm4
	vtbl.8	d9, {q15}, d3
	vld1.64	{q1}, [r8]		@ vmovdqa	(%r8,%r10),	%xmm1
	veor	q2, q2, q3		@ vpxor	%xmm3,	%xmm2,	%xmm2
	veor	q3, q4, q2		@ vpxor	%xmm2,	%xmm4,	%xmm3

	sub	r2, r2, #16		@ add	$-16,	%rdx

Lschedule_mangle_both:
	@ Write to q2 so table and destination do not overlap.
	vtbl.8	d4, {q3}, d2	@ vpshufb	%xmm1,	%xmm3,	%xmm3
	vtbl.8	d5, {q3}, d3
	add	r8, r8, #64-16		@ add	$-16,	%r8
	and	r8, r8, #~(1<<6)	@ and	$0x30,	%r8
	vst1.64	{q2}, [r2]		@ vmovdqu	%xmm3,	(%rdx)
	bx	lr


.globl	_vpaes_set_encrypt_key
.private_extern	_vpaes_set_encrypt_key
#ifdef __thumb2__
.thumb_func	_vpaes_set_encrypt_key
#endif
.align	4
_vpaes_set_encrypt_key:
	stmdb	sp!, {r7,r8,r9,r10,r11, lr}
	vstmdb	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}

	lsr	r9, r1, #5		@ shr	$5,%eax
	add	r9, r9, #5		@ $5,%eax
	str	r9, [r2,#240]		@ mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
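	@ For example: 128 -> 9, 192 -> 11, 256 -> 13. This is one fewer than the
	@ conventional AES round count; vpaes_encrypt_key_to_bsaes below adds one
	@ back when converting to the bsaes representation.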

	mov	r3, #0		@ mov	$0,%ecx
	mov	r8, #0x30		@ mov	$0x30,%r8d
	bl	_vpaes_schedule_core
	eor	r0, r0, r0

	vldmia	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
	ldmia	sp!, {r7,r8,r9,r10,r11, pc}	@ return


.globl	_vpaes_set_decrypt_key
.private_extern	_vpaes_set_decrypt_key
#ifdef __thumb2__
.thumb_func	_vpaes_set_decrypt_key
#endif
.align	4
_vpaes_set_decrypt_key:
	stmdb	sp!, {r7,r8,r9,r10,r11, lr}
	vstmdb	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}

	lsr	r9, r1, #5		@ shr	$5,%eax
	add	r9, r9, #5		@ $5,%eax
	str	r9, [r2,#240]		@ mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
	lsl	r9, r9, #4		@ shl	$4,%eax
	add	r2, r2, #16		@ lea	16(%rdx,%rax),%rdx
	add	r2, r2, r9

	mov	r3, #1		@ mov	$1,%ecx
	lsr	r8, r1, #1		@ shr	$1,%r8d
	and	r8, r8, #32		@ and	$32,%r8d
	eor	r8, r8, #32		@ xor	$32,%r8d	# nbits==192?0:32
	bl	_vpaes_schedule_core

	vldmia	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
	ldmia	sp!, {r7,r8,r9,r10,r11, pc}	@ return


@ Additional constants for converting to bsaes.

.align	4
_vpaes_convert_consts:
@ .Lk_opt_then_skew applies skew(opt(x)) XOR 0x63, where skew is the linear
@ transform in the AES S-box. 0x63 is incorporated into the low half of the
@ table. This was computed with the following script:
@
@   def u64s_to_u128(x, y):
@       return x | (y << 64)
@   def u128_to_u64s(w):
@       return w & ((1<<64)-1), w >> 64
@   def get_byte(w, i):
@       return (w >> (i*8)) & 0xff
@   def apply_table(table, b):
@       lo = b & 0xf
@       hi = b >> 4
@       return get_byte(table[0], lo) ^ get_byte(table[1], hi)
@   def opt(b):
@       table = [
@           u64s_to_u128(0xFF9F4929D6B66000, 0xF7974121DEBE6808),
@           u64s_to_u128(0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0),
@       ]
@       return apply_table(table, b)
@   def rot_byte(b, n):
@       return 0xff & ((b << n) | (b >> (8-n)))
@   def skew(x):
@       return (x ^ rot_byte(x, 1) ^ rot_byte(x, 2) ^ rot_byte(x, 3) ^
@               rot_byte(x, 4))
@   table = [0, 0]
@   for i in range(16):
@       table[0] |= (skew(opt(i)) ^ 0x63) << (i*8)
@       table[1] |= skew(opt(i<<4)) << (i*8)
@   print("	.quad	0x%016x, 0x%016x" % u128_to_u64s(table[0]))
@   print("	.quad	0x%016x, 0x%016x" % u128_to_u64s(table[1]))
Lk_opt_then_skew:
.quad	0x9cb8436798bc4763, 0x6440bb9f6044bf9b
.quad	0x1f30062936192f00, 0xb49bad829db284ab

@ .Lk_decrypt_transform is a permutation which performs an 8-bit left-rotation
@ followed by a byte-swap on each 32-bit word of a vector. E.g., 0x11223344
@ becomes 0x22334411 and then 0x11443322.
Lk_decrypt_transform:
.quad	0x0704050603000102, 0x0f0c0d0e0b08090a


@ void vpaes_encrypt_key_to_bsaes(AES_KEY *bsaes, const AES_KEY *vpaes);
.globl	_vpaes_encrypt_key_to_bsaes
.private_extern	_vpaes_encrypt_key_to_bsaes
#ifdef __thumb2__
.thumb_func	_vpaes_encrypt_key_to_bsaes
#endif
.align	4
_vpaes_encrypt_key_to_bsaes:
	stmdb	sp!, {r11, lr}

	@ See _vpaes_schedule_core for the key schedule logic. In particular,
	@ _vpaes_schedule_transform(.Lk_ipt) (section 2.2 of the paper),
	@ _vpaes_schedule_mangle (section 4.3), and .Lschedule_mangle_last
	@ contain the transformations not in the bsaes representation. This
	@ function inverts those transforms.
	@
	@ Note also that bsaes-armv7.pl expects aes-armv4.pl's key
	@ representation, which does not match the other aes_nohw_*
	@ implementations. The ARM aes_nohw_* stores each 32-bit word
	@ byteswapped, as a convenience for (unsupported) big-endian ARM, at the
	@ cost of extra REV and VREV32 operations in little-endian ARM.

	vmov.i8	q9, #0x0f		@ Required by _vpaes_schedule_transform
	adr	r2, Lk_mc_forward	@ Must be aligned to 8 mod 16.
	add	r3, r2, 0x90		@ Lk_sr+0x10-Lk_mc_forward = 0x90 (Apple's toolchain doesn't support the expression)

	vld1.64	{q12}, [r2]
	vmov.i8	q10, #0x5b		@ Lk_s63 from vpaes-x86_64
	adr	r11, Lk_opt		@ Must be aligned to 8 mod 16.
	vmov.i8	q11, #0x63		@ Lk_s63 without Lk_ipt applied

	@ vpaes stores one fewer round count than bsaes, but the number of keys
	@ is the same.
	ldr	r2, [r1,#240]
	add	r2, r2, #1
	str	r2, [r0,#240]

	@ The first key is transformed with _vpaes_schedule_transform(.Lk_ipt).
	@ Invert this with .Lk_opt.
	vld1.64	{q0}, [r1]!
	bl	_vpaes_schedule_transform
	vrev32.8	q0, q0
	vst1.64	{q0}, [r0]!

	@ The middle keys have _vpaes_schedule_transform(.Lk_ipt) applied,
	@ followed by _vpaes_schedule_mangle. _vpaes_schedule_mangle XORs 0x63,
	@ multiplies by the circulant 0,1,1,1, then applies ShiftRows.
Loop_enc_key_to_bsaes:
	vld1.64	{q0}, [r1]!

	@ Invert the ShiftRows step (see .Lschedule_mangle_both). Note we cycle
	@ r3 in the opposite direction and start at .Lk_sr+0x10 instead of 0x30.
	@ We use r3 rather than r8 to avoid a callee-saved register.
	vld1.64	{q1}, [r3]
	vtbl.8	d4, {q0}, d2
	vtbl.8	d5, {q0}, d3
	add	r3, r3, #16
	and	r3, r3, #~(1<<6)
	vmov	q0, q2

	@ Handle the last key differently.
	subs	r2, r2, #1
	beq	Loop_enc_key_to_bsaes_last

	@ Multiply by the circulant. This is its own inverse.
	vtbl.8	d2, {q0}, d24
	vtbl.8	d3, {q0}, d25
	vmov	q0, q1
	vtbl.8	d4, {q1}, d24
	vtbl.8	d5, {q1}, d25
	veor	q0, q0, q2
	vtbl.8	d2, {q2}, d24
	vtbl.8	d3, {q2}, d25
	veor	q0, q0, q1

	@ XOR and finish.
	veor	q0, q0, q10
	bl	_vpaes_schedule_transform
	vrev32.8	q0, q0
	vst1.64	{q0}, [r0]!
	b	Loop_enc_key_to_bsaes

Loop_enc_key_to_bsaes_last:
	@ The final key does not have a basis transform (note
	@ .Lschedule_mangle_last inverts the original transform). It only XORs
	@ 0x63 and applies ShiftRows. The latter was already inverted in the
	@ loop. Note that, because we act on the original representation, we use
	@ q11, not q10.
	veor	q0, q0, q11
	vrev32.8	q0, q0
	vst1.64	{q0}, [r0]

	@ Wipe registers which contained key material.
	veor	q0, q0, q0
	veor	q1, q1, q1
	veor	q2, q2, q2

	ldmia	sp!, {r11, pc}	@ return


@ void vpaes_decrypt_key_to_bsaes(AES_KEY *bsaes, const AES_KEY *vpaes);
.globl	_vpaes_decrypt_key_to_bsaes
.private_extern	_vpaes_decrypt_key_to_bsaes
#ifdef __thumb2__
.thumb_func	_vpaes_decrypt_key_to_bsaes
#endif
.align	4
_vpaes_decrypt_key_to_bsaes:
	stmdb	sp!, {r11, lr}

	@ See _vpaes_schedule_core for the key schedule logic. Note vpaes
	@ computes the decryption key schedule in reverse. Additionally,
	@ aes-x86_64.pl shares some transformations, so we must only partially
	@ invert vpaes's transformations. In general, vpaes computes in a
	@ different basis (.Lk_ipt and .Lk_opt) and applies the inverses of
	@ MixColumns, ShiftRows, and the affine part of the AES S-box (which is
	@ split into a linear skew and XOR of 0x63). We undo all but MixColumns.
	@
	@ Note also that bsaes-armv7.pl expects aes-armv4.pl's key
	@ representation, which does not match the other aes_nohw_*
	@ implementations. The ARM aes_nohw_* stores each 32-bit word
	@ byteswapped, as a convenience for (unsupported) big-endian ARM, at the
	@ cost of extra REV and VREV32 operations in little-endian ARM.

	adr	r2, Lk_decrypt_transform
	adr	r3, Lk_sr+0x30
	adr	r11, Lk_opt_then_skew	@ Input to _vpaes_schedule_transform.
	vld1.64	{q12}, [r2]	@ Reuse q12 from encryption.
	vmov.i8	q9, #0x0f		@ Required by _vpaes_schedule_transform

	@ vpaes stores one fewer round count than bsaes, but the number of keys
	@ is the same.
	ldr	r2, [r1,#240]
	add	r2, r2, #1
	str	r2, [r0,#240]

	@ Undo the basis change and reapply the S-box affine transform. See
	@ .Lschedule_mangle_last.
	vld1.64	{q0}, [r1]!
	bl	_vpaes_schedule_transform
	vrev32.8	q0, q0
	vst1.64	{q0}, [r0]!

	@ See _vpaes_schedule_mangle for the transform on the middle keys. Note
	@ it simultaneously inverts MixColumns and the S-box affine transform.
	@ See .Lk_dksd through .Lk_dks9.
Loop_dec_key_to_bsaes:
	vld1.64	{q0}, [r1]!

	@ Invert the ShiftRows step (see .Lschedule_mangle_both). Because the
	@ decryption schedule is stored in reverse, walking it forwards cancels
	@ the need to invert the direction in which we cycle r3. We use r3
	@ rather than r8 to avoid a callee-saved register.
	vld1.64	{q1}, [r3]
	vtbl.8	d4, {q0}, d2
	vtbl.8	d5, {q0}, d3
	add	r3, r3, #64-16
	and	r3, r3, #~(1<<6)
	vmov	q0, q2

	@ Handle the last key differently.
	subs	r2, r2, #1
	beq	Loop_dec_key_to_bsaes_last

	@ Undo the basis change and reapply the S-box affine transform.
	bl	_vpaes_schedule_transform

	@ Rotate each word by 8 bits (cycle the rows) and then byte-swap. We
	@ combine the two operations in .Lk_decrypt_transform.
	@
	@ TODO(davidben): Where does the rotation come from?
	vtbl.8	d2, {q0}, d24
	vtbl.8	d3, {q0}, d25

	vst1.64	{q1}, [r0]!
	b	Loop_dec_key_to_bsaes

Loop_dec_key_to_bsaes_last:
	@ The final key only inverts ShiftRows (already done in the loop). See
	@ .Lschedule_am_decrypting. Its basis is not transformed.
	vrev32.8	q0, q0
	vst1.64	{q0}, [r0]!

	@ Wipe registers which contained key material.
	veor	q0, q0, q0
	veor	q1, q1, q1
	veor	q2, q2, q2

	ldmia	sp!, {r11, pc}	@ return

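@ Presumed prototype, inferred from the register and stack usage below
@ (r0 = in, r1 = out, r2 = number of 16-byte blocks, r3 = key, ivec passed on
@ the stack) and from the other BoringSSL ctr32 implementations; treat it as an
@ assumption rather than the authoritative header:
@ void vpaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t blocks,
@                                 const AES_KEY *key, const uint8_t ivec[16]);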
.globl	_vpaes_ctr32_encrypt_blocks
.private_extern	_vpaes_ctr32_encrypt_blocks
#ifdef __thumb2__
.thumb_func	_vpaes_ctr32_encrypt_blocks
#endif
.align	4
_vpaes_ctr32_encrypt_blocks:
	mov	ip, sp
	stmdb	sp!, {r7,r8,r9,r10,r11, lr}
	@ This function uses q4-q7 (d8-d15), which are callee-saved.
	vstmdb	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}

	cmp	r2, #0
	@ r8 is passed on the stack.
	ldr	r8, [ip]
	beq	Lctr32_done

	@ _vpaes_encrypt_core expects the key in r2, so swap r2 and r3.
	mov	r9, r3
	mov	r3, r2
	mov	r2, r9

	@ Load the IV and counter portion.
	ldr	r7, [r8, #12]
	vld1.8	{q7}, [r8]

	bl	_vpaes_preheat
	rev	r7, r7		@ The counter is big-endian.

Lctr32_loop:
	vmov	q0, q7
	vld1.8	{q6}, [r0]!		@ Load input ahead of time
	bl	_vpaes_encrypt_core
	veor	q0, q0, q6		@ XOR input and result
	vst1.8	{q0}, [r1]!
	subs	r3, r3, #1
	@ Update the counter.
	add	r7, r7, #1
	rev	r9, r7
	vmov.32	d15[1], r9
	bne	Lctr32_loop

Lctr32_done:
	vldmia	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
	ldmia	sp!, {r7,r8,r9,r10,r11, pc}	@ return

#endif  // !OPENSSL_NO_ASM
