// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
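//
// The routines below implement GHASH, the GF(2^128) universal hash used by
// GCM, for ARMv4-class cores: a scalar 4-bit table-driven path
// (gcm_gmult_4bit, gcm_ghash_4bit) and, when __ARM_MAX_ARCH__>=7, a NEON
// path built from 8-bit polynomial multiplies (gcm_init_neon, gcm_gmult_neon,
// gcm_ghash_neon).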

#if !defined(__has_feature)
#define __has_feature(x) 0
#endif
#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM)
#define OPENSSL_NO_ASM
#endif

#if !defined(OPENSSL_NO_ASM)
#if defined(__arm__)
#if defined(BORINGSSL_PREFIX)
#include <boringssl_prefix_symbols_asm.h>
#endif
#include <openssl/arm_arch.h>

@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. (ARMv8 PMULL
@ instructions are in aesv8-armx.pl.)
.arch	armv7-a

.text
#if defined(__thumb2__) || defined(__clang__)
.syntax	unified
#define ldrplb  ldrbpl
#define ldrneb  ldrbne
#endif
#if defined(__thumb2__)
.thumb
#else
.code	32
#endif

.type	rem_4bit,%object
.align	5
rem_4bit:
.short	0x0000,0x1C20,0x3840,0x2460
.short	0x7080,0x6CA0,0x48C0,0x54E0
.short	0xE100,0xFD20,0xD940,0xC560
.short	0x9180,0x8DA0,0xA9C0,0xB5E0
.size	rem_4bit,.-rem_4bit
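@ rem_4bit[i] folds back the nibble that falls off the low end of Xi on each
@ 4-bit shift: the entries appear to be the carry-less (GF(2)) multiples of
@ 0xE1<<5, i.e. the reflected GHASH polynomial x^128+x^7+x^2+x+1 positioned
@ for the "eor r7,r7,rX,lsl#16" folds below.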

.type	rem_4bit_get,%function
rem_4bit_get:
#if defined(__thumb2__)
	adr	r2,rem_4bit
#else
	sub	r2,pc,#8+32	@ &rem_4bit
#endif
	b	.Lrem_4bit_got
	nop
	nop
.size	rem_4bit_get,.-rem_4bit_get

.globl	gcm_ghash_4bit
.hidden	gcm_ghash_4bit
.type	gcm_ghash_4bit,%function
.align	4
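@ Presumed C prototype (per BoringSSL's GCM internals):
@   void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16],
@                       const uint8_t *inp, size_t len);
@ so per AAPCS r0=Xi, r1=Htable, r2=inp, r3=len (a multiple of the 16-byte
@ block size); Xi is updated in place.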
gcm_ghash_4bit:
#if defined(__thumb2__)
	adr	r12,rem_4bit
#else
	sub	r12,pc,#8+48		@ &rem_4bit
#endif
	add	r3,r2,r3		@ r3 to point at the end
	stmdb	sp!,{r3,r4,r5,r6,r7,r8,r9,r10,r11,lr}		@ save r3/end too

	ldmia	r12,{r4,r5,r6,r7,r8,r9,r10,r11}		@ copy rem_4bit ...
	stmdb	sp!,{r4,r5,r6,r7,r8,r9,r10,r11}		@ ... to stack

	ldrb	r12,[r2,#15]
	ldrb	r14,[r0,#15]
.Louter:
	eor	r12,r12,r14
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	mov	r3,#14

	add	r7,r1,r12,lsl#4
	ldmia	r7,{r4,r5,r6,r7}	@ load Htbl[nlo]
	add	r11,r1,r14
	ldrb	r12,[r2,#14]

	and	r14,r4,#0xf		@ rem
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	add	r14,r14,r14
	eor	r4,r8,r4,lsr#4
	ldrh	r8,[sp,r14]		@ rem_4bit[rem]
	eor	r4,r4,r5,lsl#28
	ldrb	r14,[r0,#14]
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
	eor	r12,r12,r14
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	eor	r7,r7,r8,lsl#16

.Linner:
	add	r11,r1,r12,lsl#4
	and	r12,r4,#0xf		@ rem
	subs	r3,r3,#1
	add	r12,r12,r12
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nlo]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	ldrh	r8,[sp,r12]		@ rem_4bit[rem]
	eor	r6,r10,r6,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r12,[r2,r3]
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	add	r14,r14,r14
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	eor	r4,r8,r4,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r8,[r0,r3]
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	ldrh	r9,[sp,r14]
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
#ifdef	__thumb2__
	it	pl
#endif
	eorpl	r12,r12,r8
	eor	r7,r11,r7,lsr#4
#ifdef	__thumb2__
	itt	pl
#endif
	andpl	r14,r12,#0xf0
	andpl	r12,r12,#0x0f
	eor	r7,r7,r9,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Linner

	ldr	r3,[sp,#32]		@ re-load r3/end
	add	r2,r2,#16
	mov	r14,r4
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r4,r4
	str	r4,[r0,#12]
#elif defined(__ARMEB__)
	str	r4,[r0,#12]
#else
	mov	r9,r4,lsr#8
	strb	r4,[r0,#12+3]
	mov	r10,r4,lsr#16
	strb	r9,[r0,#12+2]
	mov	r11,r4,lsr#24
	strb	r10,[r0,#12+1]
	strb	r11,[r0,#12]
#endif
	cmp	r2,r3
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r5,r5
	str	r5,[r0,#8]
#elif defined(__ARMEB__)
	str	r5,[r0,#8]
#else
	mov	r9,r5,lsr#8
	strb	r5,[r0,#8+3]
	mov	r10,r5,lsr#16
	strb	r9,[r0,#8+2]
	mov	r11,r5,lsr#24
	strb	r10,[r0,#8+1]
	strb	r11,[r0,#8]
#endif

#ifdef __thumb2__
	it	ne
#endif
	ldrneb	r12,[r2,#15]
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r6,r6
	str	r6,[r0,#4]
#elif defined(__ARMEB__)
	str	r6,[r0,#4]
#else
	mov	r9,r6,lsr#8
	strb	r6,[r0,#4+3]
	mov	r10,r6,lsr#16
	strb	r9,[r0,#4+2]
	mov	r11,r6,lsr#24
	strb	r10,[r0,#4+1]
	strb	r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r7,r7
	str	r7,[r0,#0]
#elif defined(__ARMEB__)
	str	r7,[r0,#0]
#else
	mov	r9,r7,lsr#8
	strb	r7,[r0,#0+3]
	mov	r10,r7,lsr#16
	strb	r9,[r0,#0+2]
	mov	r11,r7,lsr#24
	strb	r10,[r0,#0+1]
	strb	r11,[r0,#0]
#endif

	bne	.Louter

	add	sp,sp,#36
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_ghash_4bit,.-gcm_ghash_4bit

.globl	gcm_gmult_4bit
.hidden	gcm_gmult_4bit
.type	gcm_gmult_4bit,%function
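@ Presumed C prototype:
@   void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]);
@ r0=Xi, r1=Htable; computes Xi = Xi*H in place using the same 4-bit table
@ walk as gcm_ghash_4bit, but for a single block.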
gcm_gmult_4bit:
	stmdb	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	ldrb	r12,[r0,#15]
	b	rem_4bit_get
.Lrem_4bit_got:
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	mov	r3,#14

	add	r7,r1,r12,lsl#4
	ldmia	r7,{r4,r5,r6,r7}	@ load Htbl[nlo]
	ldrb	r12,[r0,#14]

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	add	r14,r14,r14
	eor	r4,r8,r4,lsr#4
	ldrh	r8,[r2,r14]	@ rem_4bit[rem]
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
	and	r14,r12,#0xf0
	eor	r7,r7,r8,lsl#16
	and	r12,r12,#0x0f

.Loop:
	add	r11,r1,r12,lsl#4
	and	r12,r4,#0xf		@ rem
	subs	r3,r3,#1
	add	r12,r12,r12
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nlo]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	ldrh	r8,[r2,r12]	@ rem_4bit[rem]
	eor	r6,r10,r6,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r12,[r0,r3]
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	add	r14,r14,r14
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	ldrh	r8,[r2,r14]	@ rem_4bit[rem]
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
#ifdef	__thumb2__
	itt	pl
#endif
	andpl	r14,r12,#0xf0
	andpl	r12,r12,#0x0f
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Loop
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r4,r4
	str	r4,[r0,#12]
#elif defined(__ARMEB__)
	str	r4,[r0,#12]
#else
	mov	r9,r4,lsr#8
	strb	r4,[r0,#12+3]
	mov	r10,r4,lsr#16
	strb	r9,[r0,#12+2]
	mov	r11,r4,lsr#24
	strb	r10,[r0,#12+1]
	strb	r11,[r0,#12]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r5,r5
	str	r5,[r0,#8]
#elif defined(__ARMEB__)
	str	r5,[r0,#8]
#else
	mov	r9,r5,lsr#8
	strb	r5,[r0,#8+3]
	mov	r10,r5,lsr#16
	strb	r9,[r0,#8+2]
	mov	r11,r5,lsr#24
	strb	r10,[r0,#8+1]
	strb	r11,[r0,#8]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r6,r6
	str	r6,[r0,#4]
#elif defined(__ARMEB__)
	str	r6,[r0,#4]
#else
	mov	r9,r6,lsr#8
	strb	r6,[r0,#4+3]
	mov	r10,r6,lsr#16
	strb	r9,[r0,#4+2]
	mov	r11,r6,lsr#24
	strb	r10,[r0,#4+1]
	strb	r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r7,r7
	str	r7,[r0,#0]
#elif defined(__ARMEB__)
	str	r7,[r0,#0]
#else
	mov	r9,r7,lsr#8
	strb	r7,[r0,#0+3]
	mov	r10,r7,lsr#16
	strb	r9,[r0,#0+2]
	mov	r11,r7,lsr#24
	strb	r10,[r0,#0+1]
	strb	r11,[r0,#0]
#endif

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

.globl	gcm_init_neon
.hidden	gcm_init_neon
.type	gcm_init_neon,%function
.align	4
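@ Presumed C prototype:
@   void gcm_init_neon(u128 Htable[16], const uint64_t H[2]);
@ r0=Htable (output), r1=H; stores the "twisted" H consumed by
@ gcm_gmult_neon and gcm_ghash_neon.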
gcm_init_neon:
	vld1.64	d7,[r1]!		@ load H
	vmov.i8	q8,#0xe1
	vld1.64	d6,[r1]
	vshl.i64	d17,#57
	vshr.u64	d16,#63		@ t0=0xc2....01
	vdup.8	q9,d7[7]
	vshr.u64	d26,d6,#63
	vshr.s8	q9,#7			@ broadcast carry bit
	vshl.i64	q3,q3,#1
	vand	q8,q8,q9
	vorr	d7,d26		@ H<<<=1
	veor	q3,q3,q8		@ twisted H
	vstmia	r0,{q3}

	bx	lr					@ bx lr
.size	gcm_init_neon,.-gcm_init_neon

.globl	gcm_gmult_neon
.hidden	gcm_gmult_neon
.type	gcm_gmult_neon,%function
.align	4
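@ Presumed C prototype:
@   void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]);
@ r0=Xi, r1=Htable; r3 is preset to 16 so the shared .Lgmult_neon/.Loop_neon
@ code below runs for exactly one block.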
gcm_gmult_neon:
	vld1.64	d7,[r0]!		@ load Xi
	vld1.64	d6,[r0]!
	vmov.i64	d29,#0x0000ffffffffffff
	vldmia	r1,{d26,d27}	@ load twisted H
	vmov.i64	d30,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	q3,q3
#endif
	vmov.i64	d31,#0x000000000000ffff
	veor	d28,d26,d27		@ Karatsuba pre-processing
	mov	r3,#16
	b	.Lgmult_neon
.size	gcm_gmult_neon,.-gcm_gmult_neon

.globl	gcm_ghash_neon
.hidden	gcm_ghash_neon
.type	gcm_ghash_neon,%function
.align	4
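@ Presumed C prototype:
@   void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16],
@                       const uint8_t *inp, size_t len);
@ r0=Xi, r1=Htable, r2=inp, r3=len (a multiple of 16 bytes).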
gcm_ghash_neon:
	vld1.64	d1,[r0]!		@ load Xi
	vld1.64	d0,[r0]!
	vmov.i64	d29,#0x0000ffffffffffff
	vldmia	r1,{d26,d27}	@ load twisted H
	vmov.i64	d30,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	q0,q0
#endif
	vmov.i64	d31,#0x000000000000ffff
	veor	d28,d26,d27		@ Karatsuba pre-processing

.Loop_neon:
	vld1.64	d7,[r2]!		@ load inp
	vld1.64	d6,[r2]!
#ifdef __ARMEL__
	vrev64.8	q3,q3
#endif
	veor	q3,q0			@ inp^=Xi
.Lgmult_neon:
	vext.8	d16, d26, d26, #1	@ A1
	vmull.p8	q8, d16, d6		@ F = A1*B
	vext.8	d0, d6, d6, #1	@ B1
	vmull.p8	q0, d26, d0		@ E = A*B1
	vext.8	d18, d26, d26, #2	@ A2
	vmull.p8	q9, d18, d6		@ H = A2*B
	vext.8	d22, d6, d6, #2	@ B2
	vmull.p8	q11, d26, d22		@ G = A*B2
	vext.8	d20, d26, d26, #3	@ A3
	veor	q8, q8, q0		@ L = E + F
	vmull.p8	q10, d20, d6		@ J = A3*B
	vext.8	d0, d6, d6, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q0, d26, d0		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d6, d6, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d26, d22		@ K = A*B4
	veor	q10, q10, q0		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q0, d26, d6		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q0, q0, q8
	veor	q0, q0, q10
	veor	d6,d6,d7	@ Karatsuba pre-processing
	vext.8	d16, d28, d28, #1	@ A1
	vmull.p8	q8, d16, d6		@ F = A1*B
	vext.8	d2, d6, d6, #1	@ B1
	vmull.p8	q1, d28, d2		@ E = A*B1
	vext.8	d18, d28, d28, #2	@ A2
	vmull.p8	q9, d18, d6		@ H = A2*B
	vext.8	d22, d6, d6, #2	@ B2
	vmull.p8	q11, d28, d22		@ G = A*B2
	vext.8	d20, d28, d28, #3	@ A3
	veor	q8, q8, q1		@ L = E + F
	vmull.p8	q10, d20, d6		@ J = A3*B
	vext.8	d2, d6, d6, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q1, d28, d2		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d6, d6, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d28, d22		@ K = A*B4
	veor	q10, q10, q1		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q1, d28, d6		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q1, q1, q8
	veor	q1, q1, q10
	vext.8	d16, d27, d27, #1	@ A1
	vmull.p8	q8, d16, d7		@ F = A1*B
	vext.8	d4, d7, d7, #1	@ B1
	vmull.p8	q2, d27, d4		@ E = A*B1
	vext.8	d18, d27, d27, #2	@ A2
	vmull.p8	q9, d18, d7		@ H = A2*B
	vext.8	d22, d7, d7, #2	@ B2
	vmull.p8	q11, d27, d22		@ G = A*B2
	vext.8	d20, d27, d27, #3	@ A3
	veor	q8, q8, q2		@ L = E + F
	vmull.p8	q10, d20, d7		@ J = A3*B
	vext.8	d4, d7, d7, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q2, d27, d4		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d7, d7, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d27, d22		@ K = A*B4
	veor	q10, q10, q2		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q2, d27, d7		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q2, q2, q8
	veor	q2, q2, q10
	veor	q1,q1,q0		@ Karatsuba post-processing
	veor	q1,q1,q2
	veor	d1,d1,d2
	veor	d4,d4,d3	@ Xh|Xl - 256-bit result

	@ equivalent of reduction_avx from ghash-x86_64.pl
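	@ (The shift counts 57/62/63 presumably correspond to 64-7/64-2/64-1,
	@ i.e. the x^7, x^2 and x terms of the reflected GHASH polynomial
	@ x^128+x^7+x^2+x+1; the second phase folds back with the matching
	@ right shifts, leaving the reduced 128-bit result in q0.)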
	vshl.i64	q9,q0,#57		@ 1st phase
	vshl.i64	q10,q0,#62
	veor	q10,q10,q9		@
	vshl.i64	q9,q0,#63
	veor	q10, q10, q9		@
	veor	d1,d1,d20	@
	veor	d4,d4,d21

	vshr.u64	q10,q0,#1		@ 2nd phase
	veor	q2,q2,q0
	veor	q0,q0,q10		@
	vshr.u64	q10,q10,#6
	vshr.u64	q0,q0,#1		@
	veor	q0,q0,q2		@
	veor	q0,q0,q10		@

	subs	r3,#16
	bne	.Loop_neon

#ifdef __ARMEL__
	vrev64.8	q0,q0
#endif
	sub	r0,#16
	vst1.64	d1,[r0]!		@ write out Xi
	vst1.64	d0,[r0]

	bx	lr					@ bx lr
.size	gcm_ghash_neon,.-gcm_ghash_neon
#endif
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align	2
.align	2
#endif
#endif  // !OPENSSL_NO_ASM
.section	.note.GNU-stack,"",%progbits