# This implementation was taken from the public-domain "neon2" version in
# SUPERCOP by D. J. Bernstein and Peter Schwabe.
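#
# Overview (added commentary, not part of the original qhasm output):
# Poly1305 works modulo p = 2^130 - 5. This file keeps 130-bit values in
# five 26-bit limbs so that NEON's 32x32->64 multiply-accumulate
# (vmull.u32/vmlal.u32) can form limb products without overflow. The
# y*/z* variables below hold two five-limb key values loaded from
# input_1 (for two-block parallelism these would be r^2 and r, but treat
# that as an assumption about the calling convention, not something this
# listing states), and 5y*/5z* cache 5 times those limbs, since
# 2^130 == 5 (mod p) lets high partial products wrap around at the cost
# of a factor 5.
#
# A minimal C sketch of one carry round over limbs h[0..4], assuming the
# same radix-2^26 layout (illustrative only, names are hypothetical):
#
#   uint64_t c = h[i] >> 26;     /* carry out of limb i        */
#   h[i] &= 0x3ffffff;           /* keep the low 26 bits       */
#   if (i < 4) h[i+1] += c;      /* push the carry upward      */
#   else       h[0]   += c * 5;  /* 2^130 == 5   (mod p)       */
#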

# qhasm: int32 input_0

# qhasm: int32 input_1

# qhasm: int32 input_2

# qhasm: int32 input_3

# qhasm: stack32 input_4

# qhasm: stack32 input_5

# qhasm: stack32 input_6

# qhasm: stack32 input_7

# qhasm: int32 caller_r4

# qhasm: int32 caller_r5

# qhasm: int32 caller_r6

# qhasm: int32 caller_r7

# qhasm: int32 caller_r8

# qhasm: int32 caller_r9

# qhasm: int32 caller_r10

# qhasm: int32 caller_r11

# qhasm: int32 caller_r12

# qhasm: int32 caller_r14

# qhasm: reg128 caller_q4

# qhasm: reg128 caller_q5

# qhasm: reg128 caller_q6

# qhasm: reg128 caller_q7

# qhasm: startcode
.fpu neon
.text

# qhasm: reg128 r0

# qhasm: reg128 r1

# qhasm: reg128 r2

# qhasm: reg128 r3

# qhasm: reg128 r4

# qhasm: reg128 x01

# qhasm: reg128 x23

# qhasm: reg128 x4

# qhasm: reg128 y0

# qhasm: reg128 y12

# qhasm: reg128 y34

# qhasm: reg128 5y12

# qhasm: reg128 5y34

# qhasm: stack128 y0_stack

# qhasm: stack128 y12_stack

# qhasm: stack128 y34_stack

# qhasm: stack128 5y12_stack

# qhasm: stack128 5y34_stack

# qhasm: reg128 z0

# qhasm: reg128 z12

# qhasm: reg128 z34

# qhasm: reg128 5z12

# qhasm: reg128 5z34

# qhasm: stack128 z0_stack

# qhasm: stack128 z12_stack

# qhasm: stack128 z34_stack

# qhasm: stack128 5z12_stack

# qhasm: stack128 5z34_stack

# qhasm: stack128 two24

# qhasm: int32 ptr

# qhasm: reg128 c01

# qhasm: reg128 c23

# qhasm: reg128 d01

# qhasm: reg128 d23

# qhasm: reg128 t0

# qhasm: reg128 t1

# qhasm: reg128 t2

# qhasm: reg128 t3

# qhasm: reg128 t4

# qhasm: reg128 mask

# qhasm: reg128 u0

# qhasm: reg128 u1

# qhasm: reg128 u2

# qhasm: reg128 u3

# qhasm: reg128 u4

# qhasm: reg128 v01

# qhasm: reg128 mid

# qhasm: reg128 v23

# qhasm: reg128 v4

# qhasm: int32 len

# qhasm: qpushenter crypto_onetimeauth_poly1305_neon2_blocks
.align 4
.global openssl_poly1305_neon2_blocks
.type openssl_poly1305_neon2_blocks STT_FUNC
openssl_poly1305_neon2_blocks:
vpush {q4,q5,q6,q7}
mov r12,sp
sub sp,sp,#192
and sp,sp,#0xffffffe0
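
# Commentary: the prologue saves the callee-saved NEON registers q4-q7
# (AAPCS requires d8-d15 to be preserved), keeps the original sp in r12,
# reserves 192 bytes of frame for the spilled key constants, and rounds
# sp down to a 32-byte boundary so the aligned vst1.8/vld1.8 forms used
# below are legal.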

# qhasm: len = input_3
# asm 1: mov >len=int32#4,<input_3=int32#4
# asm 2: mov >len=r3,<input_3=r3
mov r3,r3

# qhasm: new y0

# qhasm: y0  = mem64[input_1]y0[1]; input_1 += 8
# asm 1: vld1.8 {<y0=reg128#1%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<y0=d0},[<input_1=r1]!
vld1.8 {d0},[r1]!

# qhasm: y12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y12=d2->y12=d3},[<input_1=r1]!
vld1.8 {d2-d3},[r1]!

# qhasm: y34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y34=d4->y34=d5},[<input_1=r1]!
vld1.8 {d4-d5},[r1]!

# qhasm: input_1 += 8
# asm 1: add >input_1=int32#2,<input_1=int32#2,#8
# asm 2: add >input_1=r1,<input_1=r1,#8
add r1,r1,#8

# qhasm: new z0

# qhasm: z0  = mem64[input_1]z0[1]; input_1 += 8
# asm 1: vld1.8 {<z0=reg128#4%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<z0=d6},[<input_1=r1]!
vld1.8 {d6},[r1]!

# qhasm: z12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z12=reg128#5%bot->z12=reg128#5%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z12=d8->z12=d9},[<input_1=r1]!
vld1.8 {d8-d9},[r1]!

# qhasm: z34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z34=d10->z34=d11},[<input_1=r1]!
vld1.8 {d10-d11},[r1]!

# qhasm: 2x mask = 0xffffffff
# asm 1: vmov.i64 >mask=reg128#7,#0xffffffff
# asm 2: vmov.i64 >mask=q6,#0xffffffff
vmov.i64 q6,#0xffffffff

# qhasm: 2x u4 = 0xff
# asm 1: vmov.i64 >u4=reg128#8,#0xff
# asm 2: vmov.i64 >u4=q7,#0xff
vmov.i64 q7,#0xff
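
# Commentary: the two constants started here are finished by the shifts a
# few instructions down: mask becomes 0xffffffff >> 6 = 0x03ffffff (the
# 26-bit limb mask) and u4 becomes (0xff >> 7) << 24 = 2^24, which is the
# Poly1305 padding bit 2^128 expressed at the weight of limb 4
# (2^128 = 2^24 * 2^104).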

# qhasm: x01 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x01=reg128#9%bot->x01=reg128#9%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x01=d16->x01=d17},[<input_0=r0,: 128]!
vld1.8 {d16-d17},[r0,: 128]!

# qhasm: x23 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x23=reg128#10%bot->x23=reg128#10%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x23=d18->x23=d19},[<input_0=r0,: 128]!
vld1.8 {d18-d19},[r0,: 128]!

# qhasm: x4  aligned= mem64[input_0]x4[1]
# asm 1: vld1.8 {<x4=reg128#11%bot},[<input_0=int32#1,: 64]
# asm 2: vld1.8 {<x4=d20},[<input_0=r0,: 64]
vld1.8 {d20},[r0,: 64]

# qhasm: input_0 -= 32
# asm 1: sub >input_0=int32#1,<input_0=int32#1,#32
# asm 2: sub >input_0=r0,<input_0=r0,#32
sub r0,r0,#32

# qhasm: 2x mask unsigned>>=6
# asm 1: vshr.u64 >mask=reg128#7,<mask=reg128#7,#6
# asm 2: vshr.u64 >mask=q6,<mask=q6,#6
vshr.u64 q6,q6,#6

# qhasm: 2x u4 unsigned>>= 7
# asm 1: vshr.u64 >u4=reg128#8,<u4=reg128#8,#7
# asm 2: vshr.u64 >u4=q7,<u4=q7,#7
vshr.u64 q7,q7,#7

# qhasm: 4x 5y12 = y12 << 2
# asm 1: vshl.i32 >5y12=reg128#12,<y12=reg128#2,#2
# asm 2: vshl.i32 >5y12=q11,<y12=q1,#2
vshl.i32 q11,q1,#2

# qhasm: 4x 5y34 = y34 << 2
# asm 1: vshl.i32 >5y34=reg128#13,<y34=reg128#3,#2
# asm 2: vshl.i32 >5y34=q12,<y34=q2,#2
vshl.i32 q12,q2,#2

# qhasm: 4x 5y12 += y12
# asm 1: vadd.i32 >5y12=reg128#12,<5y12=reg128#12,<y12=reg128#2
# asm 2: vadd.i32 >5y12=q11,<5y12=q11,<y12=q1
vadd.i32 q11,q11,q1

# qhasm: 4x 5y34 += y34
# asm 1: vadd.i32 >5y34=reg128#13,<5y34=reg128#13,<y34=reg128#3
# asm 2: vadd.i32 >5y34=q12,<5y34=q12,<y34=q2
vadd.i32 q12,q12,q2

# qhasm: 2x u4 <<= 24
# asm 1: vshl.i64 >u4=reg128#8,<u4=reg128#8,#24
# asm 2: vshl.i64 >u4=q7,<u4=q7,#24
vshl.i64 q7,q7,#24

# qhasm: 4x 5z12 = z12 << 2
# asm 1: vshl.i32 >5z12=reg128#14,<z12=reg128#5,#2
# asm 2: vshl.i32 >5z12=q13,<z12=q4,#2
vshl.i32 q13,q4,#2

# qhasm: 4x 5z34 = z34 << 2
# asm 1: vshl.i32 >5z34=reg128#15,<z34=reg128#6,#2
# asm 2: vshl.i32 >5z34=q14,<z34=q5,#2
vshl.i32 q14,q5,#2

# qhasm: 4x 5z12 += z12
# asm 1: vadd.i32 >5z12=reg128#14,<5z12=reg128#14,<z12=reg128#5
# asm 2: vadd.i32 >5z12=q13,<5z12=q13,<z12=q4
vadd.i32 q13,q13,q4

# qhasm: 4x 5z34 += z34
# asm 1: vadd.i32 >5z34=reg128#15,<5z34=reg128#15,<z34=reg128#6
# asm 2: vadd.i32 >5z34=q14,<5z34=q14,<z34=q5
vadd.i32 q14,q14,q5
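
# Commentary: 5y = (y << 2) + y and 5z likewise compute 5 times each key
# limb. These feed partial products whose combined weight reaches 2^130
# or more: since 2^130 == 5 (mod 2^130 - 5), such a term folds back into
# the low limbs as 5 * y_j * x_i.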

# qhasm: new two24

# qhasm: new y0_stack

# qhasm: new y12_stack

# qhasm: new y34_stack

# qhasm: new 5y12_stack

# qhasm: new 5y34_stack

# qhasm: new z0_stack

# qhasm: new z12_stack

# qhasm: new z34_stack

# qhasm: new 5z12_stack

# qhasm: new 5z34_stack

# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#2,<two24=stack128#1
# asm 2: lea >ptr=r1,<two24=[sp,#0]
add r1,sp,#0

# qhasm: mem128[ptr] aligned= u4
# asm 1: vst1.8 {<u4=reg128#8%bot-<u4=reg128#8%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<u4=d14-<u4=d15},[<ptr=r1,: 128]
vst1.8 {d14-d15},[r1,: 128]

# qhasm: r4 = u4
# asm 1: vmov >r4=reg128#16,<u4=reg128#8
# asm 2: vmov >r4=q15,<u4=q7
vmov q15,q7

# qhasm: r0 = u4
# asm 1: vmov >r0=reg128#8,<u4=reg128#8
# asm 2: vmov >r0=q7,<u4=q7
vmov q7,q7

# qhasm: ptr = &y0_stack
# asm 1: lea >ptr=int32#2,<y0_stack=stack128#2
# asm 2: lea >ptr=r1,<y0_stack=[sp,#16]
add r1,sp,#16

# qhasm: mem128[ptr] aligned= y0
# asm 1: vst1.8 {<y0=reg128#1%bot-<y0=reg128#1%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y0=d0-<y0=d1},[<ptr=r1,: 128]
vst1.8 {d0-d1},[r1,: 128]

# qhasm: ptr = &y12_stack
# asm 1: lea >ptr=int32#2,<y12_stack=stack128#3
# asm 2: lea >ptr=r1,<y12_stack=[sp,#32]
add r1,sp,#32

# qhasm: mem128[ptr] aligned= y12
# asm 1: vst1.8 {<y12=reg128#2%bot-<y12=reg128#2%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y12=d2-<y12=d3},[<ptr=r1,: 128]
vst1.8 {d2-d3},[r1,: 128]

# qhasm: ptr = &y34_stack
# asm 1: lea >ptr=int32#2,<y34_stack=stack128#4
# asm 2: lea >ptr=r1,<y34_stack=[sp,#48]
add r1,sp,#48

# qhasm: mem128[ptr] aligned= y34
# asm 1: vst1.8 {<y34=reg128#3%bot-<y34=reg128#3%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y34=d4-<y34=d5},[<ptr=r1,: 128]
vst1.8 {d4-d5},[r1,: 128]

# qhasm: ptr = &z0_stack
# asm 1: lea >ptr=int32#2,<z0_stack=stack128#7
# asm 2: lea >ptr=r1,<z0_stack=[sp,#96]
add r1,sp,#96

# qhasm: mem128[ptr] aligned= z0
# asm 1: vst1.8 {<z0=reg128#4%bot-<z0=reg128#4%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z0=d6-<z0=d7},[<ptr=r1,: 128]
vst1.8 {d6-d7},[r1,: 128]

# qhasm: ptr = &z12_stack
# asm 1: lea >ptr=int32#2,<z12_stack=stack128#8
# asm 2: lea >ptr=r1,<z12_stack=[sp,#112]
add r1,sp,#112

# qhasm: mem128[ptr] aligned= z12
# asm 1: vst1.8 {<z12=reg128#5%bot-<z12=reg128#5%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z12=d8-<z12=d9},[<ptr=r1,: 128]
vst1.8 {d8-d9},[r1,: 128]

# qhasm: ptr = &z34_stack
# asm 1: lea >ptr=int32#2,<z34_stack=stack128#9
# asm 2: lea >ptr=r1,<z34_stack=[sp,#128]
add r1,sp,#128

# qhasm: mem128[ptr] aligned= z34
# asm 1: vst1.8 {<z34=reg128#6%bot-<z34=reg128#6%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z34=d10-<z34=d11},[<ptr=r1,: 128]
vst1.8 {d10-d11},[r1,: 128]

# qhasm: ptr = &5y12_stack
# asm 1: lea >ptr=int32#2,<5y12_stack=stack128#5
# asm 2: lea >ptr=r1,<5y12_stack=[sp,#64]
add r1,sp,#64

# qhasm: mem128[ptr] aligned= 5y12
# asm 1: vst1.8 {<5y12=reg128#12%bot-<5y12=reg128#12%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5y12=d22-<5y12=d23},[<ptr=r1,: 128]
vst1.8 {d22-d23},[r1,: 128]

# qhasm: ptr = &5y34_stack
# asm 1: lea >ptr=int32#2,<5y34_stack=stack128#6
# asm 2: lea >ptr=r1,<5y34_stack=[sp,#80]
add r1,sp,#80

# qhasm: mem128[ptr] aligned= 5y34
# asm 1: vst1.8 {<5y34=reg128#13%bot-<5y34=reg128#13%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5y34=d24-<5y34=d25},[<ptr=r1,: 128]
vst1.8 {d24-d25},[r1,: 128]

# qhasm: ptr = &5z12_stack
# asm 1: lea >ptr=int32#2,<5z12_stack=stack128#10
# asm 2: lea >ptr=r1,<5z12_stack=[sp,#144]
add r1,sp,#144

# qhasm: mem128[ptr] aligned= 5z12
# asm 1: vst1.8 {<5z12=reg128#14%bot-<5z12=reg128#14%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5z12=d26-<5z12=d27},[<ptr=r1,: 128]
vst1.8 {d26-d27},[r1,: 128]

# qhasm: ptr = &5z34_stack
# asm 1: lea >ptr=int32#2,<5z34_stack=stack128#11
# asm 2: lea >ptr=r1,<5z34_stack=[sp,#160]
add r1,sp,#160

# qhasm: mem128[ptr] aligned= 5z34
# asm 1: vst1.8 {<5z34=reg128#15%bot-<5z34=reg128#15%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5z34=d28-<5z34=d29},[<ptr=r1,: 128]
vst1.8 {d28-d29},[r1,: 128]
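
# Commentary: all ten key vectors plus two24 have now been spilled to the
# aligned frame at sp+0 .. sp+160. The main loop needs nearly the whole
# q-register file for accumulators and message limbs, so it reloads these
# constants from the stack as it goes.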

# qhasm:                       unsigned>? len - 64
# asm 1: cmp <len=int32#4,#64
# asm 2: cmp <len=r3,#64
cmp r3,#64

# qhasm: goto below64bytes if !unsigned>
bls ._below64bytes
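
# Commentary: ._mainloop2 consumes 64 message bytes (four 16-byte blocks,
# handled as two interleaved pairs) per iteration; it runs only while
# strictly more than 64 bytes remain, leaving a tail for ._mainloop and
# the caller.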

# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#3,#32
# asm 2: add >input_2=r1,<input_2=r2,#32
add r1,r2,#32

# qhasm: mainloop2:
._mainloop2:

# qhasm:   c01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c01=reg128#1%bot->c01=reg128#1%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>c01=d0->c01=d1},[<input_2=r1]!
vld1.8 {d0-d1},[r1]!

# qhasm:   c23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c23=reg128#2%bot->c23=reg128#2%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>c23=d2->c23=d3},[<input_2=r1]!
vld1.8 {d2-d3},[r1]!

# qhasm: r4[0,1] += x01[0] unsigned*  z34[2];  r4[2,3] += x01[1] unsigned*  z34[3]
# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%bot,<z34=reg128#6%top
# asm 2: vmlal.u32 <r4=q15,<x01=d16,<z34=d11
vmlal.u32 q15,d16,d11

# qhasm:   ptr = &z12_stack
# asm 1: lea >ptr=int32#3,<z12_stack=stack128#8
# asm 2: lea >ptr=r2,<z12_stack=[sp,#112]
add r2,sp,#112

# qhasm:   z12 aligned= mem128[ptr]
# asm 1: vld1.8 {>z12=reg128#3%bot->z12=reg128#3%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z12=d4->z12=d5},[<ptr=r2,: 128]
vld1.8 {d4-d5},[r2,: 128]

# qhasm: r4[0,1] += x01[2] unsigned* z34[0];  r4[2,3] += x01[3] unsigned* z34[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%top,<z34=reg128#6%bot
# asm 2: vmlal.u32 <r4=q15,<x01=d17,<z34=d10
vmlal.u32 q15,d17,d10

# qhasm:   ptr = &z0_stack
# asm 1: lea >ptr=int32#3,<z0_stack=stack128#7
# asm 2: lea >ptr=r2,<z0_stack=[sp,#96]
add r2,sp,#96

# qhasm:   z0 aligned= mem128[ptr]
# asm 1: vld1.8 {>z0=reg128#4%bot->z0=reg128#4%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z0=d6->z0=d7},[<ptr=r2,: 128]
vld1.8 {d6-d7},[r2,: 128]

# qhasm: r4[0,1] += x23[0] unsigned* z12[2];  r4[2,3] += x23[1] unsigned* z12[3]
# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%bot,<z12=reg128#3%top
# asm 2: vmlal.u32 <r4=q15,<x23=d18,<z12=d5
vmlal.u32 q15,d18,d5

# qhasm:   c01 c23 = c01[0]c01[1]c01[2]c23[2]c23[0]c23[1]c01[3]c23[3]
# asm 1: vtrn.32 <c01=reg128#1%top,<c23=reg128#2%top
# asm 2: vtrn.32 <c01=d1,<c23=d3
vtrn.32 d1,d3

# qhasm: r4[0,1] += x23[2] unsigned* z12[0];  r4[2,3] += x23[3] unsigned* z12[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%top,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r4=q15,<x23=d19,<z12=d4
vmlal.u32 q15,d19,d4

# qhasm: r4[0,1] +=  x4[0] unsigned* z0[0];  r4[2,3] +=  x4[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x4=reg128#11%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r4=q15,<x4=d20,<z0=d6
vmlal.u32 q15,d20,d6

# qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
# asm 1: vshll.u32 >r3=reg128#5,<c23=reg128#2%top,#18
# asm 2: vshll.u32 >r3=q4,<c23=d3,#18
vshll.u32 q4,d3,#18
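
# Commentary on the vshll shift counts: message word w_k of a block sits
# at bit 32k, while accumulator r_k carries weight 2^(26k). The shift
# 32k - 26k = 6k (#18 here for k=3, and #12, #6 below for k=2, k=1) drops
# each word into its limb accumulator at the correct scale; the excess
# high bits are redistributed later by the carry chain.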

# qhasm:   c01 c23 = c01[0]c23[0]c01[2]c01[3]c01[1]c23[1]c23[2]c23[3]
# asm 1: vtrn.32 <c01=reg128#1%bot,<c23=reg128#2%bot
# asm 2: vtrn.32 <c01=d0,<c23=d2
vtrn.32 d0,d2

# qhasm: r3[0,1] += x01[0] unsigned* z34[0];   r3[2,3] += x01[1] unsigned* z34[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%bot,<z34=reg128#6%bot
# asm 2: vmlal.u32 <r3=q4,<x01=d16,<z34=d10
vmlal.u32 q4,d16,d10

# qhasm: r3[0,1] += x01[2] unsigned* z12[2];   r3[2,3] += x01[3] unsigned* z12[3]
# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%top,<z12=reg128#3%top
# asm 2: vmlal.u32 <r3=q4,<x01=d17,<z12=d5
vmlal.u32 q4,d17,d5

# qhasm:   r0 = r0[1]c01[0]r0[2,3]
# asm 1: vext.32 <r0=reg128#8%bot,<r0=reg128#8%bot,<c01=reg128#1%bot,#1
# asm 2: vext.32 <r0=d14,<r0=d14,<c01=d0,#1
vext.32 d14,d14,d0,#1

# qhasm: r3[0,1] += x23[0] unsigned* z12[0];   r3[2,3] += x23[1] unsigned* z12[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%bot,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r3=q4,<x23=d18,<z12=d4
vmlal.u32 q4,d18,d4

# qhasm: 								input_2 -= 64
# asm 1: sub >input_2=int32#2,<input_2=int32#2,#64
# asm 2: sub >input_2=r1,<input_2=r1,#64
sub r1,r1,#64

# qhasm: r3[0,1] += x23[2] unsigned* z0[0];   r3[2,3] += x23[3] unsigned* z0[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%top,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r3=q4,<x23=d19,<z0=d6
vmlal.u32 q4,d19,d6

# qhasm:   ptr = &5z34_stack
# asm 1: lea >ptr=int32#3,<5z34_stack=stack128#11
# asm 2: lea >ptr=r2,<5z34_stack=[sp,#160]
add r2,sp,#160

# qhasm:   5z34 aligned= mem128[ptr]
# asm 1: vld1.8 {>5z34=reg128#6%bot->5z34=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5z34=d10->5z34=d11},[<ptr=r2,: 128]
vld1.8 {d10-d11},[r2,: 128]

# qhasm: r3[0,1] +=  x4[0] unsigned*  5z34[2]; r3[2,3] +=  x4[1] unsigned*  5z34[3]
# asm 1: vmlal.u32 <r3=reg128#5,<x4=reg128#11%bot,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r3=q4,<x4=d20,<5z34=d11
vmlal.u32 q4,d20,d11

# qhasm:   r0 = r0[1]r0[0]r0[3]r0[2]
# asm 1: vrev64.i32 >r0=reg128#8,<r0=reg128#8
# asm 2: vrev64.i32 >r0=q7,<r0=q7
vrev64.i32 q7,q7

# qhasm:   r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
# asm 1: vshll.u32 >r2=reg128#14,<c01=reg128#1%top,#12
# asm 2: vshll.u32 >r2=q13,<c01=d1,#12
vshll.u32 q13,d1,#12

# qhasm:   		d01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>d01=reg128#12%bot->d01=reg128#12%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>d01=d22->d01=d23},[<input_2=r1]!
vld1.8 {d22-d23},[r1]!

# qhasm: r2[0,1] += x01[0] unsigned* z12[2];   r2[2,3] += x01[1] unsigned* z12[3]
# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%bot,<z12=reg128#3%top
# asm 2: vmlal.u32 <r2=q13,<x01=d16,<z12=d5
vmlal.u32 q13,d16,d5

# qhasm: r2[0,1] += x01[2] unsigned* z12[0];   r2[2,3] += x01[3] unsigned* z12[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%top,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r2=q13,<x01=d17,<z12=d4
vmlal.u32 q13,d17,d4

# qhasm: r2[0,1] += x23[0] unsigned* z0[0];   r2[2,3] += x23[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r2=q13,<x23=d18,<z0=d6
vmlal.u32 q13,d18,d6

# qhasm: r2[0,1] += x23[2] unsigned*  5z34[2]; r2[2,3] += x23[3] unsigned*  5z34[3]
# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%top,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r2=q13,<x23=d19,<5z34=d11
vmlal.u32 q13,d19,d11

# qhasm: r2[0,1] +=  x4[0] unsigned* 5z34[0]; r2[2,3] +=  x4[1] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x4=reg128#11%bot,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r2=q13,<x4=d20,<5z34=d10
vmlal.u32 q13,d20,d10

# qhasm:   r0 = r0[0,1]c01[1]r0[2]
# asm 1: vext.32 <r0=reg128#8%top,<c01=reg128#1%bot,<r0=reg128#8%top,#1
# asm 2: vext.32 <r0=d15,<c01=d0,<r0=d15,#1
vext.32 d15,d0,d15,#1

# qhasm:   r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
# asm 1: vshll.u32 >r1=reg128#15,<c23=reg128#2%bot,#6
# asm 2: vshll.u32 >r1=q14,<c23=d2,#6
vshll.u32 q14,d2,#6

# qhasm: r1[0,1] += x01[0] unsigned* z12[0];   r1[2,3] += x01[1] unsigned* z12[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%bot,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r1=q14,<x01=d16,<z12=d4
vmlal.u32 q14,d16,d4

# qhasm: r1[0,1] += x01[2] unsigned* z0[0];   r1[2,3] += x01[3] unsigned* z0[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%top,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r1=q14,<x01=d17,<z0=d6
vmlal.u32 q14,d17,d6

# qhasm: r1[0,1] += x23[0] unsigned*  5z34[2]; r1[2,3] += x23[1] unsigned*  5z34[3]
# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%bot,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r1=q14,<x23=d18,<5z34=d11
vmlal.u32 q14,d18,d11

# qhasm: r1[0,1] += x23[2] unsigned* 5z34[0]; r1[2,3] += x23[3] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%top,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r1=q14,<x23=d19,<5z34=d10
vmlal.u32 q14,d19,d10

# qhasm: ptr = &5z12_stack
# asm 1: lea >ptr=int32#3,<5z12_stack=stack128#10
# asm 2: lea >ptr=r2,<5z12_stack=[sp,#144]
add r2,sp,#144

# qhasm: 5z12 aligned= mem128[ptr]
# asm 1: vld1.8 {>5z12=reg128#1%bot->5z12=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5z12=d0->5z12=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]

# qhasm: r1[0,1] +=  x4[0] unsigned* 5z12[2]; r1[2,3] +=  x4[1] unsigned* 5z12[3]
# asm 1: vmlal.u32 <r1=reg128#15,<x4=reg128#11%bot,<5z12=reg128#1%top
# asm 2: vmlal.u32 <r1=q14,<x4=d20,<5z12=d1
vmlal.u32 q14,d20,d1

# qhasm:   		d23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>d23=reg128#2%bot->d23=reg128#2%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>d23=d2->d23=d3},[<input_2=r1]!
vld1.8 {d2-d3},[r1]!

# qhasm:   		input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#2,#32
# asm 2: add >input_2=r1,<input_2=r1,#32
add r1,r1,#32

# qhasm: r0[0,1] +=  x4[0] unsigned* 5z12[0]; r0[2,3] +=  x4[1] unsigned* 5z12[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x4=reg128#11%bot,<5z12=reg128#1%bot
# asm 2: vmlal.u32 <r0=q7,<x4=d20,<5z12=d0
vmlal.u32 q7,d20,d0

# qhasm: r0[0,1] += x23[0] unsigned* 5z34[0]; r0[2,3] += x23[1] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%bot,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r0=q7,<x23=d18,<5z34=d10
vmlal.u32 q7,d18,d10

# qhasm:   		d01 d23 = d01[0] d23[0] d01[1] d23[1]
# asm 1: vswp <d23=reg128#2%bot,<d01=reg128#12%top
# asm 2: vswp <d23=d2,<d01=d23
vswp d2,d23

# qhasm: r0[0,1] += x23[2] unsigned* 5z12[2]; r0[2,3] += x23[3] unsigned* 5z12[3]
# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%top,<5z12=reg128#1%top
# asm 2: vmlal.u32 <r0=q7,<x23=d19,<5z12=d1
vmlal.u32 q7,d19,d1

# qhasm: r0[0,1] += x01[0] unsigned* z0[0];   r0[2,3] += x01[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r0=q7,<x01=d16,<z0=d6
vmlal.u32 q7,d16,d6

# qhasm:   		new mid

# qhasm:   		2x v4 = d23 unsigned>> 40
# asm 1: vshr.u64 >v4=reg128#4,<d23=reg128#2,#40
# asm 2: vshr.u64 >v4=q3,<d23=q1,#40
vshr.u64 q3,q1,#40
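
# Commentary: the second block pair (d01/d23) is split into 26-bit limbs
# directly: the >> 40 above isolates bits 104..127 of each block (limb 4),
# and the vshrn/vext steps below peel off limbs 1..3 at bit offsets 26,
# 52 and 78 before masking with 0x03ffffff.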

# qhasm:   		mid = d01[1]d23[0] mid[2,3]
# asm 1: vext.32 <mid=reg128#1%bot,<d01=reg128#12%bot,<d23=reg128#2%bot,#1
# asm 2: vext.32 <mid=d0,<d01=d22,<d23=d2,#1
vext.32 d0,d22,d2,#1

# qhasm:   		new v23

# qhasm:   		v23[2] = d23[0,1] unsigned>> 14; v23[3] = d23[2,3] unsigned>> 14
# asm 1: vshrn.u64 <v23=reg128#10%top,<d23=reg128#2,#14
# asm 2: vshrn.u64 <v23=d19,<d23=q1,#14
vshrn.u64 d19,q1,#14

# qhasm:   		mid = mid[0,1] d01[3]d23[2]
# asm 1: vext.32 <mid=reg128#1%top,<d01=reg128#12%top,<d23=reg128#2%top,#1
# asm 2: vext.32 <mid=d1,<d01=d23,<d23=d3,#1
vext.32 d1,d23,d3,#1

# qhasm:   		new v01

# qhasm:   		v01[2] = d01[0,1] unsigned>> 26; v01[3] = d01[2,3] unsigned>> 26
# asm 1: vshrn.u64 <v01=reg128#11%top,<d01=reg128#12,#26
# asm 2: vshrn.u64 <v01=d21,<d01=q11,#26
vshrn.u64 d21,q11,#26

# qhasm:   		v01 = d01[1]d01[0] v01[2,3]
# asm 1: vext.32 <v01=reg128#11%bot,<d01=reg128#12%bot,<d01=reg128#12%bot,#1
# asm 2: vext.32 <v01=d20,<d01=d22,<d01=d22,#1
vext.32 d20,d22,d22,#1

# qhasm: r0[0,1] += x01[2] unsigned*  5z34[2]; r0[2,3] += x01[3] unsigned*  5z34[3]
# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%top,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r0=q7,<x01=d17,<5z34=d11
vmlal.u32 q7,d17,d11

# qhasm:   		v01 = v01[1]d01[2] v01[2,3]
# asm 1: vext.32 <v01=reg128#11%bot,<v01=reg128#11%bot,<d01=reg128#12%top,#1
# asm 2: vext.32 <v01=d20,<v01=d20,<d01=d23,#1
vext.32 d20,d20,d23,#1

# qhasm:   		v23[0] = mid[0,1] unsigned>> 20; v23[1] = mid[2,3] unsigned>> 20
# asm 1: vshrn.u64 <v23=reg128#10%bot,<mid=reg128#1,#20
# asm 2: vshrn.u64 <v23=d18,<mid=q0,#20
vshrn.u64 d18,q0,#20

# qhasm:   		v4 = v4[0]v4[2]v4[1]v4[3]
# asm 1: vtrn.32 <v4=reg128#4%bot,<v4=reg128#4%top
# asm 2: vtrn.32 <v4=d6,<v4=d7
vtrn.32 d6,d7

# qhasm:   		4x v01 &= 0x03ffffff
# asm 1: vand.i32 <v01=reg128#11,#0x03ffffff
# asm 2: vand.i32 <v01=q10,#0x03ffffff
vand.i32 q10,#0x03ffffff

# qhasm: ptr = &y34_stack
# asm 1: lea >ptr=int32#3,<y34_stack=stack128#4
# asm 2: lea >ptr=r2,<y34_stack=[sp,#48]
add r2,sp,#48

# qhasm: y34 aligned= mem128[ptr]
# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y34=d4->y34=d5},[<ptr=r2,: 128]
vld1.8 {d4-d5},[r2,: 128]

# qhasm:   		4x v23 &= 0x03ffffff
# asm 1: vand.i32 <v23=reg128#10,#0x03ffffff
# asm 2: vand.i32 <v23=q9,#0x03ffffff
vand.i32 q9,#0x03ffffff

# qhasm: ptr = &y12_stack
# asm 1: lea >ptr=int32#3,<y12_stack=stack128#3
# asm 2: lea >ptr=r2,<y12_stack=[sp,#32]
add r2,sp,#32

# qhasm: y12 aligned= mem128[ptr]
# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y12=d2->y12=d3},[<ptr=r2,: 128]
vld1.8 {d2-d3},[r2,: 128]

# qhasm:   		4x v4 |= 0x01000000
# asm 1: vorr.i32 <v4=reg128#4,#0x01000000
# asm 2: vorr.i32 <v4=q3,#0x01000000
vorr.i32 q3,#0x01000000
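
# Commentary: 0x01000000 = 2^24 sets the Poly1305 padding bit for a full
# 16-byte block: the pad is 2^128, which is 2^24 at limb 4's weight of
# 2^104.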

# qhasm: ptr = &y0_stack
# asm 1: lea >ptr=int32#3,<y0_stack=stack128#2
# asm 2: lea >ptr=r2,<y0_stack=[sp,#16]
add r2,sp,#16

# qhasm: y0 aligned= mem128[ptr]
# asm 1: vld1.8 {>y0=reg128#1%bot->y0=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y0=d0->y0=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]

# qhasm: r4[0,1] += v01[0] unsigned*  y34[2];  r4[2,3] += v01[1] unsigned*  y34[3]
# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%bot,<y34=reg128#3%top
# asm 2: vmlal.u32 <r4=q15,<v01=d20,<y34=d5
vmlal.u32 q15,d20,d5

# qhasm: r4[0,1] += v01[2] unsigned* y34[0];  r4[2,3] += v01[3] unsigned* y34[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%top,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r4=q15,<v01=d21,<y34=d4
vmlal.u32 q15,d21,d4

# qhasm: r4[0,1] += v23[0] unsigned* y12[2];  r4[2,3] += v23[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r4=q15,<v23=d18,<y12=d3
vmlal.u32 q15,d18,d3

# qhasm: r4[0,1] += v23[2] unsigned* y12[0];  r4[2,3] += v23[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r4=q15,<v23=d19,<y12=d2
vmlal.u32 q15,d19,d2

# qhasm: r4[0,1] +=  v4[0] unsigned* y0[0];  r4[2,3] +=  v4[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v4=reg128#4%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r4=q15,<v4=d6,<y0=d0
vmlal.u32 q15,d6,d0

# qhasm: ptr = &5y34_stack
# asm 1: lea >ptr=int32#3,<5y34_stack=stack128#6
# asm 2: lea >ptr=r2,<5y34_stack=[sp,#80]
add r2,sp,#80

# qhasm: 5y34 aligned= mem128[ptr]
# asm 1: vld1.8 {>5y34=reg128#13%bot->5y34=reg128#13%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5y34=d24->5y34=d25},[<ptr=r2,: 128]
vld1.8 {d24-d25},[r2,: 128]

# qhasm: r3[0,1] += v01[0] unsigned* y34[0];   r3[2,3] += v01[1] unsigned* y34[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%bot,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r3=q4,<v01=d20,<y34=d4
vmlal.u32 q4,d20,d4

# qhasm: r3[0,1] += v01[2] unsigned* y12[2];   r3[2,3] += v01[3] unsigned* y12[3]
# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%top,<y12=reg128#2%top
# asm 2: vmlal.u32 <r3=q4,<v01=d21,<y12=d3
vmlal.u32 q4,d21,d3

# qhasm: r3[0,1] += v23[0] unsigned* y12[0];   r3[2,3] += v23[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r3=q4,<v23=d18,<y12=d2
vmlal.u32 q4,d18,d2

# qhasm: r3[0,1] += v23[2] unsigned* y0[0];   r3[2,3] += v23[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r3=q4,<v23=d19,<y0=d0
vmlal.u32 q4,d19,d0

# qhasm: r3[0,1] +=  v4[0] unsigned*  5y34[2]; r3[2,3] +=  v4[1] unsigned*  5y34[3]
# asm 1: vmlal.u32 <r3=reg128#5,<v4=reg128#4%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r3=q4,<v4=d6,<5y34=d25
vmlal.u32 q4,d6,d25

# qhasm: ptr = &5y12_stack
# asm 1: lea >ptr=int32#3,<5y12_stack=stack128#5
# asm 2: lea >ptr=r2,<5y12_stack=[sp,#64]
add r2,sp,#64

# qhasm: 5y12 aligned= mem128[ptr]
# asm 1: vld1.8 {>5y12=reg128#12%bot->5y12=reg128#12%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5y12=d22->5y12=d23},[<ptr=r2,: 128]
vld1.8 {d22-d23},[r2,: 128]

# qhasm: r0[0,1] +=  v4[0] unsigned* 5y12[0]; r0[2,3] +=  v4[1] unsigned* 5y12[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v4=reg128#4%bot,<5y12=reg128#12%bot
# asm 2: vmlal.u32 <r0=q7,<v4=d6,<5y12=d22
vmlal.u32 q7,d6,d22

# qhasm: r0[0,1] += v23[0] unsigned* 5y34[0]; r0[2,3] += v23[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r0=q7,<v23=d18,<5y34=d24
vmlal.u32 q7,d18,d24

# qhasm: r0[0,1] += v23[2] unsigned* 5y12[2]; r0[2,3] += v23[3] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%top,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r0=q7,<v23=d19,<5y12=d23
vmlal.u32 q7,d19,d23

# qhasm: r0[0,1] += v01[0] unsigned* y0[0];   r0[2,3] += v01[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r0=q7,<v01=d20,<y0=d0
vmlal.u32 q7,d20,d0

# qhasm: r0[0,1] += v01[2] unsigned*  5y34[2]; r0[2,3] += v01[3] unsigned*  5y34[3]
# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r0=q7,<v01=d21,<5y34=d25
vmlal.u32 q7,d21,d25

# qhasm: r1[0,1] += v01[0] unsigned* y12[0];   r1[2,3] += v01[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r1=q14,<v01=d20,<y12=d2
vmlal.u32 q14,d20,d2

# qhasm: r1[0,1] += v01[2] unsigned* y0[0];   r1[2,3] += v01[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r1=q14,<v01=d21,<y0=d0
vmlal.u32 q14,d21,d0

# qhasm: r1[0,1] += v23[0] unsigned*  5y34[2]; r1[2,3] += v23[1] unsigned*  5y34[3]
# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r1=q14,<v23=d18,<5y34=d25
vmlal.u32 q14,d18,d25

# qhasm: r1[0,1] += v23[2] unsigned* 5y34[0]; r1[2,3] += v23[3] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%top,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r1=q14,<v23=d19,<5y34=d24
vmlal.u32 q14,d19,d24

# qhasm: r1[0,1] +=  v4[0] unsigned* 5y12[2]; r1[2,3] +=  v4[1] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r1=reg128#15,<v4=reg128#4%bot,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r1=q14,<v4=d6,<5y12=d23
vmlal.u32 q14,d6,d23

# qhasm: r2[0,1] += v01[0] unsigned* y12[2];   r2[2,3] += v01[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r2=q13,<v01=d20,<y12=d3
vmlal.u32 q13,d20,d3

# qhasm: r2[0,1] += v01[2] unsigned* y12[0];   r2[2,3] += v01[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r2=q13,<v01=d21,<y12=d2
vmlal.u32 q13,d21,d2

# qhasm: r2[0,1] += v23[0] unsigned* y0[0];   r2[2,3] += v23[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r2=q13,<v23=d18,<y0=d0
vmlal.u32 q13,d18,d0

# qhasm: r2[0,1] += v23[2] unsigned*  5y34[2]; r2[2,3] += v23[3] unsigned*  5y34[3]
# asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r2=q13,<v23=d19,<5y34=d25
vmlal.u32 q13,d19,d25

# qhasm: r2[0,1] +=  v4[0] unsigned* 5y34[0]; r2[2,3] +=  v4[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v4=reg128#4%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r2=q13,<v4=d6,<5y34=d24
vmlal.u32 q13,d6,d24

# qhasm: 				ptr = &two24
# asm 1: lea >ptr=int32#3,<two24=stack128#1
# asm 2: lea >ptr=r2,<two24=[sp,#0]
add r2,sp,#0
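
# Commentary: the carry chain below normalises the 64-bit lane sums back
# to 26-bit limbs: shift right 26, mask, add the carry into the next
# limb. Meanwhile two24 is reloaded to re-seed r4 and r0 for the next
# iteration; starting r4 at 2^24 per lane appears to pre-add the pad bit
# for the next pair of blocks.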

# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#4,<r0=reg128#8,#26
# asm 2: vshr.u64 >t1=q3,<r0=q7,#26
vshr.u64 q3,q7,#26

# qhasm:   				len -= 64
# asm 1: sub >len=int32#4,<len=int32#4,#64
# asm 2: sub >len=r3,<len=r3,#64
sub r3,r3,#64

# qhasm:    r0 &= mask
# asm 1: vand >r0=reg128#6,<r0=reg128#8,<mask=reg128#7
# asm 2: vand >r0=q5,<r0=q7,<mask=q6
vand q5,q7,q6

# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#15,<t1=reg128#4
# asm 2: vadd.i64 >r1=q3,<r1=q14,<t1=q3
vadd.i64 q3,q14,q3

# qhasm: 		2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#5,#26
# asm 2: vshr.u64 >t4=q7,<r3=q4,#26
vshr.u64 q7,q4,#26

# qhasm: 		   r3 &= mask
# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
# asm 2: vand >r3=q4,<r3=q4,<mask=q6
vand q4,q4,q6

# qhasm: 		2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#8,<r4=reg128#16,<t4=reg128#8
# asm 2: vadd.i64 >x4=q7,<r4=q15,<t4=q7
vadd.i64 q7,q15,q7

# qhasm: 				r4 aligned= mem128[ptr]
# asm 1: vld1.8 {>r4=reg128#16%bot->r4=reg128#16%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>r4=d30->r4=d31},[<ptr=r2,: 128]
vld1.8 {d30-d31},[r2,: 128]

# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#9,<r1=reg128#4,#26
# asm 2: vshr.u64 >t2=q8,<r1=q3,#26
vshr.u64 q8,q3,#26

# qhasm:    r1 &= mask
# asm 1: vand >r1=reg128#4,<r1=reg128#4,<mask=reg128#7
# asm 2: vand >r1=q3,<r1=q3,<mask=q6
vand q3,q3,q6

# qhasm: 		2x t0 = x4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#10,<x4=reg128#8,#26
# asm 2: vshr.u64 >t0=q9,<x4=q7,#26
vshr.u64 q9,q7,#26

# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#9,<r2=reg128#14,<t2=reg128#9
# asm 2: vadd.i64 >r2=q8,<r2=q13,<t2=q8
vadd.i64 q8,q13,q8

# qhasm: 		   x4 &= mask
# asm 1: vand >x4=reg128#11,<x4=reg128#8,<mask=reg128#7
# asm 2: vand >x4=q10,<x4=q7,<mask=q6
vand q10,q7,q6

# qhasm: 		2x x01 = r0 + t0
# asm 1: vadd.i64 >x01=reg128#6,<r0=reg128#6,<t0=reg128#10
# asm 2: vadd.i64 >x01=q5,<r0=q5,<t0=q9
vadd.i64 q5,q5,q9

# qhasm: 				r0 aligned= mem128[ptr]
# asm 1: vld1.8 {>r0=reg128#8%bot->r0=reg128#8%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>r0=d14->r0=d15},[<ptr=r2,: 128]
vld1.8 {d14-d15},[r2,: 128]

# qhasm: 				ptr = &z34_stack
# asm 1: lea >ptr=int32#3,<z34_stack=stack128#9
# asm 2: lea >ptr=r2,<z34_stack=[sp,#128]
add r2,sp,#128

# qhasm: 		2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#10,<t0=reg128#10,#2
# asm 2: vshl.i64 >t0=q9,<t0=q9,#2
vshl.i64 q9,q9,#2
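
# Commentary: the carry t0 out of limb 4 was added into x01 once above
# and, after this << 2, is added again below, i.e. x01 += 5*t0: the
# wrap-around multiplication by 5 from 2^130 == 5 (mod p).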

# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#9,#26
# asm 2: vshr.u64 >t3=q13,<r2=q8,#26
vshr.u64 q13,q8,#26

# qhasm: 		2x x01 += t0
# asm 1: vadd.i64 >x01=reg128#15,<x01=reg128#6,<t0=reg128#10
# asm 2: vadd.i64 >x01=q14,<x01=q5,<t0=q9
vadd.i64 q14,q5,q9

# qhasm: 				z34 aligned= mem128[ptr]
# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z34=d10->z34=d11},[<ptr=r2,: 128]
vld1.8 {d10-d11},[r2,: 128]

# qhasm:    x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#9,<mask=reg128#7
# asm 2: vand >x23=q9,<r2=q8,<mask=q6
vand q9,q8,q6

# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#5,<r3=reg128#5,<t3=reg128#14
# asm 2: vadd.i64 >r3=q4,<r3=q4,<t3=q13
vadd.i64 q4,q4,q13

# qhasm: 								input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#2,#32
# asm 2: add >input_2=r1,<input_2=r1,#32
add r1,r1,#32

# qhasm: 		2x t1 = x01 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#14,<x01=reg128#15,#26
# asm 2: vshr.u64 >t1=q13,<x01=q14,#26
vshr.u64 q13,q14,#26

# qhasm: 						x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19

# qhasm: 		   x01 = x01 & mask
# asm 1: vand >x01=reg128#9,<x01=reg128#15,<mask=reg128#7
# asm 2: vand >x01=q8,<x01=q14,<mask=q6
vand q8,q14,q6

# qhasm: 		2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#4,<t1=reg128#14
# asm 2: vadd.i64 >r1=q3,<r1=q3,<t1=q13
vadd.i64 q3,q3,q13

# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#14,<r3=reg128#5,#26
# asm 2: vshr.u64 >t4=q13,<r3=q4,#26
vshr.u64 q13,q4,#26

# qhasm: 						x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
# asm 2: vtrn.32 <x01=d16,<x01=d17
vtrn.32 d16,d17

# qhasm:    r3 &= mask
# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
# asm 2: vand >r3=q4,<r3=q4,<mask=q6
vand q4,q4,q6

# qhasm: 						r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
# asm 2: vtrn.32 <r1=d6,<r1=d7
vtrn.32 d6,d7

# qhasm: 2x x4 += t4
# asm 1: vadd.i64 >x4=reg128#11,<x4=reg128#11,<t4=reg128#14
# asm 2: vadd.i64 >x4=q10,<x4=q10,<t4=q13
vadd.i64 q10,q10,q13

# qhasm: 						r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#5%bot,<r3=reg128#5%top
# asm 2: vtrn.32 <r3=d8,<r3=d9
vtrn.32 d8,d9

# qhasm: 						x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
vext.32 d17,d6,d6,#0

# qhasm: 						x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#5%bot,<r3=reg128#5%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d8,<r3=d8,#0
vext.32 d19,d8,d8,#0

# qhasm: 						x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
# asm 2: vtrn.32 <x4=d20,<x4=d21
vtrn.32 d20,d21

# qhasm:                   unsigned>? len - 64
# asm 1: cmp <len=int32#4,#64
# asm 2: cmp <len=r3,#64
cmp r3,#64

# qhasm: goto mainloop2 if unsigned>
bhi ._mainloop2

# qhasm: input_2 -= 32
# asm 1: sub >input_2=int32#3,<input_2=int32#2,#32
# asm 2: sub >input_2=r2,<input_2=r1,#32
sub r2,r1,#32

# qhasm: below64bytes:
._below64bytes:

# qhasm:              unsigned>? len - 32
# asm 1: cmp <len=int32#4,#32
# asm 2: cmp <len=r3,#32
cmp r3,#32

# qhasm: goto end if !unsigned>
bls ._end
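
# Commentary: ._mainloop is the tail loop: 32 message bytes (one pair of
# blocks) per iteration, multiplying by the same y key limbs each round,
# until no more than 32 bytes remain unprocessed.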

# qhasm: mainloop:
._mainloop:

# qhasm:   new r0

# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#2,<two24=stack128#1
# asm 2: lea >ptr=r1,<two24=[sp,#0]
add r1,sp,#0

# qhasm: r4 aligned= mem128[ptr]
# asm 1: vld1.8 {>r4=reg128#5%bot->r4=reg128#5%top},[<ptr=int32#2,: 128]
# asm 2: vld1.8 {>r4=d8->r4=d9},[<ptr=r1,: 128]
vld1.8 {d8-d9},[r1,: 128]

# qhasm: u4 aligned= mem128[ptr]
# asm 1: vld1.8 {>u4=reg128#6%bot->u4=reg128#6%top},[<ptr=int32#2,: 128]
# asm 2: vld1.8 {>u4=d10->u4=d11},[<ptr=r1,: 128]
vld1.8 {d10-d11},[r1,: 128]

# qhasm:   c01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c01=reg128#8%bot->c01=reg128#8%top},[<input_2=int32#3]!
# asm 2: vld1.8 {>c01=d14->c01=d15},[<input_2=r2]!
vld1.8 {d14-d15},[r2]!

# qhasm: r4[0,1] += x01[0] unsigned*  y34[2];  r4[2,3] += x01[1] unsigned*  y34[3]
# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%bot,<y34=reg128#3%top
# asm 2: vmlal.u32 <r4=q4,<x01=d16,<y34=d5
vmlal.u32 q4,d16,d5

# qhasm:   c23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_2=int32#3]!
# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_2=r2]!
vld1.8 {d26-d27},[r2]!

# qhasm: r4[0,1] += x01[2] unsigned* y34[0];  r4[2,3] += x01[3] unsigned* y34[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%top,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r4=q4,<x01=d17,<y34=d4
vmlal.u32 q4,d17,d4

# qhasm:   r0 = u4[1]c01[0]r0[2,3]
# asm 1: vext.32 <r0=reg128#4%bot,<u4=reg128#6%bot,<c01=reg128#8%bot,#1
# asm 2: vext.32 <r0=d6,<u4=d10,<c01=d14,#1
vext.32 d6,d10,d14,#1

# qhasm: r4[0,1] += x23[0] unsigned* y12[2];  r4[2,3] += x23[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r4=q4,<x23=d18,<y12=d3
vmlal.u32 q4,d18,d3

# qhasm:   r0 = r0[0,1]u4[1]c23[0]
# asm 1: vext.32 <r0=reg128#4%top,<u4=reg128#6%bot,<c23=reg128#14%bot,#1
# asm 2: vext.32 <r0=d7,<u4=d10,<c23=d26,#1
vext.32 d7,d10,d26,#1

# qhasm: r4[0,1] += x23[2] unsigned* y12[0];  r4[2,3] += x23[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r4=q4,<x23=d19,<y12=d2
vmlal.u32 q4,d19,d2

# qhasm:   r0 = r0[1]r0[0]r0[3]r0[2]
# asm 1: vrev64.i32 >r0=reg128#4,<r0=reg128#4
# asm 2: vrev64.i32 >r0=q3,<r0=q3
vrev64.i32 q3,q3

# qhasm: r4[0,1] +=  x4[0] unsigned* y0[0];  r4[2,3] +=  x4[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x4=reg128#11%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r4=q4,<x4=d20,<y0=d0
vmlal.u32 q4,d20,d0

# qhasm: r0[0,1] +=  x4[0] unsigned* 5y12[0]; r0[2,3] +=  x4[1] unsigned* 5y12[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x4=reg128#11%bot,<5y12=reg128#12%bot
# asm 2: vmlal.u32 <r0=q3,<x4=d20,<5y12=d22
vmlal.u32 q3,d20,d22

# qhasm: r0[0,1] += x23[0] unsigned* 5y34[0]; r0[2,3] += x23[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r0=q3,<x23=d18,<5y34=d24
vmlal.u32 q3,d18,d24

# qhasm: r0[0,1] += x23[2] unsigned* 5y12[2]; r0[2,3] += x23[3] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%top,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r0=q3,<x23=d19,<5y12=d23
vmlal.u32 q3,d19,d23

# qhasm:   c01 c23 = c01[0]c23[0]c01[2]c23[2]c01[1]c23[1]c01[3]c23[3]
# asm 1: vtrn.32 <c01=reg128#8,<c23=reg128#14
# asm 2: vtrn.32 <c01=q7,<c23=q13
vtrn.32 q7,q13

# qhasm: r0[0,1] += x01[0] unsigned* y0[0];   r0[2,3] += x01[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r0=q3,<x01=d16,<y0=d0
vmlal.u32 q3,d16,d0

# qhasm:   r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
# asm 1: vshll.u32 >r3=reg128#6,<c23=reg128#14%top,#18
# asm 2: vshll.u32 >r3=q5,<c23=d27,#18
vshll.u32 q5,d27,#18

# qhasm: r0[0,1] += x01[2] unsigned*  5y34[2]; r0[2,3] += x01[3] unsigned*  5y34[3]
# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r0=q3,<x01=d17,<5y34=d25
vmlal.u32 q3,d17,d25

# qhasm: r3[0,1] += x01[0] unsigned* y34[0];   r3[2,3] += x01[1] unsigned* y34[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%bot,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r3=q5,<x01=d16,<y34=d4
vmlal.u32 q5,d16,d4

# qhasm: r3[0,1] += x01[2] unsigned* y12[2];   r3[2,3] += x01[3] unsigned* y12[3]
# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%top,<y12=reg128#2%top
# asm 2: vmlal.u32 <r3=q5,<x01=d17,<y12=d3
vmlal.u32 q5,d17,d3

# qhasm: r3[0,1] += x23[0] unsigned* y12[0];   r3[2,3] += x23[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r3=q5,<x23=d18,<y12=d2
vmlal.u32 q5,d18,d2

# qhasm: r3[0,1] += x23[2] unsigned* y0[0];   r3[2,3] += x23[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r3=q5,<x23=d19,<y0=d0
vmlal.u32 q5,d19,d0

# qhasm:   r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
# asm 1: vshll.u32 >r1=reg128#14,<c23=reg128#14%bot,#6
# asm 2: vshll.u32 >r1=q13,<c23=d26,#6
vshll.u32 q13,d26,#6

# qhasm: r3[0,1] +=  x4[0] unsigned*  5y34[2]; r3[2,3] +=  x4[1] unsigned*  5y34[3]
# asm 1: vmlal.u32 <r3=reg128#6,<x4=reg128#11%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r3=q5,<x4=d20,<5y34=d25
vmlal.u32 q5,d20,d25

# qhasm: r1[0,1] += x01[0] unsigned* y12[0];   r1[2,3] += x01[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r1=q13,<x01=d16,<y12=d2
vmlal.u32 q13,d16,d2

# qhasm: r1[0,1] += x01[2] unsigned* y0[0];   r1[2,3] += x01[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r1=q13,<x01=d17,<y0=d0
vmlal.u32 q13,d17,d0

# qhasm: r1[0,1] += x23[0] unsigned*  5y34[2]; r1[2,3] += x23[1] unsigned*  5y34[3]
# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r1=q13,<x23=d18,<5y34=d25
vmlal.u32 q13,d18,d25

# qhasm: r1[0,1] += x23[2] unsigned* 5y34[0]; r1[2,3] += x23[3] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%top,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r1=q13,<x23=d19,<5y34=d24
vmlal.u32 q13,d19,d24

# qhasm:   r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
# asm 1: vshll.u32 >r2=reg128#8,<c01=reg128#8%top,#12
# asm 2: vshll.u32 >r2=q7,<c01=d15,#12
vshll.u32 q7,d15,#12

# qhasm: r1[0,1] +=  x4[0] unsigned* 5y12[2]; r1[2,3] +=  x4[1] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r1=reg128#14,<x4=reg128#11%bot,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r1=q13,<x4=d20,<5y12=d23
vmlal.u32 q13,d20,d23

# qhasm: r2[0,1] += x01[0] unsigned* y12[2];   r2[2,3] += x01[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r2=q7,<x01=d16,<y12=d3
vmlal.u32 q7,d16,d3

# qhasm: r2[0,1] += x01[2] unsigned* y12[0];   r2[2,3] += x01[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r2=q7,<x01=d17,<y12=d2
vmlal.u32 q7,d17,d2

# qhasm: r2[0,1] += x23[0] unsigned* y0[0];   r2[2,3] += x23[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r2=q7,<x23=d18,<y0=d0
vmlal.u32 q7,d18,d0

# qhasm: r2[0,1] += x23[2] unsigned*  5y34[2]; r2[2,3] += x23[3] unsigned*  5y34[3]
# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r2=q7,<x23=d19,<5y34=d25
vmlal.u32 q7,d19,d25

# qhasm: r2[0,1] +=  x4[0] unsigned* 5y34[0]; r2[2,3] +=  x4[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x4=reg128#11%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r2=q7,<x4=d20,<5y34=d24
vmlal.u32 q7,d20,d24

# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#9,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q8,<r0=q3,#26
vshr.u64 q8,q3,#26

# qhasm:    r0 &= mask
# asm 1: vand >r0=reg128#4,<r0=reg128#4,<mask=reg128#7
# asm 2: vand >r0=q3,<r0=q3,<mask=q6
vand q3,q3,q6

# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#9,<r1=reg128#14,<t1=reg128#9
# asm 2: vadd.i64 >r1=q8,<r1=q13,<t1=q8
vadd.i64 q8,q13,q8

# qhasm: 		2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#10,<r3=reg128#6,#26
# asm 2: vshr.u64 >t4=q9,<r3=q5,#26
vshr.u64 q9,q5,#26

# qhasm: 		   r3 &= mask
# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
# asm 2: vand >r3=q5,<r3=q5,<mask=q6
vand q5,q5,q6

# qhasm: 		2x r4 += t4
# asm 1: vadd.i64 >r4=reg128#5,<r4=reg128#5,<t4=reg128#10
# asm 2: vadd.i64 >r4=q4,<r4=q4,<t4=q9
vadd.i64 q4,q4,q9

# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#9,#26
# asm 2: vshr.u64 >t2=q9,<r1=q8,#26
vshr.u64 q9,q8,#26

# qhasm:    r1 &= mask
# asm 1: vand >r1=reg128#11,<r1=reg128#9,<mask=reg128#7
# asm 2: vand >r1=q10,<r1=q8,<mask=q6
vand q10,q8,q6

# qhasm: 		2x t0 = r4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#9,<r4=reg128#5,#26
# asm 2: vshr.u64 >t0=q8,<r4=q4,#26
vshr.u64 q8,q4,#26

# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#8,<r2=reg128#8,<t2=reg128#10
# asm 2: vadd.i64 >r2=q7,<r2=q7,<t2=q9
vadd.i64 q7,q7,q9

# qhasm: 		   r4 &= mask
# asm 1: vand >r4=reg128#5,<r4=reg128#5,<mask=reg128#7
# asm 2: vand >r4=q4,<r4=q4,<mask=q6
vand q4,q4,q6

# qhasm: 		2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
vadd.i64 q3,q3,q8

# qhasm: 		2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#9,<t0=reg128#9,#2
# asm 2: vshl.i64 >t0=q8,<t0=q8,#2
vshl.i64 q8,q8,#2
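
# Commentary: same carry-times-5 fold as in ._mainloop2: r0 absorbed t0
# once above and absorbs t0 << 2 below, for r0 += 5*t0 in total.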

# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#8,#26
# asm 2: vshr.u64 >t3=q13,<r2=q7,#26
vshr.u64 q13,q7,#26

# qhasm: 		2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
vadd.i64 q3,q3,q8

# qhasm:    x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#8,<mask=reg128#7
# asm 2: vand >x23=q9,<r2=q7,<mask=q6
vand q9,q7,q6

# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#6,<r3=reg128#6,<t3=reg128#14
# asm 2: vadd.i64 >r3=q5,<r3=q5,<t3=q13
vadd.i64 q5,q5,q13

# qhasm: 		2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#8,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q7,<r0=q3,#26
vshr.u64 q7,q3,#26

# qhasm: 		   x01 = r0 & mask
# asm 1: vand >x01=reg128#9,<r0=reg128#4,<mask=reg128#7
# asm 2: vand >x01=q8,<r0=q3,<mask=q6
vand q8,q3,q6

# qhasm: 		2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#11,<t1=reg128#8
# asm 2: vadd.i64 >r1=q3,<r1=q10,<t1=q7
vadd.i64 q3,q10,q7

# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#6,#26
# asm 2: vshr.u64 >t4=q7,<r3=q5,#26
vshr.u64 q7,q5,#26

# qhasm:    r3 &= mask
# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
# asm 2: vand >r3=q5,<r3=q5,<mask=q6
vand q5,q5,q6

# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#11,<r4=reg128#5,<t4=reg128#8
# asm 2: vadd.i64 >x4=q10,<r4=q4,<t4=q7
vadd.i64 q10,q4,q7

# qhasm:   len -= 32
# asm 1: sub >len=int32#4,<len=int32#4,#32
# asm 2: sub >len=r3,<len=r3,#32
sub r3,r3,#32

# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
# asm 2: vtrn.32 <x01=d16,<x01=d17
vtrn.32 d16,d17

# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19

# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
# asm 2: vtrn.32 <r1=d6,<r1=d7
vtrn.32 d6,d7

# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#6%bot,<r3=reg128#6%top
# asm 2: vtrn.32 <r3=d10,<r3=d11
vtrn.32 d10,d11

# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
# asm 2: vtrn.32 <x4=d20,<x4=d21
vtrn.32 d20,d21

# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
vext.32 d17,d6,d6,#0

# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#6%bot,<r3=reg128#6%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d10,<r3=d10,#0
vext.32 d19,d10,d10,#0

# qhasm: unsigned>? len - 32
# asm 1: cmp <len=int32#4,#32
# asm 2: cmp <len=r3,#32
cmp r3,#32

# qhasm: goto mainloop if unsigned>
bhi ._mainloop

# qhasm: end:
._end:

# qhasm: mem128[input_0] = x01;input_0+=16
# asm 1: vst1.8 {<x01=reg128#9%bot-<x01=reg128#9%top},[<input_0=int32#1]!
# asm 2: vst1.8 {<x01=d16-<x01=d17},[<input_0=r0]!
vst1.8 {d16-d17},[r0]!

# qhasm: mem128[input_0] = x23;input_0+=16
# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1]!
# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0]!
vst1.8 {d18-d19},[r0]!

# qhasm: mem64[input_0] = x4[0]
# asm 1: vst1.8 <x4=reg128#11%bot,[<input_0=int32#1]
# asm 2: vst1.8 <x4=d20,[<input_0=r0]
vst1.8 d20,[r0]

# qhasm: len = len
# asm 1: mov >len=int32#1,<len=int32#4
# asm 2: mov >len=r0,<len=r3
mov r0,r3
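
# Commentary: the leftover byte count (at most 32 here) is returned in r0
# so the caller can finish any remaining input; sp and q4-q7 are then
# restored before returning.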

# qhasm: qpopreturn len
mov sp,r12
vpop {q4,q5,q6,q7}
bx lr

# qhasm: int32 input_0

# qhasm: int32 input_1

# qhasm: int32 input_2

# qhasm: int32 input_3

# qhasm: stack32 input_4

# qhasm: stack32 input_5

# qhasm: stack32 input_6

# qhasm: stack32 input_7

# qhasm: int32 caller_r4

# qhasm: int32 caller_r5

# qhasm: int32 caller_r6

# qhasm: int32 caller_r7

# qhasm: int32 caller_r8

# qhasm: int32 caller_r9

# qhasm: int32 caller_r10

# qhasm: int32 caller_r11

# qhasm: int32 caller_r12

# qhasm: int32 caller_r14

# qhasm: reg128 caller_q4

# qhasm: reg128 caller_q5

# qhasm: reg128 caller_q6

# qhasm: reg128 caller_q7

# qhasm: reg128 r0

# qhasm: reg128 r1

# qhasm: reg128 r2

# qhasm: reg128 r3

# qhasm: reg128 r4

# qhasm: reg128 x01

# qhasm: reg128 x23

# qhasm: reg128 x4

# qhasm: reg128 y01

# qhasm: reg128 y23

# qhasm: reg128 y4

# qhasm: reg128 _5y01

# qhasm: reg128 _5y23

# qhasm: reg128 _5y4

# qhasm: reg128 c01

# qhasm: reg128 c23

# qhasm: reg128 c4

# qhasm: reg128 t0

# qhasm: reg128 t1

# qhasm: reg128 t2

# qhasm: reg128 t3

# qhasm: reg128 t4

# qhasm: reg128 mask

# qhasm: enter crypto_onetimeauth_poly1305_neon2_addmulmod
.align 2
.global openssl_poly1305_neon2_addmulmod
.type openssl_poly1305_neon2_addmulmod STT_FUNC
openssl_poly1305_neon2_addmulmod:
sub sp,sp,#0
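
# Commentary (on the function below): addmulmod computes the five-limb
# result of (x + c) * y mod 2^130 - 5 on radix-2^26 values, with x loaded
# from input_1 (r1), y from input_2 (r2) and c from input_3 (r3);
# _5y01/_5y23/_5y4 again cache 5*y for the wrapped partial products.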
1616
1617# qhasm: 				2x mask = 0xffffffff
1618# asm 1: vmov.i64 >mask=reg128#1,#0xffffffff
1619# asm 2: vmov.i64 >mask=q0,#0xffffffff
1620vmov.i64 q0,#0xffffffff
1621
1622# qhasm:   y01 aligned= mem128[input_2];input_2+=16
1623# asm 1: vld1.8 {>y01=reg128#2%bot->y01=reg128#2%top},[<input_2=int32#3,: 128]!
1624# asm 2: vld1.8 {>y01=d2->y01=d3},[<input_2=r2,: 128]!
1625vld1.8 {d2-d3},[r2,: 128]!
1626
1627# qhasm: 4x _5y01 = y01 << 2
1628# asm 1: vshl.i32 >_5y01=reg128#3,<y01=reg128#2,#2
1629# asm 2: vshl.i32 >_5y01=q2,<y01=q1,#2
1630vshl.i32 q2,q1,#2
1631
1632# qhasm:   y23 aligned= mem128[input_2];input_2+=16
1633# asm 1: vld1.8 {>y23=reg128#4%bot->y23=reg128#4%top},[<input_2=int32#3,: 128]!
1634# asm 2: vld1.8 {>y23=d6->y23=d7},[<input_2=r2,: 128]!
1635vld1.8 {d6-d7},[r2,: 128]!
1636
1637# qhasm: 4x _5y23 = y23 << 2
1638# asm 1: vshl.i32 >_5y23=reg128#9,<y23=reg128#4,#2
1639# asm 2: vshl.i32 >_5y23=q8,<y23=q3,#2
1640vshl.i32 q8,q3,#2
1641
1642# qhasm:   y4  aligned= mem64[input_2]y4[1]
1643# asm 1: vld1.8 {<y4=reg128#10%bot},[<input_2=int32#3,: 64]
1644# asm 2: vld1.8 {<y4=d18},[<input_2=r2,: 64]
1645vld1.8 {d18},[r2,: 64]
1646
1647# qhasm: 4x _5y4 = y4 << 2
1648# asm 1: vshl.i32 >_5y4=reg128#11,<y4=reg128#10,#2
1649# asm 2: vshl.i32 >_5y4=q10,<y4=q9,#2
1650vshl.i32 q10,q9,#2
1651
1652# qhasm:   x01 aligned= mem128[input_1];input_1+=16
1653# asm 1: vld1.8 {>x01=reg128#12%bot->x01=reg128#12%top},[<input_1=int32#2,: 128]!
1654# asm 2: vld1.8 {>x01=d22->x01=d23},[<input_1=r1,: 128]!
1655vld1.8 {d22-d23},[r1,: 128]!
1656
1657# qhasm: 4x _5y01 += y01
1658# asm 1: vadd.i32 >_5y01=reg128#3,<_5y01=reg128#3,<y01=reg128#2
1659# asm 2: vadd.i32 >_5y01=q2,<_5y01=q2,<y01=q1
1660vadd.i32 q2,q2,q1
1661
1662# qhasm:   x23 aligned= mem128[input_1];input_1+=16
1663# asm 1: vld1.8 {>x23=reg128#13%bot->x23=reg128#13%top},[<input_1=int32#2,: 128]!
1664# asm 2: vld1.8 {>x23=d24->x23=d25},[<input_1=r1,: 128]!
1665vld1.8 {d24-d25},[r1,: 128]!
1666
1667# qhasm: 4x _5y23 += y23
1668# asm 1: vadd.i32 >_5y23=reg128#9,<_5y23=reg128#9,<y23=reg128#4
1669# asm 2: vadd.i32 >_5y23=q8,<_5y23=q8,<y23=q3
1670vadd.i32 q8,q8,q3
1671
1672# qhasm: 4x _5y4 += y4
1673# asm 1: vadd.i32 >_5y4=reg128#11,<_5y4=reg128#11,<y4=reg128#10
1674# asm 2: vadd.i32 >_5y4=q10,<_5y4=q10,<y4=q9
1675vadd.i32 q10,q10,q9

# qhasm:   c01 aligned= mem128[input_3];input_3+=16
# asm 1: vld1.8 {>c01=reg128#14%bot->c01=reg128#14%top},[<input_3=int32#4,: 128]!
# asm 2: vld1.8 {>c01=d26->c01=d27},[<input_3=r3,: 128]!
vld1.8 {d26-d27},[r3,: 128]!

# qhasm: 4x x01 += c01
# asm 1: vadd.i32 >x01=reg128#12,<x01=reg128#12,<c01=reg128#14
# asm 2: vadd.i32 >x01=q11,<x01=q11,<c01=q13
vadd.i32 q11,q11,q13

# qhasm:   c23 aligned= mem128[input_3];input_3+=16
# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_3=int32#4,: 128]!
# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_3=r3,: 128]!
vld1.8 {d26-d27},[r3,: 128]!

# qhasm: 4x x23 += c23
# asm 1: vadd.i32 >x23=reg128#13,<x23=reg128#13,<c23=reg128#14
# asm 2: vadd.i32 >x23=q12,<x23=q12,<c23=q13
vadd.i32 q12,q12,q13

# qhasm:   x4  aligned= mem64[input_1]x4[1]
# asm 1: vld1.8 {<x4=reg128#14%bot},[<input_1=int32#2,: 64]
# asm 2: vld1.8 {<x4=d26},[<input_1=r1,: 64]
vld1.8 {d26},[r1,: 64]

# qhasm: 				2x mask unsigned>>=6
# asm 1: vshr.u64 >mask=reg128#1,<mask=reg128#1,#6
# asm 2: vshr.u64 >mask=q0,<mask=q0,#6
vshr.u64 q0,q0,#6
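
# note: mask = 0xffffffff >> 6 = 0x3ffffff = 2^26 - 1 in each 64-bit
# lane; it isolates one 26-bit limb during the carry chain below.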

# qhasm:   c4  aligned= mem64[input_3]c4[1]
# asm 1: vld1.8 {<c4=reg128#15%bot},[<input_3=int32#4,: 64]
# asm 2: vld1.8 {<c4=d28},[<input_3=r3,: 64]
vld1.8 {d28},[r3,: 64]

# qhasm: 4x x4 += c4
# asm 1: vadd.i32 >x4=reg128#14,<x4=reg128#14,<c4=reg128#15
# asm 2: vadd.i32 >x4=q13,<x4=q13,<c4=q14
vadd.i32 q13,q13,q14
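
# note: the "add" half is done: x01/x23/x4 hold the limbs of x + c
# (each limb sum stays far below 32 bits, so no carry is needed yet).
# Next comes the 5x5-limb schoolbook multiply by y, accumulating
# 26x26-bit products into 64-bit lanes via vmull/vmlal.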

# qhasm: r0[0,1]  = x01[0] unsigned* y01[0];   r0[2,3]  = x01[1] unsigned* y01[1]
# asm 1: vmull.u32 >r0=reg128#15,<x01=reg128#12%bot,<y01=reg128#2%bot
# asm 2: vmull.u32 >r0=q14,<x01=d22,<y01=d2
vmull.u32 q14,d22,d2

# qhasm: r0[0,1] += x01[2] unsigned*  _5y4[0]; r0[2,3] += x01[3] unsigned*  _5y4[1]
# asm 1: vmlal.u32 <r0=reg128#15,<x01=reg128#12%top,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r0=q14,<x01=d23,<_5y4=d20
vmlal.u32 q14,d23,d20

# qhasm: r0[0,1] += x23[0] unsigned* _5y23[2]; r0[2,3] += x23[1] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%bot,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r0=q14,<x23=d24,<_5y23=d17
vmlal.u32 q14,d24,d17

# qhasm: r0[0,1] += x23[2] unsigned* _5y23[0]; r0[2,3] += x23[3] unsigned* _5y23[1]
# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%top,<_5y23=reg128#9%bot
# asm 2: vmlal.u32 <r0=q14,<x23=d25,<_5y23=d16
vmlal.u32 q14,d25,d16

# qhasm: r0[0,1] +=  x4[0] unsigned* _5y01[2]; r0[2,3] +=  x4[1] unsigned* _5y01[3]
# asm 1: vmlal.u32 <r0=reg128#15,<x4=reg128#14%bot,<_5y01=reg128#3%top
# asm 2: vmlal.u32 <r0=q14,<x4=d26,<_5y01=d5
vmlal.u32 q14,d26,d5
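
# note: written out, the accumulation above is
#   r0 = x0*y0 + 5*(x1*y4 + x2*y3 + x3*y2 + x4*y1);
# r1..r4 below follow the same pattern shifted one diagonal up.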

# qhasm: r1[0,1]  = x01[0] unsigned* y01[2];   r1[2,3]  = x01[1] unsigned* y01[3]
# asm 1: vmull.u32 >r1=reg128#3,<x01=reg128#12%bot,<y01=reg128#2%top
# asm 2: vmull.u32 >r1=q2,<x01=d22,<y01=d3
vmull.u32 q2,d22,d3

# qhasm: r1[0,1] += x01[2] unsigned* y01[0];   r1[2,3] += x01[3] unsigned* y01[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x01=reg128#12%top,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r1=q2,<x01=d23,<y01=d2
vmlal.u32 q2,d23,d2

# qhasm: r1[0,1] += x23[0] unsigned*  _5y4[0]; r1[2,3] += x23[1] unsigned*  _5y4[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%bot,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r1=q2,<x23=d24,<_5y4=d20
vmlal.u32 q2,d24,d20

# qhasm: r1[0,1] += x23[2] unsigned* _5y23[2]; r1[2,3] += x23[3] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%top,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r1=q2,<x23=d25,<_5y23=d17
vmlal.u32 q2,d25,d17

# qhasm: r1[0,1] +=  x4[0] unsigned* _5y23[0]; r1[2,3] +=  x4[1] unsigned* _5y23[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x4=reg128#14%bot,<_5y23=reg128#9%bot
# asm 2: vmlal.u32 <r1=q2,<x4=d26,<_5y23=d16
vmlal.u32 q2,d26,d16

# qhasm: r2[0,1]  = x01[0] unsigned* y23[0];   r2[2,3]  = x01[1] unsigned* y23[1]
# asm 1: vmull.u32 >r2=reg128#16,<x01=reg128#12%bot,<y23=reg128#4%bot
# asm 2: vmull.u32 >r2=q15,<x01=d22,<y23=d6
vmull.u32 q15,d22,d6

# qhasm: r2[0,1] += x01[2] unsigned* y01[2];   r2[2,3] += x01[3] unsigned* y01[3]
# asm 1: vmlal.u32 <r2=reg128#16,<x01=reg128#12%top,<y01=reg128#2%top
# asm 2: vmlal.u32 <r2=q15,<x01=d23,<y01=d3
vmlal.u32 q15,d23,d3

# qhasm: r2[0,1] += x23[0] unsigned* y01[0];   r2[2,3] += x23[1] unsigned* y01[1]
# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%bot,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r2=q15,<x23=d24,<y01=d2
vmlal.u32 q15,d24,d2

# qhasm: r2[0,1] += x23[2] unsigned*  _5y4[0]; r2[2,3] += x23[3] unsigned*  _5y4[1]
# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%top,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r2=q15,<x23=d25,<_5y4=d20
vmlal.u32 q15,d25,d20

# qhasm: r2[0,1] +=  x4[0] unsigned* _5y23[2]; r2[2,3] +=  x4[1] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r2=reg128#16,<x4=reg128#14%bot,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r2=q15,<x4=d26,<_5y23=d17
vmlal.u32 q15,d26,d17

# qhasm: r3[0,1]  = x01[0] unsigned* y23[2];   r3[2,3]  = x01[1] unsigned* y23[3]
# asm 1: vmull.u32 >r3=reg128#9,<x01=reg128#12%bot,<y23=reg128#4%top
# asm 2: vmull.u32 >r3=q8,<x01=d22,<y23=d7
vmull.u32 q8,d22,d7

# qhasm: r3[0,1] += x01[2] unsigned* y23[0];   r3[2,3] += x01[3] unsigned* y23[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x01=reg128#12%top,<y23=reg128#4%bot
# asm 2: vmlal.u32 <r3=q8,<x01=d23,<y23=d6
vmlal.u32 q8,d23,d6

# qhasm: r3[0,1] += x23[0] unsigned* y01[2];   r3[2,3] += x23[1] unsigned* y01[3]
# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%bot,<y01=reg128#2%top
# asm 2: vmlal.u32 <r3=q8,<x23=d24,<y01=d3
vmlal.u32 q8,d24,d3

# qhasm: r3[0,1] += x23[2] unsigned* y01[0];   r3[2,3] += x23[3] unsigned* y01[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%top,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r3=q8,<x23=d25,<y01=d2
vmlal.u32 q8,d25,d2

# qhasm: r3[0,1] +=  x4[0] unsigned*  _5y4[0]; r3[2,3] +=  x4[1] unsigned*  _5y4[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x4=reg128#14%bot,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r3=q8,<x4=d26,<_5y4=d20
vmlal.u32 q8,d26,d20

# qhasm: r4[0,1]  = x01[0] unsigned*  y4[0];  r4[2,3]  = x01[1] unsigned*  y4[1]
# asm 1: vmull.u32 >r4=reg128#10,<x01=reg128#12%bot,<y4=reg128#10%bot
# asm 2: vmull.u32 >r4=q9,<x01=d22,<y4=d18
vmull.u32 q9,d22,d18

# qhasm: r4[0,1] += x01[2] unsigned* y23[2];  r4[2,3] += x01[3] unsigned* y23[3]
# asm 1: vmlal.u32 <r4=reg128#10,<x01=reg128#12%top,<y23=reg128#4%top
# asm 2: vmlal.u32 <r4=q9,<x01=d23,<y23=d7
vmlal.u32 q9,d23,d7

# qhasm: r4[0,1] += x23[0] unsigned* y23[0];  r4[2,3] += x23[1] unsigned* y23[1]
# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%bot,<y23=reg128#4%bot
# asm 2: vmlal.u32 <r4=q9,<x23=d24,<y23=d6
vmlal.u32 q9,d24,d6

# qhasm: r4[0,1] += x23[2] unsigned* y01[2];  r4[2,3] += x23[3] unsigned* y01[3]
# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%top,<y01=reg128#2%top
# asm 2: vmlal.u32 <r4=q9,<x23=d25,<y01=d3
vmlal.u32 q9,d25,d3

# qhasm: r4[0,1] +=  x4[0] unsigned* y01[0];  r4[2,3] +=  x4[1] unsigned* y01[1]
# asm 1: vmlal.u32 <r4=reg128#10,<x4=reg128#14%bot,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r4=q9,<x4=d26,<y01=d2
vmlal.u32 q9,d26,d2
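
# note: each r_k is sum_{i+j=k} x_i*y_j + 5*sum_{i+j=k+5} x_i*y_j,
# held as a pair of 64-bit accumulators (one per state). The totals
# stay far below 2^64, so a single carry pass suffices.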

# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#2,<r0=reg128#15,#26
# asm 2: vshr.u64 >t1=q1,<r0=q14,#26
vshr.u64 q1,q14,#26

# qhasm:    r0 &= mask
# asm 1: vand >r0=reg128#4,<r0=reg128#15,<mask=reg128#1
# asm 2: vand >r0=q3,<r0=q14,<mask=q0
vand q3,q14,q0

# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#3,<t1=reg128#2
# asm 2: vadd.i64 >r1=q1,<r1=q2,<t1=q1
vadd.i64 q1,q2,q1

# qhasm:                 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#3,<r3=reg128#9,#26
# asm 2: vshr.u64 >t4=q2,<r3=q8,#26
vshr.u64 q2,q8,#26

# qhasm:                    r3 &= mask
# asm 1: vand >r3=reg128#9,<r3=reg128#9,<mask=reg128#1
# asm 2: vand >r3=q8,<r3=q8,<mask=q0
vand q8,q8,q0

# qhasm:                 2x r4 += t4
# asm 1: vadd.i64 >r4=reg128#3,<r4=reg128#10,<t4=reg128#3
# asm 2: vadd.i64 >r4=q2,<r4=q9,<t4=q2
vadd.i64 q2,q9,q2

# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#2,#26
# asm 2: vshr.u64 >t2=q9,<r1=q1,#26
vshr.u64 q9,q1,#26

# qhasm:    r1 &= mask
# asm 1: vand >r1=reg128#2,<r1=reg128#2,<mask=reg128#1
# asm 2: vand >r1=q1,<r1=q1,<mask=q0
vand q1,q1,q0

# qhasm:                 2x t0 = r4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#11,<r4=reg128#3,#26
# asm 2: vshr.u64 >t0=q10,<r4=q2,#26
vshr.u64 q10,q2,#26

# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#10,<r2=reg128#16,<t2=reg128#10
# asm 2: vadd.i64 >r2=q9,<r2=q15,<t2=q9
vadd.i64 q9,q15,q9

# qhasm:                    r4 &= mask
# asm 1: vand >r4=reg128#3,<r4=reg128#3,<mask=reg128#1
# asm 2: vand >r4=q2,<r4=q2,<mask=q0
vand q2,q2,q0

# qhasm:                 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10
vadd.i64 q3,q3,q10

# qhasm:                 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#11,<t0=reg128#11,#2
# asm 2: vshl.i64 >t0=q10,<t0=q10,#2
vshl.i64 q10,q10,#2

# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#12,<r2=reg128#10,#26
# asm 2: vshr.u64 >t3=q11,<r2=q9,#26
vshr.u64 q11,q9,#26

# qhasm:                 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10
vadd.i64 q3,q3,q10
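
# note: the carry t0 out of limb 4 must re-enter limb 0 multiplied
# by 5; the two additions of t0 (once as-is, once shifted left by 2)
# compute r0 += 5*t0 without a multiply.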

# qhasm:    x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#10,<mask=reg128#1
# asm 2: vand >x23=q9,<r2=q9,<mask=q0
vand q9,q9,q0

# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#9,<r3=reg128#9,<t3=reg128#12
# asm 2: vadd.i64 >r3=q8,<r3=q8,<t3=q11
vadd.i64 q8,q8,q11

# qhasm:                 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#11,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q10,<r0=q3,#26
vshr.u64 q10,q3,#26

# qhasm: 				x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19

# qhasm:                    x01 = r0 & mask
# asm 1: vand >x01=reg128#4,<r0=reg128#4,<mask=reg128#1
# asm 2: vand >x01=q3,<r0=q3,<mask=q0
vand q3,q3,q0

# qhasm:                 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#2,<t1=reg128#11
# asm 2: vadd.i64 >r1=q1,<r1=q1,<t1=q10
vadd.i64 q1,q1,q10

# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#11,<r3=reg128#9,#26
# asm 2: vshr.u64 >t4=q10,<r3=q8,#26
vshr.u64 q10,q8,#26

# qhasm: 				x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#4%bot,<x01=reg128#4%top
# asm 2: vtrn.32 <x01=d6,<x01=d7
vtrn.32 d6,d7

# qhasm:    r3 &= mask
# asm 1: vand >r3=reg128#1,<r3=reg128#9,<mask=reg128#1
# asm 2: vand >r3=q0,<r3=q8,<mask=q0
vand q0,q8,q0

# qhasm: 				r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#2%bot,<r1=reg128#2%top
# asm 2: vtrn.32 <r1=d2,<r1=d3
vtrn.32 d2,d3

# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#3,<r4=reg128#3,<t4=reg128#11
# asm 2: vadd.i64 >x4=q2,<r4=q2,<t4=q10
vadd.i64 q2,q2,q10

# qhasm: 				r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#1%bot,<r3=reg128#1%top
# asm 2: vtrn.32 <r3=d0,<r3=d1
vtrn.32 d0,d1

# qhasm: 				x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#4%top,<r1=reg128#2%bot,<r1=reg128#2%bot,#0
# asm 2: vext.32 <x01=d7,<r1=d2,<r1=d2,#0
vext.32 d7,d2,d2,#0

# qhasm: 				x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#1%bot,<r3=reg128#1%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d0,<r3=d0,#0
vext.32 d19,d0,d0,#0

# qhasm: 				x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#3%bot,<x4=reg128#3%top
# asm 2: vtrn.32 <x4=d4,<x4=d5
vtrn.32 d4,d5
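
# note: the vtrn/vext shuffles repack the 64-bit carry lanes into the
# interleaved storage layout: each d-register again holds one 26-bit
# limb for both states side by side, ready for the aligned stores.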

# qhasm: mem128[input_0] aligned= x01;input_0+=16
# asm 1: vst1.8 {<x01=reg128#4%bot-<x01=reg128#4%top},[<input_0=int32#1,: 128]!
# asm 2: vst1.8 {<x01=d6-<x01=d7},[<input_0=r0,: 128]!
vst1.8 {d6-d7},[r0,: 128]!

# qhasm: mem128[input_0] aligned= x23;input_0+=16
# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1,: 128]!
# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0,: 128]!
vst1.8 {d18-d19},[r0,: 128]!

# qhasm: mem64[input_0] aligned= x4[0]
# asm 1: vst1.8 <x4=reg128#3%bot,[<input_0=int32#1,: 64]
# asm 2: vst1.8 <x4=d4,[<input_0=r0,: 64]
vst1.8 d4,[r0,: 64]

# qhasm: return
add sp,sp,#0
bx lr