
Searched refs:q4 (Results 1 – 25 of 488) sorted by relevance


/external/llvm/test/MC/ARM/
neon-bitwise-encoding.s
110 veor q4, q7, q3
111 veor.8 q4, q7, q3
112 veor.16 q4, q7, q3
113 veor.32 q4, q7, q3
114 veor.64 q4, q7, q3
116 veor.i8 q4, q7, q3
117 veor.i16 q4, q7, q3
118 veor.i32 q4, q7, q3
119 veor.i64 q4, q7, q3
121 veor.s8 q4, q7, q3
[all …]
thumb-neon-v8.s
5 vmaxnm.f32 q2, q4, q6
6 @ CHECK: vmaxnm.f32 q2, q4, q6 @ encoding: [0x08,0xff,0x5c,0x4f]
16 vcvta.s32.f32 q4, q6
17 @ CHECK: vcvta.s32.f32 q4, q6 @ encoding: [0xbb,0xff,0x4c,0x80]
18 vcvta.u32.f32 q4, q10
19 @ CHECK: vcvta.u32.f32 q4, q10 @ encoding: [0xbb,0xff,0xe4,0x80]
43 vcvtp.s32.f32 q4, q15
44 @ CHECK: vcvtp.s32.f32 q4, q15 @ encoding: [0xbb,0xff,0x6e,0x82]
50 vrintn.f32 q1, q4
51 @ CHECK: vrintn.f32 q1, q4 @ encoding: [0xba,0xff,0x48,0x24]
[all …]
neon-v8.s
5 vmaxnm.f32 q2, q4, q6
6 @ CHECK: vmaxnm.f32 q2, q4, q6 @ encoding: [0x5c,0x4f,0x08,0xf3]
16 vcvta.s32.f32 q4, q6
17 @ CHECK: vcvta.s32.f32 q4, q6 @ encoding: [0x4c,0x80,0xbb,0xf3]
18 vcvta.u32.f32 q4, q10
19 @ CHECK: vcvta.u32.f32 q4, q10 @ encoding: [0xe4,0x80,0xbb,0xf3]
43 vcvtp.s32.f32 q4, q15
44 @ CHECK: vcvtp.s32.f32 q4, q15 @ encoding: [0x6e,0x82,0xbb,0xf3]
50 vrintn.f32 q1, q4
51 @ CHECK: vrintn.f32 q1, q4 @ encoding: [0x48,0x24,0xba,0xf3]
[all …]
neon-shift-encoding.s
116 vsra.s64 q4, q5, #63
122 vsra.s8 q4, #7
134 @ CHECK: vsra.s64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf2]
139 @ CHECK: vsra.s8 q4, q4, #7 @ encoding: [0x58,0x81,0x89,0xf2]
152 vsra.u64 q4, q5, #63
158 vsra.u8 q4, #7
170 @ CHECK: vsra.u64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf3]
175 @ CHECK: vsra.u8 q4, q4, #7 @ encoding: [0x58,0x81,0x89,0xf3]
188 vsri.64 q4, q5, #63
194 vsri.8 q4, #7
[all …]
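The MC entries above exercise two assembler behaviors visible in the listed lines: every veor type suffix (.8, .i16, .s8, …) assembles to the same encoding, since bitwise EOR ignores element size, and two-operand shift forms such as vsra.s8 q4, #7 are aliases that the CHECK lines show expanded to vsra.s8 q4, q4, #7. A minimal C/NEON sketch of the same q-register operations (illustrative values, not taken from these test files; build with an ARM toolchain, e.g. -mfpu=neon):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        uint8x16_t q7 = vdupq_n_u8(0xAA), q3 = vdupq_n_u8(0x0F);
        uint8x16_t q4 = veorq_u8(q7, q3);       /* veor q4, q7, q3: one encoding for all suffixes */
        int8x16_t a = vreinterpretq_s8_u8(q4);
        a = vsraq_n_s8(a, a, 7);                /* vsra.s8 q4, q4, #7: shift right, accumulate */
        printf("lane 0 = %d\n", vgetq_lane_s8(a, 0));
        return 0;
    }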
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/ARM/
neon-bitwise-encoding.s
110 veor q4, q7, q3
111 veor.8 q4, q7, q3
112 veor.16 q4, q7, q3
113 veor.32 q4, q7, q3
114 veor.64 q4, q7, q3
116 veor.i8 q4, q7, q3
117 veor.i16 q4, q7, q3
118 veor.i32 q4, q7, q3
119 veor.i64 q4, q7, q3
121 veor.s8 q4, q7, q3
[all …]
neon-v8.s
5 vmaxnm.f32 q2, q4, q6
6 @ CHECK: vmaxnm.f32 q2, q4, q6 @ encoding: [0x5c,0x4f,0x08,0xf3]
16 vcvta.s32.f32 q4, q6
17 @ CHECK: vcvta.s32.f32 q4, q6 @ encoding: [0x4c,0x80,0xbb,0xf3]
18 vcvta.u32.f32 q4, q10
19 @ CHECK: vcvta.u32.f32 q4, q10 @ encoding: [0xe4,0x80,0xbb,0xf3]
43 vcvtp.s32.f32 q4, q15
44 @ CHECK: vcvtp.s32.f32 q4, q15 @ encoding: [0x6e,0x82,0xbb,0xf3]
50 vrintn.f32 q1, q4
51 @ CHECK: vrintn.f32 q1, q4 @ encoding: [0x48,0x24,0xba,0xf3]
[all …]
thumb-neon-v8.s
5 vmaxnm.f32 q2, q4, q6
6 @ CHECK: vmaxnm.f32 q2, q4, q6 @ encoding: [0x08,0xff,0x5c,0x4f]
16 vcvta.s32.f32 q4, q6
17 @ CHECK: vcvta.s32.f32 q4, q6 @ encoding: [0xbb,0xff,0x4c,0x80]
18 vcvta.u32.f32 q4, q10
19 @ CHECK: vcvta.u32.f32 q4, q10 @ encoding: [0xbb,0xff,0xe4,0x80]
43 vcvtp.s32.f32 q4, q15
44 @ CHECK: vcvtp.s32.f32 q4, q15 @ encoding: [0xbb,0xff,0x6e,0x82]
50 vrintn.f32 q1, q4
51 @ CHECK: vrintn.f32 q1, q4 @ encoding: [0xba,0xff,0x48,0x24]
[all …]
neon-shift-encoding.s
116 vsra.s64 q4, q5, #63
122 vsra.s8 q4, #7
134 @ CHECK: vsra.s64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf2]
139 @ CHECK: vsra.s8 q4, q4, #7 @ encoding: [0x58,0x81,0x89,0xf2]
152 vsra.u64 q4, q5, #63
158 vsra.u8 q4, #7
170 @ CHECK: vsra.u64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf3]
175 @ CHECK: vsra.u8 q4, q4, #7 @ encoding: [0x58,0x81,0x89,0xf3]
188 vsri.64 q4, q5, #63
194 vsri.8 q4, #7
[all …]
/external/capstone/suite/MC/ARM/
neon-bitwise-encoding.s.cs
23 0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
24 0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
25 0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
26 0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
27 0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
28 0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
29 0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
30 0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
31 0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
32 0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
[all …]
neon-shift-encoding.s.cs
57 0xda,0x81,0x81,0xf2 = vsra.s64 q4, q5, #63
62 0x58,0x81,0x89,0xf2 = vsra.s8 q4, q4, #7
73 0xda,0x81,0x81,0xf3 = vsra.u64 q4, q5, #63
78 0x58,0x81,0x89,0xf3 = vsra.u8 q4, q4, #7
89 0xda,0x84,0x81,0xf3 = vsri.64 q4, q5, #63
94 0x58,0x84,0x89,0xf3 = vsri.8 q4, q4, #7
105 0xda,0x85,0xbf,0xf3 = vsli.64 q4, q5, #63
110 0x58,0x85,0x8f,0xf3 = vsli.8 q4, q4, #7
167 0x48,0x84,0x0a,0xf2 = vshl.s8 q4, q4, q5
168 0x48,0x84,0x1a,0xf2 = vshl.s16 q4, q4, q5
[all …]
neon-shiftaccum-encoding.s.cs
9 0xd8,0x01,0xc0,0xf2 = vsra.s64 q8, q4, #64
17 0xda,0x81,0xa7,0xf3 = vsra.u64 q4, q5, #25
25 0xd8,0x81,0x80,0xf2 = vsra.s64 q4, q4, #64
44 0x58,0x63,0xa0,0xf2 = vrsra.s32 q3, q4, #32
45 0xda,0x83,0x80,0xf2 = vrsra.s64 q4, q5, #64
60 0x58,0x83,0xa0,0xf2 = vrsra.s32 q4, q4, #32
72 0x58,0x65,0xbf,0xf3 = vsli.32 q3, q4, #31
73 0xda,0x85,0xbf,0xf3 = vsli.64 q4, q5, #63
80 0x58,0xe4,0xa0,0xf3 = vsri.32 q7, q4, #32
88 0x58,0x85,0xbf,0xf3 = vsli.32 q4, q4, #31
[all …]
neont2-shiftaccum-encoding.s.cs
9 0xc0,0xef,0xd8,0x01 = vsra.s64 q8, q4, #64
17 0xa7,0xff,0xda,0x81 = vsra.u64 q4, q5, #25
25 0x80,0xef,0xd8,0x81 = vsra.s64 q4, q4, #64
44 0xa0,0xef,0x58,0x63 = vrsra.s32 q3, q4, #32
45 0x80,0xef,0xda,0x83 = vrsra.s64 q4, q5, #64
60 0xa0,0xef,0x58,0x83 = vrsra.s32 q4, q4, #32
72 0xbf,0xff,0x58,0x65 = vsli.32 q3, q4, #31
73 0xbf,0xff,0xda,0x85 = vsli.64 q4, q5, #63
80 0xa0,0xff,0x58,0xe4 = vsri.32 q7, q4, #32
88 0xbf,0xff,0x58,0x85 = vsli.32 q4, q4, #31
[all …]
neon-v8.s.cs
3 0x5c,0x4f,0x08,0xf3 = vmaxnm.f32 q2, q4, q6
8 0x4c,0x80,0xbb,0xf3 = vcvta.s32.f32 q4, q6
9 0xe4,0x80,0xbb,0xf3 = vcvta.u32.f32 q4, q10
20 0x6e,0x82,0xbb,0xf3 = vcvtp.s32.f32 q4, q15
23 0x48,0x24,0xba,0xf3 = vrintn.f32 q1, q4
29 0xc8,0x25,0xfa,0xf3 = vrintz.f32 q9, q4
31 0xc8,0x26,0xba,0xf3 = vrintm.f32 q1, q4
33 0xc8,0x27,0xba,0xf3 = vrintp.f32 q1, q4
37 0xc8,0x25,0xfa,0xf3 = vrintz.f32 q9, q4
38 0xc8,0x27,0xba,0xf3 = vrintp.f32 q1, q4
thumb-neon-v8.s.cs
3 0x08,0xff,0x5c,0x4f = vmaxnm.f32 q2, q4, q6
8 0xbb,0xff,0x4c,0x80 = vcvta.s32.f32 q4, q6
9 0xbb,0xff,0xe4,0x80 = vcvta.u32.f32 q4, q10
20 0xbb,0xff,0x6e,0x82 = vcvtp.s32.f32 q4, q15
23 0xba,0xff,0x48,0x24 = vrintn.f32 q1, q4
29 0xfa,0xff,0xc8,0x25 = vrintz.f32 q9, q4
31 0xba,0xff,0xc8,0x26 = vrintm.f32 q1, q4
33 0xba,0xff,0xc8,0x27 = vrintp.f32 q1, q4
37 0xfa,0xff,0xc8,0x25 = vrintz.f32 q9, q4
38 0xba,0xff,0xc8,0x27 = vrintp.f32 q1, q4
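The .s.cs files in this Capstone suite pair each raw encoding with the disassembly it must produce. A small sketch of reproducing the first veor mapping with Capstone's C API (standard cs_open/cs_disasm/cs_free usage; error handling kept minimal):

    #include <capstone/capstone.h>
    #include <stdio.h>

    int main(void) {
        csh handle;
        cs_insn *insn;
        const uint8_t code[] = { 0x56, 0x81, 0x0e, 0xf3 };  /* expect: veor q4, q7, q3 */
        if (cs_open(CS_ARCH_ARM, CS_MODE_ARM, &handle) != CS_ERR_OK)
            return 1;
        size_t n = cs_disasm(handle, code, sizeof(code), 0x1000, 0, &insn);
        for (size_t i = 0; i < n; i++)
            printf("%s %s\n", insn[i].mnemonic, insn[i].op_str);
        cs_free(insn, n);
        cs_close(&handle);
        return 0;
    }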
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/
thumb-big-stack.ll
145 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
147 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
149 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
151 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
153 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
155 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
157 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
159 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
161 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
163 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
[all …]
/external/llvm/test/CodeGen/ARM/
thumb-big-stack.ll
145 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
147 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
149 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
151 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
153 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
155 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
157 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
159 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
161 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
163 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
[all …]
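Both thumb-big-stack.ll copies use the same trick: an empty inline-asm string whose constraint list (truncated above at "{q…") marks every NEON q register as clobbered, so the backend must spill any live vector values into a very large stack frame. The C-level equivalent, as a sketch (AArch32 clobber names; on AArch64 the registers would be v0–v31):

    /* Empty asm with a full q-register clobber list: forces the
       compiler to save/restore any live vector state across this point. */
    void clobber_all_q_regs(void) {
        asm volatile("" ::: "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
                            "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
    }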
/external/libvpx/libvpx/vp8/common/arm/neon/
dequant_idct_neon.c
27 int16x8_t q1, q2, q3, q4, q5, q6; in vp8_dequant_idct_add_neon() local
38 q4 = vld1q_s16(input); in vp8_dequant_idct_add_neon()
59 vmulq_u16(vreinterpretq_u16_s16(q4), vreinterpretq_u16_s16(q6))); in vp8_dequant_idct_add_neon()
67 q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1); in vp8_dequant_idct_add_neon()
69 q4 = vshrq_n_s16(q4, 1); in vp8_dequant_idct_add_neon()
71 q4 = vqaddq_s16(q4, q2); in vp8_dequant_idct_add_neon()
73 d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4)); in vp8_dequant_idct_add_neon()
74 d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4)); in vp8_dequant_idct_add_neon()
92 q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1); in vp8_dequant_idct_add_neon()
97 q4 = vshrq_n_s16(q4, 1); in vp8_dequant_idct_add_neon()
[all …]
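In this hit, q4 carries the VP8 inverse transform's fixed-point constant multiply: vqdmulhq_n_s16 produces (2*x*K) >> 16 with saturation, vshrq_n_s16 halves that to (x*K) >> 16, and vqaddq_s16 adds x back, giving x * (1 + K/65536). A self-contained sketch of that step (the constant 20091 is libvpx's cospi8sqrt2minus1, stated here as an assumption):

    #include <arm_neon.h>

    /* y = x * cospi8sqrt2, i.e. x * (1 + 20091/65536), on int16 lanes */
    static int16x8_t mul_cospi8sqrt2(int16x8_t x) {
        const int16_t cospi8sqrt2minus1 = 20091;            /* assumed value  */
        int16x8_t t = vqdmulhq_n_s16(x, cospi8sqrt2minus1); /* (2*x*K) >> 16  */
        t = vshrq_n_s16(t, 1);                              /* (x*K) >> 16    */
        return vqaddq_s16(t, x);                            /* + x            */
    }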
/external/libvpx/libvpx/vpx_dsp/arm/
vpx_convolve8_horiz_filter_type1_neon.asm
100 vdup.16 q4, r7
107 vmlsl.u8 q4, d1, d25 ;mul_res = vmlal_u8(src[0_1],
110 vmlal.u8 q4, d3, d27 ;mul_res = vmull_u8(src[0_3],
113 vmlsl.u8 q4, d0, d24 ;mul_res = vmlsl_u8(src[0_0],
116 vmlal.u8 q4, d2, d26 ;mul_res = vmlsl_u8(src[0_2],
119 vmlal.u8 q4, d4, d28 ;mul_res = vmlal_u8(src[0_4],
122 vmlal.u8 q4, d5, d29 ;mul_res = vmlsl_u8(src[0_5],
125 vmlsl.u8 q4, d6, d30 ;mul_res = vmlal_u8(src[0_6],
128 vmlsl.u8 q4, d7, d31 ;mul_res = vmlsl_u8(src[0_7],
137 vhadd.s16 q4, q4, q11
[all …]
vpx_convolve8_horiz_filter_type2_neon.asm
100 vdup.16 q4, r7
107 vmlal.u8 q4, d1, d25 ;mul_res = vmlal_u8(src[0_1],
110 vmlal.u8 q4, d3, d27 ;mul_res = vmull_u8(src[0_3],
113 vmlsl.u8 q4, d0, d24 ;mul_res = vmlsl_u8(src[0_0],
116 vmlsl.u8 q4, d2, d26 ;mul_res = vmlsl_u8(src[0_2],
119 vmlal.u8 q4, d4, d28 ;mul_res = vmlal_u8(src[0_4],
122 vmlsl.u8 q4, d5, d29 ;mul_res = vmlsl_u8(src[0_5],
125 vmlal.u8 q4, d6, d30 ;mul_res = vmlal_u8(src[0_6],
128 vmlsl.u8 q4, d7, d31 ;mul_res = vmlsl_u8(src[0_7],
137 vhadd.s16 q4, q4, q11
[all …]
vpx_convolve8_avg_horiz_filter_type2_neon.asm
101 vdup.16 q4, r7
108 vmlal.u8 q4, d1, d25 ;mul_res = vmlal_u8(src[0_1],
111 vmlal.u8 q4, d3, d27 ;mul_res = vmull_u8(src[0_3],
114 vmlsl.u8 q4, d0, d24 ;mul_res = vmlsl_u8(src[0_0],
117 vmlsl.u8 q4, d2, d26 ;mul_res = vmlsl_u8(src[0_2],
120 vmlal.u8 q4, d4, d28 ;mul_res = vmlal_u8(src[0_4],
123 vmlsl.u8 q4, d5, d29 ;mul_res = vmlsl_u8(src[0_5],
126 vmlal.u8 q4, d6, d30 ;mul_res = vmlal_u8(src[0_6],
129 vmlsl.u8 q4, d7, d31 ;mul_res = vmlsl_u8(src[0_7],
138 vhadd.s16 q4, q4, q11
[all …]
vpx_convolve8_avg_horiz_filter_type1_neon.asm
100 vdup.16 q4, r7
107 vmlsl.u8 q4, d1, d25 ;mul_res = vmlal_u8(src[0_1],
110 vmlal.u8 q4, d3, d27 ;mul_res = vmull_u8(src[0_3],
113 vmlsl.u8 q4, d0, d24 ;mul_res = vmlsl_u8(src[0_0],
116 vmlal.u8 q4, d2, d26 ;mul_res = vmlsl_u8(src[0_2],
119 vmlal.u8 q4, d4, d28 ;mul_res = vmlal_u8(src[0_4],
122 vmlal.u8 q4, d5, d29 ;mul_res = vmlsl_u8(src[0_5],
125 vmlsl.u8 q4, d6, d30 ;mul_res = vmlal_u8(src[0_6],
128 vmlsl.u8 q4, d7, d31 ;mul_res = vmlsl_u8(src[0_7],
137 vhadd.s16 q4, q4, q11
[all …]
/external/libvpx/config/arm-neon/vpx_dsp/arm/
vpx_convolve8_avg_horiz_filter_type1_neon.asm.S
106 vdup.16 q4, r7
113 vmlsl.u8 q4, d1, d25 @mul_res = vmlal_u8(src[0_1],
116 vmlal.u8 q4, d3, d27 @mul_res = vmull_u8(src[0_3],
119 vmlsl.u8 q4, d0, d24 @mul_res = vmlsl_u8(src[0_0],
122 vmlal.u8 q4, d2, d26 @mul_res = vmlsl_u8(src[0_2],
125 vmlal.u8 q4, d4, d28 @mul_res = vmlal_u8(src[0_4],
128 vmlal.u8 q4, d5, d29 @mul_res = vmlsl_u8(src[0_5],
131 vmlsl.u8 q4, d6, d30 @mul_res = vmlal_u8(src[0_6],
134 vmlsl.u8 q4, d7, d31 @mul_res = vmlsl_u8(src[0_7],
143 vhadd.s16 q4, q4, q11
[all …]
vpx_convolve8_horiz_filter_type1_neon.asm.S
106 vdup.16 q4, r7
113 vmlsl.u8 q4, d1, d25 @mul_res = vmlal_u8(src[0_1],
116 vmlal.u8 q4, d3, d27 @mul_res = vmull_u8(src[0_3],
119 vmlsl.u8 q4, d0, d24 @mul_res = vmlsl_u8(src[0_0],
122 vmlal.u8 q4, d2, d26 @mul_res = vmlsl_u8(src[0_2],
125 vmlal.u8 q4, d4, d28 @mul_res = vmlal_u8(src[0_4],
128 vmlal.u8 q4, d5, d29 @mul_res = vmlsl_u8(src[0_5],
131 vmlsl.u8 q4, d6, d30 @mul_res = vmlal_u8(src[0_6],
134 vmlsl.u8 q4, d7, d31 @mul_res = vmlsl_u8(src[0_7],
143 vhadd.s16 q4, q4, q11
[all …]
vpx_convolve8_horiz_filter_type2_neon.asm.S
106 vdup.16 q4, r7
113 vmlal.u8 q4, d1, d25 @mul_res = vmlal_u8(src[0_1],
116 vmlal.u8 q4, d3, d27 @mul_res = vmull_u8(src[0_3],
119 vmlsl.u8 q4, d0, d24 @mul_res = vmlsl_u8(src[0_0],
122 vmlsl.u8 q4, d2, d26 @mul_res = vmlsl_u8(src[0_2],
125 vmlal.u8 q4, d4, d28 @mul_res = vmlal_u8(src[0_4],
128 vmlsl.u8 q4, d5, d29 @mul_res = vmlsl_u8(src[0_5],
131 vmlal.u8 q4, d6, d30 @mul_res = vmlal_u8(src[0_6],
134 vmlsl.u8 q4, d7, d31 @mul_res = vmlsl_u8(src[0_7],
143 vhadd.s16 q4, q4, q11
[all …]
vpx_convolve8_avg_horiz_filter_type2_neon.asm.S
107 vdup.16 q4, r7
114 vmlal.u8 q4, d1, d25 @mul_res = vmlal_u8(src[0_1],
117 vmlal.u8 q4, d3, d27 @mul_res = vmull_u8(src[0_3],
120 vmlsl.u8 q4, d0, d24 @mul_res = vmlsl_u8(src[0_0],
123 vmlsl.u8 q4, d2, d26 @mul_res = vmlsl_u8(src[0_2],
126 vmlal.u8 q4, d4, d28 @mul_res = vmlal_u8(src[0_4],
129 vmlsl.u8 q4, d5, d29 @mul_res = vmlsl_u8(src[0_5],
132 vmlal.u8 q4, d6, d30 @mul_res = vmlal_u8(src[0_6],
135 vmlsl.u8 q4, d7, d31 @mul_res = vmlsl_u8(src[0_7],
144 vhadd.s16 q4, q4, q11
[all …]
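All eight convolve variants above share one accumulation shape: q4 is seeded with an offset (vdup.16 q4, r7), eight widening multiplies accumulate into it with vmlal.u8/vmlsl.u8 against the taps in d24–d31 (the mix of adds and subtracts is what distinguishes the type1 and type2 tap sets), and vhadd.s16 q4, q4, q11 then halves the sum against a second constant. A structural sketch in intrinsics; the tap signs, offset, and final narrowing shift are placeholders rather than libvpx's real coefficients:

    #include <arm_neon.h>

    /* 8-tap horizontal filter step for 8 output pixels; acc plays the role of q4. */
    static uint8x8_t filter8_sketch(const uint8x8_t src[8],
                                    const uint8x8_t taps[8]) {
        uint16x8_t acc = vdupq_n_u16(64);        /* rounding offset, assumed   */
        acc = vmlsl_u8(acc, src[0], taps[0]);    /* negative taps: vmlsl       */
        acc = vmlal_u8(acc, src[1], taps[1]);    /* positive taps: vmlal       */
        acc = vmlsl_u8(acc, src[2], taps[2]);
        acc = vmlal_u8(acc, src[3], taps[3]);
        acc = vmlal_u8(acc, src[4], taps[4]);
        acc = vmlsl_u8(acc, src[5], taps[5]);
        acc = vmlal_u8(acc, src[6], taps[6]);
        acc = vmlsl_u8(acc, src[7], taps[7]);
        return vqshrn_n_u16(acc, 7);             /* saturating narrow; shift assumed */
    }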
