/external/llvm/test/MC/ARM/ |
D | neon-bitwise-encoding.s |
    110  veor q4, q7, q3
    111  veor.8 q4, q7, q3
    112  veor.16 q4, q7, q3
    113  veor.32 q4, q7, q3
    114  veor.64 q4, q7, q3
    116  veor.i8 q4, q7, q3
    117  veor.i16 q4, q7, q3
    118  veor.i32 q4, q7, q3
    119  veor.i64 q4, q7, q3
    121  veor.s8 q4, q7, q3
    [all …]
|
D | neon-v8.s |
    5   vmaxnm.f32 q2, q4, q6
    6   @ CHECK: vmaxnm.f32 q2, q4, q6 @ encoding: [0x5c,0x4f,0x08,0xf3]
    16  vcvta.s32.f32 q4, q6
    17  @ CHECK: vcvta.s32.f32 q4, q6 @ encoding: [0x4c,0x80,0xbb,0xf3]
    18  vcvta.u32.f32 q4, q10
    19  @ CHECK: vcvta.u32.f32 q4, q10 @ encoding: [0xe4,0x80,0xbb,0xf3]
    43  vcvtp.s32.f32 q4, q15
    44  @ CHECK: vcvtp.s32.f32 q4, q15 @ encoding: [0x6e,0x82,0xbb,0xf3]
    50  vrintn.f32 q1, q4
    51  @ CHECK: vrintn.f32 q1, q4 @ encoding: [0x48,0x24,0xba,0xf3]
    [all …]
|
D | thumb-neon-v8.s |
    5   vmaxnm.f32 q2, q4, q6
    6   @ CHECK: vmaxnm.f32 q2, q4, q6 @ encoding: [0x08,0xff,0x5c,0x4f]
    16  vcvta.s32.f32 q4, q6
    17  @ CHECK: vcvta.s32.f32 q4, q6 @ encoding: [0xbb,0xff,0x4c,0x80]
    18  vcvta.u32.f32 q4, q10
    19  @ CHECK: vcvta.u32.f32 q4, q10 @ encoding: [0xbb,0xff,0xe4,0x80]
    43  vcvtp.s32.f32 q4, q15
    44  @ CHECK: vcvtp.s32.f32 q4, q15 @ encoding: [0xbb,0xff,0x6e,0x82]
    50  vrintn.f32 q1, q4
    51  @ CHECK: vrintn.f32 q1, q4 @ encoding: [0xba,0xff,0x48,0x24]
    [all …]
|
D | neon-shift-encoding.s |
    116  vsra.s64 q4, q5, #63
    122  vsra.s8 q4, #7
    134  @ CHECK: vsra.s64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf2]
    139  @ CHECK: vsra.s8 q4, q4, #7 @ encoding: [0x58,0x81,0x89,0xf2]
    152  vsra.u64 q4, q5, #63
    158  vsra.u8 q4, #7
    170  @ CHECK: vsra.u64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf3]
    175  @ CHECK: vsra.u8 q4, q4, #7 @ encoding: [0x58,0x81,0x89,0xf3]
    188  vsri.64 q4, q5, #63
    194  vsri.8 q4, #7
    [all …]
|
D | neont2-shiftaccum-encoding.s |
    12   vsra.s64 q8, q4, #64
    20   vsra.u64 q4, q5, #25
    30   vsra.s64 q4, #64
    47   @ CHECK: vsra.s64 q8, q4, #64 @ encoding: [0xc0,0xef,0xd8,0x01]
    55   @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xa7,0xff,0xda,0x81]
    64   @ CHECK: vsra.s64 q4, q4, #64 @ encoding: [0x80,0xef,0xd8,0x81]
    85   vrsra.s32 q3, q4, #32
    86   vrsra.s64 q4, q5, #64
    103  vrsra.s32 q4, #32
    120  @ CHECK: vrsra.s32 q3, q4, #32 @ encoding: [0xa0,0xef,0x58,0x63]
    [all …]
|
D | neon-shiftaccum-encoding.s |
    10   vsra.s64 q8, q4, #64
    18   vsra.u64 q4, q5, #25
    28   vsra.s64 q4, #64
    45   @ CHECK: vsra.s64 q8, q4, #64 @ encoding: [0xd8,0x01,0xc0,0xf2]
    53   @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xda,0x81,0xa7,0xf3]
    62   @ CHECK: vsra.s64 q4, q4, #64 @ encoding: [0xd8,0x81,0x80,0xf2]
    82   vrsra.s32 q3, q4, #32
    83   vrsra.s64 q4, q5, #64
    100  vrsra.s32 q4, #32
    117  @ CHECK: vrsra.s32 q3, q4, #32 @ encoding: [0x58,0x63,0xa0,0xf2]
    [all …]
|
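Note: the shift-accumulate tests above check both spellings of VSRA/VRSRA; the two-operand form (e.g. vsra.s64 q4, #64) is an alias for the destructive three-operand form vsra.s64 q4, q4, #64, as the CHECK lines confirm. A minimal C sketch of the same shift-right-and-accumulate step with NEON intrinsics; the function name and shift amount are illustrative only:

    #include <arm_neon.h>

    /* Rough equivalent of "vsra.s64 qd, qm, #63": each 64-bit lane of src is
       arithmetically shifted right and accumulated into acc. */
    static inline int64x2_t shift_right_accumulate(int64x2_t acc, int64x2_t src)
    {
        return vsraq_n_s64(acc, src, 63); /* acc[i] += src[i] >> 63 */
    }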
/external/capstone/suite/MC/ARM/ |
D | neon-bitwise-encoding.s.cs |
    23  0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    24  0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    25  0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    26  0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    27  0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    28  0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    29  0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    30  0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    31  0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    32  0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    [all …]
|
D | neon-shift-encoding.s.cs |
    57   0xda,0x81,0x81,0xf2 = vsra.s64 q4, q5, #63
    62   0x58,0x81,0x89,0xf2 = vsra.s8 q4, q4, #7
    73   0xda,0x81,0x81,0xf3 = vsra.u64 q4, q5, #63
    78   0x58,0x81,0x89,0xf3 = vsra.u8 q4, q4, #7
    89   0xda,0x84,0x81,0xf3 = vsri.64 q4, q5, #63
    94   0x58,0x84,0x89,0xf3 = vsri.8 q4, q4, #7
    105  0xda,0x85,0xbf,0xf3 = vsli.64 q4, q5, #63
    110  0x58,0x85,0x8f,0xf3 = vsli.8 q4, q4, #7
    167  0x48,0x84,0x0a,0xf2 = vshl.s8 q4, q4, q5
    168  0x48,0x84,0x1a,0xf2 = vshl.s16 q4, q4, q5
    [all …]
|
D | neon-shiftaccum-encoding.s.cs |
    9    0xd8,0x01,0xc0,0xf2 = vsra.s64 q8, q4, #64
    17   0xda,0x81,0xa7,0xf3 = vsra.u64 q4, q5, #25
    25   0xd8,0x81,0x80,0xf2 = vsra.s64 q4, q4, #64
    44   0x58,0x63,0xa0,0xf2 = vrsra.s32 q3, q4, #32
    45   0xda,0x83,0x80,0xf2 = vrsra.s64 q4, q5, #64
    60   0x58,0x83,0xa0,0xf2 = vrsra.s32 q4, q4, #32
    72   0x58,0x65,0xbf,0xf3 = vsli.32 q3, q4, #31
    73   0xda,0x85,0xbf,0xf3 = vsli.64 q4, q5, #63
    80   0x58,0xe4,0xa0,0xf3 = vsri.32 q7, q4, #32
    88   0x58,0x85,0xbf,0xf3 = vsli.32 q4, q4, #31
    [all …]
|
D | neont2-shiftaccum-encoding.s.cs |
    9    0xc0,0xef,0xd8,0x01 = vsra.s64 q8, q4, #64
    17   0xa7,0xff,0xda,0x81 = vsra.u64 q4, q5, #25
    25   0x80,0xef,0xd8,0x81 = vsra.s64 q4, q4, #64
    44   0xa0,0xef,0x58,0x63 = vrsra.s32 q3, q4, #32
    45   0x80,0xef,0xda,0x83 = vrsra.s64 q4, q5, #64
    60   0xa0,0xef,0x58,0x83 = vrsra.s32 q4, q4, #32
    72   0xbf,0xff,0x58,0x65 = vsli.32 q3, q4, #31
    73   0xbf,0xff,0xda,0x85 = vsli.64 q4, q5, #63
    80   0xa0,0xff,0x58,0xe4 = vsri.32 q7, q4, #32
    88   0xbf,0xff,0x58,0x85 = vsli.32 q4, q4, #31
    [all …]
|
D | neon-v8.s.cs |
    3   0x5c,0x4f,0x08,0xf3 = vmaxnm.f32 q2, q4, q6
    8   0x4c,0x80,0xbb,0xf3 = vcvta.s32.f32 q4, q6
    9   0xe4,0x80,0xbb,0xf3 = vcvta.u32.f32 q4, q10
    20  0x6e,0x82,0xbb,0xf3 = vcvtp.s32.f32 q4, q15
    23  0x48,0x24,0xba,0xf3 = vrintn.f32 q1, q4
    29  0xc8,0x25,0xfa,0xf3 = vrintz.f32 q9, q4
    31  0xc8,0x26,0xba,0xf3 = vrintm.f32 q1, q4
    33  0xc8,0x27,0xba,0xf3 = vrintp.f32 q1, q4
    37  0xc8,0x25,0xfa,0xf3 = vrintz.f32 q9, q4
    38  0xc8,0x27,0xba,0xf3 = vrintp.f32 q1, q4
|
D | thumb-neon-v8.s.cs |
    3   0x08,0xff,0x5c,0x4f = vmaxnm.f32 q2, q4, q6
    8   0xbb,0xff,0x4c,0x80 = vcvta.s32.f32 q4, q6
    9   0xbb,0xff,0xe4,0x80 = vcvta.u32.f32 q4, q10
    20  0xbb,0xff,0x6e,0x82 = vcvtp.s32.f32 q4, q15
    23  0xba,0xff,0x48,0x24 = vrintn.f32 q1, q4
    29  0xfa,0xff,0xc8,0x25 = vrintz.f32 q9, q4
    31  0xba,0xff,0xc8,0x26 = vrintm.f32 q1, q4
    33  0xba,0xff,0xc8,0x27 = vrintp.f32 q1, q4
    37  0xfa,0xff,0xc8,0x25 = vrintz.f32 q9, q4
    38  0xba,0xff,0xc8,0x27 = vrintp.f32 q1, q4
|
/external/llvm/test/CodeGen/ARM/ |
D | thumb-big-stack.ll |
    145  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    147  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    149  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    151  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    153  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    155  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    157  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    159  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    161  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    163  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    [all …]
|
/external/libvpx/libvpx/vp8/common/arm/neon/ |
D | dequant_idct_neon.c |
    27  int16x8_t q1, q2, q3, q4, q5, q6; in vp8_dequant_idct_add_neon() local
    38  q4 = vld1q_s16(input); in vp8_dequant_idct_add_neon()
    59  vmulq_u16(vreinterpretq_u16_s16(q4), vreinterpretq_u16_s16(q6))); in vp8_dequant_idct_add_neon()
    67  q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1); in vp8_dequant_idct_add_neon()
    69  q4 = vshrq_n_s16(q4, 1); in vp8_dequant_idct_add_neon()
    71  q4 = vqaddq_s16(q4, q2); in vp8_dequant_idct_add_neon()
    73  d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4)); in vp8_dequant_idct_add_neon()
    74  d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4)); in vp8_dequant_idct_add_neon()
    92  q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1); in vp8_dequant_idct_add_neon()
    97  q4 = vshrq_n_s16(q4, 1); in vp8_dequant_idct_add_neon()
    [all …]
|
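Note: in the dequant_idct_neon.c lines above (67, 69, 71), vqdmulhq_n_s16 followed by vshrq_n_s16 and vqaddq_s16 is the usual NEON idiom for scaling 16-bit coefficients by a fixed-point constant slightly greater than 1. A self-contained sketch of that idiom, assuming the fractional part of the constant is held as a Q16 value (the extra vshrq_n_s16 by 1 in the snippet is consistent with that); the function name is hypothetical:

    #include <arm_neon.h>

    /* Computes roughly x * (1 + frac), where frac is given in Q16.
       vqdmulhq_n_s16 returns saturate((2 * x * frac_q16) >> 16), i.e. about
       2 * x * frac, so one extra right shift undoes the doubling before the add. */
    static inline int16x8_t scale_by_one_plus_frac_q16(int16x8_t x, int16_t frac_q16)
    {
        int16x8_t t = vqdmulhq_n_s16(x, frac_q16); /* ~ 2 * x * frac */
        t = vshrq_n_s16(t, 1);                     /* ~ x * frac */
        return vqaddq_s16(x, t);                   /* ~ x * (1 + frac), saturating */
    }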
D | idct_dequant_full_2x_neon.c |
    21   int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11; in idct_dequant_full_2x_neon() local
    41   q4 = vld1q_s16(q); in idct_dequant_full_2x_neon()
    68   q4 = vmulq_s16(q4, q0); in idct_dequant_full_2x_neon()
    74   dLow1 = vget_low_s16(q4); in idct_dequant_full_2x_neon()
    75   dHigh1 = vget_high_s16(q4); in idct_dequant_full_2x_neon()
    77   q4 = vcombine_s16(dHigh0, dHigh1); in idct_dequant_full_2x_neon()
    86   q6 = vqdmulhq_n_s16(q4, sinpi8sqrt2); in idct_dequant_full_2x_neon()
    88   q8 = vqdmulhq_n_s16(q4, cospi8sqrt2minus1); in idct_dequant_full_2x_neon()
    97   q4 = vqaddq_s16(q4, q8); in idct_dequant_full_2x_neon()
    101  q3 = vqaddq_s16(q7, q4); in idct_dequant_full_2x_neon()
    [all …]
|
D | mbloopfilter_neon.c |
    18   uint8x16_t q4, // p2 in vp8_mbloop_filter_neon() argument
    38   q11u8 = vabdq_u8(q3, q4); in vp8_mbloop_filter_neon()
    39   q12u8 = vabdq_u8(q4, q5); in vp8_mbloop_filter_neon()
    70   q4 = veorq_u8(q4, q0u8); in vp8_mbloop_filter_neon()
    137  q0s8 = vqaddq_s8(vreinterpretq_s8_u8(q4), q0s8); in vp8_mbloop_filter_neon()
    157  uint8x16_t qblimit, qlimit, qthresh, q3, q4; in vp8_mbloop_filter_horizontal_edge_y_neon() local
    168  q4 = vld1q_u8(src); in vp8_mbloop_filter_horizontal_edge_y_neon()
    182  vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9, in vp8_mbloop_filter_horizontal_edge_y_neon()
    183  q10, &q4, &q5, &q6, &q7, &q8, &q9); in vp8_mbloop_filter_horizontal_edge_y_neon()
    186  vst1q_u8(src, q4); in vp8_mbloop_filter_horizontal_edge_y_neon()
    [all …]
|
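Note: the mbloopfilter lines above use vabdq_u8 to take absolute pixel differences across the macroblock edge before deciding whether to filter. A rough sketch of that mask computation, with generic p/q pixel names and a broadcast limit vector; this illustrates the idea and is not the vp8 function itself:

    #include <arm_neon.h>

    /* Lanes come back all-ones where both neighbouring differences stay within
       'limit', all-zeros elsewhere; the result can gate the filter output. */
    static inline uint8x16_t edge_filter_mask(uint8x16_t p1, uint8x16_t p0,
                                              uint8x16_t q0, uint8x16_t q1,
                                              uint8x16_t limit)
    {
        uint8x16_t dp = vabdq_u8(p1, p0);     /* |p1 - p0| */
        uint8x16_t dq = vabdq_u8(q1, q0);     /* |q1 - q0| */
        return vandq_u8(vcgeq_u8(limit, dp),  /* limit >= |p1 - p0| */
                        vcgeq_u8(limit, dq)); /* limit >= |q1 - q0| */
    }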
/external/valgrind/none/tests/arm/ |
D | neon128.c |
    439  TESTINSN_bin("vand q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
    445  TESTINSN_bin("vbic q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
    452  TESTINSN_bin("vorr q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff); in main()
    458  TESTINSN_bin("vorn q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff); in main()
    463  TESTINSN_bin("veor q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
    468  TESTINSN_bin("veor q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff); in main()
    473  TESTINSN_bin("vbsl q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
    478  TESTINSN_bin("vbsl q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff); in main()
    483  TESTINSN_bin("vbit q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
    488  TESTINSN_bin("vbit q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff); in main()
    [all …]
|
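Note: the valgrind tests above exercise the NEON bitwise family, including the select variants (vbsl/vbit/vbif) whose result also depends on the destination operand. For reference, vbsl's per-bit semantics expressed with an intrinsic; a small standalone sketch, not code from neon128.c:

    #include <arm_neon.h>

    /* vbsl: for every bit, take it from 'a' where 'sel' is 1 and from 'b' where
       it is 0, i.e. (sel & a) | (~sel & b). */
    static inline uint8x16_t bit_select(uint8x16_t sel, uint8x16_t a, uint8x16_t b)
    {
        return vbslq_u8(sel, a, b);
    }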
/external/boringssl/src/crypto/curve25519/asm/ |
D | x25519-asm-arm.S |
    30   vpush {q4,q5,q6,q7}
    107  vand q4,q4,q2
    118  vadd.i64 q12,q4,q1
    128  vsub.i64 q4,q4,q12
    164  vadd.i64 q4,q4,q14
    169  vadd.i64 q4,q4,q6
    173  vadd.i64 q4,q4,q13
    175  vadd.i64 q1,q4,q1
    191  vsub.i64 q1,q4,q1
    246  veor q6,q4,q5
    [all …]
|
/external/libhevc/common/arm/ |
D | ihevc_inter_pred_luma_vert_w16inp_w16out.s |
    163  vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
    165  vmlal.s16 q4,d0,d22 @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)@
    167  vmlal.s16 q4,d2,d24 @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_2)@
    169  vmlal.s16 q4,d3,d25 @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)@
    171  vmlal.s16 q4,d4,d26 @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_4)@
    173  vmlal.s16 q4,d5,d27 @mul_res1 = vmlal_u8(mul_res1, src_tmp2, coeffabs_5)@
    174  vmlal.s16 q4,d6,d28 @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_6)@
    175  vmlal.s16 q4,d7,d29 @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_7)@
    192  vsub.s32 q4, q4, q15
    207  vshrn.s32 d8, q4, #6
    [all …]
|
D | ihevc_inter_pred_filters_luma_vert_w16inp.s |
    153  vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
    155  vmlal.s16 q4,d0,d22 @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)@
    157  vmlal.s16 q4,d2,d24 @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_2)@
    159  vmlal.s16 q4,d3,d25 @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)@
    161  vmlal.s16 q4,d4,d26 @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_4)@
    163  vmlal.s16 q4,d5,d27 @mul_res1 = vmlal_u8(mul_res1, src_tmp2, coeffabs_5)@
    164  vmlal.s16 q4,d6,d28 @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_6)@
    165  vmlal.s16 q4,d7,d29 @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_7)@
    182  vqshrn.s32 d8, q4, #6
    197  vqrshrun.s16 d8,q4,#6 @sto_res = vqmovun_s16(sto_res_tmp)@
    [all …]
|
D | ihevc_inter_pred_luma_horz_w16out.s |
    258  vmull.u8 q4,d1,d25 @arithmetic operations for ii iteration in the same time
    259  vmlsl.u8 q4,d0,d24
    260  vmlsl.u8 q4,d2,d26
    261  vmlal.u8 q4,d3,d27
    262  vmlal.u8 q4,d4,d28
    263  vmlsl.u8 q4,d5,d29
    264  vmlal.u8 q4,d6,d30
    265  vmlsl.u8 q4,d7,d31
    267  @ vqrshrun.s16 d8,q4,#6 @narrow right shift and saturating the result
    314  vmull.u8 q4,d1,d25 @arithmetic operations for ii iteration in the same time
    [all …]
|
D | ihevc_inter_pred_filters_luma_vert.s |
    164  vmull.u8 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
    166  vmlsl.u8 q4,d0,d22 @mul_res1 = vmlsl_u8(mul_res1, src_tmp1, coeffabs_0)@
    168  vmlsl.u8 q4,d2,d24 @mul_res1 = vmlsl_u8(mul_res1, src_tmp3, coeffabs_2)@
    170  vmlal.u8 q4,d3,d25 @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)@
    172  vmlal.u8 q4,d4,d26 @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_4)@
    174  vmlsl.u8 q4,d5,d27 @mul_res1 = vmlsl_u8(mul_res1, src_tmp2, coeffabs_5)@
    176  vmlal.u8 q4,d6,d28 @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_6)@
    178  vmlsl.u8 q4,d7,d29 @mul_res1 = vmlsl_u8(mul_res1, src_tmp4, coeffabs_7)@
    204  vqrshrun.s16 d8,q4,#6 @sto_res = vqmovun_s16(sto_res_tmp)@
    247  vmull.u8 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
    [all …]
|
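Note: the libhevc vertical-filter listings above accumulate eight taps with widening multiplies (vmull/vmlal/vmlsl into a 16-bit accumulator) and then narrow with rounding and saturation (vqrshrun #6). Roughly the same step written with NEON intrinsics; the tap handling and the >>6 normalisation follow the comments in the listing, while the names and data layout are illustrative:

    #include <arm_neon.h>

    /* One 8-pixel output row of an 8-tap vertical filter. src[0..7] are eight
       consecutive source rows, coeff[0..7] the absolute tap values, and neg[i]
       marks taps that are subtracted (as vmlsl does in the assembly). Tap 3 is
       assumed positive, as in the HEVC luma filter. */
    static inline uint8x8_t filt8_vert(const uint8x8_t src[8],
                                       const uint8x8_t coeff[8],
                                       const int neg[8])
    {
        uint16x8_t acc = vmull_u8(src[3], coeff[3]);   /* start from one added tap */
        for (int i = 0; i < 8; ++i) {
            if (i == 3) continue;
            acc = neg[i] ? vmlsl_u8(acc, src[i], coeff[i])
                         : vmlal_u8(acc, src[i], coeff[i]);
        }
        /* Round, shift right by 6 and saturate to 8 bits, like vqrshrun.s16 #6. */
        return vqrshrun_n_s16(vreinterpretq_s16_u16(acc), 6);
    }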
/external/freetype/src/base/ |
D | ftbbox.c |
    253  FT_Pos q4 ) in cubic_peak() argument
    271  FT_ABS( q4 ) ) ); in cubic_peak()
    282  q4 <<= shift; in cubic_peak()
    289  q4 >>= -shift; in cubic_peak()
    297  if ( q1 + q2 > q3 + q4 ) /* first half */ in cubic_peak()
    299  q4 = q4 + q3; in cubic_peak()
    302  q4 = q4 + q3; in cubic_peak()
    304  q4 = ( q4 + q3 ) / 8; in cubic_peak()
    312  q3 = q3 + q4; in cubic_peak()
    326  if ( q3 == q4 && q2 <= q4 ) in cubic_peak()
    [all …]
|
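Note: cubic_peak in ftbbox.c looks for the extreme of a cubic Bezier whose control values are q1..q4 by repeatedly halving the curve until the control polygon is flat enough (the q3 == q4 test above). A small sketch of the "keep the first half" de Casteljau step, consistent with the fragments shown at lines 297-304 but not a verbatim copy; FreeType works on FT_Pos, plain long is used here, and the fixed-point scaling and termination logic are omitted:

    /* After the step, q1..q4 (q1 unchanged) describe the first half of the curve:
       q2' = (q1 + q2) / 2, q3' = (q1 + 2*q2 + q3) / 4,
       q4' = (q1 + 3*q2 + 3*q3 + q4) / 8. */
    static void cubic_subdivide_first_half(long q1, long *q2, long *q3, long *q4)
    {
        *q4 += *q3;             /* running sums build the de Casteljau averages */
        *q3 += *q2;
        *q2 += q1;
        *q4 += *q3;
        *q3 += *q2;
        *q4 = (*q4 + *q3) / 8;
        *q3 /= 4;
        *q2 /= 2;
    }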
/external/libavc/common/arm/ |
D | ih264_inter_pred_filters_luma_horz_a9q.s |
    127  vaddl.u8 q4, d31, d2 @// a0 + a5 (column1,row0)
    135  vmlal.u8 q4, d31, d1 @// a0 + a5 + 20a2 (column1,row0)
    143  vmlal.u8 q4, d31, d1 @// a0 + a5 + 20a2 + 20a3 (column1,row0)
    151  vmlsl.u8 q4, d31, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row0)
    159  vmlsl.u8 q4, d31, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column1,row0)
    165  …vqrshrun.s16 d20, q4, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,r…
    195  vaddl.u8 q4, d31, d2 @// a0 + a5 (column1,row0)
    198  vmlal.u8 q4, d29, d1 @// a0 + a5 + 20a2 + 20a3 (column1,row0)
    199  vmlal.u8 q4, d30, d1 @// a0 + a5 + 20a2 (column1,row0)
    200  vmlsl.u8 q4, d27, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row0)
    [all …]
|
/external/libxaac/decoder/armv7/ |
D | ixheaacd_sbr_imdct_using_fft.s |
    149  VADD.I32 q8, q0, q4
    153  VSUB.I32 q9, q0, q4
    164  VSUB.I32 q4, q1, q5
    199  VSUB.S32 q6, q4, q5
    202  VADD.S32 q9, q4, q5
    205  VADD.S32 q4, q8, q1
    277  VADD.S32 q3, q4, q11
    280  VSUB.S32 q10, q4, q11
    283  VADD.S32 q4, q8, q7
    296  VPUSH {q3-q4}
    [all …]
|
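Note: the ixheaacd IMDCT/FFT listing above is mostly radix butterflies: matching VADD/VSUB pairs on the same two q registers produce the sum and difference outputs of one butterfly. The same operation on four 32-bit lanes with intrinsics; the names are illustrative:

    #include <arm_neon.h>

    /* Radix-2 butterfly: out_sum = a + b, out_diff = a - b, mirroring the
       VADD.S32/VSUB.S32 pairs in the listing. */
    static inline void butterfly_s32(int32x4_t a, int32x4_t b,
                                     int32x4_t *out_sum, int32x4_t *out_diff)
    {
        *out_sum  = vaddq_s32(a, b);
        *out_diff = vsubq_s32(a, b);
    }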