/external/llvm-project/llvm/test/MC/ARM/ |
D | neon-mul-accum-encoding.s |
    3: vmla.i8 d16, d18, d17
    4: vmla.i16 d16, d18, d17
    5: vmla.i32 d16, d18, d17
    6: vmla.f32 d16, d18, d17
    7: vmla.i8 q9, q8, q10
    8: vmla.i16 q9, q8, q10
    9: vmla.i32 q9, q8, q10
    10: vmla.f32 q9, q8, q10
    11: vmla.i32 q12, q8, d3[0]
    13: @ CHECK: vmla.i8 d16, d18, d17 @ encoding: [0xa1,0x09,0x42,0xf2]
    [all …]
|
D | neont2-mul-accum-encoding.s |
    5: vmla.i8 d16, d18, d17
    6: vmla.i16 d16, d18, d17
    7: vmla.i32 d16, d18, d17
    8: vmla.f32 d16, d18, d17
    9: vmla.i8 q9, q8, q10
    10: vmla.i16 q9, q8, q10
    11: vmla.i32 q9, q8, q10
    12: vmla.f32 q9, q8, q10
    13: vmla.i32 q12, q8, d3[0]
    15: @ CHECK: vmla.i8 d16, d18, d17 @ encoding: [0x42,0xef,0xa1,0x09]
    [all …]
|
D | mve-qdest-rsrc.s |
    351: # CHECK: vmla.s8 q0, q3, r8 @ encoding: [0x07,0xee,0x48,0x0e]
    352: # CHECK-NOFP: vmla.s8 q0, q3, r8 @ encoding: [0x07,0xee,0x48,0x0e]
    353: vmla.s8 q0, q3, r8
    355: # CHECK: vmla.s16 q1, q3, r10 @ encoding: [0x17,0xee,0x4a,0x2e]
    356: # CHECK-NOFP: vmla.s16 q1, q3, r10 @ encoding: [0x17,0xee,0x4a,0x2e]
    357: vmla.s16 q1, q3, r10
    359: # CHECK: vmla.s32 q1, q3, r1 @ encoding: [0x27,0xee,0x41,0x2e]
    360: # CHECK-NOFP: vmla.s32 q1, q3, r1 @ encoding: [0x27,0xee,0x41,0x2e]
    361: vmla.s32 q1, q3, r1
    363: # CHECK: vmla.u8 q0, q7, r10 @ encoding: [0x0f,0xfe,0x4a,0x0e]
    [all …]
|
D | fullfp16-neon.s |
    32: vmla.f16 d0, d1, d2
    33: vmla.f16 q0, q1, q2
    34: @ ARM: vmla.f16 d0, d1, d2 @ encoding: [0x12,0x0d,0x11,0xf2]
    35: @ ARM: vmla.f16 q0, q1, q2 @ encoding: [0x54,0x0d,0x12,0xf2]
    36: @ THUMB: vmla.f16 d0, d1, d2 @ encoding: [0x11,0xef,0x12,0x0d]
    37: @ THUMB: vmla.f16 q0, q1, q2 @ encoding: [0x12,0xef,0x54,0x0d]
    39: vmla.f16 d5, d6, d7[2]
    40: vmla.f16 q5, q6, d7[3]
    41: @ ARM: vmla.f16 d5, d6, d7[2] @ encoding: [0x67,0x51,0x96,0xf2]
    42: @ ARM: vmla.f16 q5, q6, d7[3] @ encoding: [0x6f,0xa1,0x9c,0xf3]
    [all …]
|
/external/llvm/test/MC/ARM/ |
D | neon-mul-accum-encoding.s |
    3: vmla.i8 d16, d18, d17
    4: vmla.i16 d16, d18, d17
    5: vmla.i32 d16, d18, d17
    6: vmla.f32 d16, d18, d17
    7: vmla.i8 q9, q8, q10
    8: vmla.i16 q9, q8, q10
    9: vmla.i32 q9, q8, q10
    10: vmla.f32 q9, q8, q10
    11: vmla.i32 q12, q8, d3[0]
    13: @ CHECK: vmla.i8 d16, d18, d17 @ encoding: [0xa1,0x09,0x42,0xf2]
    [all …]
|
D | neont2-mul-accum-encoding.s |
    5: vmla.i8 d16, d18, d17
    6: vmla.i16 d16, d18, d17
    7: vmla.i32 d16, d18, d17
    8: vmla.f32 d16, d18, d17
    9: vmla.i8 q9, q8, q10
    10: vmla.i16 q9, q8, q10
    11: vmla.i32 q9, q8, q10
    12: vmla.f32 q9, q8, q10
    13: vmla.i32 q12, q8, d3[0]
    15: @ CHECK: vmla.i8 d16, d18, d17 @ encoding: [0x42,0xef,0xa1,0x09]
    [all …]
|
/external/llvm/test/CodeGen/ARM/ |
D | fmacs.ll |
    10: ; VFP2: vmla.f32
    13: ; NEON: vmla.f32
    26: ; VFP2: vmla.f64
    29: ; NEON: vmla.f64
    42: ; VFP2: vmla.f32
    45: ; NEON: vmla.f32
    55: ; It's possible to make use of fp vmla / vmls on Cortex-A9.
    65: ; Two vmla with now RAW hazard
    67: ; A9: vmla.f32
    68: ; A9: vmla.f32
    [all …]
|
D | a15-mla.ll |
    9: ; CHECK: vmla
    19: ; CHECK: vmla.f32
    29: ; CHECK: vmla.f32
    32: ; CHECK: vmla.f32
|
D | vmla.ll |
    5: ;CHECK: vmla.i8
    16: ;CHECK: vmla.i16
    27: ;CHECK: vmla.i32
    38: ;CHECK: vmla.f32
    49: ;CHECK: vmla.i8
    60: ;CHECK: vmla.i16
    71: ;CHECK: vmla.i32
    82: ;CHECK: vmla.f32
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | fmacs.ll |
    12: ; VFP2: vmla.f32
    15: ; NEON: vmla.f32
    33: ; VMLA: vmla.f32 s0, s1, s2
    44: ; VFP2: vmla.f64
    47: ; NEON: vmla.f64
    60: ; VFP2: vmla.f32
    63: ; NEON: vmla.f32
    73: ; It's possible to make use of fp vmla / vmls on Cortex-A9.
    83: ; Two vmla with now RAW hazard
    85: ; A9: vmla.f32
    [all …]
|
D | a15-mla.ll |
    9: ; CHECK: vmla
    19: ; CHECK: vmla.f32
    29: ; CHECK: vmla.f32
    32: ; CHECK: vmla.f32
|
D | vmla.ll |
    5: ;CHECK: vmla.i8
    16: ;CHECK: vmla.i16
    27: ;CHECK: vmla.i32
    38: ;CHECK: vmla.f32
    49: ;CHECK: vmla.i8
    60: ;CHECK: vmla.i16
    71: ;CHECK: vmla.i32
    82: ;CHECK: vmla.f32
|
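The fmacs.ll, a15-mla.ll and vmla.ll CodeGen tests listed above check that a multiply feeding an add is selected as a single vmla. A minimal C sketch of the kind of source-level pattern these tests exercise is below; the function names are illustrative, and whether the compiler actually emits vmla (rather than separate multiply/add or a fused vfma) depends on the target CPU, FPU options and contraction settings the RUN lines select.

#include <arm_neon.h>

/* Scalar pattern behind the fmacs.ll-style checks: acc + a*b can be
 * selected as vmla.f32 on cores where the backend considers the
 * multiply-accumulate profitable (the tests vary -mcpu, e.g. Cortex-A9). */
float fmacs_like(float acc, float a, float b) {
    return acc + a * b;
}

/* Vector pattern behind the vmla.ll-style checks: an integer multiply
 * feeding an add, which the NEON backend can fold into vmla.i16. */
int16x8_t vmla_like_i16(int16x8_t acc, int16x8_t a, int16x8_t b) {
    return vaddq_s16(acc, vmulq_s16(a, b));
}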
/external/capstone/suite/MC/ARM/ |
D | neon-mul-accum-encoding.s.cs |
    2: 0xa1,0x09,0x42,0xf2 = vmla.i8 d16, d18, d17
    3: 0xa1,0x09,0x52,0xf2 = vmla.i16 d16, d18, d17
    4: 0xa1,0x09,0x62,0xf2 = vmla.i32 d16, d18, d17
    5: 0xb1,0x0d,0x42,0xf2 = vmla.f32 d16, d18, d17
    6: 0xe4,0x29,0x40,0xf2 = vmla.i8 q9, q8, q10
    7: 0xe4,0x29,0x50,0xf2 = vmla.i16 q9, q8, q10
    8: 0xe4,0x29,0x60,0xf2 = vmla.i32 q9, q8, q10
    9: 0xf4,0x2d,0x40,0xf2 = vmla.f32 q9, q8, q10
    10: 0xc3,0x80,0xe0,0xf3 = vmla.i32 q12, q8, d3[0]
|
D | neont2-mul-accum-encoding.s.cs |
    2: 0x42,0xef,0xa1,0x09 = vmla.i8 d16, d18, d17
    3: 0x52,0xef,0xa1,0x09 = vmla.i16 d16, d18, d17
    4: 0x62,0xef,0xa1,0x09 = vmla.i32 d16, d18, d17
    5: 0x42,0xef,0xb1,0x0d = vmla.f32 d16, d18, d17
    6: 0x40,0xef,0xe4,0x29 = vmla.i8 q9, q8, q10
    7: 0x50,0xef,0xe4,0x29 = vmla.i16 q9, q8, q10
    8: 0x60,0xef,0xe4,0x29 = vmla.i32 q9, q8, q10
    9: 0x40,0xef,0xf4,0x2d = vmla.f32 q9, q8, q10
    10: 0xe0,0xff,0xc3,0x80 = vmla.i32 q12, q8, d3[0]
|
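These .s.cs files pair raw bytes with the disassembly Capstone is expected to produce; note that the same vmla.i8 instruction has a different byte layout in the ARM and Thumb2 listings above. Below is a small sketch of driving the Capstone C API over two of those byte patterns; the output formatting is illustrative only.

#include <stdio.h>
#include <capstone/capstone.h>

int main(void) {
    /* Byte patterns taken from the expectations listed above. */
    const uint8_t arm_code[]   = { 0xa1, 0x09, 0x42, 0xf2 }; /* vmla.i8 d16, d18, d17 (ARM)    */
    const uint8_t thumb_code[] = { 0x42, 0xef, 0xa1, 0x09 }; /* vmla.i8 d16, d18, d17 (Thumb2) */
    csh handle;
    cs_insn *insn;
    size_t count;

    /* Disassemble the ARM-mode encoding. */
    if (cs_open(CS_ARCH_ARM, CS_MODE_ARM, &handle) != CS_ERR_OK)
        return 1;
    count = cs_disasm(handle, arm_code, sizeof(arm_code), 0x1000, 0, &insn);
    if (count > 0) {
        printf("%s %s\n", insn[0].mnemonic, insn[0].op_str);
        cs_free(insn, count);
    }
    cs_close(&handle);

    /* Disassemble the Thumb2 encoding of the same instruction. */
    if (cs_open(CS_ARCH_ARM, CS_MODE_THUMB, &handle) != CS_ERR_OK)
        return 1;
    count = cs_disasm(handle, thumb_code, sizeof(thumb_code), 0x1000, 0, &insn);
    if (count > 0) {
        printf("%s %s\n", insn[0].mnemonic, insn[0].op_str);
        cs_free(insn, count);
    }
    cs_close(&handle);
    return 0;
}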
/external/libavc/common/arm/ |
D | ih264_inter_pred_filters_luma_vert_a9q.s |
    131: vmla.u16 q7, q6, q11 @ temp += temp1 * 20
    134: vmla.u16 q10, q9, q11 @ temp4 += temp3 * 20
    141: vmla.u16 q8, q6, q11
    148: vmla.u16 q7, q6, q11
    155: vmla.u16 q9, q6, q11
    162: vmla.u16 q8, q6, q11
    170: vmla.u16 q7, q6, q11
    176: vmla.u16 q9, q6, q11
    190: vmla.u16 q7, q6, q11 @ temp += temp1 * 20
    213: vmla.u16 q4, q3, q11 @ temp += temp1 * 20
    [all …]
|
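These libavc routines implement the H.264 six-tap (1, -5, 20, 20, -5, 1) luma interpolation filter; the quoted vmla.u16 lines are the "multiply the centre taps by 20 and accumulate" step. A hedged NEON-intrinsics sketch of that single step follows, assuming temp1 holds the widened sum of the two centre rows as the @ temp += temp1 * 20 comments suggest; names and surrounding structure are illustrative, not a line-for-line transcription of the .s files.

#include <arm_neon.h>

/* One vertical filter step: weight the two centre rows by 20 and add them
 * into the running sum. The multiply-accumulate is typically emitted as a
 * vmla.u16 (possibly with the constant broadcast into a q register first). */
uint16x8_t luma_vert_center_taps(uint16x8_t temp,      /* running sum, e.g. row0 + row5 */
                                 uint8x8_t row2, uint8x8_t row3)
{
    uint16x8_t temp1 = vaddl_u8(row2, row3);  /* widen and add the centre rows */
    return vmlaq_n_u16(temp, temp1, 20);      /* temp += temp1 * 20            */
}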
D | ih264_inter_pred_luma_vert_qpel_a9q.s |
    138: vmla.u16 q7, q6, q11 @ temp += temp1 * 20
    141: vmla.u16 q10, q9, q11 @ temp4 += temp3 * 20
    148: vmla.u16 q8, q6, q11
    155: vmla.u16 q7, q6, q11
    164: vmla.u16 q9, q6, q11
    171: vmla.u16 q8, q6, q11
    181: vmla.u16 q7, q6, q11
    187: vmla.u16 q9, q6, q11
    203: vmla.u16 q7, q6, q11 @ temp += temp1 * 20
    228: vmla.u16 q4, q3, q11 @ temp += temp1 * 20
    [all …]
|
D | ih264_inter_pred_luma_horz_hpel_vert_qpel_a9q.s |
    159: vmla.u16 q3, q4, q11
    173: vmla.u16 q4, q5, q11
    187: vmla.u16 q5, q6, q11
    201: vmla.u16 q6, q7, q11
    215: vmla.u16 q7, q8, q11
    231: vmla.u16 q8, q9, q11
    259: vmla.u16 q10, q1, q11
    299: vmla.u16 q4, q1, q11
    334: vmla.u16 q14, q1, q11
    387: vmla.u16 q3, q4, q11
    [all …]
|
D | ih264_weighted_bi_pred_a9q.s |
    185: vmla.s16 q2, q3, d2[2] @weight 2 mult. for rows 1,2
    187: vmla.s16 q4, q5, d2[2] @weight 2 mult. for rows 3,4
    225: vmla.s16 q2, q3, d2[2] @weight 2 mult. for row 1
    229: vmla.s16 q4, q5, d2[2] @weight 2 mult. for row 2
    234: vmla.s16 q6, q7, d2[2] @weight 2 mult. for row 3
    236: vmla.s16 q8, q9, d2[2] @weight 2 mult. for row 4
    280: vmla.s16 q10, q11, d2[2] @weight 2 mult. for row 1L
    285: vmla.s16 q2, q3, d2[2] @weight 2 mult. for row 1H
    290: vmla.s16 q12, q13, d2[2] @weight 2 mult. for row 2L
    295: vmla.s16 q4, q5, d2[2] @weight 2 mult. for row 2H
    [all …]
|
D | ih264_inter_pred_luma_horz_qpel_vert_hpel_a9q.s |
    170: vmla.u16 q9, q10, q14
    174: vmla.u16 q10, q12, q14
    180: vmla.u16 q11, q12, q14
    247: vmla.u16 q9, q10, q14
    251: vmla.u16 q10, q12, q14
    257: vmla.u16 q11, q12, q14
    332: vmla.u16 q6, q7, q13
    336: vmla.u16 q7, q9, q13
    345: vmla.u16 q14, q8, q13
    363: vmla.u16 q7, q10, q13
    [all …]
|
D | ih264_inter_pred_luma_horz_hpel_vert_hpel_a9q.s |
    148: vmla.u16 q12, q10, d0[0] @ temp += temp1 * 20
    153: vmla.u16 q13, q11, d0[0] @ temp4 += temp3 * 20
    157: vmla.u16 q14, q11, d0[0] @ temp4 += temp3 * 20
    207: vmla.u16 q12, q10, d0[0] @ temp += temp1 * 20
    212: vmla.u16 q13, q11, d0[0] @ temp4 += temp3 * 20
    215: vmla.u16 q14, q11, d0[0] @ temp4 += temp3 * 20
    281: vmla.u16 q12, q10, d0[0] @ temp += temp1 * 20
    284: vmla.u16 q13, q14, d0[0] @ temp += temp1 * 20
    313: vmla.u16 q12, q10, d0[0] @ temp += temp1 * 20
    316: vmla.u16 q13, q14, d0[0] @ temp += temp1 * 20
    [all …]
|
/external/llvm-project/llvm/test/tools/llvm-objdump/ELF/ARM/ |
D | v7a-subfeature.s |
    8: vmla.f32 s0, s1, s2
    11: @CHECK: 81 0a 00 ee vmla.f32 s0, s1, s2
    20: vmla.f32 d0, d1, d2
    23: @CHECK: 12 0d 01 f2 vmla.f32 d0, d1, d2
|
D | v7a-neg-subfeature.s |
    7: vmla.f32 s0, s1, s2
    10: @CHECK-NOT: 81 0a 00 ee vmla.f32 s0, s1, s2
    20: vmla.f32 d0, d1, d2
    23: @CHECK-NOT: 12 0d 01 f2 vmla.f32 d0, d1, d2
|
D | v7m-neg-subfeatures.s |
    8: vmla.f32 s0, s1, s2
    11: @CHECK-NOT: 00 ee 81 0a vmla.f32 s0, s1, s2
|
D | v7m-subfeatures.s |
    9: vmla.f32 s0, s1, s2
    12: @CHECK: 00 ee 81 0a vmla.f32 s0, s1, s2
|
/external/llvm-project/llvm/test/CodeGen/Thumb2/ |
D | mve-fmas.ll |
    23: ; CHECK-MVE-NEXT: vmla.f16 s0, s4, s8
    27: ; CHECK-MVE-NEXT: vmla.f16 s13, s14, s12
    29: ; CHECK-MVE-NEXT: vmla.f16 s16, s5, s9
    38: ; CHECK-MVE-NEXT: vmla.f16 s20, s18, s16
    41: ; CHECK-MVE-NEXT: vmla.f16 s16, s6, s10
    48: ; CHECK-MVE-NEXT: vmla.f16 s20, s18, s16
    51: ; CHECK-MVE-NEXT: vmla.f16 s16, s7, s11
    57: ; CHECK-MVE-NEXT: vmla.f16 s0, s4, s8
    87: ; CHECK-MVE-NEXT: vmla.f16 s0, s4, s8
    91: ; CHECK-MVE-NEXT: vmla.f16 s13, s14, s12
    [all …]
|