/external/llvm/test/MC/ARM/
D | neon-shiftaccum-encoding.s |
    3  vsra.s8 d17, d16, #8
    4  vsra.s16 d15, d14, #16
    5  vsra.s32 d13, d12, #32
    6  vsra.s64 d11, d10, #64
    7  vsra.s8 q7, q2, #8
    8  vsra.s16 q3, q6, #16
    9  vsra.s32 q9, q5, #32
    10  vsra.s64 q8, q4, #64
    11  vsra.u8 d17, d16, #8
    12  vsra.u16 d11, d14, #11
    [all …]
|
D | neont2-shiftaccum-encoding.s |
    5  vsra.s8 d17, d16, #8
    6  vsra.s16 d15, d14, #16
    7  vsra.s32 d13, d12, #32
    8  vsra.s64 d11, d10, #64
    9  vsra.s8 q7, q2, #8
    10  vsra.s16 q3, q6, #16
    11  vsra.s32 q9, q5, #32
    12  vsra.s64 q8, q4, #64
    13  vsra.u8 d17, d16, #8
    14  vsra.u16 d11, d14, #11
    [all …]
|
D | neon-shift-encoding.s |
    109  vsra.s8 d16, d6, #7
    110  vsra.s16 d26, d18, #15
    111  vsra.s32 d11, d10, #31
    112  vsra.s64 d12, d19, #63
    113  vsra.s8 q1, q8, #7
    114  vsra.s16 q2, q7, #15
    115  vsra.s32 q3, q6, #31
    116  vsra.s64 q4, q5, #63
    118  vsra.s8 d16, #7
    119  vsra.s16 d15, #15
    [all …]
|
/external/llvm-project/llvm/test/MC/ARM/ |
D | neont2-shiftaccum-encoding.s |
    5  vsra.s8 d17, d16, #8
    6  vsra.s16 d15, d14, #16
    7  vsra.s32 d13, d12, #32
    8  vsra.s64 d11, d10, #64
    9  vsra.s8 q7, q2, #8
    10  vsra.s16 q3, q6, #16
    11  vsra.s32 q9, q5, #32
    12  vsra.s64 q8, q4, #64
    13  vsra.u8 d17, d16, #8
    14  vsra.u16 d11, d14, #11
    [all …]
|
D | neon-shiftaccum-encoding.s |
    3  vsra.s8 d17, d16, #8
    4  vsra.s16 d15, d14, #16
    5  vsra.s32 d13, d12, #32
    6  vsra.s64 d11, d10, #64
    7  vsra.s8 q7, q2, #8
    8  vsra.s16 q3, q6, #16
    9  vsra.s32 q9, q5, #32
    10  vsra.s64 q8, q4, #64
    11  vsra.u8 d17, d16, #8
    12  vsra.u16 d11, d14, #11
    [all …]
|
D | neon-shift-encoding.s |
    109  vsra.s8 d16, d6, #7
    110  vsra.s16 d26, d18, #15
    111  vsra.s32 d11, d10, #31
    112  vsra.s64 d12, d19, #63
    113  vsra.s8 q1, q8, #7
    114  vsra.s16 q2, q7, #15
    115  vsra.s32 q3, q6, #31
    116  vsra.s64 q4, q5, #63
    118  vsra.s8 d16, #7
    119  vsra.s16 d15, #15
    [all …]
|
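
The assembler tests above cover VSRA (shift right by an immediate and accumulate into the destination) for every signed and unsigned element size, in both d- and q-register forms. The same operation is reachable from C through the ACLE NEON intrinsics; a minimal sketch, with function names of my own choosing and shift counts picked to mirror the listings (build with NEON enabled):

    #include <arm_neon.h>

    /* d-register form: acc[i] += src[i] >> 8 on eight signed bytes,
     * i.e. the "vsra.s8 d17, d16, #8" shape from the tests above. */
    int8x8_t accumulate_sra_s8(int8x8_t acc, int8x8_t src) {
        return vsra_n_s8(acc, src, 8);
    }

    /* q-register form on unsigned halfwords, matching the .u16 cases. */
    uint16x8_t accumulate_sra_u16(uint16x8_t acc, uint16x8_t src) {
        return vsraq_n_u16(acc, src, 11);
    }
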
/external/capstone/suite/MC/ARM/ |
D | neont2-shiftaccum-encoding.s.cs |
    2  0xc8,0xef,0x30,0x11 = vsra.s8 d17, d16, #8
    3  0x90,0xef,0x1e,0xf1 = vsra.s16 d15, d14, #16
    4  0xa0,0xef,0x1c,0xd1 = vsra.s32 d13, d12, #32
    5  0x80,0xef,0x9a,0xb1 = vsra.s64 d11, d10, #64
    6  0x88,0xef,0x54,0xe1 = vsra.s8 q7, q2, #8
    7  0x90,0xef,0x5c,0x61 = vsra.s16 q3, q6, #16
    8  0xe0,0xef,0x5a,0x21 = vsra.s32 q9, q5, #32
    9  0xc0,0xef,0xd8,0x01 = vsra.s64 q8, q4, #64
    10  0xc8,0xff,0x30,0x11 = vsra.u8 d17, d16, #8
    11  0x95,0xff,0x1e,0xb1 = vsra.u16 d11, d14, #11
    [all …]
|
D | neon-shiftaccum-encoding.s.cs |
    2  0x30,0x11,0xc8,0xf2 = vsra.s8 d17, d16, #8
    3  0x1e,0xf1,0x90,0xf2 = vsra.s16 d15, d14, #16
    4  0x1c,0xd1,0xa0,0xf2 = vsra.s32 d13, d12, #32
    5  0x9a,0xb1,0x80,0xf2 = vsra.s64 d11, d10, #64
    6  0x54,0xe1,0x88,0xf2 = vsra.s8 q7, q2, #8
    7  0x5c,0x61,0x90,0xf2 = vsra.s16 q3, q6, #16
    8  0x5a,0x21,0xe0,0xf2 = vsra.s32 q9, q5, #32
    9  0xd8,0x01,0xc0,0xf2 = vsra.s64 q8, q4, #64
    10  0x30,0x11,0xc8,0xf3 = vsra.u8 d17, d16, #8
    11  0x1e,0xb1,0x95,0xf3 = vsra.u16 d11, d14, #11
    [all …]
|
D | neon-shift-encoding.s.cs |
    50  0x16,0x01,0xc9,0xf2 = vsra.s8 d16, d6, #7
    51  0x32,0xa1,0xd1,0xf2 = vsra.s16 d26, d18, #15
    52  0x1a,0xb1,0xa1,0xf2 = vsra.s32 d11, d10, #31
    53  0xb3,0xc1,0x81,0xf2 = vsra.s64 d12, d19, #63
    54  0x70,0x21,0x89,0xf2 = vsra.s8 q1, q8, #7
    55  0x5e,0x41,0x91,0xf2 = vsra.s16 q2, q7, #15
    56  0x5c,0x61,0xa1,0xf2 = vsra.s32 q3, q6, #31
    57  0xda,0x81,0x81,0xf2 = vsra.s64 q4, q5, #63
    58  0x30,0x01,0xc9,0xf2 = vsra.s8 d16, d16, #7
    59  0x1f,0xf1,0x91,0xf2 = vsra.s16 d15, d15, #15
    [all …]
|
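
The .s.cs files in this directory are Capstone regression fixtures: each line pairs a raw encoding with the disassembly Capstone is expected to produce (Thumb2 byte order in the first file, ARM-mode in the second). One of the ARM-mode entries can be reproduced directly with the Capstone C API; a sketch (address and output format are illustrative):

    #include <stdio.h>
    #include <capstone/capstone.h>

    int main(void) {
        /* Listed above as: 0x30,0x11,0xc8,0xf2 = vsra.s8 d17, d16, #8 */
        const uint8_t code[] = {0x30, 0x11, 0xc8, 0xf2};
        csh handle;
        cs_insn *insn;

        if (cs_open(CS_ARCH_ARM, CS_MODE_ARM, &handle) != CS_ERR_OK)
            return 1;
        size_t count = cs_disasm(handle, code, sizeof(code), 0x1000, 0, &insn);
        for (size_t i = 0; i < count; i++)
            printf("%s %s\n", insn[i].mnemonic, insn[i].op_str);
        if (count > 0)
            cs_free(insn, count);
        cs_close(&handle);
        return 0;
    }
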
/external/llvm-project/llvm/test/MC/VE/ |
D | VSRAX.s |
    6  # CHECK-INST: vsra.l %v11, %v22, %s20
    8  vsra.l %v11, %v22, %s20
    10  # CHECK-INST: vsra.l %vix, %vix, %vix
    12  vsra.l %vix, %vix, %vix
    14  # CHECK-INST: vsra.l %vix, %v22, 22
    16  vsra.l %vix, %v22, 22
    18  # CHECK-INST: vsra.l %v11, %v22, 63, %vm11
    20  vsra.l %v11, %v22, 63, %vm11
    22  # CHECK-INST: vsra.l %v11, %v23, %v22, %vm11
    24  vsra.l %v11, %v23, %v22, %vm11
|
D | VSRA.s |
    6  # CHECK-INST: vsra.w.sx %v11, %v22, %s20
    8  vsra.w.sx %v11, %v22, %s20
    10  # CHECK-INST: vsra.w.sx %vix, %vix, %vix
    12  vsra.w.sx %vix, %vix, %vix
    14  # CHECK-INST: vsra.w.sx %vix, %vix, %vix
    24  vsra.w.zx %vix, %v22, 22
    28  vsra.w %vix, %v22, 22
|
/external/llvm-project/llvm/test/CodeGen/VE/VELIntrinsics/ |
D | vsra.ll |
    16  ; CHECK-NEXT: vsra.w.sx %v0, %v0, %v1
    31  ; CHECK-NEXT: vsra.w.sx %v2, %v0, %v1
    50  ; CHECK-NEXT: vsra.w.sx %v0, %v0, %s0
    66  ; CHECK-NEXT: vsra.w.sx %v1, %v0, %s0
    84  ; CHECK-NEXT: vsra.w.sx %v0, %v0, 8
    96  ; CHECK-NEXT: vsra.w.sx %v1, %v0, 8
    111  ; CHECK-NEXT: vsra.w.sx %v2, %v0, %v1, %vm1
    130  ; CHECK-NEXT: vsra.w.sx %v1, %v0, %s0, %vm1
    148  ; CHECK-NEXT: vsra.w.sx %v1, %v0, 8, %vm1
    163  ; CHECK-NEXT: vsra.w.zx %v0, %v0, %v1
    [all …]
|
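
For readers who do not know the VE (NEC SX-Aurora) vector ISA: vsra.w.sx is an element-wise arithmetic shift right on 32-bit words whose result is sign-extended into the 64-bit vector element (.zx zero-extends instead), optionally under a vector mask such as %vm1. Below is a scalar C model of my reading of the .vv form; the 5-bit shift masking and the keep-old-value behaviour for masked-off elements are assumptions, and this is not the VE intrinsics API:

    #include <stdint.h>
    #include <stddef.h>

    /* Model of "vsra.w.sx %vx, %vy, %vz" over vl elements with an optional
     * per-element mask; masked-off elements are left unchanged. */
    void vsra_w_sx_model(int64_t *vx, const int64_t *vy, const int64_t *vz,
                         const uint8_t *mask, size_t vl) {
        for (size_t i = 0; i < vl; i++) {
            if (mask && !mask[i])
                continue;                          /* element not selected */
            int32_t w = (int32_t)vy[i];            /* low 32 bits of the source */
            unsigned s = (unsigned)vz[i] & 31;     /* assumed: low 5 bits of shift */
            vx[i] = (int64_t)(w >> s);             /* .sx: sign-extend the word */
        }
    }
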
/external/llvm-project/llvm/test/MC/RISCV/rvv/ |
D | shift.s |
    83  vsra.vv v8, v4, v20, v0.t
    84  # CHECK-INST: vsra.vv v8, v4, v20, v0.t
    89  vsra.vv v8, v4, v20
    90  # CHECK-INST: vsra.vv v8, v4, v20
    95  vsra.vx v8, v4, a0, v0.t
    96  # CHECK-INST: vsra.vx v8, v4, a0, v0.t
    101  vsra.vx v8, v4, a0
    102  # CHECK-INST: vsra.vx v8, v4, a0
    107  vsra.vi v8, v4, 31, v0.t
    108  # CHECK-INST: vsra.vi v8, v4, 31, v0.t
    [all …]
|
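
The RVV forms above are the usual element-wise arithmetic shift right: .vv takes the shift counts from a vector, .vx from a scalar register (a0), .vi from a 5-bit immediate, and a trailing v0.t requests masked execution. Plain C loops of the following shape are the kind of source a RISC-V vectorizing compiler may lower to vsra.vx and vsra.vi respectively; this is a sketch of the source pattern, not a claim about any particular compiler's output:

    #include <stdint.h>
    #include <stddef.h>

    /* Variable shift count: a candidate for vsra.vx (count in a scalar reg). */
    void sra_bulk(int32_t *dst, const int32_t *src, int sh, size_t n) {
        for (size_t i = 0; i < n; i++)
            dst[i] = src[i] >> sh;
    }

    /* Constant shift count: a candidate for vsra.vi. */
    void sra_bulk_by_5(int32_t *dst, const int32_t *src, size_t n) {
        for (size_t i = 0; i < n; i++)
            dst[i] = src[i] >> 5;
    }
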
/external/llvm/test/CodeGen/ARM/ |
D | vsra.ll |
    5  ;CHECK: vsra.s8
    15  ;CHECK: vsra.s16
    25  ;CHECK: vsra.s32
    35  ;CHECK: vsra.s64
    45  ;CHECK: vsra.s8
    55  ;CHECK: vsra.s16
    65  ;CHECK: vsra.s32
    75  ;CHECK: vsra.s64
    85  ;CHECK: vsra.u8
    95  ;CHECK: vsra.u16
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | vsra.ll |
    5  ;CHECK: vsra.s8
    15  ;CHECK: vsra.s16
    25  ;CHECK: vsra.s32
    35  ;CHECK: vsra.s64
    45  ;CHECK: vsra.s8
    55  ;CHECK: vsra.s16
    65  ;CHECK: vsra.s32
    75  ;CHECK: vsra.s64
    85  ;CHECK: vsra.u8
    95  ;CHECK: vsra.u16
    [all …]
|
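
These CodeGen tests check that an arithmetic-shift-right feeding an add is selected as a single VSRA rather than a separate shift and add. The corresponding C idiom, which a NEON-enabled compiler may lower the same way (a sketch, not code from the test file):

    #include <stdint.h>
    #include <stddef.h>

    /* acc[i] += src[i] >> 3: the shift-then-accumulate shape that instruction
     * selection can fold into vsra.s32 once the loop is vectorized. */
    void shift_accumulate(int32_t *acc, const int32_t *src, size_t n) {
        for (size_t i = 0; i < n; i++)
            acc[i] += src[i] >> 3;
    }
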
/external/llvm-project/llvm/test/tools/llvm-mca/ARM/ |
D | cortex-a57-neon-instructions.s |
    569  vsra.u8 d16, d16, #7
    570  vsra.u16 d16, d16, #15
    571  vsra.u32 d16, d16, #31
    572  vsra.u64 d16, d16, #63
    573  vsra.u8 q8, q8, #7
    574  vsra.u16 q8, q8, #15
    575  vsra.u32 q8, q8, #31
    576  vsra.u64 q8, q8, #63
    577  vsra.s8 d16, d16, #7
    578  vsra.s16 d16, d16, #15
    [all …]
|
/external/llvm-project/llvm/test/MC/Disassembler/ARM/ |
D | neon.txt |
    1206  # CHECK: vsra.u8 d16, d16, #7
    1208  # CHECK: vsra.u16 d16, d16, #15
    1210  # CHECK: vsra.u32 d16, d16, #31
    1212  # CHECK: vsra.u64 d16, d16, #63
    1214  # CHECK: vsra.u8 q8, q8, #7
    1216  # CHECK: vsra.u16 q8, q8, #15
    1218  # CHECK: vsra.u32 q8, q8, #31
    1220  # CHECK: vsra.u64 q8, q8, #63
    1222  # CHECK: vsra.s8 d16, d16, #7
    1224  # CHECK: vsra.s16 d16, d16, #15
    [all …]
|
D | neont2.txt |
    1188  # CHECK: vsra.s8 d17, d16, #8
    1190  # CHECK: vsra.s16 d17, d16, #16
    1192  # CHECK: vsra.s32 d17, d16, #32
    1194  # CHECK: vsra.s64 d17, d16, #64
    1196  # CHECK: vsra.s8 q8, q9, #8
    1198  # CHECK: vsra.s16 q8, q9, #16
    1200  # CHECK: vsra.s32 q8, q9, #32
    1202  # CHECK: vsra.s64 q8, q9, #64
    1204  # CHECK: vsra.u8 d17, d16, #8
    1206  # CHECK: vsra.u16 d17, d16, #16
    [all …]
|
/external/llvm/test/MC/Disassembler/ARM/ |
D | neon.txt |
    1197  # CHECK: vsra.u8 d16, d16, #7
    1199  # CHECK: vsra.u16 d16, d16, #15
    1201  # CHECK: vsra.u32 d16, d16, #31
    1203  # CHECK: vsra.u64 d16, d16, #63
    1205  # CHECK: vsra.u8 q8, q8, #7
    1207  # CHECK: vsra.u16 q8, q8, #15
    1209  # CHECK: vsra.u32 q8, q8, #31
    1211  # CHECK: vsra.u64 q8, q8, #63
    1213  # CHECK: vsra.s8 d16, d16, #7
    1215  # CHECK: vsra.s16 d16, d16, #15
    [all …]
|
D | neont2.txt |
    1178  # CHECK: vsra.s8 d17, d16, #8
    1180  # CHECK: vsra.s16 d17, d16, #16
    1182  # CHECK: vsra.s32 d17, d16, #32
    1184  # CHECK: vsra.s64 d17, d16, #64
    1186  # CHECK: vsra.s8 q8, q9, #8
    1188  # CHECK: vsra.s16 q8, q9, #16
    1190  # CHECK: vsra.s32 q8, q9, #32
    1192  # CHECK: vsra.s64 q8, q9, #64
    1194  # CHECK: vsra.u8 d17, d16, #8
    1196  # CHECK: vsra.u16 d17, d16, #16
    [all …]
|
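
In these disassembler tests the printed shift amount is not stored directly in the instruction word: for the NEON "shift right by immediate" class the imm6 field (plus the L bit for 64-bit elements) encodes the shift as a bias against the element size. A small decode sketch based on my reading of the A32 encoding; field extraction from the full word is omitted and the helper name is mine:

    #include <stdio.h>

    /* Recover element size and shift amount from imm6/L for VSRA-style
     * "shift right by immediate" encodings: imm6 holds 16 - shift,
     * 32 - shift or 64 - shift depending on element size. */
    static void decode_neon_shr_imm(unsigned imm6, unsigned L) {
        unsigned esize, shift;
        if (L)                 { esize = 64; shift = 64 - imm6; }
        else if (imm6 & 0x20)  { esize = 32; shift = 64 - imm6; }
        else if (imm6 & 0x10)  { esize = 16; shift = 32 - imm6; }
        else                   { esize = 8;  shift = 16 - imm6; }
        printf("esize=%u shift=#%u\n", esize, shift);
    }

    int main(void) {
        decode_neon_shr_imm(8, 0);   /* -> esize=8  shift=#8,  as in vsra.s8 ..., #8  */
        decode_neon_shr_imm(16, 0);  /* -> esize=16 shift=#16, as in vsra.s16 ..., #16 */
        decode_neon_shr_imm(32, 0);  /* -> esize=32 shift=#32, as in vsra.s32 ..., #32 */
        decode_neon_shr_imm(0, 1);   /* -> esize=64 shift=#64, as in vsra.s64 ..., #64 */
        return 0;
    }
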
/external/arm-neon-tests/ |
D | ref_vsra_n.c | 40 vsra##Q##_n_##T2##W(VECT_VAR(vector, T1, W, N), \ in exec_vsra_n()
|
/external/libavc/common/arm/ |
D | ih264_iquant_itrans_recon_a9.s |
    616  vsra.s16 q2, q6, #0x1 @ Q2 = y6
    658  vsra.s16 q6, q9, #0x2 @ Q6 = z3
    689  vsra.s16 q9, q7, #0x2 @ Q9 = z1
    735  vsra.s16 q2, q6, #0x1 @ Q2 = y6
    780  vsra.s16 q6, q9, #0x2 @ Q6 = z3
    810  vsra.s16 q9, q7, #0x2 @ Q9 = z1
|
D | ih264_deblk_luma_a9.s |
    254  vsra.u8 q10, q0, #2 @((Alpha >> 2) + 2)
    869  vsra.u8 d14, d15, #2 @alpha >>2 +2
|
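
The annotated deblocking line "vsra.u8 q10, q0, #2 @((Alpha >> 2) + 2)" leans on VSRA's accumulate semantics: since VSRA computes Qd += Qm >> imm, the comment only works out if q10 already holds the constant 2 when the instruction executes, which is a deduction from the semantics rather than something visible in the snippet. A one-lane scalar sketch of that step:

    #include <stdint.h>

    /* One byte lane of "vsra.u8 q10, q0, #2": threshold = (alpha >> 2) + 2,
     * with the "+ 2" supplied by the pre-seeded destination (assumed). */
    static inline uint8_t deblk_alpha_threshold(uint8_t alpha) {
        uint8_t acc = 2;                       /* assumed initial contents of q10 */
        acc = (uint8_t)(acc + (alpha >> 2));   /* accumulate the shifted alpha    */
        return acc;
    }
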
/external/llvm/include/llvm/IR/ |
D | IntrinsicsSystemZ.td | 308 def int_s390_vsra : SystemZBinary<"vsra", llvm_v16i8_ty>;
|
/external/capstone/arch/ARM/
D | ARMMappingInsnOp.inc |
    4537  { /* ARM_VSRAsv16i8, ARM_INS_VSRA: vsra${p}.s8 $vd, $vm, $simm */
    4540  { /* ARM_VSRAsv1i64, ARM_INS_VSRA: vsra${p}.s64 $vd, $vm, $simm */
    4543  { /* ARM_VSRAsv2i32, ARM_INS_VSRA: vsra${p}.s32 $vd, $vm, $simm */
    4546  { /* ARM_VSRAsv2i64, ARM_INS_VSRA: vsra${p}.s64 $vd, $vm, $simm */
    4549  { /* ARM_VSRAsv4i16, ARM_INS_VSRA: vsra${p}.s16 $vd, $vm, $simm */
    4552  { /* ARM_VSRAsv4i32, ARM_INS_VSRA: vsra${p}.s32 $vd, $vm, $simm */
    4555  { /* ARM_VSRAsv8i16, ARM_INS_VSRA: vsra${p}.s16 $vd, $vm, $simm */
    4558  { /* ARM_VSRAsv8i8, ARM_INS_VSRA: vsra${p}.s8 $vd, $vm, $simm */
    4561  { /* ARM_VSRAuv16i8, ARM_INS_VSRA: vsra${p}.u8 $vd, $vm, $simm */
    4564  { /* ARM_VSRAuv1i64, ARM_INS_VSRA: vsra${p}.u64 $vd, $vm, $simm */
    [all …]
|