
Searched refs:asrd (Results 1 – 16 of 16) sorted by relevance

/external/llvm-project/llvm/test/MC/AArch64/SVE/
asrd.s
10 asrd z0.b, p0/m, z0.b, #1
16 asrd z31.b, p0/m, z31.b, #8
22 asrd z0.h, p0/m, z0.h, #1
28 asrd z31.h, p0/m, z31.h, #16
34 asrd z0.s, p0/m, z0.s, #1
40 asrd z31.s, p0/m, z31.s, #32
46 asrd z0.d, p0/m, z0.d, #1
52 asrd z31.d, p0/m, z31.d, #64
68 asrd z31.d, p0/m, z31.d, #64
80 asrd z31.d, p0/m, z31.d, #64
asrd-diagnostics.s
3 asrd z18.b, p0/m, z28.b, #0
8 asrd z1.b, p0/m, z9.b, #9
13 asrd z21.h, p0/m, z2.h, #0
18 asrd z14.h, p0/m, z30.h, #17
23 asrd z6.s, p0/m, z12.s, #0
28 asrd z23.s, p0/m, z19.s, #33
33 asrd z3.d, p0/m, z24.d, #0
38 asrd z25.d, p0/m, z16.d, #65
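
The two test files above bracket the accepted immediate: the shift must lie between 1 and the element size in bits (1-8 for .b, 1-16 for .h, 1-32 for .s, 1-64 for .d), which is why the diagnostics file probes #0 and one past the upper bound for each width. As a rough scalar model of what one lane computes (my reading of the instruction, not something these tests state), asrd divides a signed element by 2^shift, rounding toward zero:

    // Scalar sketch of a single asrd byte lane; the vector instruction applies
    // this per element where the predicate bit is set and, with the /m form,
    // leaves inactive elements unchanged.
    #include <cstdint>

    int8_t asrd_byte_lane(int8_t x, unsigned shift) {  // valid shifts: 1..8 for .b
      int32_t v = x;
      if (v < 0) {
        v += (int32_t{1} << shift) - 1;  // bias negatives so the shift truncates toward zero
      }
      return static_cast<int8_t>(v >> shift);
    }
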
/external/llvm-project/llvm/test/CodeGen/AArch64/
sve-intrinsics-shifts.ll
87 ; CHECK: asrd z0.b, p0/m, z0.b, #1
89 %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> %pg,
97 ; CHECK: asrd z0.h, p0/m, z0.h, #2
99 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asrd.nxv8i16(<vscale x 8 x i1> %pg,
107 ; CHECK: asrd z0.s, p0/m, z0.s, #31
109 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asrd.nxv4i32(<vscale x 4 x i1> %pg,
117 ; CHECK: asrd z0.d, p0/m, z0.d, #64
119 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asrd.nxv2i64(<vscale x 2 x i1> %pg,
350 declare <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, i…
351 declare <vscale x 8 x i16> @llvm.aarch64.sve.asrd.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, i3…
[all …]
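
The declare lines show the intrinsic's shape: a governing <vscale x N x i1> predicate, the matching data vector, and an i32 shift amount, with one overload per element type. For building such a call from C++ rather than hand-written .ll, a minimal sketch (assuming the intrinsic is overloaded only on the data vector type, as the asrd.nxv16i8 / asrd.nxv8i16 names suggest) could look like:

    // Sketch, not a verified drop-in: emit a call to llvm.aarch64.sve.asrd.<vecty>.
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/IntrinsicsAArch64.h"
    #include "llvm/IR/Module.h"

    llvm::Value *emitAsrd(llvm::IRBuilder<> &B, llvm::Module &M,
                          llvm::Value *Pg, llvm::Value *Zn, unsigned Shift) {
      // Pg: <vscale x N x i1> predicate, Zn: matching <vscale x N x iM> vector.
      llvm::Function *Asrd = llvm::Intrinsic::getDeclaration(
          &M, llvm::Intrinsic::aarch64_sve_asrd, {Zn->getType()});
      return B.CreateCall(Asrd, {Pg, Zn, B.getInt32(Shift)});
    }
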
sve-intrinsics-shifts-merging.ll
99 ; CHECK-NEXT: asrd z0.b, p0/m, z0.b, #1
102 %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> %pg,
111 ; CHECK-NEXT: asrd z0.h, p0/m, z0.h, #2
114 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asrd.nxv8i16(<vscale x 8 x i1> %pg,
123 ; CHECK-NEXT: asrd z0.s, p0/m, z0.s, #31
126 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asrd.nxv4i32(<vscale x 4 x i1> %pg,
135 ; CHECK-NEXT: asrd z0.d, p0/m, z0.d, #64
138 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asrd.nxv2i64(<vscale x 2 x i1> %pg,
323 declare <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, i…
324 declare <vscale x 8 x i16> @llvm.aarch64.sve.asrd.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, i3…
[all …]
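
From C or C++ source, the same operation is normally reached through the ACLE SVE intrinsics in arm_sve.h rather than by calling the LLVM intrinsic directly. The spelling below is my assumption of the usual ACLE name, not something these tests contain; it should lower to llvm.aarch64.sve.asrd and then to a single predicated asrd:

    // Sketch using the ACLE SVE intrinsics (compile with SVE enabled,
    // e.g. -march=armv8-a+sve). The _m form merges: inactive lanes of the
    // result keep the corresponding lanes of x.
    #include <arm_sve.h>

    svint32_t divide_by_8_where_active(svbool_t pg, svint32_t x) {
      return svasrd_n_s32_m(pg, x, 3);  // per-lane x / 8, rounding toward zero
    }
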
/external/vixl/test/aarch64/
test-api-movprfx-aarch64.cc
652 __ asrd(z29.VnB(), p4.Merging(), z29.VnB(), 3); in TEST()
1293 __ asrd(z24.VnH(), p3.Merging(), z24.VnH(), 3); in TEST()
test-disasm-sve-aarch64.cc
316 COMPARE_PREFIX(asrd(z0.VnB(), p4.Merging(), z0.VnB(), 1), in TEST()
318 COMPARE_PREFIX(asrd(z0.VnH(), p4.Merging(), z0.VnH(), 1), in TEST()
320 COMPARE_PREFIX(asrd(z0.VnS(), p4.Merging(), z0.VnS(), 1), in TEST()
322 COMPARE_PREFIX(asrd(z0.VnD(), p4.Merging(), z0.VnD(), 1), in TEST()
/external/vixl/src/aarch64/
simulator-aarch64.h
4217 LogicVRegister asrd(VectorFormat vform,
assembler-aarch64.h
3661 void asrd(const ZRegister& zd,
assembler-sve-aarch64.cc
218 void Assembler::asrd(const ZRegister& zd,   (definition of vixl::aarch64::Assembler::asrd)
logic-aarch64.cc
6852 LogicVRegister Simulator::asrd(VectorFormat vform,   (definition of vixl::aarch64::Simulator::asrd)
macro-assembler-aarch64.h
3562 asrd(zd, pg, zd, shift); in Asrd()
simulator-aarch64.cc
7477 asrd(vform, result, zdn, shift_dist); in VisitSVEBitwiseShiftByImm_Predicated()
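
Putting the VIXL hits together: Assembler::asrd takes a destination, a merging predicate, a source and a shift; the MacroAssembler wrapper Asrd funnels everything through the destructive zd form (visible in the macro-assembler-aarch64.h hit); and the movprfx test exercises the same-register case. A usage sketch, with the header path and exact Asrd signature assumed rather than verified:

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void EmitAsrdExample(MacroAssembler* masm) {
      // Predicated, destructive form: divide the signed .s lanes of z0 by 4
      // (shift of 2) where p0 is active; inactive lanes are left unchanged.
      masm->Asrd(z0.VnS(), p0.Merging(), z0.VnS(), 2);
    }
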
/external/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/AArch64/
AArch64GenAsmMatcher.inc
12477 "asr\004asrd\004asrr\004asrv\005autda\005autdb\006autdza\006autdzb\005au"
12913 …{ 138 /* asrd */, AArch64::ASRD_ZPmI_H, Convert__SVEVectorHReg1_0__SVEPredicate3bAnyReg1_1__Tie0_1…
12914 …{ 138 /* asrd */, AArch64::ASRD_ZPmI_S, Convert__SVEVectorSReg1_0__SVEPredicate3bAnyReg1_1__Tie0_1…
12915 …{ 138 /* asrd */, AArch64::ASRD_ZPmI_D, Convert__SVEVectorDReg1_0__SVEPredicate3bAnyReg1_1__Tie0_1…
12916 …{ 138 /* asrd */, AArch64::ASRD_ZPmI_B, Convert__SVEVectorBReg1_0__SVEPredicate3bAnyReg1_1__Tie0_1…
20286 …{ 138 /* asrd */, AArch64::ASRD_ZPmI_H, Convert__SVEVectorHReg1_0__SVEPredicate3bAnyReg1_1__Tie0_1…
20287 …{ 138 /* asrd */, AArch64::ASRD_ZPmI_S, Convert__SVEVectorSReg1_0__SVEPredicate3bAnyReg1_1__Tie0_1…
20288 …{ 138 /* asrd */, AArch64::ASRD_ZPmI_D, Convert__SVEVectorDReg1_0__SVEPredicate3bAnyReg1_1__Tie0_1…
20289 …{ 138 /* asrd */, AArch64::ASRD_ZPmI_B, Convert__SVEVectorBReg1_0__SVEPredicate3bAnyReg1_1__Tie0_1…
28029 { 138 /* asrd */, 2 /* 1 */, MCK_SVEPredicate3bAnyReg, AMFBS_HasSVE },
[all …]
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/
AArch64SVEInstrInfo.td
949 defm ASRD_ZPmI : sve_int_bin_pred_shift_imm_right<0b0100, "asrd", int_aarch64_sve_asrd>;
/external/llvm-project/llvm/lib/Target/AArch64/
AArch64SVEInstrInfo.td
1395 …defm ASRD_ZPmI : sve_int_bin_pred_shift_imm_right< 0b0100, "asrd", "ASRD_ZPZI", int_aarch64_sve…
/external/swiftshader/third_party/llvm-10.0/configs/common/include/llvm/IR/
IntrinsicImpl.inc
526 "llvm.aarch64.sve.asrd",
10659 1, // llvm.aarch64.sve.asrd