/external/llvm-project/llvm/test/MC/AArch64/SVE/

cmplo.s:
    10: cmplo p0.b, p0/z, z0.b, z1.b
    16: cmplo p0.h, p0/z, z0.h, z1.h
    22: cmplo p0.s, p0/z, z0.s, z1.s
    28: cmplo p0.d, p0/z, z0.d, z1.d
    34: cmplo p0.b, p0/z, z0.b, z0.d
    40: cmplo p0.h, p0/z, z0.h, z0.d
    46: cmplo p0.s, p0/z, z0.s, z0.d
    52: cmplo p0.b, p0/z, z0.b, #0
    58: cmplo p0.h, p0/z, z0.h, #0
    64: cmplo p0.s, p0/z, z0.s, #0
    [all …]

cmplo-diagnostics.s:
    6:  cmplo p0.b, p8/z, z0.b, z0.b
    15: cmplo p0.b, p0/m, z0.b, z0.b
    24: cmplo p0.b, p0/z, z0.b, z0.h
    29: cmplo p0.h, p0/z, z0.h, z0.s
    34: cmplo p0.s, p0/z, z0.s, z0.h
    39: cmplo p0.d, p0/z, z0.d, z0.s
    44: cmplo p0.b, p0/z, z0.h, z0.h
    49: cmplo p0.h, p0/z, z0.s, z0.s
    54: cmplo p0.s, p0/z, z0.h, z0.h
    59: cmplo p0.d, p0/z, z0.s, z0.s
    [all …]

/external/llvm-project/llvm/test/CodeGen/AArch64/

sve-intrinsics-int-compares-with-imm.ll:
    1045: ; CHECK: cmplo p0.b, p0/z, z0.b, #4
    1055: ; CHECK: cmplo p0.b, p0/z, z0.b, #4
    1067: ; CHECK: cmplo p0.b, p0/z, z0.b, #4
    1071: %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplo.wide.nxv16i8(<vscale x 16 x i1> %pg,
    1079: ; CHECK: cmplo p0.h, p0/z, z0.h, #0
    1089: ; CHECK: cmplo p0.h, p0/z, z0.h, #0
    1101: ; CHECK: cmplo p0.h, p0/z, z0.h, #0
    1105: %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplo.wide.nxv8i16(<vscale x 8 x i1> %pg,
    1113: ; CHECK: cmplo p0.s, p0/z, z0.s, #68
    1123: ; CHECK: cmplo p0.s, p0/z, z0.s, #68
    [all …]

sve-intrinsics-int-compares.ll:
    741: ; CHECK: cmplo p0.b, p0/z, z0.b, z1.d
    743: %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplo.wide.nxv16i8(<vscale x 16 x i1> %pg,
    751: ; CHECK: cmplo p0.h, p0/z, z0.h, z1.d
    753: %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplo.wide.nxv8i16(<vscale x 8 x i1> %pg,
    761: ; CHECK: cmplo p0.s, p0/z, z0.s, z1.d
    763: %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1> %pg,
    1015: declare <vscale x 16 x i1> @llvm.aarch64.sve.cmplo.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x …
    1016: declare <vscale x 8 x i1> @llvm.aarch64.sve.cmplo.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16…
    1017: declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32…

sve-fixed-length-int-immediates.ll:
    281: ; CHECK-NEXT: cmplo p1.d, p0/z, z0.d, #63

/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/

AArch64SVEInstrInfo.td:
    795:  defm CMPLO_WIDE_PPzZZ : sve_int_cmp_1_wide<0b110, "cmplo", int_aarch64_sve_cmplo_wide>;
    806:  defm CMPLO_PPzZI : sve_int_ucmp_vi<0b10, "cmplo", SETULT, null_frag, int_aarch64_sve_cmphi>;
    1037: def : InstAlias<"cmplo $Zd, $Pg/z, $Zm, $Zn",
    1039: def : InstAlias<"cmplo $Zd, $Pg/z, $Zm, $Zn",
    1041: def : InstAlias<"cmplo $Zd, $Pg/z, $Zm, $Zn",
    1043: def : InstAlias<"cmplo $Zd, $Pg/z, $Zm, $Zn",

/external/vixl/test/aarch64/

test-disasm-sve-aarch64.cc:
    2530: COMPARE_PREFIX(cmplo(p9.VnB(), p4.Zeroing(), z4.VnB(), 32),  [in TEST()]
    2532: COMPARE_PREFIX(cmplo(p9.VnH(), p4.Zeroing(), z4.VnH(), 22),  [in TEST()]
    2534: COMPARE_PREFIX(cmplo(p9.VnS(), p4.Zeroing(), z4.VnS(), 15),  [in TEST()]
    2536: COMPARE_PREFIX(cmplo(p9.VnD(), p4.Zeroing(), z4.VnD(), 11),  [in TEST()]
    2629: COMPARE_PREFIX(cmplo(p12.VnB(), p6.Zeroing(), z21.VnB(), z10.VnD()),  [in TEST()]
    2631: COMPARE_PREFIX(cmplo(p12.VnH(), p6.Zeroing(), z21.VnH(), z10.VnD()),  [in TEST()]
    2633: COMPARE_PREFIX(cmplo(p12.VnS(), p6.Zeroing(), z21.VnS(), z10.VnD()),  [in TEST()]
    2669: COMPARE_PREFIX(cmplo(p10.VnB(), p3.Zeroing(), z14.VnB(), z20.VnB()),  [in TEST()]
    2671: COMPARE_PREFIX(cmplo(p10.VnH(), p3.Zeroing(), z14.VnH(), z20.VnH()),  [in TEST()]
    2673: COMPARE_PREFIX(cmplo(p10.VnS(), p3.Zeroing(), z14.VnS(), z20.VnS()),  [in TEST()]
    [all …]

/external/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/AArch64/

AArch64GenAsmMatcher.inc:
    12489: "\005cmple\005cmplo\005cmpls\005cmplt\005cmpne\004cmpp\005cmtst\004cneg\004"
    13343: …{ 705 /* cmplo */, AArch64::CMPLO_PPzZI_H, Convert__SVEPredicateHReg1_0__SVEPredicate3bAnyReg1_1__…
    13344: …{ 705 /* cmplo */, AArch64::CMPHI_PPzZZ_H, Convert__SVEPredicateHReg1_0__SVEPredicate3bAnyReg1_1__…
    13345: …{ 705 /* cmplo */, AArch64::CMPLO_WIDE_PPzZZ_H, Convert__SVEPredicateHReg1_0__SVEPredicate3bAnyReg…
    13346: …{ 705 /* cmplo */, AArch64::CMPLO_PPzZI_S, Convert__SVEPredicateSReg1_0__SVEPredicate3bAnyReg1_1__…
    13347: …{ 705 /* cmplo */, AArch64::CMPHI_PPzZZ_S, Convert__SVEPredicateSReg1_0__SVEPredicate3bAnyReg1_1__…
    13348: …{ 705 /* cmplo */, AArch64::CMPLO_WIDE_PPzZZ_S, Convert__SVEPredicateSReg1_0__SVEPredicate3bAnyReg…
    13349: …{ 705 /* cmplo */, AArch64::CMPLO_PPzZI_D, Convert__SVEPredicateDReg1_0__SVEPredicate3bAnyReg1_1__…
    13350: …{ 705 /* cmplo */, AArch64::CMPHI_PPzZZ_D, Convert__SVEPredicateDReg1_0__SVEPredicate3bAnyReg1_1__…
    13351: …{ 705 /* cmplo */, AArch64::CMPLO_PPzZI_B, Convert__SVEPredicateBReg1_0__SVEPredicate3bAnyReg1_1__…
    [all …]

/external/llvm-project/llvm/lib/Target/AArch64/

AArch64SVEInstrInfo.td:
    1241: defm CMPLO_WIDE_PPzZZ : sve_int_cmp_1_wide<0b110, "cmplo", int_aarch64_sve_cmplo_wide>;
    1252: defm CMPLO_PPzZI : sve_int_ucmp_vi<0b10, "cmplo", SETULT, SETUGT>;
    1565: def : InstAlias<"cmplo $Zd, $Pg/z, $Zm, $Zn",
    1567: def : InstAlias<"cmplo $Zd, $Pg/z, $Zm, $Zn",
    1569: def : InstAlias<"cmplo $Zd, $Pg/z, $Zm, $Zn",
    1571: def : InstAlias<"cmplo $Zd, $Pg/z, $Zm, $Zn",

/external/vixl/src/aarch64/

assembler-sve-aarch64.cc:
    2780: cmplo(pd, pg, zn, zm);  [in cmp()]
    2914: void Assembler::cmplo(const PRegisterWithLaneSize& pd,  [in cmplo(), vixl::aarch64::Assembler]
    3047: void Assembler::cmplo(const PRegisterWithLaneSize& pd,  [in cmplo(), vixl::aarch64::Assembler]

assembler-aarch64.h:
    3873: void cmplo(const PRegisterWithLaneSize& pd,
    3879: void cmplo(const PRegisterWithLaneSize& pd,

macro-assembler-aarch64.h:
    3857: cmplo(pd, pg, zn, zm);  [in Cmplo()]
    3865: cmplo(pd, pg, zn, static_cast<unsigned>(imm.AsUintN(7)));  [in Cmplo()]
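
The vixl entries above outline the C++ surface for this instruction: two Assembler::cmplo() overloads (register and immediate forms) plus the MacroAssembler's Cmplo() wrappers, which forward to cmplo() and narrow the immediate via imm.AsUintN(7). The following is a minimal sketch of driving that API; anything not quoted in this listing (include path, CPU-feature setup, the exact overload resolution) is an assumption, not taken from these files.

    // Sketch only: exercises the cmplo/Cmplo entry points listed above.
    // Assumes vixl headers are reachable as "aarch64/..." and that the
    // MacroAssembler overloads mirror the assembler-aarch64.h declarations.
    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void EmitCmploExamples(MacroAssembler* masm) {
      // Assumed setup: SVE must be enabled before emitting SVE instructions.
      masm->GetCPUFeatures()->Combine(CPUFeatures::kSVE);

      // Same-size vector form: cmplo pd.b, pg/z, zn.b, zm.b.
      // SVE has no distinct CMPLO vector encoding; this is encoded as cmphi
      // with the sources swapped (see the InstAlias / CMPHI_PPzZZ rows above).
      masm->Cmplo(p10.VnB(), p3.Zeroing(), z14.VnB(), z20.VnB());

      // Wide form (CMPLO_WIDE_PPzZZ): the second source is always a .d vector.
      masm->Cmplo(p12.VnB(), p6.Zeroing(), z21.VnB(), z10.VnD());

      // Immediate form (CMPLO_PPzZI): Cmplo narrows the value to an unsigned
      // 7-bit immediate before calling cmplo(), per macro-assembler-aarch64.h.
      masm->Cmplo(p9.VnB(), p4.Zeroing(), z4.VnB(), 32);
    }

As the AArch64GenAsmMatcher.inc rows show, only the immediate and wide forms map to CMPLO_* opcodes; the same-size register spelling resolves to CMPHI_PPzZZ_* with the operands reversed, which is why the .td files define that case only as an InstAlias.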

/external/swiftshader/third_party/llvm-10.0/configs/common/include/llvm/IR/

IntrinsicImpl.inc:
    546:   "llvm.aarch64.sve.cmplo.wide",
    10679: 1, // llvm.aarch64.sve.cmplo.wide