
Searched refs: v64i1 (Results 1 – 19 of 19), sorted by relevance

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/
X86InstrVecCompiler.td
188 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
191 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
194 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
197 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
242 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
245 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
300 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
303 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
314 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
325 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
[all …]
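
The patterns above match insertion of a narrower mask into an all-zeros v64i1, which is how a mask zero-extension is commonly modelled. A minimal sketch (my own illustration, not from this tree; assumes an AVX-512BW target) of source code that can give rise to such a node:

    #include <immintrin.h>

    // Compile with e.g. -mavx512f -mavx512bw. The 16-wide compare yields a
    // v16i1 mask; widening it to __mmask64 zero-fills the upper lanes, which
    // the backend can express as an insert_subvector into a zeroed v64i1.
    __mmask64 widen_mask(__m512i a, __m512i b) {
      __mmask16 m16 = _mm512_cmpeq_epi32_mask(a, b);  // v16i1 result
      return (__mmask64)m16;                          // zero-extended to v64i1
    }
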
X86CallingConv.td
93 // __mmask64 (v64i1) --> GPR64 (for x64) or 2 x GPR32 (for IA32)
94 CCIfType<[v64i1], CCPromoteToType<i64>>,
172 // __mmask64 (v64i1) --> GPR64 (for x64) or 2 x GPR32 (for IA32)
173 CCIfType<[v64i1], CCPromoteToType<i64>>,
231 CCIfType<[v64i1], CCPromoteToType<v64i8>>,
539 CCIfType<[v64i1], CCPromoteToType<v64i8>>,
824 CCIfType<[v64i1], CCPromoteToType<v64i8>>,
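
The CCIfType/CCPromoteToType entries above are what let a 64-bit mask travel through the C calling convention. A short sketch of the source-level type involved (assumes an AVX-512BW target and the SysV x86-64 ABI):

    #include <immintrin.h>

    // __mmask64 lowers to v64i1; per CCIfType<[v64i1], CCPromoteToType<i64>>
    // it is promoted to i64, so the argument arrives in a 64-bit GPR and the
    // result is returned in one (two 32-bit GPRs on IA-32).
    __mmask64 invert_mask(__mmask64 m) {
      return _knot_mask64(m);   // KNOTQ with AVX-512BW
    }
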
X86RegisterInfo.td
602 def VK64 : RegisterClass<"X86", [v64i1], 64, (add VK32)> {let Size = 64;}
620 def VK64WM : RegisterClass<"X86", [v64i1], 64, (add VK32WM)> {let Size = 64;}
X86FrameLowering.cpp
2037 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1; in assignCalleeSavedSpillSlots()
2118 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1; in spillCalleeSavedRegisters()
2199 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1; in restoreCalleeSavedRegisters()
X86TargetTransformInfo.cpp
1291 { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 }, in getCastInstrCost()
1299 { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 }, in getCastInstrCost()
2702 { ISD::AND, MVT::v64i1, 13 }, in getArithmeticReductionCost()
2708 { ISD::OR, MVT::v64i1, 13 }, in getArithmeticReductionCost()
X86ISelLowering.cpp
1730 addRegisterClass(MVT::v64i1, &X86::VK64RegClass); in X86TargetLowering()
1732 for (auto VT : { MVT::v32i1, MVT::v64i1 }) { in X86TargetLowering()
1752 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom); in X86TargetLowering()
1754 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom); in X86TargetLowering()
2102 if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() && in getRegisterTypeForCallingConv()
2126 if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() && in getNumRegistersForCallingConv()
2152 if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() && in getVectorTypeBreakdownForCallingConv()
2602 (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) { in lowerMasksToReg()
2742 assert(VA.getValVT() == MVT::v64i1 && in LowerReturn()
2910 assert(VA.getValVT() == MVT::v64i1 && in getv64i1Argument()
[all …]
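
The hooks above register v64i1 with the VK64 class and, when BWI is available but 512-bit registers are not used, break a v64i1 across narrower registers for calls. A sketch (my own example, assuming AVX-512BW) of code whose mask result carries this type:

    #include <immintrin.h>

    // A 64-lane byte compare is the canonical producer of a v64i1 value; the
    // __mmask64 it returns is what the lowering hooks above register, split,
    // and pass across call boundaries.
    __mmask64 equal_bytes(__m512i a, __m512i b) {
      return _mm512_cmpeq_epi8_mask(a, b);   // VPCMPEQB zmm, zmm -> k
    }
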
X86InstrAVX512.td
170 def v64i1_info : X86KVectorVTInfo<VK64, VK64WM, v64i1>;
2845 defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64mem>,
2884 def : Pat<(v64i1 (bitconvert (i64 GR64:$src))),
2886 def : Pat<(i64 (bitconvert (v64i1 VK64:$src))),
2935 defm : operation_gpr_mask_copy_lowering<VK64, v64i1>;
3311 defm Q : avx512_mask_setop<VK64, v64i1, Val>;
3343 defm : operation_subvector_mask_lowering<VK1, v1i1, VK64, v64i1>;
3349 defm : operation_subvector_mask_lowering<VK2, v2i1, VK64, v64i1>;
3354 defm : operation_subvector_mask_lowering<VK4, v4i1, VK64, v64i1>;
3358 defm : operation_subvector_mask_lowering<VK8, v8i1, VK64, v64i1>;
[all …]
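
The bitconvert patterns above move 64-bit masks between general-purpose registers and k-registers via KMOVQ. The same round trip at the intrinsics level (a sketch, assuming AVX-512BW):

    #include <immintrin.h>
    #include <cstdint>

    // Round-trips an integer through a mask register, matching the
    // (v64i1 (bitconvert (i64 ...))) and (i64 (bitconvert (v64i1 ...)))
    // patterns listed above.
    uint64_t roundtrip(uint64_t bits) {
      __mmask64 k = _cvtu64_mask64(bits);   // GPR -> k (KMOVQ k, r64)
      return _cvtmask64_u64(k);             // k -> GPR (KMOVQ r64, k)
    }
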
X86ISelDAGToDAG.cpp
4288 case MVT::v64i1: return X86::VK64RegClassID; in tryVPTESTM()
X86InstrCompiler.td
593 defm _VK64 : CMOVrr_PSEUDO<VK64, v64i1>;
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Support/
MachineValueType.h
66 v64i1 = 20, // 64 x i1 enumerator
342 return (SimpleTy == MVT::v64i1 || SimpleTy == MVT::v8i8 || in is64BitVector()
434 case v64i1: in getVectorElementType()
568 case v64i1: in getVectorNumElements()
737 case v64i1: in getSizeInBits()
929 if (NumElements == 64) return MVT::v64i1; in getVectorVT()
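
For reference, the enumerator and query functions above can be exercised directly; a sketch assuming the LLVM 10 headers from this tree are on the include path:

    #include "llvm/Support/MachineValueType.h"
    #include <cassert>

    void check_v64i1() {
      llvm::MVT VT = llvm::MVT::getVectorVT(llvm::MVT::i1, 64); // MVT::v64i1
      assert(VT == llvm::MVT::v64i1);
      assert(VT.getVectorElementType() == llvm::MVT::i1);
      assert(VT.getVectorNumElements() == 64);
      assert(VT.getSizeInBits() == 64);   // 64 x i1 occupies 64 bits
      assert(VT.is64BitVector());
    }
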
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Hexagon/
HexagonRegisterInfo.td
302 [v64i1, v128i1, v64i1]>;
304 [v32i1, v64i1, v32i1]>;
HexagonISelLoweringHVX.cpp
41 addRegisterClass(MVT::v64i1, &Hexagon::HvxQRRegClass); in initializeHVXLowering()
51 addRegisterClass(MVT::v64i1, &Hexagon::HvxQRRegClass); in initializeHVXLowering()
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/X86/
X86GenCallingConv.inc
531 if (LocVT == MVT::v64i1) {
849 if (LocVT == MVT::v64i1) {
1529 if (LocVT == MVT::v64i1) {
1918 if (LocVT == MVT::v64i1) {
2389 if (LocVT == MVT::v64i1) {
2776 if (LocVT == MVT::v64i1) {
3094 if (LocVT == MVT::v64i1) {
3667 if (LocVT == MVT::v64i1) {
3881 if (LocVT == MVT::v64i1) {
X86GenGlobalISel.inc
3981 … (and:{ *:[v64i1] } VK64:{ *:[v64i1] }:$src1, VK64:{ *:[v64i1] }:$src2) => (KANDQrr:{ *:[v64i1] …
5555 …// (or:{ *:[v64i1] } VK64:{ *:[v64i1] }:$src1, VK64:{ *:[v64i1] }:$src2) => (KORQrr:{ *:[v64i1] …
6877 … (xor:{ *:[v64i1] } VK64:{ *:[v64i1] }:$src1, VK64:{ *:[v64i1] }:$src2) => (KXORQrr:{ *:[v64i1] …
6954 …// (concat_vectors:{ *:[v64i1] } VK32:{ *:[v32i1] }:$src1, VK32:{ *:[v32i1] }:$src2) => (KUNPCKD…
7144 …// (bitconvert:{ *:[i64] } VK64:{ *:[v64i1] }:$src) => (COPY_TO_REGCLASS:{ *:[i64] } VK64:{ *:[v…
7154 … (bitconvert:{ *:[f64] } VK64:{ *:[v64i1] }:$src) => (VMOV64toSDZrr:{ *:[f64] } (KMOVQrk:{ *:[i6…
7262 …// (bitconvert:{ *:[v64i1] } GR64:{ *:[i64] }:$src) => (COPY_TO_REGCLASS:{ *:[v64i1] } GR64:{ *:…
7270 …// (bitconvert:{ *:[v64i1] } FR64X:{ *:[f64] }:$src) => (KMOVQkr:{ *:[v64i1] } (VMOVSDto64Zrr:{ …
10649 …// (anyext:{ *:[v64i8] } VK64:{ *:[v64i1] }:$src) => (VPMOVM2BZrr:{ *:[v64i8] } VK64:{ *:[v64i1]…
11454 …// (sext:{ *:[v64i8] } VK64:{ *:[v64i1] }:$src) => (VPMOVM2BZrr:{ *:[v64i8] } VK64:{ *:[v64i1] }…
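
The imported patterns above select KANDQrr/KORQrr/KXORQrr for v64i1 logic. The same operations at the intrinsics level (a sketch, assuming AVX-512BW):

    #include <immintrin.h>

    // 64-bit mask arithmetic; on AVX-512BW targets these map onto the
    // KANDQ/KORQ/KXORQ selections listed above.
    __mmask64 combine(__mmask64 a, __mmask64 b, __mmask64 c) {
      return _kxor_mask64(_kand_mask64(a, b), _kor_mask64(a, c));
    }
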
X86GenFastISel.inc
333 case MVT::v64i1: return fastEmit_ISD_ANY_EXTEND_MVT_v64i1_r(RetVT, Op0, Op0IsKill);
1549 case MVT::v64i1: return fastEmit_ISD_SIGN_EXTEND_MVT_v64i1_r(RetVT, Op0, Op0IsKill);
7202 if (RetVT.SimpleTy != MVT::v64i1)
7375 case MVT::v64i1: return fastEmit_ISD_AND_MVT_v64i1_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
8209 if (RetVT.SimpleTy != MVT::v64i1)
8382 case MVT::v64i1: return fastEmit_ISD_OR_MVT_v64i1_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
10359 if (RetVT.SimpleTy != MVT::v64i1)
10532 case MVT::v64i1: return fastEmit_ISD_XOR_MVT_v64i1_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
12373 if (RetVT.SimpleTy != MVT::v64i1)
12386 case MVT::v64i1: return fastEmit_X86ISD_KADD_MVT_v64i1_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
[all …]
X86GenRegisterInfo.inc
4490 /* 36 */ MVT::v64i1, MVT::Other,
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/
ValueTypes.cpp
165 case MVT::v64i1: return VectorType::get(Type::getInt1Ty(Context), 64); in getTypeForEVT()
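
The case above maps the machine value type back to an IR type; a sketch (assumes the LLVM 10 headers and an existing LLVMContext):

    #include "llvm/CodeGen/ValueTypes.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"

    llvm::Type *v64i1Type(llvm::LLVMContext &Ctx) {
      // Equivalent to the case above: MVT::v64i1 -> <64 x i1>.
      return llvm::EVT(llvm::MVT::v64i1).getTypeForEVT(Ctx);
    }
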
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/CodeGen/
ValueTypes.td
41 def v64i1 : ValueType<64 , 20>; // 64 x i1 vector value
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/IR/
Intrinsics.td
234 def llvm_v64i1_ty : LLVMType<v64i1>; // 64 x i1