/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Support/
D | MachineValueType.h
     160   nxv8i8 = 97,   // n x 8 x i8   enumerator
     457   case nxv8i8:   in getVectorElementType()
     607   case nxv8i8:   in getVectorNumElements()
     745   case nxv8i8:   in getSizeInBits()
    1034   if (NumElements == 8) return MVT::nxv8i8;   in getScalableVectorVT()
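
Note: MVT::nxv8i8 is the machine value type for a scalable vector of 8 x i8; per the ValueTypes.td entries below, its minimum size is 64 bits, scaling with the hardware vector length. A trivial IR sketch of the corresponding IR type (the function name is illustrative):

    ; <vscale x 8 x i8> is the IR type that legalizes to MVT::nxv8i8.
    define <vscale x 8 x i8> @pass_nxv8i8(<vscale x 8 x i8> %v) {
      ret <vscale x 8 x i8> %v
    }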
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | sve-masked-ldst-trunc.ll
      61   …call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %trunc, <vscale x 8 x i8> *%b, i32 2, <vscal…
      70   declare void @llvm.masked.store.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i32, <vscale x 8 x i1…
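
Note: a minimal, self-contained sketch of the pattern this test exercises — truncate an nxv8i16 value and masked-store it as nxv8i8. The declaration matches the one quoted above; function and value names are illustrative:

    declare void @llvm.masked.store.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i32, <vscale x 8 x i1>)

    define void @store_trunc(<vscale x 8 x i16> %val, <vscale x 8 x i8>* %b, <vscale x 8 x i1> %mask) {
      ; Narrow each 16-bit lane to 8 bits, then store only the active lanes.
      %trunc = trunc <vscale x 8 x i16> %val to <vscale x 8 x i8>
      call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %trunc, <vscale x 8 x i8>* %b, i32 2, <vscale x 8 x i1> %mask)
      ret void
    }

On SVE this selects to a predicated ST1B of the halfword-element form (ST1B_H; see the st1 pattern in AArch64SVEInstrInfo.td below).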
|
D | sve-masked-ldst-sext.ll
      60   …%load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 1, <vscale x 8 …
      70   declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>*, i32, <vscale x 8 x i1>, <vsc…
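
Note: a minimal sketch of the sign-extending masked load this test covers (declaration as quoted above; names are illustrative); the zero-extending variant follows in the next entry:

    declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>*, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)

    define <vscale x 8 x i16> @load_sext(<vscale x 8 x i8>* %a, <vscale x 8 x i1> %mask) {
      ; Predicated load of n x 8 x i8, then sign-extend each lane to i16
      ; (folds to LD1SB on SVE).
      %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
      %ext = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
      ret <vscale x 8 x i16> %ext
    }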
|
D | sve-masked-ldst-zext.ll
      66   …%load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %src, i32 1, <vscale x …
      76   declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>*, i32, <vscale x 8 x i1>, <vsc…
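
Note: the zero-extending counterpart, sketched under the same assumptions; on SVE it pairs with LD1B rather than LD1SB:

    declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>*, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)

    define <vscale x 8 x i16> @load_zext(<vscale x 8 x i8>* %src, <vscale x 8 x i1> %mask) {
      ; Same shape as the sext case above, but zero-extending each lane.
      %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %src, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
      %ext = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
      ret <vscale x 8 x i16> %ext
    }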
|
D | sve-intrinsics-ld1.ll
      24   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, i8* %addr)
      33   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, i8* %addr)
     200   declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, i8*)
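
Note: a minimal sketch of the target intrinsic itself, using the declaration quoted above (function name is illustrative):

    declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, i8*)

    define <vscale x 8 x i16> @ld1b_h(<vscale x 8 x i1> %pred, i8* %addr) {
      ; Predicated contiguous load of n x 8 x i8, then widen to i16 lanes.
      %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, i8* %addr)
      %ext = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
      ret <vscale x 8 x i16> %ext
    }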
|
D | sve-intrinsics-ld1-addressing-mode-reg-reg.ll
      25   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, i8* %base)
      35   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, i8* %base)
     217   declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, i8*)
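
Note: the register-register addressing form differs only in how the pointer is built; a hedged sketch (names are illustrative):

    declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, i8*)

    define <vscale x 8 x i8> @ld1b_h_regreg(<vscale x 8 x i1> %pred, i8* %a, i64 %index) {
      ; The byte-indexed GEP folds into the [xN, xM] register-register
      ; addressing mode during selection.
      %base = getelementptr i8, i8* %a, i64 %index
      %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, i8* %base)
      ret <vscale x 8 x i8> %load
    }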
|
D | sve-intrinsics-st1.ll
      27   call void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8> %trunc,
     172   declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i8*)
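
Note: the store-side counterpart of ld1, sketched from the declaration quoted above (names are illustrative):

    declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i8*)

    define void @st1b_h(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, i8* %addr) {
      ; Truncate each i16 lane to i8, then store the active lanes (ST1B).
      %trunc = trunc <vscale x 8 x i16> %data to <vscale x 8 x i8>
      call void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8> %trunc, <vscale x 8 x i1> %pred, i8* %addr)
      ret void
    }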
|
D | sve-intrinsics-loads-ff.ll
      32   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
      42   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base)
      93   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
     103   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base)
     419   declare <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1>, i8*)
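
Note: ldff1 is the first-faulting load — only the first active element may take a fault, and lanes that could not be loaded are recorded in the first-fault register (FFR). A minimal sketch using the declaration above:

    declare <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1>, i8*)

    define <vscale x 8 x i8> @ldff1b_h(<vscale x 8 x i1> %pg, i8* %a) {
      ; First-faulting contiguous load; failing trailing lanes set bits
      ; in the FFR instead of trapping.
      %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
      ret <vscale x 8 x i8> %load
    }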
|
D | sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
     455   %data = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_addr,
     459   call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %data,
     528   %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_addr,
     542   %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_addr,
     559   call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %trunc,
     603   declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8 (<vscale x 8 x i8>* , i32, <vscale x 8 x i1>, <…
     628   declare void @llvm.masked.store.nxv8i8 (<vscale x 8 x i8> , <vscale x 8 x i8>* , i32, <vscale x 8 x…
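
Note: a hedged reconstruction of the shape of these tests — a masked load and store through a register-register (byte-offset) address; the exact helper and value names in the file differ:

    declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>*, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
    declare void @llvm.masked.store.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i32, <vscale x 8 x i1>)

    define void @masked_ldst_regreg(i8* %base, <vscale x 8 x i1> %mask, i64 %offset) {
      ; A plain byte GEP feeds both masked operations, so selection can use
      ; the [xN, xM] register-register form.
      %gep = getelementptr i8, i8* %base, i64 %offset
      %base_addr = bitcast i8* %gep to <vscale x 8 x i8>*
      %data = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_addr, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
      call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %data, <vscale x 8 x i8>* %base_addr, i32 1, <vscale x 8 x i1> %mask)
      ret void
    }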
|
D | sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
     469   %data = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_load,
     474   call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %data,
     542   %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_load,
     555   %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_load,
     571   call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %trunc,
     615   declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8 (<vscale x 8 x i8>* , i32, <vscale x 8 x i1>, <…
     640   declare void @llvm.masked.store.nxv8i8 (<vscale x 8 x i8> , <vscale x 8 x i8>* , i32, <vscale x 8 x…
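
Note: the register-immediate variant indexes whole scalable registers instead; a hedged sketch (the in-range immediates for these SVE forms are -8..7, in multiples of the vector length):

    define void @masked_ldst_regimm(<vscale x 8 x i8>* %base, <vscale x 8 x i1> %mask) {
      ; GEP over <vscale x 8 x i8> units, so the offset can fold into the
      ; [xN, #imm, mul vl] addressing mode.
      %addr = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base, i64 -8
      %data = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %addr, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
      call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %data, <vscale x 8 x i8>* %addr, i32 1, <vscale x 8 x i1> %mask)
      ret void
    }

    declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>*, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
    declare void @llvm.masked.store.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i32, <vscale x 8 x i1>)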
|
D | sve-intrinsics-st1-addressing-mode-reg-reg.ll
      30   call void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8> %trunc,
     186   declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i8*)
|
D | sve-split-int-reduce.ll
      17   %res = call i8 @llvm.vector.reduce.and.nxv8i8(<vscale x 8 x i8> %a)
     212   declare i8 @llvm.vector.reduce.and.nxv8i8(<vscale x 8 x i8>)
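
Note: a minimal sketch of the horizontal reduction this test splits, using the declaration quoted above (function name is illustrative):

    declare i8 @llvm.vector.reduce.and.nxv8i8(<vscale x 8 x i8>)

    define i8 @andv_nxv8i8(<vscale x 8 x i8> %a) {
      ; Bitwise-AND across all n x 8 lanes down to a single i8.
      %res = call i8 @llvm.vector.reduce.and.nxv8i8(<vscale x 8 x i8> %a)
      ret i8 %res
    }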
|
D | sve-intrinsics-loads-nf.ll
      82   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
      94   …%load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base_sca…
     103   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
     115   …%load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base_sca…
     465   declare <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1>, i8*)
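
Note: ldnf1 is the non-faulting load — no element is allowed to trap, and lanes that could not be loaded are reported through the FFR, as with ldff1. A minimal sketch using the declaration above:

    declare <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1>, i8*)

    define <vscale x 8 x i8> @ldnf1b_h(<vscale x 8 x i1> %pg, i8* %a) {
      ; Non-faulting contiguous load of n x 8 x i8 under predicate %pg.
      %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
      ret <vscale x 8 x i8> %load
    }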
|
D | sve-intrinsics-ld1-addressing-mode-reg-imm.ll
     103   …%load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base_scala…
     115   …%load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base_scala…
     303   declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, i8*)
|
D | sve-intrinsics-st1-addressing-mode-reg-imm.ll
      88   …call void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8> %trunc, <vscale x 8 x i1> %pg, i8* %base_…
     231   declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i8*)
|
/external/llvm-project/llvm/include/llvm/Support/ |
D | MachineValueType.h
     180   nxv8i8 = 117,   // n x 8 x i8   enumerator
     525   case nxv8i8:   in getVectorElementType()
     719   case nxv8i8:   in getVectorNumElements()
     876   case nxv8i8:   in getSizeInBits()
    1242   if (NumElements == 8) return MVT::nxv8i8;   in getScalableVectorVT()
|
/external/llvm-project/llvm/lib/Target/RISCV/Utils/ |
D | RISCVBaseInfo.h
     282   constexpr MVT vint8m1_t = MVT::nxv8i8;
|
/external/llvm-project/llvm/lib/Target/RISCV/ |
D | RISCVRegisterInfo.td
     285   // i8 nxv1i8 nxv2i8 nxv4i8 nxv8i8 nxv16i8 nxv32i8 nxv64i8
     294   defvar vint8m1_t = nxv8i8;
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/ |
D | ValueTypes.cpp
     251   case MVT::nxv8i8:   in getTypeForEVT()
|
/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/CodeGen/ |
D | ValueTypes.td
     126   def nxv8i8 : ValueType<64, 97>;   // n x 8 x i8 vector value
|
/external/llvm-project/llvm/include/llvm/CodeGen/ |
D | ValueTypes.td
     147   def nxv8i8 : ValueType<64, 117>;   // n x 8 x i8 vector value
|
/external/llvm-project/llvm/lib/CodeGen/ |
D | ValueTypes.cpp
     402   case MVT::nxv8i8:   in getTypeForEVT()
|
/external/llvm-project/llvm/utils/TableGen/ |
D | CodeGenTarget.cpp
     183   case MVT::nxv8i8: return "MVT::nxv8i8";   in getEnumName()
|
/external/llvm-project/llvm/lib/Target/AArch64/ |
D | AArch64SVEInstrInfo.td
    1667   …def : Pat<(sext_inreg (nxv8i16 ZPR:$Zs), nxv8i8), (SXTB_ZPmZ_H (IMPLICIT_DEF), (PTRUE_H 31), ZPR:…
    2046   defm : ld1<LD1B_H, LD1B_H_IMM, nxv8i16, AArch64ld1_z, nxv8i1, nxv8i8, am_sve_regreg_lsl0>;
    2047   defm : ld1<LD1SB_H, LD1SB_H_IMM, nxv8i16, AArch64ld1s_z, nxv8i1, nxv8i8, am_sve_regreg_lsl0>;
    2089   defm : ldnf1<LDNF1B_H_IMM, nxv8i16, AArch64ldnf1_z, nxv8i1, nxv8i8>;
    2090   defm : ldnf1<LDNF1SB_H_IMM, nxv8i16, AArch64ldnf1s_z, nxv8i1, nxv8i8>;
    2133   defm : ldff1<LDFF1B_H, nxv8i16, AArch64ldff1_z, nxv8i1, nxv8i8, am_sve_regreg_lsl0>;
    2134   defm : ldff1<LDFF1SB_H, nxv8i16, AArch64ldff1s_z, nxv8i1, nxv8i8, am_sve_regreg_lsl0>;
    2176   defm : st1<ST1B_H, ST1B_H_IMM, nxv8i16, AArch64st1, nxv8i1, nxv8i8, am_sve_regreg_lsl0>;
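
Note: the sext_inreg pattern at line 1667 matches IR of the following shape and selects SXTB (sign-extend byte) under an all-true .h predicate; a minimal sketch:

    define <vscale x 8 x i16> @sxtb_h(<vscale x 8 x i16> %a) {
      ; trunc followed by sext folds to (sext_inreg nxv8i16, nxv8i8),
      ; which the pattern rewrites to SXTB_ZPmZ_H with PTRUE_H 31.
      %trunc = trunc <vscale x 8 x i16> %a to <vscale x 8 x i8>
      %ext = sext <vscale x 8 x i8> %trunc to <vscale x 8 x i16>
      ret <vscale x 8 x i16> %ext
    }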
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/ |
D | AArch64SVEInstrInfo.td
    1106   …def : Pat<(sext_inreg (nxv8i16 ZPR:$Zs), nxv8i8), (SXTB_ZPmZ_H (IMPLICIT_DEF), (PTRUE_H 31), ZPR:…
|