/external/llvm-project/llvm/test/CodeGen/AArch64/
sve-gather-scatter-dag-combine.ll
    19:  …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x…
    24:  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %load,
    42:  …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x…
    46:  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %load,
    63:  …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x…
    67:  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %load,
    75:  declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i…
    76:  declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, i32, <vscale x 2 x i1…
sve-int-log-pred.ll
    11:  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.and.nxv2i8(<vscale x 16 x i1> %pg,
    51:  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.orr.nxv2i8(<vscale x 16 x i1> %pg,
    91:  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv2i8(<vscale x 16 x i1> %pg,
    131: %out = call <vscale x 16 x i8> @llvm.aarch64.sve.bic.nxv2i8(<vscale x 16 x i1> %pg,
    168: declare <vscale x 16 x i8> @llvm.aarch64.sve.and.nxv2i8(<vscale x 16 x i1>,<vscale x 16 x i8>,<vsca…
    172: declare <vscale x 16 x i8> @llvm.aarch64.sve.orr.nxv2i8(<vscale x 16 x i1>,<vscale x 16 x i8>,<vsca…
    176: declare <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv2i8(<vscale x 16 x i1>,<vscale x 16 x i8>,<vsca…
    180: declare <vscale x 16 x i8> @llvm.aarch64.sve.bic.nxv2i8(<vscale x 16 x i1>,<vscale x 16 x i8>,<vsca…
sve-intrinsics-gather-loads-64bit-unscaled-offset.ll
    16:  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg,
    74:  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg,
    103: declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1>, i8*, <vscale x 2 x…
sve2-intrinsics-nt-gather-loads-64bit-unscaled-offset.ll
    16:  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnt1.gather.nxv2i8(<vscale x 2 x i1> %pg,
    74:  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnt1.gather.nxv2i8(<vscale x 2 x i1> %pg,
    103: declare <vscale x 2 x i8> @llvm.aarch64.sve.ldnt1.gather.nxv2i8(<vscale x 2 x i1>, i8*, <vscale x 2…
sve-intrinsics-ff-gather-loads-64bit-unscaled-offset.ll
    16:  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.nxv2i8(<vscale x 2 x i1> %pg,
    74:  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.nxv2i8(<vscale x 2 x i1> %pg,
    103: declare <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.nxv2i8(<vscale x 2 x i1>, i8*, <vscale x 2…
sve-intrinsics-scatter-stores-64bit-unscaled-offset.ll
    17:  call void @llvm.aarch64.sve.st1.scatter.nxv2i8(<vscale x 2 x i8> %data_trunc,
    70:  declare void @llvm.aarch64.sve.st1.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*, <vscal…
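In the scatter-store tests, the i8 data is first produced by truncating wider lanes (the %data_trunc above) and then scattered to a scalar base plus a vector of offsets. A minimal LLVM IR sketch of that pattern, assuming the truncated declare above ends in a <vscale x 2 x i64> offset vector; the function name is illustrative:

    ; Truncate 64-bit lanes to i8, then scatter one byte per active lane
    ; to %base + %offsets[lane].
    define void @sst1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg,
                         i8* %base, <vscale x 2 x i64> %offsets) {
      %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
      call void @llvm.aarch64.sve.st1.scatter.nxv2i8(<vscale x 2 x i8> %data_trunc,
                                                     <vscale x 2 x i1> %pg,
                                                     i8* %base,
                                                     <vscale x 2 x i64> %offsets)
      ret void
    }
    declare void @llvm.aarch64.sve.st1.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*, <vscale x 2 x i64>)
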
sve2-intrinsics-nt-scatter-stores-64bit-unscaled-offset.ll
    17:  call void @llvm.aarch64.sve.stnt1.scatter.nxv2i8(<vscale x 2 x i8> %data_trunc,
    70:  declare void @llvm.aarch64.sve.stnt1.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*, <vsc…
sve-masked-ldst-trunc.ll
    16:  …call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %trunc, <vscale x 2 x i8> *%b, i32 8, <vscal…
    65:  declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, i32, <vscale x 2 x i1…
sve-masked-gather-legalize.ll
    18:  …%data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale…
    37:  …%data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale…
    96:  declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*>, i32, <vscale x 2 x i1>, <v…
sve2-intrinsics-nt-scatter-stores-32bit-unscaled-offset.ll
    64:  declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*,…
    66:  declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*,…
sve-masked-ldst-sext.ll
    15:  …%load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8> *%a, i32 1, <vscale x 2 …
    65:  declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>*, i32, <vscale x 2 x i1>, <vsc…
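sve-masked-ldst-sext.ll pairs the masked load with a sign-extension, which is what lets the backend fold both into a single extending load (an ld1sb, in the sext case). A minimal sketch using the standard llvm.masked.load operands (pointer, alignment, mask, passthru); the function name is illustrative:

    define <vscale x 2 x i64> @masked_sload_nxv2i8(<vscale x 2 x i8>* %a, <vscale x 2 x i1> %mask) {
      ; Predicated load of 2 x i8 per vector granule; inactive lanes get the
      ; undef passthru.
      %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
      ; Sign-extend into the 64-bit container lanes.
      %ext = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
      ret <vscale x 2 x i64> %ext
    }
    declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>*, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
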
sve-masked-ldst-zext.ll
    16:  …%load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %src, i32 1, <vscale x …
    71:  declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>*, i32, <vscale x 2 x i1>, <vsc…
sve-masked-gather-64b-unscaled.ll
    11:  …%vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale…
    90:  …%vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale…
    119: declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*>, i32, <vscale x 2 x i1>, <v…
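Both masked-gather files above call the generic gather intrinsic on a scalable vector of pointers rather than an AArch64-specific intrinsic. A minimal sketch, assuming the standard llvm.masked.gather operands (pointers, alignment, mask, passthru); the function name is illustrative:

    define <vscale x 2 x i8> @masked_gather_nxv2i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %mask) {
      ; Gather one i8 from each active pointer lane; alignment 1, undef passthru.
      %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
      ret <vscale x 2 x i8> %vals
    }
    declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
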
sve-intrinsics-gather-loads-32bit-unscaled-offsets.ll
    40:  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8(<vscale x 2 x i1> %pg,
    51:  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8(<vscale x 2 x i1> %pg,
    240: %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8(<vscale x 2 x i1> %pg,
    251: %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8(<vscale x 2 x i1> %pg,
    328: declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8(<vscale x 2 x i1>, i8*, <vscale …
    330: declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8(<vscale x 2 x i1>, i8*, <vscale …
sve-intrinsics-ff-gather-loads-32bit-unscaled-offsets.ll
    40:  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i8(<vscale x 2 x i1> %pg,
    51:  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i8(<vscale x 2 x i1> %pg,
    240: %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i8(<vscale x 2 x i1> %pg,
    251: %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i8(<vscale x 2 x i1> %pg,
    328: declare <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i8(<vscale x 2 x i1>, i8*, <vscal…
    330: declare <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i8(<vscale x 2 x i1>, i8*, <vscal…
sve-intrinsics-scatter-stores-32bit-unscaled-offsets.ll
    43:  call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i8(<vscale x 2 x i8> %data_trunc,
    55:  call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i8(<vscale x 2 x i8> %data_trunc,
    228: declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*, <…
    230: declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*, <…
sve-intrinsics-ff-gather-loads-vector-base-scalar-offset.ll
    28:  …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale…
    131: …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale…
    175: declare <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x…
sve-intrinsics-ld1.ll
    60:  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pred, i8* %addr)
    69:  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pred, i8* %addr)
    210: declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1>, i8*)
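The ld1 entry is the simplest of the set, and its declare at line 210 is visible in full: a predicate plus a scalar base pointer. A minimal sketch built from that signature (function name illustrative):

    define <vscale x 2 x i8> @ld1b_nxv2i8(<vscale x 2 x i1> %pred, i8* %addr) {
      ; Predicated contiguous load of 2 x i8 per vector granule from %addr.
      %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pred, i8* %addr)
      ret <vscale x 2 x i8> %load
    }
    declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1>, i8*)
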
sve2-intrinsics-nt-gather-loads-vector-base-scalar-offset.ll
    28:  …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale…
    133: …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale…
    177: declare <vscale x 2 x i8> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x…
sve-intrinsics-gather-loads-vector-base-scalar-offset.ll
    28:  …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x…
    131: …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x…
    175: declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i…
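The *.gather.scalar.offset intrinsics invert the usual gather addressing: the vector operand carries per-lane base addresses and the scalar operand is a shared offset. A minimal sketch, assuming the truncated declare above ends (<vscale x 2 x i1>, <vscale x 2 x i64>, i64); the function name and offset value are illustrative:

    define <vscale x 2 x i8> @gather_vecbase_nxv2i8(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %bases) {
      ; Load one i8 from each active lane's base address plus a scalar offset of 16.
      %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %bases, i64 16)
      ret <vscale x 2 x i8> %load
    }
    declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)
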
sve-intrinsics-gather-loads-vector-base-imm-offset.ll
    28:  …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x…
    132: …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x…
    197: …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x…
    310: …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x…
    357: declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i…
sve-intrinsics-ff-gather-loads-vector-base-imm-offset.ll
    28:  …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale…
    132: …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale…
    197: …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale…
    310: …%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale…
    357: declare <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x…
sve-split-store.ll
    68:  …call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8> *%a, i32 1, <vscale…
    131: declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, i32, <vscale x 2 x i1…
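sve-split-store.ll writes an unpacked nxv2i8 vector through the generic masked-store intrinsic, matching the declare at its line 131. A minimal sketch, assuming the standard llvm.masked.store operands (data, pointer, alignment, mask); the function name is illustrative:

    define void @masked_store_nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8>* %a, <vscale x 2 x i1> %pg) {
      ; Predicated store of 2 x i8 per vector granule to %a; inactive lanes
      ; are left untouched in memory.
      call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8>* %a, i32 1, <vscale x 2 x i1> %pg)
      ret void
    }
    declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, i32, <vscale x 2 x i1>)
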
/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Support/
MachineValueType.h
    158:  nxv2i8 = 95,   // n x 2 x i8                 [enumerator]
    455:  case nxv2i8:                                 [in getVectorElementType()]
    645:  case nxv2i8:                                 [in getVectorNumElements()]
    716:  case nxv2i8:                                 [in getSizeInBits()]
    1032: if (NumElements == 2) return MVT::nxv2i8;    [in getScalableVectorVT()]
/external/llvm-project/llvm/include/llvm/Support/
MachineValueType.h
    178:  nxv2i8 = 115,  // n x 2 x i8                 [enumerator]
    523:  case nxv2i8:                                 [in getVectorElementType()]
    762:  case nxv2i8:                                 [in getVectorNumElements()]
    841:  case nxv2i8:                                 [in getSizeInBits()]
    1240: if (NumElements == 2) return MVT::nxv2i8;    [in getScalableVectorVT()]