/external/llvm-project/llvm/test/CodeGen/AArch64/
D | sve-intrinsic-opts-ptest.ll
    25    ; OPT-NEXT: %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x …
    29    %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %a)
    36    ; OPT: %mask = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 0)
    38    ; OPT-NEXT: %[[OUT:.*]] = call i1 @llvm.aarch64.sve.ptest.first.nxv4i1(<vscale x 4 x i1> %mask, <vs…
    40    %mask = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 0)
    41    …%1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %ma…
    42    %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %a)
    72    declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32)
    80    declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
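These checks exercise the SVE ptest folds on nxv4i1 predicates. A minimal, standalone sketch of the optimized form the OPT lines above describe (ptrue feeding ptest directly on nxv4i1, without the convert.to.svbool round trip); the ptest.first declaration is truncated in the listing, so its completed signature here is an assumption:

    ; Assumed completion of the truncated ptest.first declaration above.
    declare i1 @llvm.aarch64.sve.ptest.first.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>)
    declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32)

    ; Tests the first active lane of %a under a ptrue-generated mask
    ; (pattern operand 0, as used in the test quoted above).
    define i1 @ptest_first_example(<vscale x 4 x i1> %a) {
      %mask = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 0)
      %out = call i1 @llvm.aarch64.sve.ptest.first.nxv4i1(<vscale x 4 x i1> %mask, <vscale x 4 x i1> %a)
      ret i1 %out
    }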
D | sve-intrinsics-while.ll
    47    %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i32(i32 %a, i32 %b)
    55    %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i64(i64 %a, i64 %b)
    115   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32 %a, i32 %b)
    123   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i64(i64 %a, i64 %b)
    183   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i32(i32 %a, i32 %b)
    191   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i64(i64 %a, i64 %b)
    251   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i32(i32 %a, i32 %b)
    259   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64 %a, i64 %b)
    283   declare <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i32(i32, i32)
    284   declare <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i64(i64, i64)
    [all …]
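A self-contained sketch of one of these while-compare tests, built directly from the whilele declaration listed above (the function name is illustrative):

    declare <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i32(i32, i32)

    ; Produces a 4-lane predicate whose lanes stay set while the per-lane
    ; induction value starting at %a remains <= %b (WHILELE semantics).
    define <vscale x 4 x i1> @whilele_s_ww(i32 %a, i32 %b) {
      %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i32(i32 %a, i32 %b)
      ret <vscale x 4 x i1> %out
    }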
D | sve2-intrinsics-while.ll
    47    %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i32(i32 %a, i32 %b)
    55    %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i64(i64 %a, i64 %b)
    115   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i32(i32 %a, i32 %b)
    123   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i64(i64 %a, i64 %b)
    183   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32 %a, i32 %b)
    191   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i64(i64 %a, i64 %b)
    251   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i32(i32 %a, i32 %b)
    259   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i64(i64 %a, i64 %b)
    283   declare <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i32(i32, i32)
    284   declare <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i64(i64, i64)
    [all …]
D | sve-int-pred-reduce.ll
    42    %res = call i1 @llvm.vector.reduce.and.i1.nxv4i1(<vscale x 4 x i1> %vec)
    89    %res = call i1 @llvm.vector.reduce.or.i1.nxv4i1(<vscale x 4 x i1> %vec)
    135   %res = call i1 @llvm.vector.reduce.xor.i1.nxv4i1(<vscale x 4 x i1> %vec)
    184   %res = call i1 @llvm.vector.reduce.smax.i1.nxv4i1(<vscale x 4 x i1> %vec)
    231   %res = call i1 @llvm.vector.reduce.smin.i1.nxv4i1(<vscale x 4 x i1> %vec)
    277   %res = call i1 @llvm.vector.reduce.umax.i1.nxv4i1(<vscale x 4 x i1> %vec)
    326   %res = call i1 @llvm.vector.reduce.umin.i1.nxv4i1(<vscale x 4 x i1> %vec)
    344   declare i1 @llvm.vector.reduce.and.i1.nxv4i1(<vscale x 4 x i1> %vec)
    349   declare i1 @llvm.vector.reduce.or.i1.nxv4i1(<vscale x 4 x i1> %vec)
    354   declare i1 @llvm.vector.reduce.xor.i1.nxv4i1(<vscale x 4 x i1> %vec)
    [all …]
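These tests lower generic i1 vector reductions over scalable predicates. A minimal sketch using the and-reduction name exactly as mangled in the listing above (the function name is illustrative):

    declare i1 @llvm.vector.reduce.and.i1.nxv4i1(<vscale x 4 x i1>)

    ; AND-reduces a scalable 4-lane predicate down to a single i1.
    define i1 @reduce_and_nxv4i1(<vscale x 4 x i1> %vec) {
      %res = call i1 @llvm.vector.reduce.and.i1.nxv4i1(<vscale x 4 x i1> %vec)
      ret i1 %res
    }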
D | sve-intrinsic-opts-reinterpret.ll
    32    %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %a)
    33    …%2 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %…
    41    ; OPT: %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x…
    42    ; OPT-NEXT: %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x …
    44    …%1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %…
    45    %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
    102   ; No transform as the reinterprets are converting from different types (nxv2i1 & nxv4i1)
    123   …%b1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %b)
    203   declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
    206   declare <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1>)
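The pattern these tests optimize is a predicate widened to svbool and immediately narrowed back. A minimal sketch of that round trip, using the two declarations listed above; when the types match, the pass is expected to fold the pair away:

    declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
    declare <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1>)

    ; to-svbool followed by from-svbool on the same value is redundant
    ; and should reduce to plain %a.
    define <vscale x 4 x i1> @reinterpret_roundtrip(<vscale x 4 x i1> %a) {
      %wide   = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %a)
      %narrow = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %wide)
      ret <vscale x 4 x i1> %narrow
    }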
D | sve-intrinsics-reinterpret.ll
    34    %out = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %pg)
    69    %out = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
    82    declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
    87    declare <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1>)
D | sve-intrinsics-ldN-reg+reg-addr-mode.ll
    51    %res = call <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i3…
    60    %res = call <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, …
    127   %res = call <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, …
    136   %res = call <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg…
    203   %res = call <vscale x 16 x i32> @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, …
    212   %res = call <vscale x 16 x float> @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg…
    237   declare <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
    241   declare <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
    246   declare <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
    250   declare <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
    [all …]
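A sketch of the ld2 form covered here, with the register-offset address computed by a plain GEP so that instruction selection can pick the [Xn, Xm, LSL #2] addressing mode; the call bodies in the listing are truncated, so the offset handling below is illustrative rather than copied from the test:

    declare <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)

    ; Structured two-register load of i32 elements under predicate %Pg.
    define <vscale x 8 x i32> @ld2_s_regreg(<vscale x 4 x i1> %Pg, i32* %base, i64 %offset) {
      %addr = getelementptr i32, i32* %base, i64 %offset
      %res = call <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32* %addr)
      ret <vscale x 8 x i32> %res
    }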
D | sve-pred-log.ll
    59    …%res = call <vscale x 4 x i1> @llvm.aarch64.sve.and.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x …
    91    …%res = call <vscale x 4 x i1> @llvm.aarch64.sve.bic.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x …
    123   …%res = call <vscale x 4 x i1> @llvm.aarch64.sve.eor.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x …
    155   …%res = call <vscale x 4 x i1> @llvm.aarch64.sve.orr.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x …
    187   …%res = call <vscale x 4 x i1> @llvm.aarch64.sve.orn.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x …
    219   …%res = call <vscale x 4 x i1> @llvm.aarch64.sve.nor.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x …
    251   …%res = call <vscale x 4 x i1> @llvm.aarch64.sve.nand.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x…
    265   declare <vscale x 4 x i1> @llvm.aarch64.sve.and.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vsc…
    269   declare <vscale x 4 x i1> @llvm.aarch64.sve.bic.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vsc…
    273   declare <vscale x 4 x i1> @llvm.aarch64.sve.eor.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vsc…
    [all …]
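A sketch of one of the zeroing predicate-logic calls; the and.z declaration in the listing is truncated, so the third operand type below is assumed to be another nxv4i1 predicate:

    ; Assumed completion of the truncated declaration above.
    declare <vscale x 4 x i1> @llvm.aarch64.sve.and.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)

    ; Zeroing AND: lanes inactive in the governing predicate %Pg come out false.
    define <vscale x 4 x i1> @and_z_s(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pm) {
      %res = call <vscale x 4 x i1> @llvm.aarch64.sve.and.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pm)
      ret <vscale x 4 x i1> %res
    }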
D | sve-intrinsics-pred-creation.ll
    31    %out = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 30)
    45    declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 %pattern)
D | sve-intrinsics-pred-operations.ll
    139   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.pnext.nxv4i1(<vscale x 4 x i1> %pg,
    170   %res = call <vscale x 4 x i1> @llvm.aarch64.sve.punpkhi.nxv4i1(<vscale x 8 x i1> %a)
    198   %res = call <vscale x 4 x i1> @llvm.aarch64.sve.punpklo.nxv4i1(<vscale x 8 x i1> %a)
    222   declare <vscale x 4 x i1> @llvm.aarch64.sve.pnext.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>)
    226   declare <vscale x 4 x i1> @llvm.aarch64.sve.punpkhi.nxv4i1(<vscale x 8 x i1>)
    230   declare <vscale x 4 x i1> @llvm.aarch64.sve.punpklo.nxv4i1(<vscale x 8 x i1>)
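A sketch of the predicate unpack operation, using the punpklo declaration exactly as listed above (function name illustrative):

    declare <vscale x 4 x i1> @llvm.aarch64.sve.punpklo.nxv4i1(<vscale x 8 x i1>)

    ; Returns the low 4-lane half of an 8-lane predicate (PUNPKLO).
    define <vscale x 4 x i1> @punpklo_s(<vscale x 8 x i1> %a) {
      %lo = call <vscale x 4 x i1> @llvm.aarch64.sve.punpklo.nxv4i1(<vscale x 8 x i1> %a)
      ret <vscale x 4 x i1> %lo
    }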
D | sve-intrinsics-loads.ll
    333   …%res = call <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1> %pred,…
    341   …%res = call <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1> %pre…
    413   …%res = call <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1> %pre…
    421   …%res = call <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1> %p…
    493   …%res = call <vscale x 16 x i32> @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1> %pre…
    501   …%res = call <vscale x 16 x float> @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1> %p…
    546   declare <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
    550   declare <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
    555   declare <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
    559   declare <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
    [all …]
D | sve-intrinsics-uqdec.ll
    188   %out = call i32 @llvm.aarch64.sve.uqdecp.n32.nxv4i1(i32 %a, <vscale x 4 x i1> %b)
    220   %out = call i64 @llvm.aarch64.sve.uqdecp.n64.nxv4i1(i64 %a, <vscale x 4 x i1> %b)
    250   declare i32 @llvm.aarch64.sve.uqdecp.n32.nxv4i1(i32, <vscale x 4 x i1>)
    255   declare i64 @llvm.aarch64.sve.uqdecp.n64.nxv4i1(i64, <vscale x 4 x i1>)
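A sketch of the saturating count-based decrement, with the signature taken verbatim from the declare lines above:

    declare i32 @llvm.aarch64.sve.uqdecp.n32.nxv4i1(i32, <vscale x 4 x i1>)

    ; Unsigned saturating decrement of %a by the number of active lanes in %b.
    define i32 @uqdecp_n32_s(i32 %a, <vscale x 4 x i1> %b) {
      %out = call i32 @llvm.aarch64.sve.uqdecp.n32.nxv4i1(i32 %a, <vscale x 4 x i1> %b)
      ret i32 %out
    }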
D | sve-intrinsics-contiguous-prefetches.ll
    162   tail call void @llvm.aarch64.sve.prf.nxv4i1(<vscale x 4 x i1> %pg, i8* %base, i32 0)
    195   tail call void @llvm.aarch64.sve.prf.nxv4i1(<vscale x 4 x i1> %pg, i8* %addr, i32 13)
    238   tail call void @llvm.aarch64.sve.prf.nxv4i1(<vscale x 4 x i1> %pg, i8* %addr, i32 13)
    255   declare void @llvm.aarch64.sve.prf.nxv4i1(<vscale x 4 x i1>, i8*, i32)
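A sketch of the predicated contiguous prefetch, using the declaration listed above; the trailing i32 is the prfop immediate, and 0 is one of the values the test exercises (its precise meaning is not spelled out in the listing):

    declare void @llvm.aarch64.sve.prf.nxv4i1(<vscale x 4 x i1>, i8*, i32)

    ; Word-granule prefetch of [%base] under predicate %pg, prfop immediate 0.
    define void @prf_s(<vscale x 4 x i1> %pg, i8* %base) {
      tail call void @llvm.aarch64.sve.prf.nxv4i1(<vscale x 4 x i1> %pg, i8* %base, i32 0)
      ret void
    }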
D | sve-intrinsics-uqinc.ll
    188   %out = call i32 @llvm.aarch64.sve.uqincp.n32.nxv4i1(i32 %a, <vscale x 4 x i1> %b)
    220   %out = call i64 @llvm.aarch64.sve.uqincp.n64.nxv4i1(i64 %a, <vscale x 4 x i1> %b)
    250   declare i32 @llvm.aarch64.sve.uqincp.n32.nxv4i1(i32, <vscale x 4 x i1>)
    255   declare i64 @llvm.aarch64.sve.uqincp.n64.nxv4i1(i64, <vscale x 4 x i1>)
D | sve-intrinsics-sqinc.ll
    248   %out = call i32 @llvm.aarch64.sve.sqincp.n32.nxv4i1(i32 %a, <vscale x 4 x i1> %b)
    256   %out = call i32 @llvm.aarch64.sve.sqincp.n32.nxv4i1(i32 %a, <vscale x 4 x i1> %b)
    300   %out = call i64 @llvm.aarch64.sve.sqincp.n64.nxv4i1(i64 %a, <vscale x 4 x i1> %b)
    330   declare i32 @llvm.aarch64.sve.sqincp.n32.nxv4i1(i32, <vscale x 4 x i1>)
    335   declare i64 @llvm.aarch64.sve.sqincp.n64.nxv4i1(i64, <vscale x 4 x i1>)
D | sve-intrinsics-sqdec.ll
    248   %out = call i32 @llvm.aarch64.sve.sqdecp.n32.nxv4i1(i32 %a, <vscale x 4 x i1> %b)
    256   %out = call i32 @llvm.aarch64.sve.sqdecp.n32.nxv4i1(i32 %a, <vscale x 4 x i1> %b)
    300   %out = call i64 @llvm.aarch64.sve.sqdecp.n64.nxv4i1(i64 %a, <vscale x 4 x i1> %b)
    330   declare i32 @llvm.aarch64.sve.sqdecp.n32.nxv4i1(i32, <vscale x 4 x i1>)
    335   declare i64 @llvm.aarch64.sve.sqdecp.n64.nxv4i1(i64, <vscale x 4 x i1>)
D | sve-intrinsics-ldN-reg+imm-addr-mode.ll
    115   %res = call <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i3…
    125   %res = call <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, …
    263   %res = call <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, …
    273   %res = call <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg…
    436   %res = call <vscale x 16 x i32> @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, …
    446   %res = call <vscale x 16 x float> @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg…
    473   declare <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
    477   declare <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
    482   declare <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
    486   declare <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
    [all …]
D | sve-intrinsics-counting-elems.ll
    153   %out = call i64 @llvm.aarch64.sve.cntp.nxv4i1(<vscale x 4 x i1> %pg,
    174   declare i64 @llvm.aarch64.sve.cntp.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>)
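A sketch of the predicate element count, with the signature taken verbatim from the declare line above:

    declare i64 @llvm.aarch64.sve.cntp.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>)

    ; Counts the lanes of %a that are active under the governing predicate %pg.
    define i64 @cntp_s(<vscale x 4 x i1> %pg, <vscale x 4 x i1> %a) {
      %out = call i64 @llvm.aarch64.sve.cntp.nxv4i1(<vscale x 4 x i1> %pg, <vscale x 4 x i1> %a)
      ret i64 %out
    }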
/external/llvm-project/llvm/lib/Target/AArch64/
D | AArch64SVEInstrInfo.td
    472    def : Pat<(nxv4f16 (AArch64fma_p nxv4i1:$P, nxv4f16:$Op1, nxv4f16:$Op2, nxv4f16:$Op3)),
    476    def : Pat<(nxv4f32 (AArch64fma_p nxv4i1:$P, nxv4f32:$Op1, nxv4f32:$Op2, nxv4f32:$Op3)),
    1085   defm : sve_prefetch<int_aarch64_sve_prf, nxv4i1, PRFW_PRI, PRFS_PRR, 2, am_sve_regreg_lsl2>;
    1175   def : Pat<(nxv2i1 (extract_subvector (nxv4i1 PPR:$Ps), (i64 0))),
    1177   def : Pat<(nxv2i1 (extract_subvector (nxv4i1 PPR:$Ps), (i64 2))),
    1179   def : Pat<(nxv4i1 (extract_subvector (nxv8i1 PPR:$Ps), (i64 0))),
    1181   def : Pat<(nxv4i1 (extract_subvector (nxv8i1 PPR:$Ps), (i64 4))),
    1211   def : Pat<(nxv4i1 (concat_vectors nxv2i1:$p1, nxv2i1:$p2)),
    1213   def : Pat<(nxv8i1 (concat_vectors nxv4i1:$p1, nxv4i1:$p2)),
    1423   … ZPR32, ZPR16, int_aarch64_sve_fcvt_f16f32, AArch64fcvtr_mt, nxv4f16, nxv4i1, nxv4f32, ElementS…
    [all …]
D | SVEInstrFormats.td
    289    def _S : sve_int_ptrue<0b10, opc, asm, PPR32, nxv4i1, op>;
    573    def : SVE_2_Op_Pat<nxv4i1, op, nxv4i1, nxv4i1, !cast<Instruction>(NAME # _S)>;
    622    def : Pat<(i32 (op GPR32:$Rn, (nxv4i1 PPRAny:$Pg))),
    624    def : Pat<(i64 (sext (i32 (op GPR32:$Rn, (nxv4i1 PPRAny:$Pg))))),
    644    def : Pat<(i32 (op GPR32:$Rn, (nxv4i1 PPRAny:$Pg))),
    661    def : Pat<(i64 (op GPR64:$Rn, (nxv4i1 PPRAny:$Pg))),
    696    def : SVE_2_Op_Pat<nxv4i32, op, nxv4i32, nxv4i1, !cast<Instruction>(NAME # _S)>;
    736    def : SVE_2_Op_Pat<i64, int_op, nxv4i1, nxv4i1, !cast<Instruction>(NAME # _S)>;
    1190   def : SVE_1_Op_Pat<nxv4i1, op, nxv4i1, !cast<Instruction>(NAME # _S)>;
    1355   def : SVE_3_Op_Pat<nxv4i32, op, nxv4i1, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
    [all …]
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/
D | SVEInstrFormats.td
    282    def _S : sve_int_ptrue<0b10, opc, asm, PPR32, nxv4i1, op>;
    457    def : SVE_2_Op_Pat<nxv4i1, op, nxv4i1, nxv4i1, !cast<Instruction>(NAME # _S)>;
    506    def : Pat<(i32 (op GPR32:$Rn, (nxv4i1 PPRAny:$Pg))),
    508    def : Pat<(i64 (sext (i32 (op GPR32:$Rn, (nxv4i1 PPRAny:$Pg))))),
    528    def : Pat<(i32 (op GPR32:$Rn, (nxv4i1 PPRAny:$Pg))),
    545    def : Pat<(i64 (op GPR64:$Rn, (nxv4i1 PPRAny:$Pg))),
    580    def : SVE_2_Op_Pat<nxv4i32, op, nxv4i32, nxv4i1, !cast<Instruction>(NAME # _S)>;
    620    def : SVE_2_Op_Pat<i64, int_op, nxv4i1, nxv4i1, !cast<Instruction>(NAME # _S)>;
    1030   def : SVE_1_Op_Pat<nxv4i1, op, nxv4i1, !cast<Instruction>(NAME # _S)>;
    1195   def : SVE_3_Op_Pat<nxv4i32, op, nxv4i1, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
    [all …]
D | AArch64SVEInstrInfo.td
    965    …010100, "scvtf", ZPR32, ZPR32, int_aarch64_sve_scvtf, nxv4f32, nxv4i1, nxv4i32, Element…
    966    …010101, "ucvtf", ZPR32, ZPR32, int_aarch64_sve_ucvtf, nxv4f32, nxv4i1, nxv4i32, Element…
    969    …011100, "fcvtzs", ZPR32, ZPR32, int_aarch64_sve_fcvtzs, nxv4i32, nxv4i1, nxv4f32, Element…
    971    …011101, "fcvtzu", ZPR32, ZPR32, int_aarch64_sve_fcvtzu, nxv4i32, nxv4i1, nxv4f32, Element…
    1096   def : Pat<(AArch64ptest (nxv4i1 PPR:$pg), (nxv4i1 PPR:$src)),
    1178   defm : pred_load<nxv4i32, nxv4i1, zext_masked_load_i8, LD1B_S_IMM>;
    1179   defm : pred_load<nxv4i32, nxv4i1, asext_masked_load_i8, LD1SB_S_IMM>;
    1180   defm : pred_load<nxv4i32, nxv4i1, zext_masked_load_i16, LD1H_S_IMM>;
    1181   defm : pred_load<nxv4i32, nxv4i1, asext_masked_load_i16, LD1SH_S_IMM>;
    1182   defm : pred_load<nxv4i32, nxv4i1, nonext_masked_load, LD1W_IMM>;
    [all …]
/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Support/
D | MachineValueType.h
    152    nxv4i1 = 90, // n x 4 x i1 enumerator
    441    case nxv4i1: in getVectorElementType()
    624    case nxv4i1: in getVectorNumElements()
    704    case nxv4i1: return TypeSize::Scalable(4); in getSizeInBits()
    1025   if (NumElements == 4) return MVT::nxv4i1; in getScalableVectorVT()
/external/llvm-project/llvm/include/llvm/Support/
D | MachineValueType.h
    171    nxv4i1 = 109, // n x 4 x i1 enumerator
    508    case nxv4i1: in getVectorElementType()
    738    case nxv4i1: in getVectorNumElements()
    828    case nxv4i1: return TypeSize::Scalable(4); in getSizeInBits()
    1232   if (NumElements == 4) return MVT::nxv4i1; in getScalableVectorVT()
/external/llvm-project/llvm/lib/Target/RISCV/Utils/
D | RISCVBaseInfo.h
    327    constexpr MVT vbool16_t = MVT::nxv4i1;