/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | arm64-vfloatintrinsics.ll |
  270 %v8f16 = type <8 x half>
  273 define %v8f16 @test_v8f16.sqrt(%v8f16 %a) {
  284 %1 = call %v8f16 @llvm.sqrt.v8f16(%v8f16 %a)
  285 ret %v8f16 %1
  287 define %v8f16 @test_v8f16.powi(%v8f16 %a, i32 %b) {
  293 %1 = call %v8f16 @llvm.powi.v8f16(%v8f16 %a, i32 %b)
  294 ret %v8f16 %1
  298 define %v8f16 @test_v8f16.sin(%v8f16 %a) {
  304 %1 = call %v8f16 @llvm.sin.v8f16(%v8f16 %a)
  305 ret %v8f16 %1
  [all …]
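The matches above show the shape of this file: a %v8f16 type alias plus one test function per libm-style intrinsic. A minimal self-contained sketch of one such test (the RUN line is illustrative, not the file's actual harness):

  ; RUN: llc -mtriple=arm64-apple-ios7.0 -mattr=+fullfp16 %s -o -
  %v8f16 = type <8 x half>

  define %v8f16 @test_v8f16.sqrt(%v8f16 %a) {
    ; With +fullfp16 this can select a single fsqrt.8h; without it the
    ; v8f16 is promoted and scalarised through f32.
    %1 = call %v8f16 @llvm.sqrt.v8f16(%v8f16 %a)
    ret %v8f16 %1
  }

  declare %v8f16 @llvm.sqrt.v8f16(%v8f16)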
|
D | fp16-vector-load-store.ll |
  53 ; Load to one lane of v8f16
  72 ; Simple store of v8f16
  130 ; Store from one lane of v8f16
  186 declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0v8f16(<8 x half>*)
  187 declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0v8f16(<8 x half>*)
  188 declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4.v8f16.p0v8f16(<8 …
  189 declare void @llvm.aarch64.neon.st2.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>*)
  190 declare void @llvm.aarch64.neon.st3.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>*)
  191 declare void @llvm.aarch64.neon.st4.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>, <…
  247 ; Load 2 x v8f16 with de-interleaving
  [all …]
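The ld2/ld3/ld4 and st2/st3/st4 declarations above are the structured de-interleaving memory intrinsics. A trimmed sketch of the ld2/st2 pair, built only from the declarations quoted above (typed-pointer-era IR; the function names are mine):

  define { <8 x half>, <8 x half> } @load_2xv8f16(<8 x half>* %p) {
    ; Expected to select "ld2 { v0.8h, v1.8h }, [x0]".
    %r = call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0v8f16(<8 x half>* %p)
    ret { <8 x half>, <8 x half> } %r
  }

  define void @store_2xv8f16(<8 x half> %a, <8 x half> %b, <8 x half>* %p) {
    ; Expected to select "st2 { v0.8h, v1.8h }, [x0]".
    call void @llvm.aarch64.neon.st2.v8f16.p0v8f16(<8 x half> %a, <8 x half> %b, <8 x half>* %p)
    ret void
  }

  declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0v8f16(<8 x half>*)
  declare void @llvm.aarch64.neon.st2.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>*)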
|
D | fp16_intrinsic_vector_2op.ll |
  4 declare <8 x half> @llvm.aarch64.neon.fmulx.v8f16(<8 x half>, <8 x half>)
  6 declare <8 x half> @llvm.aarch64.neon.fminnmp.v8f16(<8 x half>, <8 x half>)
  8 declare <8 x half> @llvm.aarch64.neon.fmaxnmp.v8f16(<8 x half>, <8 x half>)
  10 declare <8 x half> @llvm.aarch64.neon.fabd.v8f16(<8 x half>, <8 x half>)
  12 declare <8 x half> @llvm.fabs.v8f16(<8 x half>)
  46 %vmulx2.i = tail call <8 x half> @llvm.aarch64.neon.fmulx.v8f16(<8 x half> %a, <8 x half> %b)
  64 %vpminnm2.i = tail call <8 x half> @llvm.aarch64.neon.fminnmp.v8f16(<8 x half> %a, <8 x half> %b)
  82 %vpmaxnm2.i = tail call <8 x half> @llvm.aarch64.neon.fmaxnmp.v8f16(<8 x half> %a, <8 x half> %b)
  100 %vabdh_f16 = tail call <8 x half> @llvm.aarch64.neon.fabd.v8f16(<8 x half> %a, <8 x half> %b)
  120 %abs = tail call <8 x half> @llvm.fabs.v8f16(<8 x half> %sub)
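Each declaration above is exercised by a thin wrapper like the call at line 46. Expanded into a runnable unit (assuming the usual +fullfp16 setup of these files):

  define <8 x half> @test_vmulxq_f16(<8 x half> %a, <8 x half> %b) {
  entry:
    ; Should select "fmulx v0.8h, v0.8h, v1.8h".
    %vmulx2.i = tail call <8 x half> @llvm.aarch64.neon.fmulx.v8f16(<8 x half> %a, <8 x half> %b)
    ret <8 x half> %vmulx2.i
  }

  declare <8 x half> @llvm.aarch64.neon.fmulx.v8f16(<8 x half>, <8 x half>)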
|
D | neon-fp16fml.ll |
  7 declare <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float>, <8 x half>, <8 x half>)
  8 declare <4 x float> @llvm.aarch64.neon.fmlsl.v4f32.v8f16(<4 x float>, <8 x half>, <8 x half>)
  9 declare <4 x float> @llvm.aarch64.neon.fmlal2.v4f32.v8f16(<4 x float>, <8 x half>, <8 x half>)
  10 declare <4 x float> @llvm.aarch64.neon.fmlsl2.v4f32.v8f16(<4 x float>, <8 x half>, <8 x half>)
  48 …%vfmlalq_low4.i = call <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float> %a, <8 x half>…
  56 …%vfmlslq_low4.i = call <4 x float> @llvm.aarch64.neon.fmlsl.v4f32.v8f16(<4 x float> %a, <8 x half>…
  64 …%vfmlalq_high4.i = call <4 x float> @llvm.aarch64.neon.fmlal2.v4f32.v8f16(<4 x float> %a, <8 x hal…
  72 …%vfmlslq_high4.i = call <4 x float> @llvm.aarch64.neon.fmlsl2.v4f32.v8f16(<4 x float> %a, <8 x hal…
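These are the FP16FML widening multiply-accumulate intrinsics: f16 halves multiplied and accumulated into f32 lanes. A sketch of the low-half form (RUN line illustrative; +fp16fml is the backend feature name):

  ; RUN: llc -mtriple=aarch64 -mattr=+fp16fml %s -o -
  define <4 x float> @test_vfmlal_low(<4 x float> %a, <8 x half> %b, <8 x half> %c) {
  entry:
    ; Multiplies the low .4h halves of %b and %c, accumulating into %a:
    ; "fmlal v0.4s, v1.4h, v2.4h".
    %r = call <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float> %a, <8 x half> %b, <8 x half> %c)
    ret <4 x float> %r
  }

  declare <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float>, <8 x half>, <8 x half>)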
|
D | fp16_intrinsic_vector_1op.ll |
  4 declare <8 x half> @llvm.nearbyint.v8f16(<8 x half>)
  6 declare <8 x half> @llvm.sqrt.v8f16(<8 x half>)
  22 %vrndi1.i = tail call <8 x half> @llvm.nearbyint.v8f16(<8 x half> %a)
  40 %vsqrt.i = tail call <8 x half> @llvm.sqrt.v8f16(<8 x half> %a)
|
D | fp16-vector-nvcast.ll |
  46 ; Test pattern (v8f16 (AArch64NvCast (v4i32 FPR128:$src)))
  57 ; Test pattern (v8f16 (AArch64NvCast (v8i16 FPR128:$src)))
  68 ; Test pattern (v8f16 (AArch64NvCast (v16i8 FPR128:$src)))
  79 ; Test pattern (v8f16 (AArch64NvCast (v2i64 FPR128:$src)))
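AArch64NvCast is a no-op reinterpretation of a 128-bit register, so at the IR level these tests reduce to plain bitcasts between FPR128 vector types. A sketch of the first pattern (function name mine):

  define <8 x half> @nvcast_v4i32_to_v8f16(<4 x i32> %v) {
    ; Both types live in the same register class, so this should lower
    ; to just "ret" with no conversion instruction.
    %r = bitcast <4 x i32> %v to <8 x half>
    ret <8 x half> %r
  }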
|
/external/llvm/test/CodeGen/AArch64/ |
D | fp16-vector-load-store.ll |
  53 ; Load to one lane of v8f16
  72 ; Simple store of v8f16
  91 ; Store from one lane of v8f16
  108 declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0v8f16(<8 x half>*)
  109 declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0v8f16(<8 x half>*)
  110 declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4.v8f16.p0v8f16(<8 …
  111 declare void @llvm.aarch64.neon.st2.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>*)
  112 declare void @llvm.aarch64.neon.st3.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>*)
  113 declare void @llvm.aarch64.neon.st4.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>, <…
  169 ; Load 2 x v8f16 with de-interleaving
  [all …]
|
D | fp16-vector-nvcast.ll |
  46 ; Test pattern (v8f16 (AArch64NvCast (v4i32 FPR128:$src)))
  57 ; Test pattern (v8f16 (AArch64NvCast (v8i16 FPR128:$src)))
  68 ; Test pattern (v8f16 (AArch64NvCast (v16i8 FPR128:$src)))
  79 ; Test pattern (v8f16 (AArch64NvCast (v2i64 FPR128:$src)))
|
/external/llvm-project/llvm/test/CodeGen/Thumb2/ |
D | mve-vmaxnma-commute.ll |
  106 %aa = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
  107 %bb = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %b)
  108 %c = tail call fast <8 x half> @llvm.maxnum.v8f16(<8 x half> %aa, <8 x half> %bb)
  117 %aa = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
  118 %bb = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %b)
  119 %c = tail call fast <8 x half> @llvm.maxnum.v8f16(<8 x half> %bb, <8 x half> %aa)
  128 %aa = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
  129 %bb = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %b)
  130 %c = tail call fast <8 x half> @llvm.minnum.v8f16(<8 x half> %aa, <8 x half> %bb)
  139 %aa = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
  [all …]
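The triples of matches above all test one fold: maxnum/minnum of two fabs results becomes a single MVE VMAXNMA/VMINNMA, in either operand order (hence "commute"). One arm of it as a runnable unit (RUN line illustrative):

  ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o -
  define arm_aapcs_vfpcc <8 x half> @vmaxnma(<8 x half> %a, <8 x half> %b) {
    ; max(|a|, |b|) should select the destructive "vmaxnma.f16 q0, q1";
    ; swapping %aa and %bb in the maxnum must select the same instruction.
    %aa = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
    %bb = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %b)
    %c = tail call fast <8 x half> @llvm.maxnum.v8f16(<8 x half> %aa, <8 x half> %bb)
    ret <8 x half> %c
  }

  declare <8 x half> @llvm.fabs.v8f16(<8 x half>)
  declare <8 x half> @llvm.maxnum.v8f16(<8 x half>, <8 x half>)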
|
/external/llvm-project/llvm/test/CodeGen/Thumb2/mve-intrinsics/ |
D | vminnmaq.ll |
  10 %0 = tail call <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
  11 %1 = tail call <8 x half> @llvm.fabs.v8f16(<8 x half> %b)
  12 %2 = tail call <8 x half> @llvm.minnum.v8f16(<8 x half> %0, <8 x half> %1)
  16 declare <8 x half> @llvm.fabs.v8f16(<8 x half>) #1
  18 declare <8 x half> @llvm.minnum.v8f16(<8 x half>, <8 x half>) #1
  46 …%2 = tail call <8 x half> @llvm.arm.mve.vminnma.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b…
  52 declare <8 x half> @llvm.arm.mve.vminnma.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>) #2
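Line 46 is the predicated variant; in these intrinsic tests the scalar predicate argument is expanded to a v8i1 via the MVE predicate-conversion intrinsic before the call. Reconstructed as one unit from the declarations at lines 46 and 52:

  define arm_aapcs_vfpcc <8 x half> @vminnmaq_m(<8 x half> %a, <8 x half> %b, i16 zeroext %p) {
  entry:
    %0 = zext i16 %p to i32
    %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
    ; Inactive lanes keep %a; should select "vminnmat.f16 q0, q1" under a VPST.
    %2 = tail call <8 x half> @llvm.arm.mve.vminnma.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b, <8 x i1> %1)
    ret <8 x half> %2
  }

  declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
  declare <8 x half> @llvm.arm.mve.vminnma.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>)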
|
D | vmaxnmaq.ll |
  10 %0 = tail call <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
  11 %1 = tail call <8 x half> @llvm.fabs.v8f16(<8 x half> %b)
  12 %2 = tail call <8 x half> @llvm.maxnum.v8f16(<8 x half> %0, <8 x half> %1)
  16 declare <8 x half> @llvm.fabs.v8f16(<8 x half>) #1
  18 declare <8 x half> @llvm.maxnum.v8f16(<8 x half>, <8 x half>) #1
  46 …%2 = tail call <8 x half> @llvm.arm.mve.vmaxnma.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b…
  52 declare <8 x half> @llvm.arm.mve.vmaxnma.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>) #2
|
D | vrint-predicated.ll |
  14 …%2 = tail call <8 x half> @llvm.arm.mve.vrinta.predicated.v8f16.v8i1(<8 x half> %a, <8 x i1> %1, <…
  42 …%2 = tail call <8 x half> @llvm.arm.mve.vrintm.predicated.v8f16.v8i1(<8 x half> %a, <8 x i1> %1, <…
  70 …%2 = tail call <8 x half> @llvm.arm.mve.vrintn.predicated.v8f16.v8i1(<8 x half> %a, <8 x i1> %1, <…
  98 …%2 = tail call <8 x half> @llvm.arm.mve.vrintp.predicated.v8f16.v8i1(<8 x half> %a, <8 x i1> %1, <…
  126 …%2 = tail call <8 x half> @llvm.arm.mve.vrintz.predicated.v8f16.v8i1(<8 x half> %a, <8 x i1> %1, <…
  154 …%2 = tail call <8 x half> @llvm.arm.mve.vrintx.predicated.v8f16.v8i1(<8 x half> %a, <8 x i1> %1, <…
  174 declare <8 x half> @llvm.arm.mve.vrinta.predicated.v8f16.v8i1(<8 x half>, <8 x i1>, <8 x half>)
  176 declare <8 x half> @llvm.arm.mve.vrintm.predicated.v8f16.v8i1(<8 x half>, <8 x i1>, <8 x half>)
  178 declare <8 x half> @llvm.arm.mve.vrintn.predicated.v8f16.v8i1(<8 x half>, <8 x i1>, <8 x half>)
  180 declare <8 x half> @llvm.arm.mve.vrintp.predicated.v8f16.v8i1(<8 x half>, <8 x i1>, <8 x half>)
  [all …]
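All six rounding modes share one predicated signature: (source, mask, inactive fallback). A sketch of the vrinta case, using the declaration at line 174 (function name mine):

  define arm_aapcs_vfpcc <8 x half> @vrintaq_m(<8 x half> %inactive, <8 x half> %a, i16 zeroext %p) {
  entry:
    %0 = zext i16 %p to i32
    %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
    ; Lanes with a clear mask bit keep the %inactive value.
    %2 = tail call <8 x half> @llvm.arm.mve.vrinta.predicated.v8f16.v8i1(<8 x half> %a, <8 x i1> %1, <8 x half> %inactive)
    ret <8 x half> %2
  }

  declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
  declare <8 x half> @llvm.arm.mve.vrinta.predicated.v8f16.v8i1(<8 x half>, <8 x i1>, <8 x half>)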
|
D | vcvt_anpm.ll |
  10 %0 = tail call <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32 0, <8 x half> %a)
  30 %0 = tail call <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32 1, <8 x half> %a)
  50 %0 = tail call <8 x i16> @llvm.arm.mve.vcvtm.v8i16.v8f16(i32 0, <8 x half> %a)
  70 %0 = tail call <8 x i16> @llvm.arm.mve.vcvtm.v8i16.v8f16(i32 1, <8 x half> %a)
  90 %0 = tail call <8 x i16> @llvm.arm.mve.vcvtn.v8i16.v8f16(i32 0, <8 x half> %a)
  110 %0 = tail call <8 x i16> @llvm.arm.mve.vcvtn.v8i16.v8f16(i32 1, <8 x half> %a)
  130 %0 = tail call <8 x i16> @llvm.arm.mve.vcvtp.v8i16.v8f16(i32 0, <8 x half> %a)
  150 %0 = tail call <8 x i16> @llvm.arm.mve.vcvtp.v8i16.v8f16(i32 1, <8 x half> %a)
  174 …%2 = tail call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> %inactiv…
  202 …%2 = tail call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> %inactiv…
  [all …]
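The leading i32 operand selects signedness, which is why every conversion above appears twice (i32 0 and i32 1). One case as a complete unit (function name mine):

  define arm_aapcs_vfpcc <8 x i16> @vcvtaq_s16_f16(<8 x half> %a) {
  entry:
    ; i32 0 = signed result, "vcvta.s16.f16 q0, q0"; i32 1 gives the .u16 form.
    %0 = tail call <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32 0, <8 x half> %a)
    ret <8 x i16> %0
  }

  declare <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32, <8 x half>)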
|
D | vcmlaq.ll |
  7 declare <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32, <8 x half>, <8 x half>, <8 x half>)
  10 declare <8 x half> @llvm.arm.mve.vcmlaq.predicated.v8f16.v8i1(i32, <8 x half>, <8 x half>, <8 x hal…
  20 …%0 = call <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32 0, <8 x half> %a, <8 x half> %b, <8 x half> %c)
  41 …%0 = call <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32 1, <8 x half> %a, <8 x half> %b, <8 x half> %c)
  61 …%0 = call <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32 2, <8 x half> %a, <8 x half> %b, <8 x half> %c)
  81 …%0 = call <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32 3, <8 x half> %a, <8 x half> %b, <8 x half> %c)
  105 …%2 = call <8 x half> @llvm.arm.mve.vcmlaq.predicated.v8f16.v8i1(i32 0, <8 x half> %a, <8 x half> %…
  133 …%2 = call <8 x half> @llvm.arm.mve.vcmlaq.predicated.v8f16.v8i1(i32 1, <8 x half> %a, <8 x half> %…
  161 …%2 = call <8 x half> @llvm.arm.mve.vcmlaq.predicated.v8f16.v8i1(i32 2, <8 x half> %a, <8 x half> %…
  189 …%2 = call <8 x half> @llvm.arm.mve.vcmlaq.predicated.v8f16.v8i1(i32 3, <8 x half> %a, <8 x half> %…
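The four unpredicated calls differ only in the leading i32, which encodes the rotation (0/1/2/3 for 0/90/180/270 degrees). One rotation as a complete unit; vcmulq.ll below uses the same encoding:

  define arm_aapcs_vfpcc <8 x half> @vcmlaq_rot90(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
  entry:
    ; Complex multiply-accumulate: should select "vcmla.f16 q0, q1, q2, #90".
    %0 = call <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32 1, <8 x half> %a, <8 x half> %b, <8 x half> %c)
    ret <8 x half> %0
  }

  declare <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32, <8 x half>, <8 x half>, <8 x half>)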
|
D | vcmulq.ll |
  7 declare <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32, <8 x half>, <8 x half>)
  10 declare <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32, <8 x half>, <8 x half>, <8 x hal…
  19 %0 = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 0, <8 x half> %a, <8 x half> %b)
  40 %0 = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 1, <8 x half> %a, <8 x half> %b)
  61 %0 = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 2, <8 x half> %a, <8 x half> %b)
  82 %0 = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 3, <8 x half> %a, <8 x half> %b)
  107 …%2 = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 0, <8 x half> %inactive, <8 x …
  135 …%2 = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 1, <8 x half> %inactive, <8 x …
  163 …%2 = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 2, <8 x half> %inactive, <8 x …
  191 …%2 = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 3, <8 x half> %inactive, <8 x …
  [all …]
|
D | vmaxnmq.ll |
  10 %0 = tail call <8 x half> @llvm.maxnum.v8f16(<8 x half> %a, <8 x half> %b)
  14 declare <8 x half> @llvm.maxnum.v8f16(<8 x half>, <8 x half>) #1
  38 …%2 = tail call <8 x half> @llvm.arm.mve.max.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b, i3…
  44 declare <8 x half> @llvm.arm.mve.max.predicated.v8f16.v8i1(<8 x half>, <8 x half>, i32, <8 x i1>, <…
  74 …%2 = tail call <8 x half> @llvm.arm.mve.max.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b, i3…
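The predicated form shares its signature with the integer vmax, so it carries an i32 flag (the signedness bit, which I take to be ignored by the float variant) plus mask and inactive operands; the operand truncated at line 44 should be the <8 x half> inactive value. A sketch built on that reading:

  define arm_aapcs_vfpcc <8 x half> @vmaxnmq_m(<8 x half> %inactive, <8 x half> %a, <8 x half> %b, i16 zeroext %p) {
  entry:
    %0 = zext i16 %p to i32
    %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
    ; Should select "vmaxnmt.f16 q0, q1, q2" under a VPST.
    %2 = tail call <8 x half> @llvm.arm.mve.max.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b, i32 0, <8 x i1> %1, <8 x half> %inactive)
    ret <8 x half> %2
  }

  declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
  declare <8 x half> @llvm.arm.mve.max.predicated.v8f16.v8i1(<8 x half>, <8 x half>, i32, <8 x i1>, <8 x half>)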
|
D | vminnmq.ll |
  10 %0 = tail call <8 x half> @llvm.minnum.v8f16(<8 x half> %a, <8 x half> %b)
  14 declare <8 x half> @llvm.minnum.v8f16(<8 x half>, <8 x half>) #1
  38 …%2 = tail call <8 x half> @llvm.arm.mve.min.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b, i3…
  44 declare <8 x half> @llvm.arm.mve.min.predicated.v8f16.v8i1(<8 x half>, <8 x half>, i32, <8 x i1>, <…
  74 …%2 = tail call <8 x half> @llvm.arm.mve.min.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b, i3…
|
D | vcvt-fp-int.ll |
  14 …%2 = tail call <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16> %a, i32 …
  28 …%2 = tail call <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16> %a, i32 …
  70 …%2 = tail call <8 x i16> @llvm.arm.mve.vcvt.fp.int.predicated.v8i16.v8f16.v8i1(<8 x half> %a, i32 …
  98 …%2 = tail call <8 x i16> @llvm.arm.mve.vcvt.fp.int.predicated.v8i16.v8f16.v8i1(<8 x half> %a, i32 …
  119 declare <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16>, i32, <8 x i1>, …
  121 declare <8 x i16> @llvm.arm.mve.vcvt.fp.int.predicated.v8i16.v8f16.v8i1(<8 x half>, i32, <8 x i1>, …
|
D | vcvt.ll |
  12 declare <8 x half> @llvm.arm.mve.vcvt.fix.v8f16.v8i16(i32, <8 x i16>, i32)
  14 declare <8 x i16> @llvm.arm.mve.vcvt.fix.v8i16.v8f16(i32, <8 x half>, i32)
  16 declare <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32, <8 x half>, <8 x i16>, i…
  18 declare <8 x i16> @llvm.arm.mve.vcvt.fix.predicated.v8i16.v8f16.v8i1(i32, <8 x i16>, <8 x half>, i3…
  75 %0 = call <8 x half> @llvm.arm.mve.vcvt.fix.v8f16.v8i16(i32 0, <8 x i16> %a, i32 1)
  85 %0 = call <8 x half> @llvm.arm.mve.vcvt.fix.v8f16.v8i16(i32 1, <8 x i16> %a, i32 2)
  115 %0 = call <8 x i16> @llvm.arm.mve.vcvt.fix.v8i16.v8f16(i32 0, <8 x half> %a, i32 1)
  125 %0 = call <8 x i16> @llvm.arm.mve.vcvt.fix.v8i16.v8f16(i32 1, <8 x half> %a, i32 2)
  159 …%2 = call <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32 0, <8 x half> %inactiv…
  173 …%2 = call <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32 1, <8 x half> %inactiv…
  [all …]
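vcvt.fix is the fixed-point conversion; its operands read as (signedness flag, input, #fractional-bits). One direction as a unit, straight from the call at line 75:

  define arm_aapcs_vfpcc <8 x half> @vcvtq_n_f16_s16(<8 x i16> %a) {
  entry:
    ; i32 0 (signed) with one fractional bit: "vcvt.f16.s16 q0, q0, #1".
    %0 = call <8 x half> @llvm.arm.mve.vcvt.fix.v8f16.v8i16(i32 0, <8 x i16> %a, i32 1)
    ret <8 x half> %0
  }

  declare <8 x half> @llvm.arm.mve.vcvt.fix.v8f16.v8i16(i32, <8 x i16>, i32)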
|
D | vrintn.ll |
  10 %0 = tail call <8 x half> @llvm.arm.mve.vrintn.v8f16(<8 x half> %a)
  24 declare <8 x half> @llvm.arm.mve.vrintn.v8f16(<8 x half>)
|
/external/llvm-project/llvm/test/CodeGen/Thumb2/LowOverheadLoops/ |
D | exitcount.ll |
  54 …%6 = tail call fast <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %5, i32 2, <8 x i1> %4,…
  56 tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %6, <8 x half>* %7, i32 2, <8 x i1> %4)
  76 …%13 = tail call fast <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16> %1…
  77 …%14 = tail call fast <8 x half> @llvm.arm.mve.mul.predicated.v8f16.v8i1(<8 x half> %13, <8 x half>…
  79 …tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %14, <8 x half>* %15, i32 2, <8 x i1> %…
  92 declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32 immarg, <8 x i1>, <8 x half>)
  94 declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32 immarg, <8 x i1>)
  98 declare <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16>, i32, <8 x i1>, …
  100 declare <8 x half> @llvm.arm.mve.mul.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>, <8 x h…
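The loop body pairs an active-lane mask with masked load/store so the LowOverheadLoops pass can rewrite the loop into tail-predicated DLSTP/LETP form. A stripped-down sketch of one iteration's memory traffic; producing the mask with vctp16 is my assumption here (the real test derives %4 from its loop counter):

  define void @copy_tail(<8 x half>* %src, <8 x half>* %dst, i32 %remaining) {
  entry:
    ; Active-lane mask for a last, possibly partial, vector of 8 halves.
    %mask = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %remaining)
    %v = tail call fast <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %src, i32 2, <8 x i1> %mask, <8 x half> undef)
    tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %v, <8 x half>* %dst, i32 2, <8 x i1> %mask)
    ret void
  }

  declare <8 x i1> @llvm.arm.mve.vctp16(i32)
  declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32 immarg, <8 x i1>, <8 x half>)
  declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32 immarg, <8 x i1>)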
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | armv8.2a-fp16-vector-intrinsics.ll |
  20 %vabs1.i = tail call <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
  219 %vcvtaq_s16_v1.i = tail call <8 x i16> @llvm.arm.neon.vcvtas.v8i16.v8f16(<8 x half> %a)
  237 %vcvtmq_s16_v1.i = tail call <8 x i16> @llvm.arm.neon.vcvtms.v8i16.v8f16(<8 x half> %a)
  255 %vcvtmq_u16_v1.i = tail call <8 x i16> @llvm.arm.neon.vcvtmu.v8i16.v8f16(<8 x half> %a)
  273 %vcvtnq_s16_v1.i = tail call <8 x i16> @llvm.arm.neon.vcvtns.v8i16.v8f16(<8 x half> %a)
  291 %vcvtnq_u16_v1.i = tail call <8 x i16> @llvm.arm.neon.vcvtnu.v8i16.v8f16(<8 x half> %a)
  309 %vcvtpq_s16_v1.i = tail call <8 x i16> @llvm.arm.neon.vcvtps.v8i16.v8f16(<8 x half> %a)
  327 %vcvtpq_u16_v1.i = tail call <8 x i16> @llvm.arm.neon.vcvtpu.v8i16.v8f16(<8 x half> %a)
  363 %vrecpeq_v1.i = tail call <8 x half> @llvm.arm.neon.vrecpe.v8f16(<8 x half> %a)
  381 %vrndq_v1.i = tail call <8 x half> @llvm.arm.neon.vrintz.v8f16(<8 x half> %a)
  [all …]
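These are the 32-bit ARM NEON counterparts of the conversions above, with one intrinsic per rounding mode and signedness rather than a mode flag. The vcvtas case as a unit (triple and attrs illustrative):

  ; RUN: llc -mtriple=armv8.2a-arm-none-eabi -mattr=+neon,+fullfp16 %s -o -
  define <8 x i16> @test_vcvtaq_s16_f16(<8 x half> %a) {
  entry:
    ; Round-to-nearest, ties away: should select "vcvta.s16.f16 q0, q0".
    %r = tail call <8 x i16> @llvm.arm.neon.vcvtas.v8i16.v8f16(<8 x half> %a)
    ret <8 x i16> %r
  }

  declare <8 x i16> @llvm.arm.neon.vcvtas.v8i16.v8f16(<8 x half>)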
|
D | neon-vcadd.ll |
  30 …%vcaddq_rot90_v2.i = tail call <8 x half> @llvm.arm.neon.vcadd.rot90.v8f16(<8 x half> %a, <8 x hal…
  31 …%vcaddq_rot270_v2.i = tail call <8 x half> @llvm.arm.neon.vcadd.rot270.v8f16(<8 x half> %a, <8 x h…
  51 declare <8 x half> @llvm.arm.neon.vcadd.rot90.v8f16(<8 x half>, <8 x half>)
  52 declare <8 x half> @llvm.arm.neon.vcadd.rot270.v8f16(<8 x half>, <8 x half>)
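vcadd is the Armv8.3-A complex-number addition; unlike the MVE intrinsics above, the rotation is baked into the intrinsic name instead of an i32 operand. A sketch from the declaration at line 51:

  define <8 x half> @test_vcaddq_rot90(<8 x half> %a, <8 x half> %b) {
  entry:
    ; Should select "vcadd.f16 q0, q0, q1, #90".
    %r = tail call <8 x half> @llvm.arm.neon.vcadd.rot90.v8f16(<8 x half> %a, <8 x half> %b)
    ret <8 x half> %r
  }

  declare <8 x half> @llvm.arm.neon.vcadd.rot90.v8f16(<8 x half>, <8 x half>)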
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARM/ |
D | ARMInstrMVE.td |
  347 def MVE_v8f16 : MVEVectorVTInfo<v8f16, v4f32, v8i1, 0b01, "f", ?>;
  1093 def : Pat<(v8f16 (fmaxnum (v8f16 MQPR:$val1), (v8f16 MQPR:$val2))),
  1094 (v8f16 (MVE_VMAXNMf16 (v8f16 MQPR:$val1), (v8f16 MQPR:$val2)))>;
  1100 def : Pat<(v8f16 (int_arm_mve_max_predicated (v8f16 MQPR:$val1), (v8f16 MQPR:$val2), (i32 0),
  1101 (v8i1 VCCR:$mask), (v8f16 MQPR:$inactive))),
  1102 (v8f16 (MVE_VMAXNMf16 (v8f16 MQPR:$val1), (v8f16 MQPR:$val2),
  1104 (v8f16 MQPR:$inactive)))>;
  1113 def : Pat<(v8f16 (fminnum (v8f16 MQPR:$val1), (v8f16 MQPR:$val2))),
  1114 (v8f16 (MVE_VMINNMf16 (v8f16 MQPR:$val1), (v8f16 MQPR:$val2)))>;
  1120 def : Pat<(v8f16 (int_arm_mve_min_predicated (v8f16 MQPR:$val1), (v8f16 MQPR:$val2),
  [all …]
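These TableGen Pat defs are the selection rules behind the MVE vmaxnmq/vminnmq tests above: a bare fmaxnum/fminnum on v8f16 selects MVE_VMAXNMf16/MVE_VMINNMf16, and the predicated intrinsic selects the same instruction with a VCCR mask and inactive operand. In IR terms, the unpredicated pattern should fire on nothing more than:

  define arm_aapcs_vfpcc <8 x half> @selects_vmaxnm(<8 x half> %x, <8 x half> %y) {
    ; Matched by the Pat at line 1093: "vmaxnm.f16 q0, q0, q1".
    %r = call <8 x half> @llvm.maxnum.v8f16(<8 x half> %x, <8 x half> %y)
    ret <8 x half> %r
  }

  declare <8 x half> @llvm.maxnum.v8f16(<8 x half>, <8 x half>)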
|
/external/llvm-project/llvm/test/Analysis/CostModel/ARM/ |
D | target-intrinsics.ll |
  10 …nstruction: %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0f16(half* undef)
  17 …nstruction: %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0f16(half* undef)
  24 …nstruction: %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0f16(half* undef)
  30 %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0f16(half* undef)
  37 declare { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0f16(half*)
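The three truncated lines are the cost-model analysis reporting a cost for the same call under different cost kinds; the test itself only needs the bare call. A sketch of such a test (the RUN line is illustrative of how cost-model tests are driven):

  ; RUN: opt -passes="print<cost-model>" -mtriple=thumbv8.1m.main -mattr=+mve.fp %s -disable-output 2>&1
  define void @cost_vld2q(half* %p) {
  entry:
    ; The analysis prints an estimated cost for this de-interleaving load.
    %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0f16(half* %p)
    ret void
  }

  declare { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0f16(half*)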
|