/external/llvm-project/llvm/test/CodeGen/X86/

vec-strict-512.ll
   6  declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadat…
   8  declare <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float>, <16 x float>, metadat…
  10  declare <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float>, <16 x float>, metadat…
  12  declare <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float>, <16 x float>, metadat…
  14  declare <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float>, metadata, metadata)
  18  declare <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float>, <16 x float>, <16 x fl…
  19  declare <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float>, metadata)
  21  declare <16 x float> @llvm.experimental.constrained.floor.v16f32(<16 x float>, metadata)
  23  declare <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float>, metadata)
  25  declare <16 x float> @llvm.experimental.constrained.rint.v16f32(<16 x float>, metadata, metadata)
  [all …]
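
All of the constrained intrinsics above follow the same calling pattern. A minimal sketch of how a test exercises one of them (the function name and body here are illustrative, not taken from the test):

  declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadata, metadata)

  define <16 x float> @fadd_strict(<16 x float> %a, <16 x float> %b) #0 {
    ; Rounding mode and exception behavior are passed as metadata strings;
    ; callers of constrained intrinsics must themselves be strictfp.
    %r = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(
              <16 x float> %a, <16 x float> %b,
              metadata !"round.dynamic", metadata !"fpexcept.strict") #0
    ret <16 x float> %r
  }
  attributes #0 = { strictfp }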

pr46532.ll
  17  …%3 = tail call <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>* undef, i32 4, <16 x i1…
  18  …tail call void @llvm.masked.store.v16f32.p0v16f32(<16 x float> undef, <16 x float>* nonnull undef,…
  26  declare <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>*, i32 immarg, <16 x i1>, <16 x …
  28  declare void @llvm.masked.store.v16f32.p0v16f32(<16 x float>, <16 x float>*, i32 immarg, <16 x i1>)

vec-copysign-avx512.ll
  23  define <16 x float> @v16f32(<16 x float> %a, <16 x float> %b) nounwind {
  24  ; CHECK-LABEL: v16f32:
  28  %tmp = tail call <16 x float> @llvm.copysign.v16f32( <16 x float> %a, <16 x float> %b )
  61  declare <16 x float> @llvm.copysign.v16f32(<16 x float> %Mag, <16 x float> %Sgn)
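
The hits above nearly spell out the whole test; a hedged reconstruction of the interesting part (the lines between 24 and 28, and the CHECK output, are elided in the hits):

  declare <16 x float> @llvm.copysign.v16f32(<16 x float>, <16 x float>)

  define <16 x float> @v16f32(<16 x float> %a, <16 x float> %b) nounwind {
    ; Each lane takes its magnitude from %a and its sign bit from %b.
    %tmp = tail call <16 x float> @llvm.copysign.v16f32(<16 x float> %a, <16 x float> %b)
    ret <16 x float> %tmp
  }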

vmaskmov-offset.ll
   4  declare void @llvm.masked.store.v16f32.p0v16f32(<16 x float>, <16 x float>*, i32, <16 x i1>)
   5  declare <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>*, i32, <16 x i1>, <16 x float>)
  25  …%masked_loaded_vec = call <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>* nonnull %st…
  26  …call void @llvm.masked.store.v16f32.p0v16f32(<16 x float> %masked_loaded_vec, <16 x float>* nonnul…
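
The masked load/store operand order is easy to misread in truncated hits. A self-contained sketch using the declarations above (the function and value names @masked_copy, %src, %dst, %mask are hypothetical):

  declare <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>*, i32, <16 x i1>, <16 x float>)
  declare void @llvm.masked.store.v16f32.p0v16f32(<16 x float>, <16 x float>*, i32, <16 x i1>)

  define void @masked_copy(<16 x float>* %src, <16 x float>* %dst, <16 x i1> %mask) {
    ; The i32 operand is the alignment. Lanes with a 0 mask bit are not touched
    ; in memory; masked-off load lanes take their value from the passthru
    ; operand (undef here).
    %v = call <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>* %src, i32 4, <16 x i1> %mask, <16 x float> undef)
    call void @llvm.masked.store.v16f32.p0v16f32(<16 x float> %v, <16 x float>* %dst, i32 4, <16 x i1> %mask)
    ret void
  }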

vec-strict-cmp-512.ll
  23  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
  48  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
  73  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
  98  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
 123  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
 148  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
 173  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
 198  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
 223  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
 248  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
  [all …]
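
Every hit cuts off at the open parenthesis, which hides the part that varies between them: the predicate metadata. A sketch of one complete call (illustrative names; the quiet fcmp form is shown, and a signaling fcmps variant also exists):

  declare <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(<16 x float>, <16 x float>, metadata, metadata)

  define <16 x i1> @cmp_oeq_strict(<16 x float> %a, <16 x float> %b) #0 {
    ; The first metadata operand selects the predicate ("oeq", "olt", "une", ...);
    ; the second fixes the exception behavior.
    %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
                 <16 x float> %a, <16 x float> %b,
                 metadata !"oeq", metadata !"fpexcept.strict") #0
    ret <16 x i1> %cond
  }
  attributes #0 = { strictfp }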

wide-fma-contraction.ll
  41  …%ret = tail call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> …
  45  declare <16 x float> @llvm.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>) nounwind readno…
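
A minimal sketch of the pattern this test exercises (the function name @fma_contract is hypothetical):

  declare <16 x float> @llvm.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>)

  define <16 x float> @fma_contract(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
    ; Unlike llvm.fma, llvm.fmuladd permits but does not require fusion, so the
    ; backend may emit mul+add or contract a*b+c into a fused FMA when profitable.
    %ret = tail call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c)
    ret <16 x float> %ret
  }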

pr45443.ll
  12  …%tmp4 = tail call fast <16 x float> @llvm.fma.v16f32(<16 x float> undef, <16 x float> <float 0x3FE…
  20  declare <16 x float> @llvm.fma.v16f32(<16 x float>, <16 x float>, <16 x float>)

pr46820.ll
   5  ; v16f32, v4f32, and v4f32. This loads 24 elements, but the load is aligned
   8  ; v16f32 and require padding.

vec-strict-inttofp-512.ll
   7  declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i1(<16 x i1>, metadata, metada…
   8  declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i1(<16 x i1>, metadata, metada…
   9  declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i8(<16 x i8>, metadata, metada…
  10  declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i8(<16 x i8>, metadata, metada…
  11  declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i16(<16 x i16>, metadata, meta…
  12  declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i16(<16 x i16>, metadata, meta…
  13  declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(<16 x i32>, metadata, meta…
  14  declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i32(<16 x i32>, metadata, meta…
  36  %result = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i1(<16 x i1> %x,
  70  %result = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i1(<16 x i1> %x,
  [all …]
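
The int-to-fp direction carries both metadata operands, since the conversion itself can round. A sketch for the v16i32 source case (illustrative function name):

  declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(<16 x i32>, metadata, metadata)

  define <16 x float> @sitofp_strict(<16 x i32> %x) #0 {
    ; The rounding mode matters here: i32 values above 2^24 cannot be
    ; represented exactly in f32, so the conversion may be inexact.
    %result = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(
                   <16 x i32> %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
    ret <16 x float> %result
  }
  attributes #0 = { strictfp }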

vec-strict-fptoint-512.ll
  22  declare <16 x i32> @llvm.experimental.constrained.fptosi.v16i32.v16f32(<16 x float>, metadata)
  23  declare <16 x i32> @llvm.experimental.constrained.fptoui.v16i32.v16f32(<16 x float>, metadata)
  24  declare <16 x i16> @llvm.experimental.constrained.fptosi.v16i16.v16f32(<16 x float>, metadata)
  25  declare <16 x i16> @llvm.experimental.constrained.fptoui.v16i16.v16f32(<16 x float>, metadata)
  26  declare <16 x i8> @llvm.experimental.constrained.fptosi.v16i8.v16f32(<16 x float>, metadata)
  27  declare <16 x i8> @llvm.experimental.constrained.fptoui.v16i8.v16f32(<16 x float>, metadata)
  28  declare <16 x i1> @llvm.experimental.constrained.fptosi.v16i1.v16f32(<16 x float>, metadata)
  29  declare <16 x i1> @llvm.experimental.constrained.fptoui.v16i1.v16f32(<16 x float>, metadata)
 772  %ret = call <16 x i32> @llvm.experimental.constrained.fptosi.v16i32.v16f32(<16 x float> %a,
 782  %ret = call <16 x i32> @llvm.experimental.constrained.fptoui.v16i32.v16f32(<16 x float> %a,
  [all …]
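
Note the single metadata operand in these declarations: fp-to-int conversions always truncate toward zero, so the constrained form specifies only the exception behavior, not a rounding mode. A sketch (illustrative function name):

  declare <16 x i32> @llvm.experimental.constrained.fptosi.v16i32.v16f32(<16 x float>, metadata)

  define <16 x i32> @fptosi_strict(<16 x float> %a) #0 {
    %ret = call <16 x i32> @llvm.experimental.constrained.fptosi.v16i32.v16f32(
                <16 x float> %a, metadata !"fpexcept.strict") #0
    ret <16 x i32> %ret
  }
  attributes #0 = { strictfp }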

/external/llvm/test/CodeGen/X86/

avx512-round.ll
   6  %res = call <16 x float> @llvm.floor.v16f32(<16 x float> %a)
   9  declare <16 x float> @llvm.floor.v16f32(<16 x float> %p)
  22  %res = call <16 x float> @llvm.ceil.v16f32(<16 x float> %a)
  25  declare <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
  38  %res = call <16 x float> @llvm.trunc.v16f32(<16 x float> %a)
  41  declare <16 x float> @llvm.trunc.v16f32(<16 x float> %p)
  54  %res = call <16 x float> @llvm.rint.v16f32(<16 x float> %a)
  57  declare <16 x float> @llvm.rint.v16f32(<16 x float> %p)
  70  %res = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> %a)
  73  declare <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)
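
Each call/declare pair above lives in a tiny function of the same shape; a sketch for floor (the wrapper name is hypothetical, and with AVX-512 such rounding intrinsics typically lower to a single vrndscaleps of the zmm register):

  declare <16 x float> @llvm.floor.v16f32(<16 x float>)

  define <16 x float> @floor_v16f32(<16 x float> %a) {
    %res = call <16 x float> @llvm.floor.v16f32(<16 x float> %a)
    ret <16 x float> %res
  }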

wide-fma-contraction.ll
  25  …%ret = tail call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> …
  29  declare <16 x float> @llvm.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>) nounwind readno…

/external/llvm-project/llvm/test/Analysis/CostModel/X86/

intrinsic-cost-kinds.ll
  20  declare <16 x float> @llvm.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>)
  23  declare <16 x float> @llvm.log2.v16f32(<16 x float>)
  26  declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadat…
  29  declare <16 x float> @llvm.maximum.v16f32(<16 x float>, <16 x float>)
  40  declare <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*>, i32, <16 x i1>, <16 x float…
  41  declare void @llvm.masked.scatter.v16f32.v16p0f32(<16 x float>, <16 x float*>, i32, <16 x i1>)
  42  declare float @llvm.vector.reduce.fmax.v16f32(<16 x float>)
  43  declare float @llvm.vector.reduce.fmul.v16f32(float, <16 x float>)
  44  declare float @llvm.vector.reduce.fadd.v16f32(float, <16 x float>)
 103  …timated cost of 16 for instruction: %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, …
  [all …]
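
For context, these CostModel tests feed IR like the declarations above through opt's cost-model printer once per cost kind (throughput, latency, code-size, size-latency). A hedged sketch of the harness; the RUN flags are representative of such files, not copied from this one:

  ; RUN: opt < %s -mtriple=x86_64-- -passes="print<cost-model>" -cost-kind=throughput \
  ; RUN:     -disable-output 2>&1 | FileCheck %s

  ; CHECK: Cost Model: Found an estimated cost of {{[0-9]+}} for instruction: %v = call <16 x float> @llvm.fmuladd.v16f32
  define void @fmuladd(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
    %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c)
    ret void
  }
  declare <16 x float> @llvm.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>)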

fround.ll
  21  …ated cost of 172 for instruction: %V16F32 = call <16 x float> @llvm.ceil.v16f32(<16 x float> undef)
  32  …imated cost of 8 for instruction: %V16F32 = call <16 x float> @llvm.ceil.v16f32(<16 x float> undef)
  43  …imated cost of 4 for instruction: %V16F32 = call <16 x float> @llvm.ceil.v16f32(<16 x float> undef)
  54  …imated cost of 1 for instruction: %V16F32 = call <16 x float> @llvm.ceil.v16f32(<16 x float> undef)
  64  %V16F32 = call <16 x float> @llvm.ceil.v16f32(<16 x float> undef)
  79  …ted cost of 172 for instruction: %V16F32 = call <16 x float> @llvm.floor.v16f32(<16 x float> undef)
  90  …mated cost of 8 for instruction: %V16F32 = call <16 x float> @llvm.floor.v16f32(<16 x float> undef)
 101  …mated cost of 4 for instruction: %V16F32 = call <16 x float> @llvm.floor.v16f32(<16 x float> undef)
 112  …mated cost of 1 for instruction: %V16F32 = call <16 x float> @llvm.floor.v16f32(<16 x float> undef)
 122  %V16F32 = call <16 x float> @llvm.floor.v16f32(<16 x float> undef)
  [all …]

arith-fma.ll
  13  …timated cost of 4 for instruction: %V16F32 = call <16 x float> @llvm.fma.v16f32(<16 x float> undef…
  24  …timated cost of 4 for instruction: %V16F32 = call <16 x float> @llvm.fma.v16f32(<16 x float> undef…
  35  …timated cost of 1 for instruction: %V16F32 = call <16 x float> @llvm.fma.v16f32(<16 x float> undef…
  45  %V16F32 = call <16 x float> @llvm.fma.v16f32(<16 x float> undef, <16 x float> undef, <16 x float> …
  58  declare <16 x float> @llvm.fma.v16f32(<16 x float>, <16 x float>, <16 x float>)

fmaxnum-size-latency.ll
  11  …ted cost of 16 for instruction: %V16F32 = call <16 x float> @llvm.maxnum.v16f32(<16 x float> undef…
  19  …ated cost of 6 for instruction: %V16F32 = call <16 x float> @llvm.maxnum.v16f32(<16 x float> undef…
  26  %V16F32 = call <16 x float> @llvm.maxnum.v16f32(<16 x float> undef, <16 x float> undef)
  59  declare <16 x float> @llvm.maxnum.v16f32(<16 x float>, <16 x float>)

fminnum-size-latency.ll
  11  …ted cost of 16 for instruction: %V16F32 = call <16 x float> @llvm.minnum.v16f32(<16 x float> undef…
  19  …ated cost of 6 for instruction: %V16F32 = call <16 x float> @llvm.minnum.v16f32(<16 x float> undef…
  26  %V16F32 = call <16 x float> @llvm.minnum.v16f32(<16 x float> undef, <16 x float> undef)
  59  declare <16 x float> @llvm.minnum.v16f32(<16 x float>, <16 x float>)

fminnum.ll
  19  …ted cost of 16 for instruction: %V16F32 = call <16 x float> @llvm.minnum.v16f32(<16 x float> undef…
  27  …ted cost of 10 for instruction: %V16F32 = call <16 x float> @llvm.minnum.v16f32(<16 x float> undef…
  35  …ated cost of 6 for instruction: %V16F32 = call <16 x float> @llvm.minnum.v16f32(<16 x float> undef…
  43  …ated cost of 2 for instruction: %V16F32 = call <16 x float> @llvm.minnum.v16f32(<16 x float> undef…
  50  %V16F32 = call <16 x float> @llvm.minnum.v16f32(<16 x float> undef, <16 x float> undef)
 101  …cost of 4 for instruction: %V16F32 = call nnan <16 x float> @llvm.minnum.v16f32(<16 x float> undef…
 109  …cost of 2 for instruction: %V16F32 = call nnan <16 x float> @llvm.minnum.v16f32(<16 x float> undef…
 117  …cost of 1 for instruction: %V16F32 = call nnan <16 x float> @llvm.minnum.v16f32(<16 x float> undef…
 124  %V16F32 = call nnan <16 x float> @llvm.minnum.v16f32(<16 x float> undef, <16 x float> undef)
 165  declare <16 x float> @llvm.minnum.v16f32(<16 x float>, <16 x float>)

fmaxnum.ll
  19  …ted cost of 16 for instruction: %V16F32 = call <16 x float> @llvm.maxnum.v16f32(<16 x float> undef…
  27  …ted cost of 10 for instruction: %V16F32 = call <16 x float> @llvm.maxnum.v16f32(<16 x float> undef…
  35  …ated cost of 6 for instruction: %V16F32 = call <16 x float> @llvm.maxnum.v16f32(<16 x float> undef…
  43  …ated cost of 2 for instruction: %V16F32 = call <16 x float> @llvm.maxnum.v16f32(<16 x float> undef…
  50  %V16F32 = call <16 x float> @llvm.maxnum.v16f32(<16 x float> undef, <16 x float> undef)
 101  …cost of 4 for instruction: %V16F32 = call nnan <16 x float> @llvm.maxnum.v16f32(<16 x float> undef…
 109  …cost of 2 for instruction: %V16F32 = call nnan <16 x float> @llvm.maxnum.v16f32(<16 x float> undef…
 117  …cost of 1 for instruction: %V16F32 = call nnan <16 x float> @llvm.maxnum.v16f32(<16 x float> undef…
 124  %V16F32 = call nnan <16 x float> @llvm.maxnum.v16f32(<16 x float> undef, <16 x float> undef)
 165  declare <16 x float> @llvm.maxnum.v16f32(<16 x float>, <16 x float>)
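
Both the fminnum and fmaxnum hits split into a plain group and a cheaper nnan group. A sketch of the flagged form (hypothetical wrapper name):

  declare <16 x float> @llvm.maxnum.v16f32(<16 x float>, <16 x float>)

  define <16 x float> @max_no_nans(<16 x float> %a, <16 x float> %b) {
    ; Without nnan, maxnum must honor NaN semantics (return the non-NaN
    ; operand), which needs extra compare/select work; with nnan the cost
    ; model can assume a bare vector max instruction suffices.
    %r = call nnan <16 x float> @llvm.maxnum.v16f32(<16 x float> %a, <16 x float> %b)
    ret <16 x float> %r
  }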

/external/llvm-project/clang/test/CodeGen/

arm64_32.c
  28  typedef float __attribute__((ext_vector_type(16))) v16f32;  typedef
  29  v16f32 func(v16f32 in) { return in; }  in func()

/external/llvm-project/llvm/test/Analysis/CostModel/ARM/

intrinsic-cost-kinds.ll
  17  declare <16 x float> @llvm.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>)
  20  declare <16 x float> @llvm.log2.v16f32(<16 x float>)
  23  declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadat…
  26  declare <16 x float> @llvm.maximum.v16f32(<16 x float>, <16 x float>)
  37  declare <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*>, i32, <16 x i1>, <16 x float…
  38  declare void @llvm.masked.scatter.v16f32.v16p0f32(<16 x float>, <16 x float*>, i32, <16 x i1>)
  39  declare float @llvm.vector.reduce.fmax.v16f32(<16 x float>)
  72  …stimated cost of 8 for instruction: %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, …
  77  …stimated cost of 3 for instruction: %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, …
  82  …stimated cost of 8 for instruction: %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, …
  [all …]

/external/llvm-project/llvm/test/Transforms/LoopVectorize/X86/

gather_scatter.ll
  38  ; AVX512-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0f3…
  42  ; AVX512-NEXT: call void @llvm.masked.store.v16f32.p0v16f32(<16 x float> [[TMP7]], <16 x float>*…
  53  ; AVX512-NEXT: [[WIDE_MASKED_GATHER_1:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0…
  57  ; AVX512-NEXT: call void @llvm.masked.store.v16f32.p0v16f32(<16 x float> [[TMP17]], <16 x float>…
  68  ; AVX512-NEXT: [[WIDE_MASKED_GATHER_2:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0…
  72  ; AVX512-NEXT: call void @llvm.masked.store.v16f32.p0v16f32(<16 x float> [[TMP27]], <16 x float>…
  83  ; AVX512-NEXT: [[WIDE_MASKED_GATHER_3:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0…
  87  ; AVX512-NEXT: call void @llvm.masked.store.v16f32.p0v16f32(<16 x float> [[TMP37]], <16 x float>…
 211  ; AVX512-NEXT: [[WIDE_MASKED_GATHER7:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0f…
 214  ; AVX512-NEXT: call void @llvm.masked.scatter.v16f32.v16p0f32(<16 x float> [[TMP3]], <16 x float…
  [all …]

/external/llvm/test/Transforms/LoopVectorize/X86/

gather_scatter.ll
  21  ;AVX512: llvm.masked.gather.v16f32
  22  ;AVX512: llvm.masked.store.v16f32
  99  ;AVX512: llvm.masked.gather.v16f32
 100  ;AVX512: llvm.masked.store.v16f32
 174  ;AVX512: llvm.masked.gather.v16f32
 177  ;AVX512: llvm.masked.scatter.v16f32
 236  declare void @llvm.masked.scatter.v16f32(<16 x float>, <16 x float*>, i32, <16 x i1>)
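
Note the naming drift across the two gather_scatter.ll versions: the older test declares @llvm.masked.scatter.v16f32 bare, while newer files mangle in the pointer-vector type (.v16p0f32). A self-contained sketch of the gather/scatter pair using the newer mangling, with hypothetical names (@gather_scatter_copy, %ptrs, %mask):

  declare <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*>, i32, <16 x i1>, <16 x float>)
  declare void @llvm.masked.scatter.v16f32.v16p0f32(<16 x float>, <16 x float*>, i32, <16 x i1>)

  define void @gather_scatter_copy(<16 x float*> %ptrs, <16 x i1> %mask) {
    ; Gather reads one float per enabled lane from that lane's own pointer;
    ; scatter writes them back the same way. On AVX-512 these map onto
    ; vgatherdps/vscatterdps-style instructions.
    %v = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> %ptrs, i32 4, <16 x i1> %mask, <16 x float> undef)
    call void @llvm.masked.scatter.v16f32.v16p0f32(<16 x float> %v, <16 x float*> %ptrs, i32 4, <16 x i1> %mask)
    ret void
  }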

/external/llvm/include/llvm/CodeGen/

MachineValueType.h
 114  v16f32 = 59,   // 16 x f32  enumerator
 264  return (SimpleTy == MVT::v16f32 || SimpleTy == MVT::v8f64 ||  in is512BitVector()
 363  case v16f32: return f32;  in getVectorElementType()
 394  case v16f32: return 16;  in getVectorNumElements()
 501  case v16f32:  in getSizeInBits()
 653  if (NumElements == 16) return MVT::v16f32;  in getVectorVT()

/external/llvm-project/llvm/test/Verifier/

scatter_gather.ll
   6  …%res = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> %ptrs, i32 4, <16 x i1>…
   9  declare <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*>, i32, <16 x i1>*, <16 x floa…
  70  …call void @llvm.masked.scatter.v16f32.v16p0f32(<16 x float> %value, <16 x float*> %ptrs, i32 4, <1…
  73  declare void @llvm.masked.scatter.v16f32.v16p0f32(<16 x float>, <16 x float*>, i32, <16 x i1>*)