/external/llvm-project/llvm/test/Analysis/CostModel/X86/ |
D | ctlz.ll
    13  declare i64 @llvm.ctlz.i64(i64, i1)
    14  declare i32 @llvm.ctlz.i32(i32, i1)
    15  declare i16 @llvm.ctlz.i16(i16, i1)
    16  declare i8 @llvm.ctlz.i8(i8, i1)
    20  …T: Cost Model: Found an estimated cost of 4 for instruction: %ctlz = call i64 @llvm.ctlz.i64(i64 …
    21  ; NOLZCNT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %ctlz
    24  …T: Cost Model: Found an estimated cost of 1 for instruction: %ctlz = call i64 @llvm.ctlz.i64(i64 …
    25  ; LZCNT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %ctlz
    27  %ctlz = call i64 @llvm.ctlz.i64(i64 %a, i1 0)
    28  ret i64 %ctlz
    [all …]
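For context (not part of the listing): the second i1 operand of llvm.ctlz is the "is zero undef/poison" flag, and the two cost runs above differ only in whether the x86 +lzcnt feature is available. A minimal sketch, under those assumptions, of the kind of function these per-instruction cost lines describe (function names are illustrative):

    declare i64 @llvm.ctlz.i64(i64, i1)

    ; Flag = false: the result is fully defined (ctlz of 0 is 64); this is the
    ; form whose cost the NOLZCNT/LZCNT lines above report as 4 vs. 1.
    define i64 @ctlz_defined(i64 %a) {
      %ctlz = call i64 @llvm.ctlz.i64(i64 %a, i1 false)
      ret i64 %ctlz
    }

    ; Flag = true: the result is undefined when %a is 0, which targets without
    ; a native lzcnt instruction can usually lower more cheaply.
    define i64 @ctlz_zero_undef(i64 %a) {
      %ctlz = call i64 @llvm.ctlz.i64(i64 %a, i1 true)
      ret i64 %ctlz
    }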
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | ctlz.ll
    5   declare i7 @llvm.ctlz.i7(i7, i1) nounwind readnone
    6   declare i8 @llvm.ctlz.i8(i8, i1) nounwind readnone
    7   declare i16 @llvm.ctlz.i16(i16, i1) nounwind readnone
    9   declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
    10  declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1) nounwind readnone
    11  declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) nounwind readnone
    13  declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
    14  declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) nounwind readnone
    15  declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>, i1) nounwind readnone
    31  %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
    [all …]
|
D | ctlz_zero_undef.ll
    5   declare i8 @llvm.ctlz.i8(i8, i1) nounwind readnone
    7   declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
    8   declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1) nounwind readnone
    9   declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) nounwind readnone
    11  declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
    12  declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) nounwind readnone
    13  declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>, i1) nounwind readnone
    26  %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
    27  store i32 %ctlz, i32 addrspace(1)* %out, align 4
    40  %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
    [all …]
|
/external/llvm/test/Analysis/CostModel/X86/ |
D | ctbits-cost.ll
    94   declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1)
    95   declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)
    96   declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1)
    97   declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1)
    99   declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>, i1)
    100  declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>, i1)
    101  declare <16 x i16> @llvm.ctlz.v16i16(<16 x i16>, i1)
    102  declare <32 x i8> @llvm.ctlz.v32i8(<32 x i8>, i1)
    106  ; SSE: Found an estimated cost of 6 for instruction: %ctlz
    107  ; AVX: Found an estimated cost of 6 for instruction: %ctlz
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | ctlz_zero_undef.ll
    5   declare i8 @llvm.ctlz.i8(i8, i1) nounwind readnone
    7   declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
    8   declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1) nounwind readnone
    9   declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) nounwind readnone
    11  declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
    12  declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) nounwind readnone
    13  declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>, i1) nounwind readnone
    26  %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
    27  store i32 %ctlz, i32 addrspace(1)* %out, align 4
    42  %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
    [all …]
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-vclz.ll
    7   %vclz.i = tail call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %a, i1 false) nounwind
    15  %vclz.i = tail call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %a, i1 false) nounwind
    23  %vclz1.i = tail call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %a, i1 false) nounwind
    31  %vclz1.i = tail call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %a, i1 false) nounwind
    39  %vclz1.i = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false) nounwind
    47  %vclz1.i = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false) nounwind
    53  %vclz1.i = tail call <1 x i64> @llvm.ctlz.v1i64(<1 x i64> %a, i1 false) nounwind
    59  %vclz1.i = tail call <1 x i64> @llvm.ctlz.v1i64(<1 x i64> %a, i1 false) nounwind
    67  %vclz.i = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false) nounwind
    75  %vclz.i = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false) nounwind
    [all …]
|
/external/llvm/test/CodeGen/X86/ |
D | lzcnt.ll
    3   declare i8 @llvm.ctlz.i8(i8, i1) nounwind readnone
    4   declare i16 @llvm.ctlz.i16(i16, i1) nounwind readnone
    5   declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
    6   declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
    9   %tmp = tail call i8 @llvm.ctlz.i8( i8 %x, i1 false )
    16  %tmp = tail call i16 @llvm.ctlz.i16( i16 %x, i1 false )
    23  %tmp = tail call i32 @llvm.ctlz.i32( i32 %x, i1 false )
    30  %tmp = tail call i64 @llvm.ctlz.i64( i64 %x, i1 false )
    37  %tmp = tail call i8 @llvm.ctlz.i8( i8 %x, i1 true )
    44  %tmp = tail call i16 @llvm.ctlz.i16( i16 %x, i1 true )
    [all …]
|
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | ctlz.ll
    17  declare i64 @llvm.ctlz.i64(i64, i1)
    18  declare i32 @llvm.ctlz.i32(i32, i1)
    19  declare i16 @llvm.ctlz.i16(i16, i1)
    20  declare i8 @llvm.ctlz.i8(i8, i1)
    30  ; CHECK-NEXT: [[CTLZ0:%.*]] = call i64 @llvm.ctlz.i64(i64 [[LD0]], i1 false)
    31  ; CHECK-NEXT: [[CTLZ1:%.*]] = call i64 @llvm.ctlz.i64(i64 [[LD1]], i1 false)
    38  %ctlz0 = call i64 @llvm.ctlz.i64(i64 %ld0, i1 0)
    39  %ctlz1 = call i64 @llvm.ctlz.i64(i64 %ld1, i1 0)
    51  ; CHECK-NEXT: [[CTLZ0:%.*]] = call i64 @llvm.ctlz.i64(i64 [[LD0]], i1 false)
    52  ; CHECK-NEXT: [[CTLZ1:%.*]] = call i64 @llvm.ctlz.i64(i64 [[LD1]], i1 false)
    [all …]
|
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/ |
D | ctlz.ll
    18  declare i64 @llvm.ctlz.i64(i64, i1)
    19  declare i32 @llvm.ctlz.i32(i32, i1)
    20  declare i16 @llvm.ctlz.i16(i16, i1)
    21  declare i8 @llvm.ctlz.i8(i8, i1)
    31  ; CHECK-NEXT: [[CTLZ0:%.*]] = call i64 @llvm.ctlz.i64(i64 [[LD0]], i1 false)
    32  ; CHECK-NEXT: [[CTLZ1:%.*]] = call i64 @llvm.ctlz.i64(i64 [[LD1]], i1 false)
    39  %ctlz0 = call i64 @llvm.ctlz.i64(i64 %ld0, i1 0)
    40  %ctlz1 = call i64 @llvm.ctlz.i64(i64 %ld1, i1 0)
    52  ; CHECK-NEXT: [[CTLZ0:%.*]] = call i64 @llvm.ctlz.i64(i64 [[LD0]], i1 false)
    53  ; CHECK-NEXT: [[CTLZ1:%.*]] = call i64 @llvm.ctlz.i64(i64 [[LD1]], i1 false)
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | arm64-vclz.ll
    9   %vclz.i = tail call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %a, i1 false) nounwind
    18  %vclz.i = tail call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %a, i1 false) nounwind
    27  %vclz1.i = tail call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %a, i1 false) nounwind
    36  %vclz1.i = tail call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %a, i1 false) nounwind
    45  %vclz1.i = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false) nounwind
    54  %vclz1.i = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false) nounwind
    61  %vclz1.i = tail call <1 x i64> @llvm.ctlz.v1i64(<1 x i64> %a, i1 false) nounwind
    68  %vclz1.i = tail call <1 x i64> @llvm.ctlz.v1i64(<1 x i64> %a, i1 false) nounwind
    77  %vclz.i = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false) nounwind
    86  %vclz.i = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false) nounwind
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/VE/Scalar/ |
D | ctlz.ll
    3   declare i128 @llvm.ctlz.i128(i128, i1)
    4   declare i64 @llvm.ctlz.i64(i64, i1)
    5   declare i32 @llvm.ctlz.i32(i32, i1)
    6   declare i16 @llvm.ctlz.i16(i16, i1)
    7   declare i8 @llvm.ctlz.i8(i8, i1)
    19  %r = tail call i128 @llvm.ctlz.i128(i128 %p, i1 true)
    28  %r = tail call i64 @llvm.ctlz.i64(i64 %p, i1 true)
    40  %r = tail call i32 @llvm.ctlz.i32(i32 %p, i1 true)
    51  %r = tail call i32 @llvm.ctlz.i32(i32 %p, i1 true)
    64  %r = tail call i16 @llvm.ctlz.i16(i16 %p, i1 true)
    [all …]
|
/external/llvm/test/CodeGen/NVPTX/ |
D | ctlz.ll
    5   declare i16 @llvm.ctlz.i16(i16, i1) readnone
    6   declare i32 @llvm.ctlz.i32(i32, i1) readnone
    7   declare i64 @llvm.ctlz.i64(i64, i1) readnone
    11  %val = call i32 @llvm.ctlz.i32(i32 %a, i1 false) readnone
    17  %val = call i16 @llvm.ctlz.i16(i16 %a, i1 false) readnone
    23  %val = call i64 @llvm.ctlz.i64(i64 %a, i1 false) readnone
    30  %val = call i32 @llvm.ctlz.i32(i32 %a, i1 true) readnone
    36  %val = call i16 @llvm.ctlz.i16(i16 %a, i1 true) readnone
    42  %val = call i64 @llvm.ctlz.i64(i64 %a, i1 true) readnone
|
/external/llvm-project/llvm/test/CodeGen/SystemZ/ |
D | vec-ctlz-01.ll
    5   declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %src, i1 %is_zero_undef)
    6   declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %src, i1 %is_zero_undef)
    7   declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %src, i1 %is_zero_undef)
    8   declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %src, i1 %is_zero_undef)
    15  %res = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false)
    24  %res = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 true)
    33  %res = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 false)
    42  %res = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 true)
    51  %res = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false)
    60  %res = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 true)
    [all …]
|
D | scalar-ctlz.ll
    5   declare i64 @llvm.ctlz.i64(i64, i1)
    6   declare i32 @llvm.ctlz.i32(i32, i1)
    7   declare i16 @llvm.ctlz.i16(i16, i1)
    8   declare i8 @llvm.ctlz.i8(i8, i1)
    15  %1 = tail call i64 @llvm.ctlz.i64(i64 %arg, i1 false)
    25  %1 = tail call i64 @llvm.ctlz.i64(i64 %arg, i1 true)
    37  %1 = tail call i32 @llvm.ctlz.i32(i32 %arg, i1 false)
    49  %1 = tail call i32 @llvm.ctlz.i32(i32 %arg, i1 true)
    62  %1 = tail call i16 @llvm.ctlz.i16(i16 %arg, i1 false)
    75  %1 = tail call i16 @llvm.ctlz.i16(i16 %arg, i1 true)
    [all …]
|
/external/llvm/test/CodeGen/SystemZ/ |
D | vec-ctlz-01.ll
    5   declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %src, i1 %is_zero_undef)
    6   declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %src, i1 %is_zero_undef)
    7   declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %src, i1 %is_zero_undef)
    8   declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %src, i1 %is_zero_undef)
    15  %res = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false)
    24  %res = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 true)
    33  %res = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 false)
    42  %res = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 true)
    51  %res = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false)
    60  %res = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 true)
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/NVPTX/ |
D | ctlz.ll
    5   declare i16 @llvm.ctlz.i16(i16, i1) readnone
    6   declare i32 @llvm.ctlz.i32(i32, i1) readnone
    7   declare i64 @llvm.ctlz.i64(i64, i1) readnone
    9   ; There should be no difference between llvm.ctlz.i32(%a, true) and
    10  ; llvm.ctlz.i32(%a, false), as ptx's clz(0) is defined to return 0.
    18  %val = call i32 @llvm.ctlz.i32(i32 %a, i1 false) readnone
    27  %val = call i32 @llvm.ctlz.i32(i32 %a, i1 true) readnone
    40  %val = call i64 @llvm.ctlz.i64(i64 %a, i1 false) readnone
    50  %val = call i64 @llvm.ctlz.i64(i64 %a, i1 true) readnone
    54  ; Here we truncate the 64-bit value of LLVM's ctlz intrinsic to 32 bits, the
    [all …]
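On the comment at file line 54 above: llvm.ctlz.i64 always returns an i64, so producing a 32-bit count requires an explicit trunc around the call. A hedged sketch of that shape (the function name and body here are illustrative, not copied from the test):

    declare i64 @llvm.ctlz.i64(i64, i1)

    define i32 @clz64_as_i32(i64 %a) {
      ; The intrinsic's result type matches its operand type, so the 64-bit
      ; count is truncated to i32 by the caller.
      %val = call i64 @llvm.ctlz.i64(i64 %a, i1 false)
      %trunc = trunc i64 %val to i32
      ret i32 %trunc
    }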
|
/external/llvm/test/Assembler/ |
D | auto_upgrade_intrinsics.ll
    5   declare i8 @llvm.ctlz.i8(i8)
    6   declare i16 @llvm.ctlz.i16(i16)
    7   declare i32 @llvm.ctlz.i32(i32)
    8   declare i42 @llvm.ctlz.i42(i42) ; Not a power-of-2
    14  define void @test.ctlz(i8 %a, i16 %b, i32 %c, i42 %d) {
    15  ; CHECK: @test.ctlz
    18  ; CHECK: call i8 @llvm.ctlz.i8(i8 %a, i1 false)
    19  call i8 @llvm.ctlz.i8(i8 %a)
    20  ; CHECK: call i16 @llvm.ctlz.i16(i16 %b, i1 false)
    21  call i16 @llvm.ctlz.i16(i16 %b)
    [all …]
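The point of this test is the auto-upgrade path: the old single-operand form of llvm.ctlz is rewritten to the current two-operand form with the flag set to false, as the CHECK lines above show. A minimal sketch of the upgraded call:

    declare i8 @llvm.ctlz.i8(i8, i1)

    define i8 @upgraded_ctlz(i8 %a) {
      ; What a legacy "call i8 @llvm.ctlz.i8(i8 %a)" becomes after upgrade:
      ; the added "i1 false" keeps ctlz of zero fully defined.
      %r = call i8 @llvm.ctlz.i8(i8 %a, i1 false)
      ret i8 %r
    }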
|
/external/llvm/test/CodeGen/PowerPC/ |
D | vec_clz.ll
    5   declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>) nounwind readnone
    6   declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>) nounwind readnone
    7   declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>) nounwind readnone
    8   declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>) nounwind readnone
    11  %vcnt = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %x)
    19  %vcnt = tail call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %x)
    27  %vcnt = tail call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %x)
    35  %vcnt = tail call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %x)
|
/external/llvm-project/llvm/test/Transforms/InstCombine/ |
D | intrinsics.ll
    6    declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
    8    declare i1 @llvm.ctlz.i1(i1, i1) nounwind readnone
    11   declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1) nounwind readnone
    13   declare i8 @llvm.ctlz.i8(i8, i1) nounwind readnone
    14   declare <2 x i8> @llvm.ctlz.v2i8(<2 x i8>, i1) nounwind readnone
    129  define i8 @ctlz(i8 %a) {
    130  ; CHECK-LABEL: @ctlz(
    135  %count = tail call i8 @llvm.ctlz.i8(i8 %and, i1 true) nounwind readnone
    145  %count = tail call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> %and, i1 true) nounwind readnone
    149  ; Make sure we don't add range metadata to i1 ctlz.
    [all …]
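Regarding the comment at file line 149: InstCombine can annotate a ctlz call with !range metadata describing its possible results, but for an i1 ctlz the result can be either value of the type, and range metadata cannot describe a full range, so no annotation is added there. A sketch of what such an annotation looks like for i8 (the bounds below are simply the mathematically possible results, stated as an assumption rather than quoted from the pass):

    declare i8 @llvm.ctlz.i8(i8, i1)

    define i8 @ctlz_annotated(i8 %x) {
      ; For an i8 input the count is always in [0, 8], i.e. the half-open
      ; range [0, 9) recorded in the metadata below.
      %count = call i8 @llvm.ctlz.i8(i8 %x, i1 false), !range !0
      ret i8 %count
    }

    !0 = !{i8 0, i8 9}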
|
/external/llvm/test/CodeGen/ARM/ |
D | vcnt.ll
    27  %tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 0)
    35  %tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 0)
    43  %tmp2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %tmp1, i1 0)
    50  %tmp2 = call <1 x i64> @llvm.ctlz.v1i64(<1 x i64> %tmp1, i1 0)
    58  %tmp2 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %tmp1, i1 0)
    66  %tmp2 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %tmp1, i1 0)
    74  %tmp2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %tmp1, i1 0)
    81  %tmp2 = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %tmp1, i1 0)
    89  %tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 1)
    97  %tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 1)
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | vcnt.ll
    27  %tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 0)
    35  %tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 0)
    43  %tmp2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %tmp1, i1 0)
    50  %tmp2 = call <1 x i64> @llvm.ctlz.v1i64(<1 x i64> %tmp1, i1 0)
    58  %tmp2 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %tmp1, i1 0)
    66  %tmp2 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %tmp1, i1 0)
    74  %tmp2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %tmp1, i1 0)
    81  %tmp2 = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %tmp1, i1 0)
    89  %tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 1)
    97  %tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 1)
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/PowerPC/ |
D | vec_clz.ll
    6   declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>) nounwind readnone
    7   declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>) nounwind readnone
    8   declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>) nounwind readnone
    9   declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>) nounwind readnone
    21  %vcnt = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %x)
    35  %vcnt = tail call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %x)
    49  %vcnt = tail call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %x)
    63  %vcnt = tail call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %x)
    67  declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1 immarg)
    79  %v2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %v1, i1 true)
|
/external/llvm/test/Transforms/InstCombine/ |
D | select-cmp-cttz-ctlz.ll
    4   ; a cttz/ctlz followed by a icmp + select into a single cttz/ctlz with
    9   ; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i16 @llvm.ctlz.i16(i16 %x, i1 false)
    12  %0 = tail call i16 @llvm.ctlz.i16(i16 %x, i1 true)
    20  ; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
    23  %0 = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)
    31  ; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %x, i1 false)
    34  %0 = tail call i64 @llvm.ctlz.i64(i64 %x, i1 true)
    42  ; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i16 @llvm.ctlz.i16(i16 %x, i1 false)
    45  %0 = tail call i16 @llvm.ctlz.i16(i16 %x, i1 true)
    53  ; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
    [all …]
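The transform this test guards is the one its header describes: a ctlz/cttz call with the zero-undef flag set, followed by an icmp against zero and a select of the bit width, collapses into a single call with the flag cleared. A sketch of the i32 case (value and function names are illustrative):

    declare i32 @llvm.ctlz.i32(i32, i1)

    ; Before: the select re-creates the "defined at zero" behaviour by hand.
    define i32 @clz_select(i32 %x) {
      %c = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)
      %is_zero = icmp eq i32 %x, 0
      %r = select i1 %is_zero, i32 32, i32 %c
      ret i32 %r
    }

    ; After the fold, the same result comes from one call with the flag cleared,
    ; which is the form the CHECK lines above look for.
    define i32 @clz_select_folded(i32 %x) {
      %c = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
      ret i32 %c
    }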
|
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/NVPTX/ |
D | non-vectorizable-intrinsic.ll
    16  ; CHECK-NEXT: [[CALL_I:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP0]], i1 false)
    19  ; CHECK-NEXT: [[CALL_I4:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP1]], i1 false)
    25  %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
    28  %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
    38  ; CHECK-NEXT: [[CALL_I:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP0]], i1 false)
    39  ; CHECK-NEXT: [[CALL_I4:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP1]], i1 false)
    47  %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
    48  %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
    54  declare i8 @llvm.ctlz.i8(i8, i1) #3
|
/external/llvm/test/Feature/ |
D | intrinsics.ll
    27  declare i8 @llvm.ctlz.i8(i8, i1)
    29  declare i16 @llvm.ctlz.i16(i16, i1)
    31  declare i32 @llvm.ctlz.i32(i32, i1)
    33  declare i64 @llvm.ctlz.i64(i64, i1)
    50  call i8 @llvm.ctlz.i8( i8 14, i1 true ) ; <i32>:9 [#uses=0]
    51  call i16 @llvm.ctlz.i16( i16 15, i1 true ) ; <i32>:10 [#uses=0]
    52  call i32 @llvm.ctlz.i32( i32 16, i1 true ) ; <i32>:11 [#uses=0]
    53  call i64 @llvm.ctlz.i64( i64 17, i1 true ) ; <i32>:12 [#uses=0]
|