Searched refs:conv2 (Results 1 – 25 of 341) sorted by relevance
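Every hit below is on the SSA value %conv2, which in these tests names an integer or floating-point conversion (zext, sext, trunc, or fptrunc). For orientation, a minimal hand-written sketch of the pattern behind the ARM and AArch64 fast-isel-icmp matches; the function name and compare operands are illustrative assumptions, not taken from any listed file:

    define i32 @icmp_result_to_i32(i32 %a, i32 %b) {
    entry:
      ; the compare produces an i1
      %cmp = icmp eq i32 %a, %b
      ; %conv2 widens the i1 to the i32 return type; this is the line the search matches
      %conv2 = zext i1 %cmp to i32
      ret i32 %conv2
    }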
/external/llvm-project/llvm/test/CodeGen/ARM/

fast-isel-icmp.ll
    16: %conv2 = zext i1 %cmp to i32
    17: ret i32 %conv2
    31: %conv2 = zext i1 %cmp to i32
    32: ret i32 %conv2
    46: %conv2 = zext i1 %cmp to i32
    47: ret i32 %conv2
    61: %conv2 = zext i1 %cmp to i32
    62: ret i32 %conv2
    76: %conv2 = zext i1 %cmp to i32
    77: ret i32 %conv2

dsp-mlal.ll
    71: %conv2 = trunc i64 %0 to i32
    72: ret i32 %conv2
    95: %conv2 = trunc i64 %0 to i32
    96: ret i32 %conv2
    120: %conv2 = sext i32 %c to i64
    121: %mul = mul nsw i64 %conv2, %conv1
    155: %conv2 = trunc i64 %0 to i32
    156: %sub = sub nsw i32 %a, %conv2
    181: %conv2 = sext i32 %c to i64
    182: %mul = mul nsw i64 %conv2, %conv1
    [all …]

noreturn.ll
    24: %conv2 = trunc i64 %mul to i32
    25: %conv3 = sext i32 %conv2 to i64
    34: ret i32 %conv2
    79: %conv2 = trunc i64 %mul to i32
    80: %conv3 = sext i32 %conv2 to i64
    89: ret i32 %conv2
/external/llvm/test/CodeGen/ARM/

fast-isel-icmp.ll
    16: %conv2 = zext i1 %cmp to i32
    17: ret i32 %conv2
    31: %conv2 = zext i1 %cmp to i32
    32: ret i32 %conv2
    46: %conv2 = zext i1 %cmp to i32
    47: ret i32 %conv2
    61: %conv2 = zext i1 %cmp to i32
    62: ret i32 %conv2
    76: %conv2 = zext i1 %cmp to i32
    77: ret i32 %conv2

longMAC.ll
    33: %conv2 = zext i32 %c to i64
    34: %add = add i64 %mul, %conv2
    44: %conv2 = sext i32 %c to i64
    45: %add = add nsw i64 %mul, %conv2
    86: %conv2 = sext i32 %c to i64
    88: %mul4 = mul nsw i64 %conv3, %conv2
    127: %conv2 = zext i32 %lo to i64
    128: %add = add i64 %mul, %conv2
    142: %conv2 = zext i32 %lo to i64
    144: %add = add i64 %conv2, %conv3

smul.ll
    74: %conv2 = sext i32 %shr to i64
    75: %mul = mul nsw i64 %conv2, %conv
    89: %conv2 = trunc i64 %shr5 to i32
    90: %add = add nsw i32 %conv2, %c
    113: %conv2 = sext i32 %shr to i64
    114: %mul = mul nsw i64 %conv2, %conv
    127: %conv2 = trunc i64 %shr4 to i32
    128: ret i32 %conv2
/external/llvm/test/Transforms/InstCombine/

udivrem-change-width.ll
    8: %conv2 = zext i8 %b to i32
    9: %div = udiv i32 %conv, %conv2
    18: %conv2 = zext i8 %b to i32
    19: %div = urem i32 %conv, %conv2
    28: %conv2 = zext i8 %b to i32
    29: %div = udiv i32 %conv, %conv2
    38: %conv2 = zext i8 %b to i32
    39: %div = urem i32 %conv, %conv2

overflow.ll
    11: %conv2 = sext i32 %b to i64
    12: %add = add nsw i64 %conv2, %conv
    36: %conv2 = sext i32 %b to i64
    37: %add = add nsw i64 %conv2, %conv
    63: %conv2 = sext i32 %b to i64
    64: %add = add nsw i64 %conv2, %conv
    84: %conv2 = sext i8 %b to i32
    85: %add = add nsw i32 %conv2, %conv

cos-1.ll
    25: %conv2 = fptrunc double %cos to float
    27: ret float %conv2
    36: %conv2 = fptrunc double %cos to float
    37: ret float %conv2
/external/llvm-project/llvm/test/CodeGen/Hexagon/

extload-combine.ll
    23: %conv2 = zext i16 %0 to i64
    24: ret i64 %conv2
    34: %conv2 = sext i16 %0 to i64
    35: ret i64 %conv2
    45: %conv2 = zext i8 %0 to i64
    46: ret i64 %conv2
    56: %conv2 = sext i8 %0 to i64
    57: ret i64 %conv2

memops.ll
    24: %conv2 = trunc i32 %add to i8
    25: store i8 %conv2, i8* %p, align 1
    37: %conv2 = trunc i32 %sub to i8
    38: store i8 %conv2, i8* %p, align 1
    108: %conv2 = trunc i32 %add to i8
    109: store i8 %conv2, i8* %add.ptr, align 1
    122: %conv2 = trunc i32 %sub to i8
    123: store i8 %conv2, i8* %add.ptr, align 1
    197: %conv2 = trunc i32 %add to i8
    198: store i8 %conv2, i8* %add.ptr, align 1
    [all …]

memops2.ll
    10: %conv2 = zext i16 %0 to i32
    11: %sub = add nsw i32 %conv2, 65535
    25: %conv2 = trunc i32 %sub to i16
    26: store i16 %conv2, i16* %add.ptr1, align 2
/external/llvm/test/CodeGen/Hexagon/

extload-combine.ll
    23: %conv2 = zext i16 %0 to i64
    24: ret i64 %conv2
    34: %conv2 = sext i16 %0 to i64
    35: ret i64 %conv2
    45: %conv2 = zext i8 %0 to i64
    46: ret i64 %conv2
    56: %conv2 = sext i8 %0 to i64
    57: ret i64 %conv2

memops.ll
    22: %conv2 = trunc i32 %add to i8
    23: store i8 %conv2, i8* %p, align 1
    34: %conv2 = trunc i32 %sub to i8
    35: store i8 %conv2, i8* %p, align 1
    99: %conv2 = trunc i32 %add to i8
    100: store i8 %conv2, i8* %add.ptr, align 1
    112: %conv2 = trunc i32 %sub to i8
    113: store i8 %conv2, i8* %add.ptr, align 1
    181: %conv2 = trunc i32 %add to i8
    182: store i8 %conv2, i8* %add.ptr, align 1
    [all …]

memops2.ll
    10: %conv2 = zext i16 %0 to i32
    11: %sub = add nsw i32 %conv2, 65535
    25: %conv2 = trunc i32 %sub to i16
    26: store i16 %conv2, i16* %add.ptr1, align 2
/external/llvm-project/llvm/test/CodeGen/PowerPC/

vec_int_ext.ll
    21: %conv2 = sext i8 %vecext1 to i32
    22: %vecinit3 = insertelement <4 x i32> %vecinit, i32 %conv2, i32 1
    48: %conv2 = sext i8 %vecext1 to i64
    49: %vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
    69: %conv2 = sext i16 %vecext1 to i32
    70: %vecinit3 = insertelement <4 x i32> %vecinit, i32 %conv2, i32 1
    96: %conv2 = sext i16 %vecext1 to i64
    97: %vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
    117: %conv2 = sext i32 %vecext1 to i64
    118: %vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
    [all …]
/external/llvm-project/llvm/test/Transforms/CodeGenPrepare/X86/

ext-logicop.ll
    21: %conv2 = zext i8 %and to i32
    22: %add = add nsw i32 %conv2, %ll
    45: %conv2 = zext i8 %or to i32
    46: %add = add nsw i32 %conv2, %ll
    71: %conv2 = zext i8 %and to i32
    72: %add = add nsw i32 %conv2, %ll
    95: %conv2 = zext i8 %lshr to i32
    96: %add = add nsw i32 %conv2, %ll
    119: %conv2 = zext i8 %xor to i32
    120: %add = add nsw i32 %conv2, %ll
/external/llvm/test/CodeGen/AArch64/

arm64-fast-isel-icmp.ll
    180: %conv2 = zext i1 %cmp to i32
    181: ret i32 %conv2
    191: %conv2 = zext i1 %cmp to i32
    192: ret i32 %conv2
    203: %conv2 = zext i1 %cmp to i32
    204: ret i32 %conv2
    215: %conv2 = zext i1 %cmp to i32
    216: ret i32 %conv2
    227: %conv2 = zext i1 %cmp to i32
    228: ret i32 %conv2
    [all …]

arm64-shifted-sext.ll
    13: %conv2 = trunc i32 %shl to i16
    14: ret i16 %conv2
    25: %conv2 = trunc i32 %shr4 to i16
    26: ret i16 %conv2
    37: %conv2 = trunc i32 %shl to i16
    38: ret i16 %conv2
    50: %conv2 = trunc i32 %shr4 to i16
    51: ret i16 %conv2
    172: %conv2 = zext i16 %inc to i32
    173: %shl = shl nuw i32 %conv2, 16
    [all …]
/external/llvm/test/Transforms/LoadCombine/

load-combine-aa.ll
    16: %conv2 = zext i32 %load2 to i64
    17: %shl = shl nuw i64 %conv2, 32
    34: %conv2 = zext i32 %load2 to i64
    35: %shl = shl nuw i64 %conv2, 32
/external/llvm-project/llvm/test/CodeGen/AArch64/

arm64-fast-isel-icmp.ll
    196: %conv2 = zext i1 %cmp to i32
    197: ret i32 %conv2
    208: %conv2 = zext i1 %cmp to i32
    209: ret i32 %conv2
    221: %conv2 = zext i1 %cmp to i32
    222: ret i32 %conv2
    233: %conv2 = zext i1 %cmp to i32
    234: ret i32 %conv2
    245: %conv2 = zext i1 %cmp to i32
    246: ret i32 %conv2
    [all …]

arm64-shifted-sext.ll
    13: %conv2 = trunc i32 %shl to i16
    14: ret i16 %conv2
    25: %conv2 = trunc i32 %shr4 to i16
    26: ret i16 %conv2
    37: %conv2 = trunc i32 %shl to i16
    38: ret i16 %conv2
    50: %conv2 = trunc i32 %shr4 to i16
    51: ret i16 %conv2
    172: %conv2 = zext i16 %inc to i32
    173: %shl = shl nuw i32 %conv2, 16
    [all …]
/external/llvm-project/llvm/test/Transforms/AggressiveInstCombine/

trunc_select_cmp.ll
    34: %conv2 = sext i8 %b to i32
    35: %cmp = icmp slt i32 %conv, %conv2
    36: %cond = select i1 %cmp, i32 %conv2, i32 %conv
    53: %conv2 = zext i8 %b to i32
    54: %cmp = icmp slt i32 %conv, %conv2
    55: %cond = select i1 %cmp, i32 %conv2, i32 %conv
    72: %conv2 = sext i8 %b to i32
    73: %cmp = icmp slt i32 %conv, %conv2
    74: %cond = select i1 %cmp, i32 %conv2, i32 %conv
    91: %conv2 = sext i16 %b to i32
    [all …]
/external/llvm-project/llvm/test/CodeGen/Thumb2/

longMACt.ll
    30: %conv2 = zext i32 %c to i64
    31: %add = add i64 %mul, %conv2
    41: %conv2 = sext i32 %c to i64
    42: %add = add nsw i64 %mul, %conv2
/external/llvm/test/CodeGen/Thumb2/

longMACt.ll
    30: %conv2 = zext i32 %c to i64
    31: %add = add i64 %mul, %conv2
    41: %conv2 = sext i32 %c to i64
    42: %add = add nsw i64 %mul, %conv2