/external/llvm-project/llvm/test/Transforms/Reassociate/ |
D | wrap-flags.ll |
      17  %mul2 = add i32 %mul, 1
      18  ret i32 %mul2
      31  %mul2 = add i32 %mul, 1
      32  ret i32 %mul2
      45  %mul2 = add i32 %mul, 1
      46  ret i32 %mul2
      59  %mul2 = add i32 %mul, 1
      60  ret i32 %mul2
      73  %mul2 = add i32 %mul, 1
      74  ret i32 %mul2
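Judging from the file name, these Reassociate tests exercise how integer wrap flags survive reassociation. A minimal sketch of that kind of input, assuming the usual rule that nsw/nuw must be dropped when the pass re-groups operations (function and value names here are illustrative, not taken from the test):

      define i32 @wrap_flags_sketch(i32 %x) {
        ; Reassociating this chain is only valid if the nsw flags are dropped,
        ; because the re-grouped intermediate values may wrap.
        %add = add nsw i32 %x, 4
        %mul = mul nsw i32 %add, 3
        %mul2 = add i32 %mul, 1
        ret i32 %mul2
      }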
|
D | mixed-fast-nonfast-fp.ll |
      14  %mul2 = fmul fast float %a, %b
      18  %add2 = fadd fast float %mul4, %mul2
      35  %mul2 = fmul fast float %a, %b
      39  %add2 = fadd reassoc float %mul4, %mul2
|
D | canonicalize-neg-const.ll |
      211  %mul2 = fmul double %mul1, -4.0
      212  %add = fadd double %mul2, %a
      227  %mul2 = fmul double %mul1, -4.0
      228  %mul3 = fmul double %mul2, -5.0
      256  %mul2 = fmul double %mul1, 4.0
      257  %sub = fsub double %a, %mul2
      272  %mul2 = fmul double %mul1, -4.0
      273  %mul3 = fmul double %mul2, -5.0
      407  %mul2 = fmul double -4.0, %div1
      408  %div3 = fdiv double %mul2, -5.0
      [all …]
|
D | propagate-flags.ll |
      11  %mul2 = fmul fast double %b, %b
      12  %mul3 = fmul fast double %mul1, %mul2
|
/external/llvm/test/Transforms/StraightLineStrengthReduce/ |
D | slsr-mul.ll |
      20  %mul2 = mul i32 %b2, %s
      21  call void @foo(i32 %mul2)
      41  %mul2 = mul i32 %b2, %s
      42  call void @foo(i32 %mul2)
      63  %mul2 = mul i32 %b2, %s
      65  call void @foo(i32 %mul2)
      82  %mul2 = mul i32 %a, %b1
      87  call void @foo(i32 %mul2)
      101  ; mul1 = mul0 + bump; // GVN ensures mul1 and mul2 use the same bump.
      102  ; mul2 = mul1 + bump;
      [all …]
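The comment at lines 101–102 summarizes the rewrite StraightLineStrengthReduce performs on multiplications that share a stride. A minimal sketch of that pattern, assuming three candidates with bases b, b+1, b+2 and a common stride s (names are illustrative, not copied from the test):

      declare void @foo(i32)

      define void @slsr_sketch(i32 %b, i32 %s) {
        ; Three multiply candidates with consecutive bases and the same stride.
        %mul0 = mul i32 %b, %s
        call void @foo(i32 %mul0)
        %b1 = add i32 %b, 1
        %mul1 = mul i32 %b1, %s
        call void @foo(i32 %mul1)
        %b2 = add i32 %b, 2
        %mul2 = mul i32 %b2, %s
        call void @foo(i32 %mul2)
        ret void
      }
      ; After the pass, the later candidates are expected to become additions,
      ; i.e. mul1 = mul0 + s and mul2 = mul1 + s, where s is the common bump.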
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | fmul-2-combine-multi-use.ll |
      37  %mul2 = fmul fast float %x, 2.0
      38  %mad = fadd fast float %mul2, %y
      39  store volatile float %mul2, float addrspace(1)* %out
      53  %mul2 = fmul fast float %x.abs, 2.0
      54  %mad = fadd fast float %mul2, %y
      55  store volatile float %mul2, float addrspace(1)* %out
      66  %mul2 = fmul fast float %x.abs, 2.0
      67  %mad0 = fadd fast float %mul2, %y
      68  %mad1 = fadd fast float %mul2, %z
      80  %mul2 = fmul fast float %x, 2.0
      [all …]
|
/external/llvm-project/llvm/test/Transforms/StraightLineStrengthReduce/ |
D | slsr-mul.ll |
      21  %mul2 = mul i32 %b2, %s
      22  call void @foo(i32 %mul2)
      42  %mul2 = mul i32 %b2, %s
      43  call void @foo(i32 %mul2)
      64  %mul2 = mul i32 %b2, %s
      66  call void @foo(i32 %mul2)
      83  %mul2 = mul i32 %a, %b1
      88  call void @foo(i32 %mul2)
      102  ; mul1 = mul0 + bump; // GVN ensures mul1 and mul2 use the same bump.
      103  ; mul2 = mul1 + bump;
      [all …]
|
/external/llvm/test/Transforms/Reassociate/ |
D | wrap-flags.ll |
      12  %mul2 = add i32 %mul, 1
      13  ret i32 %mul2
      22  %mul2 = add i32 %mul, 1
      23  ret i32 %mul2
      32  %mul2 = add i32 %mul, 1
      33  ret i32 %mul2
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | fmul-2-combine-multi-use.ll |
      52  %mul2 = fmul fast float %x, 2.0
      53  %mad = fadd fast float %mul2, %y
      54  store volatile float %mul2, float addrspace(1)* %out
      69  %mul2 = fmul fast float %x.abs, 2.0
      70  %mad = fadd fast float %mul2, %y
      71  store volatile float %mul2, float addrspace(1)* %out
      84  %mul2 = fmul fast float %x.abs, 2.0
      85  %mad0 = fadd fast float %mul2, %y
      86  %mad1 = fadd fast float %mul2, %z
      98  %mul2 = fmul fast float %x, 2.0
      [all …]
|
D | inline-maxbb.ll |
      11  %mul2 = mul i32 %mul1, %x
      12  %mul3 = mul i32 %mul1, %mul2
      13  %mul4 = mul i32 %mul3, %mul2
      44  %mul2 = mul i32 %mul1, %x
      45  %mul3 = mul i32 %mul1, %mul2
      46  %mul4 = mul i32 %mul3, %mul2
|
D | computeNumSignBits-mul.ll |
      18  %mul2 = mul i48 %mul0, %mul1
      19  %trunc = trunc i48 %mul2 to i16
      39  %mul2 = mul i48 %mul0, %mul1
      40  %ashr = ashr i48 %mul2, 24
      71  %mul2 = mul i32 %mul0, %mul1
      72  ret i32 %mul2
      97  %mul2 = mul i32 %mul0, %mul1
      98  ret i32 %mul2
      127  %mul2 = mul i32 %mul0, %mul1
      128  ret i32 %mul2
      [all …]
|
D | early-inline.ll |
      8  %mul2 = mul i32 %mul1, %x
      9  %mul3 = mul i32 %mul1, %mul2
      10  %mul4 = mul i32 %mul3, %mul2
|
D | fdot2.ll |
      37  %mul2 = fmul half %src1.el2, %src2.el2
      40  %acc1 = fadd half %mul2, %acc
      79  %mul2 = fmul float %csrc1.el2, %csrc2.el2
      82  %acc1 = fadd float %mul2, %acc
      119  %mul2 = fmul float %csrc2.el2, %csrc1.el2
      122  %acc1 = fadd float %mul2, %acc
      156  %mul2 = fmul float %csrc1.el2, %csrc2.el2
      159  %acc1 = fadd float %mul2, %acc
      193  %mul2 = fmul float %csrc1.el2, %csrc1.el1
      196  %acc1 = fadd float %mul2, %acc
      [all …]
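Each match shows an element product (%mul2) feeding an fadd accumulator, the dot-product-with-accumulator shape these AMDGPU tests are built around. A minimal scalarized sketch, assuming the elements are passed directly rather than extracted from vectors as in the test (names are illustrative):

      define half @dot2_sketch(half %a0, half %a1, half %b0, half %b1, half %acc) {
        ; acc + a0*b0 + a1*b1 — the shape a two-element dot instruction can cover.
        %mul1 = fmul half %a0, %b0
        %mul2 = fmul half %a1, %b1
        %acc0 = fadd half %mul1, %acc
        %acc1 = fadd half %mul2, %acc0
        ret half %acc1
      }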
|
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | crash_smallpt.ll |
      44  %mul2.i256.us = fmul double %add4.i267.us, 1.400000e+02
      46  %add4.i246.us = fadd double %mul2.i256.us, 5.200000e+01
      48  %mul2.i.i.us = fmul double undef, %add4.i267.us
      52  store double %mul2.i.i.us, double* %agg.tmp101211.sroa.1.8.idx390, align 8
      83  %mul2.i738 = fmul double undef, %sub10.i773
      85  %mul2.i729 = fmul double undef, %mul2.i738
      87  %add4.i719 = fadd double undef, %mul2.i729
      91  %mul2.i.i680 = fmul double undef, %add4.i698
      95  store double %mul2.i.i680, double* %agg.tmp74663.sroa.1.8.idx943, align 8
|
/external/llvm-project/llvm/test/Transforms/InstCombine/ |
D | reassociate-nuw.ll |
      76  %mul2 = mul nuw i32 %mul0, %mul1
      77  ret i32 %mul2
      140  %mul2 = mul nuw i32 %x, %z
      141  %add1 = add nuw i32 %mul1, %mul2
      152  %mul2 = mul nuw i32 %x, %z
      153  %add1 = add nuw i32 %mul1, %mul2
      164  %mul2 = mul i32 %x, %z
      165  %add1 = add nuw i32 %mul1, %mul2
      176  %mul2 = mul nuw i32 %x, %z
      177  %add1 = add i32 %mul1, %mul2
|
/external/llvm-project/llvm/test/Transforms/GVNHoist/ |
D | hoist-convergent.ll |
      19  %mul2 = call float @convergent_func(float %sub1, float %div)
      28  %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ]
      49  %mul2 = call float @func(float %sub1, float %div) #0
      58  %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ]
      75  %mul2 = call float @func(float %sub1, float %div)
      84  %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ]
|
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | dag-fmf-cse.ll |
      17  %mul2 = fmul fast float %nega, %b
      18  %abx2 = fsub fast float %mul1, %mul2
      30  %mul2 = fmul fast <4 x float> %nega, %b
      31  %abx2 = fsub fast <4 x float> %mul1, %mul2
|
D | lea-opt-cse3.ll |
      27  %mul2 = shl i32 %b, 2
      28  %add4 = add i32 %add, %mul2
      55  %mul2 = shl i32 %b, 3
      56  %add4 = add i32 %add, %mul2
      104  %mul2 = shl i32 %b, 3
      105  %add4 = add i32 %addn, %mul2
      157  %mul2 = shl i32 %b, 3
      158  %add4 = add i32 %addn, %mul2
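The shl/add pairs here are address-style arithmetic of the form a + b*4 or a + b*8, which the X86 backend can typically fold into a single lea. A minimal sketch of one such pair, assuming a shift amount that fits an x86 base + index*scale addressing mode (names are illustrative):

      define i32 @lea_sketch(i32 %a, i32 %b) {
        ; a + (b << 2) corresponds to base + index*4, so the pair can
        ; lower to one lea rather than a separate shift and add.
        %mul2 = shl i32 %b, 2
        %add4 = add i32 %a, %mul2
        ret i32 %add4
      }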
|
D | MachineSink-SubReg.ll |
      17  %mul2 = mul nuw nsw i64 %conv, 5
      25  %add7 = add i64 %mul2, %value
      29  %conv9 = trunc i64 %mul2 to i32
|
D | pr47517.ll |
      36  %mul2 = fmul fast float %mul, %sub
      37  %add2 = fadd fast float %mul2, 1.0
      38  %add3 = fadd fast float %mul2, %add2
|
/external/llvm-project/llvm/test/CodeGen/PowerPC/ |
D | fma-aggr-FMF.ll |
      13  %mul2 = fmul contract float %f3, %f4
      14  %add = fadd contract float %mul1, %mul2
      31  %mul2 = fmul float %f3, %f4
      32  %add = fadd contract float %mul1, %mul2
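The visible difference between the two matches is the contract flag: the first %mul2 carries it, the second does not, while both fadd users do. A minimal sketch of the fully contractible shape, assuming the usual rule that fusing an fmul/fadd pair into an fma needs contract on both instructions (names are illustrative):

      define float @fma_contract_sketch(float %f1, float %f2, float %f3, float %f4) {
        ; With contract on both the fmul and the fadd, one pair may be
        ; fused into a single fused multiply-add during lowering.
        %mul1 = fmul contract float %f1, %f2
        %mul2 = fmul contract float %f3, %f4
        %add = fadd contract float %mul1, %mul2
        ret float %add
      }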
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | cortex-a57-misched-vfma.ll |
      37  %mul2 = fmul float %f3, %f4
      39  %add1 = fadd float %mul1, %mul2
      76  %mul2 = fmul <2 x float> %f3, %f4
      78  %add1 = fadd <2 x float> %mul1, %mul2
      114  %mul2 = fmul float %f3, %f4
      116  %sub1 = fsub float %mul1, %mul2
      153  %mul2 = fmul <2 x float> %f3, %f4
      155  %sub1 = fsub <2 x float> %mul1, %mul2
|
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/ |
D | crash_smallpt.ll |
      84  %mul2.i256.us = fmul double %add4.i267.us, 1.400000e+02
      86  %add4.i246.us = fadd double %mul2.i256.us, 5.200000e+01
      88  %mul2.i.i.us = fmul double undef, %add4.i267.us
      92  store double %mul2.i.i.us, double* %agg.tmp101211.sroa.1.8.idx390, align 8
      144  %mul2.i738 = fmul double undef, %sub10.i773
      146  %mul2.i729 = fmul double undef, %mul2.i738
      148  %add4.i719 = fadd double undef, %mul2.i729
      152  %mul2.i.i680 = fmul double undef, %add4.i698
      156  store double %mul2.i.i680, double* %agg.tmp74663.sroa.1.8.idx943, align 8
|
/external/llvm-project/llvm/test/Analysis/ScalarEvolution/ |
D | huge_expression_limit.ll |
      20  ; CHECK-NEXT: %mul2 = mul i32 %mul1, %c
      22  ; CHECK-NEXT: %mul3 = mul i32 %mul2, %d
      37  %mul2 = mul i32 %mul1, %c
      38  %mul3 = mul i32 %mul2, %d
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | aarch64-fold-lslfast.ll |
      64  %mul2 = shl i64 %b, 3
      65  %cmp = icmp slt i64 %mul1, %mul2
      71  %cmp2 = icmp sgt i64 %mul1, %mul2
      76  ret i64 %mul2
|