/external/llvm-project/llvm/test/CodeGen/AMDGPU/

D | inline-maxbb.ll
    12: %mul3 = mul i32 %mul1, %mul2
    13: %mul4 = mul i32 %mul3, %mul2
    14: %mul5 = mul i32 %mul4, %mul3
    45: %mul3 = mul i32 %mul1, %mul2
    46: %mul4 = mul i32 %mul3, %mul2
    47: %mul5 = mul i32 %mul4, %mul3

D | early-inline.ll
    9:  %mul3 = mul i32 %mul1, %mul2
    10: %mul4 = mul i32 %mul3, %mul2
    11: %mul5 = mul i32 %mul4, %mul3

/external/llvm/test/Transforms/Reassociate/

D | mixed-fast-nonfast-fp.ll
    4:  ; CHECK: %mul3 = fmul float %a, %b
    8:  ; CHECK-NEXT: fadd fast float %tmp2, %mul3
    12: %mul3 = fmul float %a, %b
    14: %add1 = fadd fast float %mul1, %mul3

D | canonicalize-neg-const.ll
    16: %mul3 = fmul double %add, %add2
    17: ret double %mul3
    32: %mul3 = fmul double %add, %add2
    33: ret double %mul3
    48: %mul3 = fmul double %add, %add2
    49: ret double %mul3

/external/llvm-project/llvm/test/CodeGen/ARM/

D | machine-outliner-return-2.ll
    15: %mul3 = mul nsw i32 %div2, %b
    16: %add4 = add nsw i32 %mul3, 1
    35: %mul3 = mul nsw i32 %div2, %b
    36: %add4 = add nsw i32 %mul3, 3

D | cortex-a57-misched-vfma.ll
    38:  %mul3 = fmul float %f5, %f6
    40:  %add2 = fadd float %add1, %mul3
    77:  %mul3 = fmul <2 x float> %f5, %f6
    79:  %add2 = fadd <2 x float> %add1, %mul3
    115: %mul3 = fmul float %f5, %f6
    117: %sub2 = fsub float %sub1, %mul3
    154: %mul3 = fmul <2 x float> %f5, %f6
    156: %sub2 = fsub <2 x float> %sub1, %mul3

D | cortex-a57-misched-ldm-wrback.ll
    34: %mul3 = mul i32 %mul2, %3
    35: ret i32 %mul3

/external/llvm-project/llvm/test/Transforms/Reassociate/

D | mixed-fast-nonfast-fp.ll
    15: %mul3 = fmul float %a, %b ; STRICT
    17: %add1 = fadd fast float %mul1, %mul3
    36: %mul3 = fmul float %a, %b ; STRICT
    38: %add1 = fadd fast float %mul1, %mul3

D | propagate-flags.ll
    12: %mul3 = fmul fast double %mul1, %mul2
    13: ret double %mul3

D | canonicalize-neg-const.ll
    17:  %mul3 = fmul double %add, %add2
    18:  ret double %mul3
    33:  %mul3 = fmul double %add, %add2
    34:  ret double %mul3
    49:  %mul3 = fmul double %add, %add2
    50:  ret double %mul3
    228: %mul3 = fmul double %mul2, -5.0
    229: %add = fadd double %mul3, %c
    273: %mul3 = fmul double %mul2, -5.0
    274: %sub = fsub double %c, %mul3
    [all …]

/external/llvm-project/llvm/test/CodeGen/PowerPC/

D | fma-precision.ll
    17: %mul3 = fmul reassoc double %mul, %sub
    18: ret double %mul3
    34: %mul3 = fmul reassoc double %mul1, %sub
    35: ret double %mul3
    66: %mul3 = fmul reassoc double %mul, %add
    67: ret double %mul3
    83: %mul3 = fmul reassoc double %mul1, %add
    84: ret double %mul3

/external/llvm-project/llvm/test/Analysis/ScalarEvolution/

D | huge_expression_limit.ll
    22: ; CHECK-NEXT: %mul3 = mul i32 %mul2, %d
    24: ; CHECK-NEXT: %mul4 = mul i32 %mul3, %e
    38: %mul3 = mul i32 %mul2, %d
    39: %mul4 = mul i32 %mul3, %e

D | 2012-05-29-MulAddRec.ll
    5:  ; outer loop. While reducing the recurrence at %mul3, unsigned integer overflow
    37: %mul3 = phi i8 [ undef, %entry ], [ %mul.lcssa, %for.cond.loopexit ]
    44: %mul45 = phi i8 [ %mul3, %for.cond ], [ %mul, %for.body ]

/external/llvm/test/Transforms/SimplifyCFG/AArch64/

D | prefer-fma.ll
    55: ; CHECK: %mul3 = fmul fast double %5, 3.000000e+00
    56: ; CHECK-NEXT: %neg = fsub fast double 0.000000e+00, %mul3
    58: %mul3 = fmul fast double %6, 3.0000000e+00
    59: %neg = fsub fast double 0.0000000e+00, %mul3

/external/llvm-project/llvm/test/Transforms/SimplifyCFG/AArch64/

D | prefer-fma.ll
    56: ; CHECK: %mul3 = fmul fast double %5, 3.000000e+00
    57: ; CHECK-NEXT: %neg = fsub fast double 0.000000e+00, %mul3
    59: %mul3 = fmul fast double %6, 3.0000000e+00
    60: %neg = fsub fast double 0.0000000e+00, %mul3

/external/llvm/test/Transforms/BBVectorize/X86/

D | loop1.ll
    20: %mul3 = fmul double %0, %1
    21: %add = fadd double %mul, %mul3
    47: ; CHECK-UNRL: %mul3 = fmul <2 x double> %2, %3
    48: ; CHECK-UNRL: %add = fadd <2 x double> %mul, %mul3

/external/llvm/test/Transforms/BBVectorize/

D | loop1.ll
    20: %mul3 = fmul double %0, %1
    21: %add = fadd double %mul, %mul3
    43: ; CHECK: %mul3 = fmul double %0, %1
    44: ; CHECK: %add = fadd double %mul, %mul3
    73: ; CHECK-UNRL: %mul3 = fmul <2 x double> %2, %3
    74: ; CHECK-UNRL: %add = fadd <2 x double> %mul, %mul3

/external/llvm/test/Analysis/ScalarEvolution/

D | 2012-05-29-MulAddRec.ll
    5:  ; outer loop. While reducing the recurrence at %mul3, unsigned integer overflow
    34: %mul3 = phi i8 [ undef, %entry ], [ %mul.lcssa, %for.cond.loopexit ]
    40: %mul45 = phi i8 [ %mul3, %for.cond ], [ %mul, %for.body ]

/external/llvm/test/CodeGen/AArch64/

D | arm64-fml-combines.ll
    36:  %mul3 = fmul fast double %mul, %e10
    37:  %sub4 = fsub fast double %mul, %mul3
    79:  %mul3 = fmul fast float %mul, %e10
    80:  %add4 = fsub fast float %mul, %mul3
    121: %mul3 = fmul fast float %mul, %e10
    122: store float %mul3, float* %arrayidx2, align 8

D | arm64-fma-combines.ll
    44:  %mul3 = fmul fast double %mul, %e10
    45:  %add4 = fadd fast double %mul3, %mul
    87:  %mul3 = fmul fast float %mul, %e10
    88:  %add4 = fadd fast float %mul3, %mul
    129: %mul3 = fmul fast float %mul, %e10
    130: store float %mul3, float* %arrayidx2, align 8

/external/llvm-project/polly/test/Isl/CodeGen/

D | reduction_simple_binary.ll
    27: %mul3 = mul nsw i32 %tmp1, %add2
    28: store i32 %mul3, i32* @prod, align 4

/external/llvm-project/llvm/test/CodeGen/AArch64/

D | arm64-fma-combines.ll
    44:  %mul3 = fmul fast double %mul, %e10
    45:  %add4 = fadd fast double %mul3, %mul
    87:  %mul3 = fmul fast float %mul, %e10
    88:  %add4 = fadd fast float %mul3, %mul
    129: %mul3 = fmul fast float %mul, %e10
    130: store float %mul3, float* %arrayidx2, align 8

D | arm64-fml-combines.ll
    38:  %mul3 = fmul fast double %mul, %e10
    39:  %sub4 = fsub fast double %mul, %mul3
    81:  %mul3 = fmul fast float %mul, %e10
    82:  %add4 = fsub fast float %mul, %mul3
    123: %mul3 = fmul fast float %mul, %e10
    124: store float %mul3, float* %arrayidx2, align 8

/external/llvm-project/llvm/test/Transforms/LICM/

D | extra-copies.ll
    15: %mul3 = add nsw i32 %add2, %mul
    21: %a9.0.lcssa = phi i32 [ %mul3, %for.body ]

/external/llvm/test/Transforms/LICM/

D | extra-copies.ll
    15: %mul3 = add nsw i32 %add2, %mul
    21: %a9.0.lcssa = phi i32 [ %mul3, %for.body ]