Searched refs:mul2 (Results 1 – 25 of 56) sorted by relevance

/external/llvm/test/Transforms/StraightLineStrengthReduce/
slsr-mul.ll
20 %mul2 = mul i32 %b2, %s
21 call void @foo(i32 %mul2)
41 %mul2 = mul i32 %b2, %s
42 call void @foo(i32 %mul2)
63 %mul2 = mul i32 %b2, %s
65 call void @foo(i32 %mul2)
82 %mul2 = mul i32 %a, %b1
87 call void @foo(i32 %mul2)
101 ; mul1 = mul0 + bump; // GVN ensures mul1 and mul2 use the same bump.
102 ; mul2 = mul1 + bump;
[all …]
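
The slsr-mul.ll hits above exercise LLVM's StraightLineStrengthReduce pass: when several multiplications share a stride, later ones are rewritten as additions of a common bump, as the inline comment at lines 101-102 describes. A minimal C++ sketch of the rewrite these cases check (the before/after split is illustrative; @foo is from the test):

extern void foo(int);  // stands in for @foo in the test

// Before SLSR: three multiplies with consecutive bases b, b+1, b+2.
void before(int b, int s) {
    foo(b * s);
    foo((b + 1) * s);
    foo((b + 2) * s);
}

// After SLSR: only the first multiply survives; the rest become adds
// of a shared bump (1 * s), which GVN keeps as a single value.
void after(int b, int s) {
    int mul0 = b * s;
    int bump = s;
    int mul1 = mul0 + bump;
    int mul2 = mul1 + bump;
    foo(mul0);
    foo(mul1);
    foo(mul2);
}
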
/external/llvm/test/CodeGen/AMDGPU/
fmul-2-combine-multi-use.ll
37 %mul2 = fmul fast float %x, 2.0
38 %mad = fadd fast float %mul2, %y
39 store volatile float %mul2, float addrspace(1)* %out
53 %mul2 = fmul fast float %x.abs, 2.0
54 %mad = fadd fast float %mul2, %y
55 store volatile float %mul2, float addrspace(1)* %out
66 %mul2 = fmul fast float %x.abs, 2.0
67 %mad0 = fadd fast float %mul2, %y
68 %mad1 = fadd fast float %mul2, %z
80 %mul2 = fmul fast float %x, 2.0
[all …]
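
These fmul-2-combine-multi-use.ll cases all multiply by 2.0 and then give %mul2 a second use (a volatile store) besides the add; the file name suggests they check that the combine into a mad-style op still fires despite the extra use. A hedged scalar rendition of the tested shape:

// Scalar sketch of the IR pattern; the volatile store is the extra
// use that must not block folding mul2 + y into one mad/fma.
void pattern(float x, float y, volatile float* out, float* res) {
    float mul2 = x * 2.0f;   // %mul2 = fmul fast float %x, 2.0
    *res = mul2 + y;         // %mad  = fadd fast float %mul2, %y
    *out = mul2;             // store volatile float %mul2, ...
}
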
/external/llvm/test/Transforms/Reassociate/
wrap-flags.ll
12 %mul2 = add i32 %mul, 1
13 ret i32 %mul2
22 %mul2 = add i32 %mul, 1
23 ret i32 %mul2
32 %mul2 = add i32 %mul, 1
33 ret i32 %mul2
mixed-fast-nonfast-fp.ll
11 %mul2 = fmul fast float %a, %b
15 %add2 = fadd fast float %mul4, %mul2
/external/llvm/test/Transforms/SLPVectorizer/X86/
crash_smallpt.ll
44 %mul2.i256.us = fmul double %add4.i267.us, 1.400000e+02
46 %add4.i246.us = fadd double %mul2.i256.us, 5.200000e+01
48 %mul2.i.i.us = fmul double undef, %add4.i267.us
52 store double %mul2.i.i.us, double* %agg.tmp101211.sroa.1.8.idx390, align 8
83 %mul2.i738 = fmul double undef, %sub10.i773
85 %mul2.i729 = fmul double undef, %mul2.i738
87 %add4.i719 = fadd double undef, %mul2.i729
91 %mul2.i.i680 = fmul double undef, %add4.i698
95 store double %mul2.i.i680, double* %agg.tmp74663.sroa.1.8.idx943, align 8
crash_binaryop.ll
19 %mul2 = fmul double %add2, 0.000000e+00
20 %binaryop_B = fadd double %postadd1_phi, %mul2
compare-reduce.ll
27 %mul2 = fmul double %mul1, 7.000000e+00
28 %add = fadd double %mul2, 5.000000e+00
in-tree-user.ll
25 %mul2 = fmul double %mul1, 7.000000e+00
26 %add = fadd double %mul2, 5.000000e+00
ordering.ll
10 %mul2 = fmul double undef, %mul
11 %mul4 = fmul double %0, %mul2
/external/llvm/test/Transforms/InstCombine/
add2.ll
237 %mul2 = mul i16 %a, 3
238 %add = add nsw i16 %mul1, %mul2
247 %mul2 = mul nsw i16 %a, 7
248 %add = add nsw i16 %mul1, %mul2
257 %mul2 = mul nsw i16 %a, 7
258 %add = add nsw i16 %mul1, %mul2
267 %mul2 = mul nsw i32 %mul1, 5
268 %add = add nsw i32 %mul1, %mul2
287 %mul2 = mul nsw i16 %a, 16384
288 %add = add nsw i16 %mul1, %mul2
[all …]
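
The add2.ll hits appear to add two multiplies of the same operand by constants; the classic InstCombine fold here is x*C1 + x*C2 ==> x*(C1+C2), with the 16384 case probing overflow of the combined constant. A hedged C++ sketch (the function name and the factor of %mul1 are assumptions, since %mul1's definition is not shown):

// What the fold produces: a*3 + a*7 becomes a*10 in one multiply.
short add_of_muls(short a) {
    short mul1 = static_cast<short>(a * 3);
    short mul2 = static_cast<short>(a * 7);
    return static_cast<short>(mul1 + mul2);  // expected: a * 10
}
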
fmul.ll
68 %mul2 = fmul float %mul, %sub1
69 ret float %mul2
148 %mul2 = fmul double %mul1, %sqrt
149 ret double %mul2
152 ; CHECK-NEXT: %mul2 = fmul double %sqrt, %f
153 ; CHECK-NEXT: ret double %mul2
fast-math.ll
576 %mul2 = fmul fast double %mul, %x
577 %sqrt = call fast double @llvm.sqrt.f64(double %mul2)
589 %mul2 = fmul fast double %mul, %x
590 %sqrt = call fast double @llvm.sqrt.f64(double %mul2)
602 %mul2 = fmul fast double %mul, %y
603 %sqrt = call fast double @llvm.sqrt.f64(double %mul2)
615 %mul2 = fmul fast double %x, %mul
616 %sqrt = call fast double @llvm.sqrt.f64(double %mul2)
628 %mul2 = fmul fast double %x, %mul
629 %sqrt = call fast double @llvm.sqrt.f64(double %mul2)
[all …]
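
The fast-math.ll group feeds sqrt a product with a repeated factor (%mul2 = (x*y)*x and similar associations); with fast-math flags, InstCombine can rewrite sqrt(x*x*y) as fabs(x)*sqrt(y). A hedged scalar illustration:

#include <cmath>

// sqrt((x*y)*x) == sqrt(x*x*y) == |x| * sqrt(y) when fast math
// licenses reassociation and ignores NaN/sign corner cases.
double sqrt_of_repeated_factor(double x, double y) {
    double mul  = x * y;
    double mul2 = mul * x;   // %mul2 = fmul fast double %mul, %x
    return std::sqrt(mul2);  // expected fold: std::fabs(x) * std::sqrt(y)
}
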
/external/llvm/test/CodeGen/AArch64/
arm64-fml-combines.ll
25 %mul2 = fmul fast <2 x double> %sub2,<double 3.000000e+00, double -3.000000e+00>
28 %sub3 = fsub fast <2 x double> <double 3.000000e+00, double -3.000000e+00>, %mul2
68 %mul2 = fmul fast <2 x float> %add2,<float 3.000000e+00, float -3.000000e+00>
71 %add3 = fsub fast <2 x float> <float 3.000000e+00, float -3.000000e+00>, %mul2
110 …%mul2 = fmul fast <4 x float> %add2,<float 3.000000e+00, float -3.000000e+00, float 5.000000e+00, …
113 … x float> <float 3.000000e+00, float -3.000000e+00, float 5.000000e+00, float 7.000000e+00> , %mul2
arm64-fma-combines.ll
33 %mul2 = fmul fast <2 x double> %add2,<double 3.000000e+00, double -3.000000e+00>
36 %add3 = fadd fast <2 x double> %mul2, <double 3.000000e+00, double -3.000000e+00>
76 %mul2 = fmul fast <2 x float> %add2,<float 3.000000e+00, float -3.000000e+00>
79 %add3 = fadd fast <2 x float> %mul2, <float 3.000000e+00, float -3.000000e+00>
118 …%mul2 = fmul fast <4 x float> %add2,<float 3.000000e+00, float -3.000000e+00, float 5.000000e+00, …
121 …%add3 = fadd fast <4 x float> %mul2, <float 3.000000e+00, float -3.000000e+00, float 5.000000e+00…
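
Both arm64-fml-combines.ll and arm64-fma-combines.ll build a fast fmul whose result feeds an fsub or fadd against a constant vector, checking that the AArch64 backend fuses the pair into fmls/fmla. A hedged scalar view of the two shapes:

// fmul + fadd fuse to fmla: z + x*y in one instruction.
double fmla_shape(double x, double y, double z) {
    double mul2 = x * y;
    return mul2 + z;   // candidate for fmla, i.e. fma(x, y, z)
}

// fmul + fsub (value minus product) fuse to fmls: z - x*y.
double fmls_shape(double x, double y, double z) {
    double mul2 = x * y;
    return z - mul2;   // candidate for fmls, i.e. fma(-x, y, z)
}
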
/external/llvm/test/Transforms/SimplifyCFG/AArch64/
prefer-fma.ll
45 ; CHECK-NEXT: %mul2 = fsub fast double %6, %7
48 %mul2 = fsub fast double %7, %8
49 store double %mul2, double* %y, align 8
/external/llvm/test/CodeGen/X86/
dag-fmf-cse.ll
18 %mul2 = fmul fast float %nega, %b
19 %abx2 = fsub fast float %mul1, %mul2
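
dag-fmf-cse.ll multiplies by a negated operand (%nega) and subtracts that from another product; the file name indicates the DAG combiner's fast-math-flag-aware CSE should treat (-a)*b as -(a*b) and reuse one multiply. A hedged sketch (%mul1's definition is not shown in the hit, so its shape is assumed):

// Under fast math, mul2 == -mul1, so the subtraction collapses to
// one multiply: a*b - (-a)*b == 2*(a*b).
float abx2(float a, float b) {
    float nega = -a;
    float mul1 = a * b;     // assumed shape of %mul1
    float mul2 = nega * b;  // %mul2 = fmul fast float %nega, %b
    return mul1 - mul2;     // %abx2 = fsub fast float %mul1, %mul2
}
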
fmul-combines.ll
109 %mul2 = fmul fast <4 x float> <float 4.0, float 4.0, float 4.0, float 4.0>, %mul1
110 %mul3 = fmul fast <4 x float> %a, %mul2
125 %mul2 = fmul fast <4 x float> <float 9.0, float 10.0, float 11.0, float 12.0>, %mul1
126 %mul3 = fmul fast <4 x float> %a, %mul2
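
The fmul-combines.ll cases stack constant multiplies (4.0 * %mul1, then %a * %mul2); under fast math the constants can merge so only one vector-constant multiply remains, i.e. (x*C1)*C2 ==> x*(C1*C2). A hedged scalar reduction (the inner constant is assumed, as %mul1 is not shown):

// Constant factors merge: (x * 2) * 4 folds to x * 8 under fast math.
float merged_constants(float x, float a) {
    float mul1 = x * 2.0f;     // stands in for the splat multiply producing %mul1
    float mul2 = 4.0f * mul1;  // %mul2 = fmul fast <4 x float> <4.0 ...>, %mul1
    return a * mul2;           // %mul3 = fmul fast <4 x float> %a, %mul2
}
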
combine-multiplies.ll
124 %mul2 = mul <4 x i32> %add2, <i32 22, i32 22, i32 22, i32 22>
126 store <4 x i32> %mul2, <4 x i32>* @v3, align 16
158 %mul2 = mul <4 x i32> %add2, <i32 22, i32 33, i32 44, i32 55>
160 store <4 x i32> %mul2, <4 x i32>* @v3, align 16
/external/llvm/test/Analysis/DependenceAnalysis/
SymbolicSIV.ll
81 %mul2 = mul nsw i64 %i.03, 3
83 %add4 = add i64 %mul2, %mul3
127 %mul2 = shl i64 %n, 1
128 %add = sub i64 %mul2, %i.03
173 %mul2 = shl i64 %n, 1
174 %sub = sub i64 %i.03, %mul2
356 %mul2 = shl i64 %N, 2
357 %mul3 = mul i64 %mul2, %i.03
405 %mul2 = shl i64 %N, 1
406 %mul3 = mul i64 %mul2, %i.03
/external/syslinux/gpxe/src/crypto/axtls/
aes.c
50 #define mul2(x,t) ((t)=((x)&mt), \ macro
54 (f2)=mul2(x,f2), \
55 (f4)=mul2(f2,f4), \
56 (f8)=mul2(f4,f8), \
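
Here mul2 is defined rather than referenced: axtls's aes.c macro doubles field elements in GF(2^8) (the AES xtime step), apparently over several packed bytes at once via the mt mask, and the chained f2/f4/f8 lines build x*2, x*4, x*8 for MixColumns. A hedged single-byte C++ version of the same operation:

#include <cstdint>

// Doubling in GF(2^8): shift left, then reduce modulo the AES
// polynomial x^8 + x^4 + x^3 + x + 1 by XORing 0x1b whenever the
// high bit shifted out.
inline uint8_t xtime(uint8_t x) {
    return static_cast<uint8_t>((x << 1) ^ ((x & 0x80) ? 0x1b : 0x00));
}

// Usage mirroring the f2/f4/f8 chain in aes.c:
//   uint8_t f2 = xtime(x), f4 = xtime(f2), f8 = xtime(f4);
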
/external/vulkan-validation-layers/libs/glm/detail/
intrinsic_geometric.inl
130 __m128 mul2 = _mm_mul_ps(sub0, sub1); local
132 if(_mm_movemask_ps(_mm_cmplt_ss(mul2, glm::detail::zero)) == 0)
135 __m128 sqt0 = _mm_sqrt_ps(mul2);
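
In the glm hit, mul2 is compared against zero and then fed to a square root; this looks like the discriminant k of a refract() computation, where k < 0 signals total internal reflection and otherwise sqrt(k) enters the refracted vector. That reading is an assumption, since only three lines are shown; a scalar sketch of that shape:

#include <cmath>

// k = 1 - eta^2 * (1 - dot(N, I)^2); negative k means total internal
// reflection (the _mm_cmplt_ss branch), else take sqrt(k).
bool refract_discriminant(float ndoti, float eta, float& k_sqrt) {
    float k = 1.0f - eta * eta * (1.0f - ndoti * ndoti);
    if (k < 0.0f) return false;  // caller yields the zero vector
    k_sqrt = std::sqrt(k);       // corresponds to _mm_sqrt_ps(mul2)
    return true;
}
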
/external/vulkan-validation-layers/libs/glm/gtx/
simd_quat.inl
125 __m128 mul2 = _mm_mul_ps(q1.Data, _mm_shuffle_ps(q2.Data, q2.Data, _MM_SHUFFLE(2, 3, 0, 1))); local
131 __m128 add2 = _mm_dp_ps(mul2, _mm_set_ps(1.0f, 1.0f, -1.0f, 1.0f), 0xff);
142 mul2 = _mm_mul_ps(mul2, _mm_set_ps(1.0f, 1.0f, -1.0f, 1.0f));
143 __m128 add2 = _mm_add_ps(mul2, _mm_movehl_ps(mul2, mul2));
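
The simd_quat.inl fragment computes a quaternion product: each shuffled multiply plus sign-flipped horizontal add (mul2 with the (1, 1, -1, 1) mask, and so on) yields one component of q1*q2. A scalar Hamilton product for comparison (the struct is illustrative):

struct Quat { float x, y, z, w; };

// Each output component is a signed dot product of shuffled inputs,
// which is what the _mm_shuffle_ps / sign-mask / add pairs evaluate
// four lanes at a time.
Quat mul(const Quat& a, const Quat& b) {
    return {
        a.w * b.x + a.x * b.w + a.y * b.z - a.z * b.y,
        a.w * b.y - a.x * b.z + a.y * b.w + a.z * b.x,
        a.w * b.z + a.x * b.y - a.y * b.x + a.z * b.w,
        a.w * b.w - a.x * b.x - a.y * b.y - a.z * b.z,
    };
}
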
/external/llvm/test/Transforms/BBVectorize/
cycle.ll
13 define void @test1(double %a, double %b, double %c, double %add80, double %mul1, double %mul2.v.r1,…
40 %sub71 = fsub double %add69, %mul2.v.r1
48 %sub86 = fsub double %add84, %mul2.v.r1
/external/llvm/test/CodeGen/SPARC/
LeonPreventRoundChangePassUT.ll
39 %mul2 = fmul double %2, %3
40 %cmp = fcmp une double %sub1, %mul2
/external/eigen/Eigen/src/Geometry/arch/
Geometry_SSE.h
73 __m128 mul2=_mm_mul_ps(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3));
75 pstoret<float,Packet4f,ResAlignment>(&res.x(),_mm_sub_ps(mul1,mul2));
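
The Eigen fragment appears to be the standard SSE cross product: two swizzled multiplies and one subtract, with mul1 - mul2 == a.yzx*b.zxy - a.zxy*b.yzx. The scalar equivalent:

#include <array>

// cross(a, b), component-for-component what _mm_sub_ps(mul1, mul2)
// produces in lanes 0..2 (lane 3 is unused padding).
std::array<float, 3> cross(const std::array<float, 3>& a,
                           const std::array<float, 3>& b) {
    return {a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0]};
}
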
