
Searched refs:tmp9 (Results 1 – 25 of 427) sorted by relevance


/external/swiftshader/third_party/LLVM/test/CodeGen/X86/
lea-recursion.ll
18 %tmp9 = add i32 %tmp4, 1 ; <i32> [#uses=1]
19 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=2]
22 %tmp9.1 = add i32 %tmp10, 1 ; <i32> [#uses=1]
23 %tmp10.1 = add i32 %tmp9.1, %tmp8.1 ; <i32> [#uses=2]
26 %tmp9.2 = add i32 %tmp10.1, 1 ; <i32> [#uses=1]
27 %tmp10.2 = add i32 %tmp9.2, %tmp8.2 ; <i32> [#uses=2]
30 %tmp9.3 = add i32 %tmp10.2, 1 ; <i32> [#uses=1]
31 %tmp10.3 = add i32 %tmp9.3, %tmp8.3 ; <i32> [#uses=2]
34 %tmp9.4 = add i32 %tmp10.3, 1 ; <i32> [#uses=1]
35 %tmp10.4 = add i32 %tmp9.4, %tmp8.4 ; <i32> [#uses=2]
[all …]
vec_shift.ll
9 …%tmp9 = tail call <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> %tmp8, <8 x i16> %tmp6 ) nounwind rea…
10 %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
19 …%tmp9 = tail call <8 x i16> @llvm.x86.sse2.psra.w( <8 x i16> %tmp2, <8 x i16> %tmp8 ) ; <<8 x i16…
20 %tmp11 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
28 …%tmp9 = tail call <2 x i64> @llvm.x86.sse2.psrl.q( <2 x i64> %b1, <2 x i64> %c ) nounwind readnone…
29 ret <2 x i64> %tmp9
fold-and-shift.ll
9 %tmp9 = load i32* %tmp78, align 4 ; <i32> [#uses=1]
10 ret i32 %tmp9
19 %tmp9 = load i32* %tmp78, align 4 ; <i32> [#uses=1]
20 ret i32 %tmp9
2006-05-08-CoalesceSubRegClass.ll
20 %tmp9 = add i32 %tmp8, %tmp6 ; <i32> [#uses=1]
21 %tmp9.upgrd.3 = inttoptr i32 %tmp9 to i16* ; <i16*> [#uses=1]
22 store i16* %tmp9.upgrd.3, i16** @C
2009-11-16-MachineLICM.ll
20 %tmp9 = shl i64 %indvar, 2 ; <i64> [#uses=4]
21 %tmp1016 = or i64 %tmp9, 1 ; <i64> [#uses=1]
23 %tmp1117 = or i64 %tmp9, 2 ; <i64> [#uses=1]
25 %tmp1318 = or i64 %tmp9, 3 ; <i64> [#uses=1]
27 %x_addr.03 = getelementptr float* %x, i64 %tmp9 ; <float*> [#uses=1]
mmx-punpckhdq.ll
10 …%tmp9 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> < i32 1, i32 1 > ; <<2 x i32>> …
11 %tmp10 = bitcast <2 x i32> %tmp9 to <1 x i64> ; <<1 x i64>> [#uses=1]
24 %tmp9 = tail call x86_mmx @llvm.x86.mmx.punpckhdq (x86_mmx %tmp2, x86_mmx %tmp2)
25 store x86_mmx %tmp9, x86_mmx* %x
vec_set-5.ll
9 %tmp9 = insertelement <4 x float> %tmp8, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
10 ret <4 x float> %tmp9
17 %tmp9 = insertelement <4 x float> %tmp8, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
18 ret <4 x float> %tmp9
coalescer-commute2.ll
16 %tmp9 = add <8 x i16> %tmp8, %tmp6 ; <<8 x i16>> [#uses=1]
17 %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
25 %tmp9 = add <8 x i16> %tmp8, %tmp6 ; <<8 x i16>> [#uses=1]
26 %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
/external/llvm/test/CodeGen/X86/
lea-recursion.ll
18 %tmp9 = add i32 %tmp4, 1 ; <i32> [#uses=1]
19 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=2]
22 %tmp9.1 = add i32 %tmp10, 1 ; <i32> [#uses=1]
23 %tmp10.1 = add i32 %tmp9.1, %tmp8.1 ; <i32> [#uses=2]
26 %tmp9.2 = add i32 %tmp10.1, 1 ; <i32> [#uses=1]
27 %tmp10.2 = add i32 %tmp9.2, %tmp8.2 ; <i32> [#uses=2]
30 %tmp9.3 = add i32 %tmp10.2, 1 ; <i32> [#uses=1]
31 %tmp10.3 = add i32 %tmp9.3, %tmp8.3 ; <i32> [#uses=2]
34 %tmp9.4 = add i32 %tmp10.3, 1 ; <i32> [#uses=1]
35 %tmp10.4 = add i32 %tmp9.4, %tmp8.4 ; <i32> [#uses=2]
[all …]
2006-05-08-CoalesceSubRegClass.ll
20 %tmp9 = add i32 %tmp8, %tmp6 ; <i32> [#uses=1]
21 %tmp9.upgrd.3 = inttoptr i32 %tmp9 to i16* ; <i16*> [#uses=1]
22 store i16* %tmp9.upgrd.3, i16** @C
vec_shift.ll
18 …%tmp9 = tail call <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> %tmp8, <8 x i16> %tmp6 ) nounwind rea…
19 %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
39 …%tmp9 = tail call <8 x i16> @llvm.x86.sse2.psra.w( <8 x i16> %tmp2, <8 x i16> %tmp8 ) ; <<8 x i16…
40 %tmp11 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
57 …%tmp9 = tail call <2 x i64> @llvm.x86.sse2.psrl.q( <2 x i64> %b1, <2 x i64> %c ) nounwind readnone…
58 ret <2 x i64> %tmp9
2009-11-16-MachineLICM.ll
20 %tmp9 = shl i64 %indvar, 2 ; <i64> [#uses=4]
21 %tmp1016 = or i64 %tmp9, 1 ; <i64> [#uses=1]
23 %tmp1117 = or i64 %tmp9, 2 ; <i64> [#uses=1]
25 %tmp1318 = or i64 %tmp9, 3 ; <i64> [#uses=1]
27 %x_addr.03 = getelementptr float, float* %x, i64 %tmp9 ; <float*> [#uses=1]
coalescer-commute2.ll
16 %tmp9 = add <8 x i16> %tmp8, %tmp6 ; <<8 x i16>> [#uses=1]
17 %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
25 %tmp9 = add <8 x i16> %tmp8, %tmp6 ; <<8 x i16>> [#uses=1]
26 %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
/external/llvm/test/Transforms/SimplifyCFG/
phi-undef-loadstore.ll
24 %tmp9 = load i32, i32* %x.0
25 ret i32 %tmp9
53 %tmp9 = load i32, i32* %x.0
54 ret i32 %tmp9
82 %tmp9 = load i32, i32* %x.0
83 ret i32 %tmp9
109 %tmp9 = load i32, i32* %gep
110 %tmp10 = or i32 %tmp9, 1
112 ret i32 %tmp9
/external/llvm/test/Transforms/TailCallElim/
dont_reorder_load.ll
24 %tmp9 = load i32, i32* @extern_weak_global ; <i32> [#uses=1]
25 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
43 %tmp9 = load i32, i32* %a_arg ; <i32> [#uses=1]
44 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
61 %tmp9 = load volatile i32, i32* %a_arg ; <i32> [#uses=1]
62 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
79 %tmp9 = load i32, i32* %a_arg ; <i32> [#uses=1]
80 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
reorder_load.ll
32 %tmp9 = load i32, i32* %a_arg ; <i32> [#uses=1]
33 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
63 %tmp9 = load i32, i32* @global ; <i32> [#uses=1]
64 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
87 %tmp9 = load i32, i32* @extern_weak_global ; <i32> [#uses=1]
88 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
144 %tmp9 = load i32, i32* %a_arg ; <i32> [#uses=1]
145 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
/external/swiftshader/third_party/LLVM/test/Transforms/IndVarSimplify/
gep-with-mul-base.ll
28 %tmp9 = getelementptr double* %p, i64 %i.01 ; <double*> [#uses=1]
29 %tmp10 = load double* %tmp9, align 8 ; <double> [#uses=1]
31 store double %tmp11, double* %tmp9, align 8
55 %tmp9 = getelementptr double* %p, i64 %i.01 ; <double*> [#uses=1]
56 %tmp10 = load double* %tmp9, align 8 ; <double> [#uses=1]
58 store double %tmp11, double* %tmp9, align 8
/external/swiftshader/third_party/LLVM/test/Transforms/TailCallElim/
dont_reorder_load.ll
24 %tmp9 = load i32* @extern_weak_global ; <i32> [#uses=1]
25 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
43 %tmp9 = load i32* %a_arg ; <i32> [#uses=1]
44 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
61 %tmp9 = volatile load i32* %a_arg ; <i32> [#uses=1]
62 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
/external/swiftshader/third_party/LLVM/test/Transforms/InstCombine/
2007-05-10-icmp-or.ll
2 define i1 @test(i32 %tmp9) {
3 %tmp20 = icmp ugt i32 %tmp9, 255 ; <i1> [#uses=1]
4 %tmp11.not = icmp sgt i32 %tmp9, 255 ; <i1> [#uses=1]
2007-06-06-AshrSignBit.ll
4 define void @av_cmp_q_cond_true(i32* %retval, i32* %tmp9, i64* %tmp10) {
17 store i32 %tmp33, i32* %tmp9
18 %tmp34 = load i32* %tmp9 ; <i32> [#uses=1]
/external/llvm/test/Transforms/InstCombine/
2007-05-10-icmp-or.ll
2 define i1 @test(i32 %tmp9) {
3 %tmp20 = icmp ugt i32 %tmp9, 255 ; <i1> [#uses=1]
4 %tmp11.not = icmp sgt i32 %tmp9, 255 ; <i1> [#uses=1]
2007-06-06-AshrSignBit.ll
4 define void @av_cmp_q_cond_true(i32* %retval, i32* %tmp9, i64* %tmp10) {
17 store i32 %tmp33, i32* %tmp9
18 %tmp34 = load i32, i32* %tmp9 ; <i32> [#uses=1]
/external/llvm/test/Transforms/Reassociate/
repeats.ll
139 %tmp9 = mul i4 %tmp8, %x
140 ret i4 %tmp9
159 %tmp9 = mul i4 %tmp8, %x
160 %tmp10 = mul i4 %tmp9, %x
178 %tmp9 = mul i4 %tmp8, %x
179 %tmp10 = mul i4 %tmp9, %x
199 %tmp9 = mul i4 %tmp8, %x
200 %tmp10 = mul i4 %tmp9, %x
221 %tmp9 = mul i4 %tmp8, %x
222 %tmp10 = mul i4 %tmp9, %x
[all …]
/external/swiftshader/third_party/LLVM/test/Transforms/SimplifyCFG/
phi-undef-loadstore.ll
24 %tmp9 = load i32* %x.0
25 ret i32 %tmp9
53 %tmp9 = load i32* %x.0
54 ret i32 %tmp9
82 %tmp9 = load i32* %x.0
83 ret i32 %tmp9
/external/llvm/test/CodeGen/PowerPC/
vec_shuffle_p8vector_le.ll
13 %tmp9 = bitcast <4 x i32> %tmp8 to <2 x i64>
14 store <2 x i64> %tmp9, <2 x i64>* %A
32 %tmp9 = insertelement <4 x i32> undef, i32 %tmp5, i32 0
33 %tmp10 = insertelement <4 x i32> %tmp9, i32 %tmp6, i32 1
