/external/swiftshader/third_party/LLVM/test/CodeGen/X86/

D | lea-recursion.ll
    18  %tmp9 = add i32 %tmp4, 1  ; <i32> [#uses=1]
    19  %tmp10 = add i32 %tmp9, %tmp8  ; <i32> [#uses=2]
    22  %tmp9.1 = add i32 %tmp10, 1  ; <i32> [#uses=1]
    23  %tmp10.1 = add i32 %tmp9.1, %tmp8.1  ; <i32> [#uses=2]
    26  %tmp9.2 = add i32 %tmp10.1, 1  ; <i32> [#uses=1]
    27  %tmp10.2 = add i32 %tmp9.2, %tmp8.2  ; <i32> [#uses=2]
    30  %tmp9.3 = add i32 %tmp10.2, 1  ; <i32> [#uses=1]
    31  %tmp10.3 = add i32 %tmp9.3, %tmp8.3  ; <i32> [#uses=2]
    34  %tmp9.4 = add i32 %tmp10.3, 1  ; <i32> [#uses=1]
    35  %tmp10.4 = add i32 %tmp9.4, %tmp8.4  ; <i32> [#uses=2]
    [all …]
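The matched chain alternates (x + 1) and (x + y) adds; the test checks that LEA formation keeps folding as it recurses down such a chain. A minimal self-contained sketch of the same shape (the function and value names here are invented, not taken from the test):

    define i32 @add_chain(i32 %a, i32 %b) {
    entry:
      ; each (x + 1) + y pair can fold into a single x86 lea
      %t0 = add i32 %a, 1
      %t1 = add i32 %t0, %b
      %t2 = add i32 %t1, 1
      %t3 = add i32 %t2, %b
      ret i32 %t3
    }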
D | vec_shift.ll
     9  …%tmp9 = tail call <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> %tmp8, <8 x i16> %tmp6 ) nounwind rea…
    10  %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64>  ; <<2 x i64>> [#uses=1]
    19  …%tmp9 = tail call <8 x i16> @llvm.x86.sse2.psra.w( <8 x i16> %tmp2, <8 x i16> %tmp8 )  ; <<8 x i16…
    20  %tmp11 = bitcast <8 x i16> %tmp9 to <2 x i64>  ; <<2 x i64>> [#uses=1]
    28  …%tmp9 = tail call <2 x i64> @llvm.x86.sse2.psrl.q( <2 x i64> %b1, <2 x i64> %c ) nounwind readnone…
    29  ret <2 x i64> %tmp9
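These matches call the SSE2 shift intrinsics directly. A runnable sketch of the pattern around the truncated psll.w line (the wrapper function is hypothetical; the intrinsic signature is the one visible in the matches above):

    declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind readnone

    define <2 x i64> @shift_words(<2 x i64> %v, <2 x i64> %amt) nounwind {
    entry:
      %val = bitcast <2 x i64> %v to <8 x i16>
      %cnt = bitcast <2 x i64> %amt to <8 x i16>
      ; psllw shifts all eight i16 lanes left by the single count held
      ; in the low 64 bits of the second operand
      %res = tail call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %val, <8 x i16> %cnt)
      %out = bitcast <8 x i16> %res to <2 x i64>
      ret <2 x i64> %out
    }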
D | fold-and-shift.ll
     9  %tmp9 = load i32* %tmp78, align 4  ; <i32> [#uses=1]
    10  ret i32 %tmp9
    19  %tmp9 = load i32* %tmp78, align 4  ; <i32> [#uses=1]
    20  ret i32 %tmp9
D | 2006-05-08-CoalesceSubRegClass.ll
    20  %tmp9 = add i32 %tmp8, %tmp6  ; <i32> [#uses=1]
    21  %tmp9.upgrd.3 = inttoptr i32 %tmp9 to i16*  ; <i16*> [#uses=1]
    22  store i16* %tmp9.upgrd.3, i16** @C
D | 2009-11-16-MachineLICM.ll
    20  %tmp9 = shl i64 %indvar, 2  ; <i64> [#uses=4]
    21  %tmp1016 = or i64 %tmp9, 1  ; <i64> [#uses=1]
    23  %tmp1117 = or i64 %tmp9, 2  ; <i64> [#uses=1]
    25  %tmp1318 = or i64 %tmp9, 3  ; <i64> [#uses=1]
    27  %x_addr.03 = getelementptr float* %x, i64 %tmp9  ; <float*> [#uses=1]
D | mmx-punpckhdq.ll
    10  …%tmp9 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> < i32 1, i32 1 >  ; <<2 x i32>> …
    11  %tmp10 = bitcast <2 x i32> %tmp9 to <1 x i64>  ; <<1 x i64>> [#uses=1]
    24  %tmp9 = tail call x86_mmx @llvm.x86.mmx.punpckhdq (x86_mmx %tmp2, x86_mmx %tmp2)
    25  store x86_mmx %tmp9, x86_mmx* %x
D | vec_set-5.ll
     9  %tmp9 = insertelement <4 x float> %tmp8, float 0.000000e+00, i32 3  ; <<4 x float>> [#uses=1]
    10  ret <4 x float> %tmp9
    17  %tmp9 = insertelement <4 x float> %tmp8, float 0.000000e+00, i32 3  ; <<4 x float>> [#uses=1]
    18  ret <4 x float> %tmp9
D | coalescer-commute2.ll
    16  %tmp9 = add <8 x i16> %tmp8, %tmp6  ; <<8 x i16>> [#uses=1]
    17  %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64>  ; <<2 x i64>> [#uses=1]
    25  %tmp9 = add <8 x i16> %tmp8, %tmp6  ; <<8 x i16>> [#uses=1]
    26  %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64>  ; <<2 x i64>> [#uses=1]
/external/llvm/test/CodeGen/X86/

D | lea-recursion.ll
    18  %tmp9 = add i32 %tmp4, 1  ; <i32> [#uses=1]
    19  %tmp10 = add i32 %tmp9, %tmp8  ; <i32> [#uses=2]
    22  %tmp9.1 = add i32 %tmp10, 1  ; <i32> [#uses=1]
    23  %tmp10.1 = add i32 %tmp9.1, %tmp8.1  ; <i32> [#uses=2]
    26  %tmp9.2 = add i32 %tmp10.1, 1  ; <i32> [#uses=1]
    27  %tmp10.2 = add i32 %tmp9.2, %tmp8.2  ; <i32> [#uses=2]
    30  %tmp9.3 = add i32 %tmp10.2, 1  ; <i32> [#uses=1]
    31  %tmp10.3 = add i32 %tmp9.3, %tmp8.3  ; <i32> [#uses=2]
    34  %tmp9.4 = add i32 %tmp10.3, 1  ; <i32> [#uses=1]
    35  %tmp10.4 = add i32 %tmp9.4, %tmp8.4  ; <i32> [#uses=2]
    [all …]
D | 2006-05-08-CoalesceSubRegClass.ll
    20  %tmp9 = add i32 %tmp8, %tmp6  ; <i32> [#uses=1]
    21  %tmp9.upgrd.3 = inttoptr i32 %tmp9 to i16*  ; <i16*> [#uses=1]
    22  store i16* %tmp9.upgrd.3, i16** @C
D | vec_shift.ll
    18  …%tmp9 = tail call <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> %tmp8, <8 x i16> %tmp6 ) nounwind rea…
    19  %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64>  ; <<2 x i64>> [#uses=1]
    39  …%tmp9 = tail call <8 x i16> @llvm.x86.sse2.psra.w( <8 x i16> %tmp2, <8 x i16> %tmp8 )  ; <<8 x i16…
    40  %tmp11 = bitcast <8 x i16> %tmp9 to <2 x i64>  ; <<2 x i64>> [#uses=1]
    57  …%tmp9 = tail call <2 x i64> @llvm.x86.sse2.psrl.q( <2 x i64> %b1, <2 x i64> %c ) nounwind readnone…
    58  ret <2 x i64> %tmp9
D | 2009-11-16-MachineLICM.ll
    20  %tmp9 = shl i64 %indvar, 2  ; <i64> [#uses=4]
    21  %tmp1016 = or i64 %tmp9, 1  ; <i64> [#uses=1]
    23  %tmp1117 = or i64 %tmp9, 2  ; <i64> [#uses=1]
    25  %tmp1318 = or i64 %tmp9, 3  ; <i64> [#uses=1]
    27  %x_addr.03 = getelementptr float, float* %x, i64 %tmp9  ; <float*> [#uses=1]
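The or instructions here stand in for adds: after %tmp9 = shl i64 %indvar, 2, the low two bits of %tmp9 are known zero, so or-ing in 1, 2, or 3 indexes the four unrolled lanes. A small sketch of the trick (function and value names invented):

    define float* @lane1(float* %x, i64 %i) {
    entry:
      %base = shl i64 %i, 2              ; low two bits of %base are zero
      %idx = or i64 %base, 1             ; equivalent to add i64 %base, 1 here
      %p = getelementptr float, float* %x, i64 %idx
      ret float* %p
    }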
D | coalescer-commute2.ll
    16  %tmp9 = add <8 x i16> %tmp8, %tmp6  ; <<8 x i16>> [#uses=1]
    17  %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64>  ; <<2 x i64>> [#uses=1]
    25  %tmp9 = add <8 x i16> %tmp8, %tmp6  ; <<8 x i16>> [#uses=1]
    26  %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64>  ; <<2 x i64>> [#uses=1]
/external/llvm/test/Transforms/SimplifyCFG/

D | phi-undef-loadstore.ll
     24  %tmp9 = load i32, i32* %x.0
     25  ret i32 %tmp9
     53  %tmp9 = load i32, i32* %x.0
     54  ret i32 %tmp9
     82  %tmp9 = load i32, i32* %x.0
     83  ret i32 %tmp9
    109  %tmp9 = load i32, i32* %gep
    110  %tmp10 = or i32 %tmp9, 1
    112  ret i32 %tmp9
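The loads above go through a phi of pointers where, per the test's name, one incoming value is undef; SimplifyCFG can use the undef arm to simplify the control flow feeding the load. A hedged sketch of that shape (function and labels invented, not the test's actual CFG):

    define i32 @pick(i1 %c, i32* %a) {
    entry:
      br i1 %c, label %if, label %join
    if:
      br label %join
    join:
      ; one phi input is undef, so only the %if path carries a real pointer
      %x.0 = phi i32* [ %a, %if ], [ undef, %entry ]
      %tmp9 = load i32, i32* %x.0
      ret i32 %tmp9
    }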
/external/llvm/test/Transforms/TailCallElim/

D | dont_reorder_load.ll
    24  %tmp9 = load i32, i32* @extern_weak_global  ; <i32> [#uses=1]
    25  %tmp10 = add i32 %tmp9, %tmp8  ; <i32> [#uses=1]
    43  %tmp9 = load i32, i32* %a_arg  ; <i32> [#uses=1]
    44  %tmp10 = add i32 %tmp9, %tmp8  ; <i32> [#uses=1]
    61  %tmp9 = load volatile i32, i32* %a_arg  ; <i32> [#uses=1]
    62  %tmp10 = add i32 %tmp9, %tmp8  ; <i32> [#uses=1]
    79  %tmp9 = load i32, i32* %a_arg  ; <i32> [#uses=1]
    80  %tmp10 = add i32 %tmp9, %tmp8  ; <i32> [#uses=1]
D | reorder_load.ll
     32  %tmp9 = load i32, i32* %a_arg  ; <i32> [#uses=1]
     33  %tmp10 = add i32 %tmp9, %tmp8  ; <i32> [#uses=1]
     63  %tmp9 = load i32, i32* @global  ; <i32> [#uses=1]
     64  %tmp10 = add i32 %tmp9, %tmp8  ; <i32> [#uses=1]
     87  %tmp9 = load i32, i32* @extern_weak_global  ; <i32> [#uses=1]
     88  %tmp10 = add i32 %tmp9, %tmp8  ; <i32> [#uses=1]
    144  %tmp9 = load i32, i32* %a_arg  ; <i32> [#uses=1]
    145  %tmp10 = add i32 %tmp9, %tmp8  ; <i32> [#uses=1]
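This pair of tests probes the same transform from both sides: TailCallElim may hoist a load above a recursive call so the call becomes a true tail call, but only when the load is safe to execute early; the dont_reorder_load cases (extern_weak globals, volatile loads) must stay put. A sketch of the accumulator pattern involved (function name and structure invented, echoing the matched value names):

    define i32 @sum_down(i32* %a_arg, i32 %n) {
    entry:
      %done = icmp eq i32 %n, 0
      br i1 %done, label %base, label %recurse

    recurse:
      %n1 = sub i32 %n, 1
      %tmp8 = call i32 @sum_down(i32* %a_arg, i32 %n1)
      ; the load sits after the call; hoisting it above the call enables
      ; tail-call elimination, but only if %a_arg is known dereferenceable
      %tmp9 = load i32, i32* %a_arg
      %tmp10 = add i32 %tmp9, %tmp8
      ret i32 %tmp10

    base:
      ret i32 0
    }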
/external/swiftshader/third_party/LLVM/test/Transforms/IndVarSimplify/

D | gep-with-mul-base.ll
    28  %tmp9 = getelementptr double* %p, i64 %i.01  ; <double*> [#uses=1]
    29  %tmp10 = load double* %tmp9, align 8  ; <double> [#uses=1]
    31  store double %tmp11, double* %tmp9, align 8
    55  %tmp9 = getelementptr double* %p, i64 %i.01  ; <double*> [#uses=1]
    56  %tmp10 = load double* %tmp9, align 8  ; <double> [#uses=1]
    58  store double %tmp11, double* %tmp9, align 8
/external/swiftshader/third_party/LLVM/test/Transforms/TailCallElim/

D | dont_reorder_load.ll
    24  %tmp9 = load i32* @extern_weak_global  ; <i32> [#uses=1]
    25  %tmp10 = add i32 %tmp9, %tmp8  ; <i32> [#uses=1]
    43  %tmp9 = load i32* %a_arg  ; <i32> [#uses=1]
    44  %tmp10 = add i32 %tmp9, %tmp8  ; <i32> [#uses=1]
    61  %tmp9 = volatile load i32* %a_arg  ; <i32> [#uses=1]
    62  %tmp10 = add i32 %tmp9, %tmp8  ; <i32> [#uses=1]
/external/swiftshader/third_party/LLVM/test/Transforms/InstCombine/

D | 2007-05-10-icmp-or.ll
    2  define i1 @test(i32 %tmp9) {
    3  %tmp20 = icmp ugt i32 %tmp9, 255  ; <i1> [#uses=1]
    4  %tmp11.not = icmp sgt i32 %tmp9, 255  ; <i1> [#uses=1]
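The regression test pairs an unsigned and a signed compare against 255; the two predicates are not interchangeable because they disagree on negative inputs, which is exactly the kind of distinction an over-eager icmp/or combine can lose. A tiny illustration (function name invented, not part of the test):

    define i1 @unsigned_vs_signed(i32 %v) {
    entry:
      %u = icmp ugt i32 %v, 255   ; true for %v = -1 (0xFFFFFFFF unsigned)
      %s = icmp sgt i32 %v, 255   ; false for %v = -1
      %ne = xor i1 %u, %s         ; true exactly where the predicates disagree
      ret i1 %ne
    }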
D | 2007-06-06-AshrSignBit.ll
     4  define void @av_cmp_q_cond_true(i32* %retval, i32* %tmp9, i64* %tmp10) {
    17  store i32 %tmp33, i32* %tmp9
    18  %tmp34 = load i32* %tmp9  ; <i32> [#uses=1]
/external/llvm/test/Transforms/InstCombine/

D | 2007-05-10-icmp-or.ll
    2  define i1 @test(i32 %tmp9) {
    3  %tmp20 = icmp ugt i32 %tmp9, 255  ; <i1> [#uses=1]
    4  %tmp11.not = icmp sgt i32 %tmp9, 255  ; <i1> [#uses=1]
D | 2007-06-06-AshrSignBit.ll
     4  define void @av_cmp_q_cond_true(i32* %retval, i32* %tmp9, i64* %tmp10) {
    17  store i32 %tmp33, i32* %tmp9
    18  %tmp34 = load i32, i32* %tmp9  ; <i32> [#uses=1]
/external/llvm/test/Transforms/Reassociate/

D | repeats.ll
    139  %tmp9 = mul i4 %tmp8, %x
    140  ret i4 %tmp9
    159  %tmp9 = mul i4 %tmp8, %x
    160  %tmp10 = mul i4 %tmp9, %x
    178  %tmp9 = mul i4 %tmp8, %x
    179  %tmp10 = mul i4 %tmp9, %x
    199  %tmp9 = mul i4 %tmp8, %x
    200  %tmp10 = mul i4 %tmp9, %x
    221  %tmp9 = mul i4 %tmp8, %x
    222  %tmp10 = mul i4 %tmp9, %x
    [all …]
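repeats.ll feeds Reassociate chains of the form x * x * … * x; the pass recognizes the repeated factor and can rebuild the product as a power expansion with fewer multiplies. A minimal sketch of the input shape in the same i4 width (function name invented):

    define i4 @x_cubed(i4 %x) {
      ; x * x * x — a repeated-factor chain Reassociate can rebalance
      %tmp8 = mul i4 %x, %x
      %tmp9 = mul i4 %tmp8, %x
      ret i4 %tmp9
    }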
/external/swiftshader/third_party/LLVM/test/Transforms/SimplifyCFG/

D | phi-undef-loadstore.ll
    24  %tmp9 = load i32* %x.0
    25  ret i32 %tmp9
    53  %tmp9 = load i32* %x.0
    54  ret i32 %tmp9
    82  %tmp9 = load i32* %x.0
    83  ret i32 %tmp9
/external/llvm/test/CodeGen/PowerPC/

D | vec_shuffle_p8vector_le.ll
    13  %tmp9 = bitcast <4 x i32> %tmp8 to <2 x i64>
    14  store <2 x i64> %tmp9, <2 x i64>* %A
    32  %tmp9 = insertelement <4 x i32> undef, i32 %tmp5, i32 0
    33  %tmp10 = insertelement <4 x i32> %tmp9, i32 %tmp6, i32 1