/external/llvm/test/Transforms/InstCombine/

apint-cast.ll
      8  %tmp21 = lshr i37 %tmp, 8                ; <i37> [#uses=1]
      9  ; CHECK: %tmp21 = lshr i17 %a, 8
     12  %tmp.upgrd.32 = or i37 %tmp21, %tmp5     ; <i37> [#uses=1]
     13  ; CHECK: %tmp.upgrd.32 = or i17 %tmp21, %tmp5
     21  %tmp21 = lshr i577 %tmp, 9               ; <i577> [#uses=1]
     22  ; CHECK: %tmp21 = lshr i167 %a, 9
     25  %tmp.upgrd.32 = or i577 %tmp21, %tmp5    ; <i577> [#uses=1]
     26  ; CHECK: %tmp.upgrd.32 = or i167 %tmp21, %tmp5

2008-01-21-MulTrunc.ll
      7  %tmp21 = lshr i32 %tmp, 8                ; <i32> [#uses=1]
      8  ; CHECK: %tmp21 = lshr i16 %a, 8
     11  %tmp.upgrd.32 = or i32 %tmp21, %tmp5     ; <i32> [#uses=1]
     12  ; CHECK: %tmp.upgrd.32 = or i16 %tmp21, %tmp5
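
These CHECK lines verify InstCombine's demanded-width narrowing: when the
input was zero-extended from a narrower type and the final result is
truncated back, the whole shift/or chain can be evaluated at the narrow
width. A rough C analogy of that equivalence (illustrative names, not the
test itself, and assuming the i32 value is a zero-extended i16):

    #include <stdint.h>

    /* Computed wide, truncated at the end. */
    uint16_t wide(uint16_t a) {
        uint32_t t = a;                   /* zext i16 -> i32 */
        return (uint16_t)((t >> 8) | t);  /* lshr/or in i32, then trunc */
    }

    /* Same low 16 bits, computed entirely in 16-bit arithmetic. */
    uint16_t narrow(uint16_t a) {
        return (uint16_t)((a >> 8) | a);
    }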

gepphigep.ll
     22  %tmp21 = getelementptr inbounds %struct2, %struct2* %tmp20, i64 0, i32 0
     23  store i32 0, i32* %tmp21, align 4
     48  %tmp21 = getelementptr inbounds %struct2, %struct2* %tmp20, i64 0, i32 0
     49  store i32 0, i32* %tmp21, align 4
     62  define i32 @test3(%struct3* %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19, i64 %tmp20, i64 %tmp21) personali…
     88  %tmp34 = getelementptr inbounds %struct4, %struct4* %phi, i64 %tmp21, i32 1

2006-12-15-Range-Test.ll
     27  %tmp21 = icmp sgt i32 %tmp16, 31         ; <i1> [#uses=1]
     28  %bothcond = or i1 %tmp18, %tmp21         ; <i1> [#uses=1]

/external/llvm/test/CodeGen/AMDGPU/

si-sgpr-spill.ll
     28  %tmp21 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
     29  %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 96)
     30  %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 100)
     31  %tmp24 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 104)
     32  %tmp25 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 112)
     33  %tmp26 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 116)
     34  %tmp27 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 120)
     35  %tmp28 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 128)
     36  %tmp29 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 132)
     37  %tmp30 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 140)
    [all …]

llvm.AMDGPU.cube.ll
     31  %tmp21 = fdiv float 1.000000e+00, %tmp20
     32  %tmp22 = fmul float %tmp16, %tmp21
     34  %tmp24 = fmul float %tmp17, %tmp21
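
The pattern here is one reciprocal shared by several multiplies: %tmp21
holds 1.0 / %tmp20, and both products reuse it rather than issuing two
divides. A minimal C sketch of the same shape (names are illustrative):

    /* Divide two coordinates by the same magnitude via one reciprocal. */
    static void project(float x, float y, float m, float *sc, float *tc) {
        float inv = 1.0f / m;   /* %tmp21 = fdiv 1.0, %tmp20 */
        *sc = x * inv;          /* %tmp22 */
        *tc = y * inv;          /* %tmp24 */
    }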

coalescer_distribute.ll
     30  %tmp21 = phi i64 [ undef, %bb17 ], [ %tmp9, %bb11 ]
     31  %tmp22 = trunc i64 %tmp21 to i32

/external/llvm/test/CodeGen/PowerPC/

rlwimi3.ll
     18  %tmp21 = lshr i32 %tmp19, 5              ; <i32> [#uses=1]
     19  %tmp21.upgrd.1 = trunc i32 %tmp21 to i16 ; <i16> [#uses=1]
     20  %tmp = and i16 %tmp21.upgrd.1, 31775     ; <i16> [#uses=1]

mask64.ll
     13  %tmp21 = load i8, i8* %tmp19, align 1    ; <i8> [#uses=1]
     14  switch i8 %tmp21, label %bb49 [

/external/llvm/test/Transforms/NaryReassociate/

pr24301.ll
      9  %tmp21 = add i32 119, %tmp4
     11  %tmp23 = add i32 %tmp21, -128
     12  ; CHECK: %tmp23 = add i32 %tmp21, -128

/external/llvm/test/CodeGen/Generic/

i128-addsub.ll
     16  %tmp21 = lshr i128 %tmp15, 64            ; <i128> [#uses=1]
     17  %tmp2122 = trunc i128 %tmp21 to i64      ; <i64> [#uses=1]
     35  %tmp21 = lshr i128 %tmp15, 64            ; <i128> [#uses=1]
     36  %tmp2122 = trunc i128 %tmp21 to i64      ; <i64> [#uses=1]
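
The lshr-by-64/trunc pair is the standard idiom for extracting the upper
word of a 128-bit value so the backend can split it into two 64-bit
registers. In C, using the GCC/Clang __int128 extension (the function
name is illustrative):

    #include <stdint.h>

    /* Upper 64-bit half of a 128-bit value. */
    static uint64_t high_word(unsigned __int128 x) {
        return (uint64_t)(x >> 64);   /* lshr i128 %x, 64 ; trunc to i64 */
    }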

2008-02-04-ExtractSubvector.ll
      8  %tmp21 = fadd <8 x double> zeroinitializer, zeroinitializer ; <<8 x double>> [#uses=1]
     12  store <8 x double> %tmp21, <8 x double>* null, align 64

/external/libjpeg-turbo/

jidctint.c
   1078  JLONG tmp20, tmp21, tmp22, tmp23, tmp24;                                  in jpeg_idct_10x10() local
   1119  tmp21 = tmp11 + tmp13;                                                    in jpeg_idct_10x10()
   1153  wsptr[8 * 1] = (int)RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS - PASS1_BITS);  in jpeg_idct_10x10()
   1154  wsptr[8 * 8] = (int)RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS - PASS1_BITS);  in jpeg_idct_10x10()
   1191  tmp21 = tmp11 + tmp13;                                                    in jpeg_idct_10x10()
   1229  outptr[1] = range_limit[(int)RIGHT_SHIFT(tmp21 + tmp11,                   in jpeg_idct_10x10()
   1232  outptr[8] = range_limit[(int)RIGHT_SHIFT(tmp21 - tmp11,                   in jpeg_idct_10x10()
   1273  JLONG tmp20, tmp21, tmp22, tmp23, tmp24, tmp25;                           in jpeg_idct_11x11() local
   1307  tmp21 = tmp20 + tmp23 + tmp25 -                                           in jpeg_idct_11x11()
   1345  wsptr[8 * 1] = (int)RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS - PASS1_BITS);  in jpeg_idct_11x11()
   [all …]
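
Lines 1153-1154 (and 1229/1232) show the butterfly structure of the
scaled-integer IDCT: each symmetric pair of outputs is one sum and one
difference of the same two intermediates, descaled by a right shift. A
stripped-down sketch of that step (not the libjpeg-turbo source;
DESCALE_SHIFT stands in for the real CONST_BITS - PASS1_BITS value):

    #define DESCALE_SHIFT 11   /* assumed CONST_BITS - PASS1_BITS */

    /* Produce output rows 1 and 8 from even-part tmp21 and odd-part
     * tmp11, as the 10x10 IDCT's first pass does. */
    static void butterfly(long tmp21, long tmp11, int *row1, int *row8) {
        *row1 = (int)((tmp21 + tmp11) >> DESCALE_SHIFT);  /* wsptr[8 * 1] */
        *row8 = (int)((tmp21 - tmp11) >> DESCALE_SHIFT);  /* wsptr[8 * 8] */
    }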

/external/llvm/test/Transforms/SimplifyCFG/

2009-01-19-UnconditionalTrappingConstantExpr.ll
     12  define i32 @test(i32 %tmp21, i32 %tmp24) {
     13  %tmp25 = icmp sle i32 %tmp21, %tmp24
     31  define i32 @test2(i32 %tmp21, i32 %tmp24, i1 %tmp34) {

/external/webrtc/common_audio/signal_processing/

resample_by_2_mips.c
     61  int32_t tmp11, tmp12, tmp21, tmp22;              in WebRtcSpl_DownsampleBy2() local
    110  : [tmp22] "=r" (tmp22), [tmp21] "=&r" (tmp21),   in WebRtcSpl_DownsampleBy2()
    144  : [tmp22] "r" (tmp22), [tmp21] "r" (tmp21),      in WebRtcSpl_DownsampleBy2()
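
Lines 110 and 144 are GCC extended-asm operand lists: "=r" binds a
write-only register output, "=&r" an early-clobber output that may not
share a register with any input, and plain "r" a register input. A
minimal MIPS-only sketch of the same binding syntax (illustrative, not
the WebRTC routine):

    static int add_mips(int a, int b) {
        int out;
        __asm__("addu %[out], %[a], %[b]"     /* out = a + b */
                : [out] "=r" (out)            /* register output */
                : [a] "r" (a), [b] "r" (b));  /* register inputs */
        return out;
    }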

/external/llvm/test/Transforms/LCSSA/

indirectbr.ll
    553  %tmp21 = add i32 undef, 677038203
    560  ; CHECK: %tmp21.lcssa1 = phi i32 [ %tmp21.lcssa1, %lab4 ], [ %tmp21, %lab2 ]
    561  %tmp12 = phi i32 [ %tmp21, %lab2 ], [ %tmp12, %lab4 ]
    568  ; CHECK: %tmp21.lcssa1.lcssa = phi i32 [ %tmp21.lcssa1, %lab3 ]

/external/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/

split-gep-and-gvn-addrspace-addressing-modes.ll
     30  %tmp21 = fadd float %tmp17, %tmp20
     31  store float %tmp21, float addrspace(1)* %output, align 4
     63  %tmp21 = fadd float %tmp17, %tmp20
     64  store float %tmp21, float addrspace(1)* %output, align 4
     91  %tmp21 = fadd float %tmp17, %tmp20
     92  store float %tmp21, float addrspace(1)* %output, align 4

/external/llvm/test/Transforms/PhaseOrdering/

PR6627.ll
     33  %tmp21 = load i8, i8* %arrayidx20, align 1
     34  %conv22 = zext i8 %tmp21 to i32
     77  %tmp21 = load i8, i8* %arrayidx20, align 1
     78  %conv22 = zext i8 %tmp21 to i32

/external/llvm/test/CodeGen/X86/

mmx-arith.ll
     22  %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %tmp12, x86_mmx %tmp16)
     23  store x86_mmx %tmp21, x86_mmx* %A
     25  %tmp21a = bitcast x86_mmx %tmp21 to <8 x i8>
     84  %tmp21 = load x86_mmx, x86_mmx* %B
     86  %tmp21a = bitcast x86_mmx %tmp21 to <2 x i32>
    121  %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %tmp12, x86_mmx %tmp16)
    122  store x86_mmx %tmp21, x86_mmx* %A
    124  %tmp21a = bitcast x86_mmx %tmp21 to <4 x i16>
    185  %tmp21 = add <1 x i64> %tmp19, %tmp14
    186  %tmp22 = add <1 x i64> %tmp21, %sum.035.0
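
llvm.x86.mmx.paddus.b/paddus.w are the MMX packed adds with unsigned
saturation: out-of-range sums clamp to the element type's maximum instead
of wrapping. A scalar model of one byte lane (the intrinsic applies this
to all eight bytes of an x86_mmx value at once; the helper name is
illustrative):

    #include <stdint.h>

    /* One lane of paddus.b: unsigned add, clamped at 255. */
    static uint8_t paddus_b_lane(uint8_t a, uint8_t b) {
        unsigned sum = (unsigned)a + (unsigned)b;
        return sum > 255 ? 255 : (uint8_t)sum;
    }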

pr2326.ll
     13  %tmp21 = icmp ule i32 %tmp19, %tmp20     ; <i1> [#uses=1]
     14  %tmp2122 = zext i1 %tmp21 to i8          ; <i8> [#uses=1]

2007-11-06-InstrSched.ll
     19  %tmp21 = icmp ult i32 %tmp17, %c         ; <i1> [#uses=1]
     20  br i1 %tmp21, label %bb18, label %bb23

loop-strength-reduce4.ll
     40  %tmp21 = xor i32 %tmp20, %tmp17          ; <i32> [#uses=2]
     41  store i32 %tmp21, i32* %tmp16, align 4
     45  %tmp32 = getelementptr [0 x i32], [0 x i32]* @S, i32 0, i32 %tmp21 ; <i32*> [#uses=1]

/external/llvm/test/CodeGen/Hexagon/

sube.ll
     25  %tmp21 = lshr i128 %tmp15, 64
     26  %tmp2122 = trunc i128 %tmp21 to i64

adde.ll
     30  %tmp21 = lshr i128 %tmp15, 64
     31  %tmp2122 = trunc i128 %tmp21 to i64

/external/llvm/test/Transforms/IndVarSimplify/

lrev-existing-umin.ll
     18  %tmp21 = trunc i32 %tmp20 to i8
     20  store i8 %tmp21, i8* %addr22, align 1