/external/llvm/test/Transforms/StructurizeCFG/ |
D | nested-loop-order.ll |
    14  %temp4.0.ph = phi i32 [ 0, %main_body ], [ %tmp20, %ENDIF28 ]
    20  %temp4.0 = phi i32 [ %temp4.0.ph, %LOOP.outer ], [ %tmp20, %IF29 ]
    21  %tmp20 = add i32 %temp4.0, 1
    22  %tmp22 = icmp sgt i32 %tmp20, 3
    32  %tmp23 = icmp eq i32 %tmp20, 3
    40  %tmp31 = icmp sgt i32 %tmp20, 1
    49  %tmp32 = icmp sgt i32 %tmp20, 2
    62  %tmp36 = icmp sgt i32 %tmp20, 2
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | 2008-02-27-DeadSlotElimBug.ll |
    39  %tmp20.i39 = fadd double %tmp17.i, %tmp17.i63 ; <double> [#uses=1]
    40  %tmp20.i23 = fadd double %tmp20.i39, %tmp17.i76 ; <double> [#uses=1]
    48  %tmp20.i7 = getelementptr %struct.CompAtom* %d, i32 0, i32 2 ; <i32*> [#uses=2]
    50  %tmp74.i = load i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
    54  %tmp88.i = load i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
    58  %tmp6.i = fadd double 0.000000e+00, %tmp20.i23 ; <double> [#uses=0]
|
D | fp-stack-compare.ll |
    8   %tmp20 = fsub float -0.000000e+00, %tmp
    9   %iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
|
D | tailcall-fastisel.ll |
    9   %tmp20 = tail call fastcc i8* @"visit_array_aux<`Reference>"(%0 %arg, i32 undef) ; <i8*> [#uses=1]
    10  ret i8* %tmp20
|
D | phi-immediate-factoring.ll |
    27  %i.248.1 = phi i32 [ 0, %entry ], [ %tmp20, %cond_next18 ] ; <i32> [#uses=2]
    33  %tmp20 = add i32 %i.248.1, 1 ; <i32> [#uses=2]
    34  icmp slt i32 %tmp20, 1000 ; <i1>:3 [#uses=1]
|
D | 2008-02-08-LoadFoldingBug.ll |
    23  %tmp20.i24 = add i32 %tmp19.i, %indvar94.i ; <i32> [#uses=3]
    24  %tmp21.i = getelementptr double* %tmp7.i21, i32 %tmp20.i24 ; <double*> [#uses=1]
    38  %tmp20.i4.i = fmul double %tmp17.i.i33, %tmp17.i.i33 ; <double> [#uses=1]
    39  %tmp21.i.i34 = fadd double %tmp20.i4.i, 1.000000e+00 ; <double> [#uses=1]
    61  …lyGivens( double** %tmp12.sub.i.i, double %s.0.i44, double %c.0.i45, i32 %tmp20.i24, i32 %tmp10.i,…
    65  …call void @main_bb114_2E_outer_2E_i_bb3_2E_i27_bb_2E_i48_2E_i( i32 %tmp10.i, i32 %tmp20.i24, doubl…
|
D | pr2326.ll |
    12  %tmp20 = load i32* %l_108, align 4 ; <i32> [#uses=1]
    13  %tmp21 = icmp ule i32 %tmp19, %tmp20 ; <i1> [#uses=1]
|
D | 2008-02-06-LoadFoldingBug.ll |
    9   %tmp20 = load double* %tmp19, align 8 ; <double> [#uses=1]
    12  %tmp32 = fsub double -0.000000e+00, %tmp20 ; <double> [#uses=1]
|
/external/llvm/test/CodeGen/X86/ |
D | 2008-02-27-DeadSlotElimBug.ll |
    39  %tmp20.i39 = fadd double %tmp17.i, %tmp17.i63 ; <double> [#uses=1]
    40  %tmp20.i23 = fadd double %tmp20.i39, %tmp17.i76 ; <double> [#uses=1]
    48  %tmp20.i7 = getelementptr %struct.CompAtom, %struct.CompAtom* %d, i32 0, i32 2 ; <i32*> [#uses=2]
    50  %tmp74.i = load i32, i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
    54  %tmp88.i = load i32, i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
    58  %tmp6.i = fadd double 0.000000e+00, %tmp20.i23 ; <double> [#uses=0]
|
D | fp-stack-compare-cmov.ll |
    9   %tmp20 = fsub float -0.000000e+00, %tmp
    10  %iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
|
D | fp-stack-compare.ll |
    11  %tmp20 = fsub float -0.000000e+00, %tmp
    12  %iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
|
D | tailcall-fastisel.ll |
    7   %tmp20 = tail call fastcc i8* @"visit_array_aux<`Reference>"(%0 %arg, i32 undef) ; <i8*> [#uses=1]
    9   ret i8* %tmp20
|
D | phi-immediate-factoring.ll |
    29  %i.248.1 = phi i32 [ 0, %entry ], [ %tmp20, %cond_next18 ] ; <i32> [#uses=2]
    35  %tmp20 = add i32 %i.248.1, 1 ; <i32> [#uses=2]
    36  icmp slt i32 %tmp20, 1000 ; <i1>:3 [#uses=1]
|
D | pr2326.ll |
    12  %tmp20 = load i32, i32* %l_108, align 4 ; <i32> [#uses=1]
    13  %tmp21 = icmp ule i32 %tmp19, %tmp20 ; <i1> [#uses=1]
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | sgpr-copy.ll |
    17  %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
    18  %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 0)
    19  %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 16)
    20  %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 32)
    41  %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
    42  %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 16)
    43  %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 32)
    44  %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 36)
    45  %tmp24 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 40)
    46  %tmp25 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 48)
    [all …]
|
/external/libjpeg-turbo/ |
D | jidctint.c |
    1078  JLONG tmp20, tmp21, tmp22, tmp23, tmp24; in jpeg_idct_10x10() local
    1117  tmp20 = tmp10 + tmp12; in jpeg_idct_10x10()
    1151  wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS); in jpeg_idct_10x10()
    1152  wsptr[8*9] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS); in jpeg_idct_10x10()
    1189  tmp20 = tmp10 + tmp12; in jpeg_idct_10x10()
    1223  outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10, in jpeg_idct_10x10()
    1226  outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10, in jpeg_idct_10x10()
    1273  JLONG tmp20, tmp21, tmp22, tmp23, tmp24, tmp25; in jpeg_idct_11x11() local
    1301  tmp20 = MULTIPLY(z2 - z3, FIX(2.546640132)); /* c2+c4 */ in jpeg_idct_11x11()
    1307  tmp21 = tmp20 + tmp23 + tmp25 - in jpeg_idct_11x11()
    [all …]
|
/external/swiftshader/third_party/LLVM/test/Transforms/InstCombine/ |
D | 2007-05-10-icmp-or.ll |
    3  %tmp20 = icmp ugt i32 %tmp9, 255 ; <i1> [#uses=1]
    5  %bothcond = or i1 %tmp20, %tmp11.not ; <i1> [#uses=1]
|
/external/llvm/test/Transforms/InstCombine/ |
D | 2007-05-10-icmp-or.ll |
    3  %tmp20 = icmp ugt i32 %tmp9, 255 ; <i1> [#uses=1]
    5  %bothcond = or i1 %tmp20, %tmp11.not ; <i1> [#uses=1]
|
D | gepphigep.ll |
    21  %tmp20 = getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp19
    22  %tmp21 = getelementptr inbounds %struct2, %struct2* %tmp20, i64 0, i32 0
    27  %phi = phi %struct2* [ %tmp10, %bb1 ], [ %tmp20, %bb2 ]
    47  %tmp20 = getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp19
    48  %tmp21 = getelementptr inbounds %struct2, %struct2* %tmp20, i64 0, i32 0
    62  define i32 @test3(%struct3* %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19, i64 %tmp20, i64 %tmp21) personali…
    74  %tmp2 = getelementptr inbounds %struct3, %struct3* %tmp, i64 %tmp20, i32 1
|
/external/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/ |
D | split-gep-and-gvn-addrspace-addressing-modes.ll |
    29  %tmp20 = load float, float addrspace(2)* %tmp18, align 4
    30  %tmp21 = fadd float %tmp17, %tmp20
    62  %tmp20 = load float, float addrspace(2)* %tmp18, align 4
    63  %tmp21 = fadd float %tmp17, %tmp20
    90  %tmp20 = load float, float addrspace(3)* %tmp18, align 4
    91  %tmp21 = fadd float %tmp17, %tmp20
|
/external/llvm/test/Bitcode/ |
D | metadata-2.ll |
    42  %tmp20 = add i32 %tmp18, %tmp16 ; <i32> [#uses=1]
    43  %tmp22 = and i32 %tmp20, 16711935 ; <i32> [#uses=2]
    66  %tmp20 = or i32 %tmp16, %tmp19 ; <i32> [#uses=2]
    67  %tmp22 = lshr i32 %tmp20, 8 ; <i32> [#uses=1]
    69  %tmp25 = shl i32 %tmp20, 8 ; <i32> [#uses=1]
|
/external/swiftshader/third_party/LLVM/test/Bitcode/ |
D | metadata-2.ll |
    41  %tmp20 = add i32 %tmp18, %tmp16 ; <i32> [#uses=1]
    42  %tmp22 = and i32 %tmp20, 16711935 ; <i32> [#uses=2]
    65  %tmp20 = or i32 %tmp16, %tmp19 ; <i32> [#uses=2]
    66  %tmp22 = lshr i32 %tmp20, 8 ; <i32> [#uses=1]
    68  %tmp25 = shl i32 %tmp20, 8 ; <i32> [#uses=1]
|
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/ |
D | 2010-06-11-vmovdrr-bitcast.ll |
    14  %tmp20.i = bitcast i64 %tmp9 to <8 x i8> ; <<8 x i8>> [#uses=1]
    15  …tail call void @llvm.arm.neon.vst2.v8i8(i8* %b, <8 x i8> %tmp16.i, <8 x i8> %tmp20.i, i32 1) nounw…
|
/external/llvm/test/CodeGen/ARM/ |
D | 2010-06-11-vmovdrr-bitcast.ll |
    14  %tmp20.i = bitcast i64 %tmp9 to <8 x i8> ; <<8 x i8>> [#uses=1]
    15  …tail call void @llvm.arm.neon.vst2.p0i8.v8i8(i8* %b, <8 x i8> %tmp16.i, <8 x i8> %tmp20.i, i32 1) …
|
/external/llvm/test/Transforms/SROA/ |
D | vector-lifetime-intrinsic.ll |
    22  %tmp20 = bitcast <4 x float>* %tmp to i8*
    23  call void @llvm.lifetime.end(i64 16, i8* %tmp20)
|