/external/llvm/test/CodeGen/X86/ |
D | lea-recursion.ll |
    19  %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=2]
    20  store i32 %tmp10, i32* getelementptr ([1000 x i32], [1000 x i32]* @g0, i32 0, i32 1)
    22  %tmp9.1 = add i32 %tmp10, 1 ; <i32> [#uses=1]
    23  %tmp10.1 = add i32 %tmp9.1, %tmp8.1 ; <i32> [#uses=2]
    24  store i32 %tmp10.1, i32* getelementptr ([1000 x i32], [1000 x i32]* @g0, i32 0, i32 2)
    26  %tmp9.2 = add i32 %tmp10.1, 1 ; <i32> [#uses=1]
    27  %tmp10.2 = add i32 %tmp9.2, %tmp8.2 ; <i32> [#uses=2]
    28  store i32 %tmp10.2, i32* getelementptr ([1000 x i32], [1000 x i32]* @g0, i32 0, i32 3)
    30  %tmp9.3 = add i32 %tmp10.2, 1 ; <i32> [#uses=1]
    31  %tmp10.3 = add i32 %tmp9.3, %tmp8.3 ; <i32> [#uses=2]
    [all …]
|
D | coalescer-commute2.ll |
    17  %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
    18  ret <2 x i64> %tmp10
    26  %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
    27  ret <2 x i64> %tmp10
|
/external/libjpeg-turbo/ |
D | jidctint.c |
    178  JLONG tmp10, tmp11, tmp12, tmp13; variable
    245  tmp10 = tmp0 + tmp3;
    284  wsptr[DCTSIZE * 0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS - PASS1_BITS);
    285  wsptr[DCTSIZE * 7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS - PASS1_BITS);
    347  tmp10 = tmp0 + tmp3;
    386  outptr[0] = range_limit[(int)DESCALE(tmp10 + tmp3,
    389  outptr[7] = range_limit[(int)DESCALE(tmp10 - tmp3,
    431  JLONG tmp0, tmp1, tmp2, tmp10, tmp11, tmp12, tmp13; in jpeg_idct_7x7() local
    459  tmp10 = MULTIPLY(z2 - z3, FIX(0.881747734)); /* c4 */ in jpeg_idct_7x7()
    461  tmp11 = tmp10 + tmp12 + tmp13 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */ in jpeg_idct_7x7()
    [all …]
|
D | jfdctflt.c |
    63   FAST_FLOAT tmp10, tmp11, tmp12, tmp13; variable
    83   tmp10 = tmp0 + tmp3; /* phase 2 */
    88   dataptr[0] = tmp10 + tmp11; /* phase 3 */
    89   dataptr[4] = tmp10 - tmp11;
    97   tmp10 = tmp4 + tmp5; /* phase 2 */
    102  z5 = (tmp10 - tmp12) * ((FAST_FLOAT)0.382683433); /* c6 */
    103  z2 = ((FAST_FLOAT)0.541196100) * tmp10 + z5; /* c2-c6 */
    133  tmp10 = tmp0 + tmp3; /* phase 2 */
    138  dataptr[DCTSIZE * 0] = tmp10 + tmp11; /* phase 3 */
    139  dataptr[DCTSIZE * 4] = tmp10 - tmp11;
    [all …]
|
D | jfdctfst.c |
    120  DCTELEM tmp10, tmp11, tmp12, tmp13; variable
    141  tmp10 = tmp0 + tmp3; /* phase 2 */
    146  dataptr[0] = tmp10 + tmp11; /* phase 3 */
    147  dataptr[4] = tmp10 - tmp11;
    155  tmp10 = tmp4 + tmp5; /* phase 2 */
    160  z5 = MULTIPLY(tmp10 - tmp12, FIX_0_382683433); /* c6 */
    161  z2 = MULTIPLY(tmp10, FIX_0_541196100) + z5; /* c2-c6 */
    191  tmp10 = tmp0 + tmp3; /* phase 2 */
    196  dataptr[DCTSIZE * 0] = tmp10 + tmp11; /* phase 3 */
    197  dataptr[DCTSIZE * 4] = tmp10 - tmp11;
    [all …]
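The two forward-DCT flavours above (float and fast-integer) build tmp10/tmp11 in the same even-part butterfly. Below is a minimal stand-alone C sketch of that step for one 8-sample row, assuming plain float arithmetic; the function name and the tiny driver are invented for illustration and this is not the library's actual code.

    #include <stdio.h>

    /* Sketch of the even-part butterfly seen in jfdctflt.c/jfdctfst.c:
     * "phase 2" forms tmp10..tmp13 from mirrored sample sums, "phase 3"
     * produces output coefficients 0 and 4 directly from tmp10/tmp11. */
    static void fdct_even_part(const float in[8], float out[8])
    {
      float tmp0 = in[0] + in[7];
      float tmp1 = in[1] + in[6];
      float tmp2 = in[2] + in[5];
      float tmp3 = in[3] + in[4];

      float tmp10 = tmp0 + tmp3;                 /* phase 2 */
      float tmp13 = tmp0 - tmp3;
      float tmp11 = tmp1 + tmp2;
      float tmp12 = tmp1 - tmp2;

      out[0] = tmp10 + tmp11;                    /* phase 3 */
      out[4] = tmp10 - tmp11;

      float z1 = (tmp12 + tmp13) * 0.707106781f; /* c4 rotation for coefs 2 and 6 */
      out[2] = tmp13 + z1;
      out[6] = tmp13 - z1;
    }

    int main(void)
    {
      float row[8] = { 8, 16, 24, 32, 40, 48, 56, 64 }, out[8] = { 0 };
      fdct_even_part(row, out);
      printf("coef0=%g coef2=%g coef4=%g coef6=%g\n", out[0], out[2], out[4], out[6]);
      return 0;
    }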
|
D | jidctflt.c |
    77   FAST_FLOAT tmp10, tmp11, tmp12, tmp13; variable
    133  tmp10 = tmp0 + tmp2; /* phase 3 */
    139  tmp0 = tmp10 + tmp13; /* phase 2 */
    140  tmp3 = tmp10 - tmp13;
    160  tmp10 = z5 - z12 * ((FAST_FLOAT)1.082392200); /* 2*(c2-c6) */
    165  tmp4 = tmp10 - tmp5;
    196  tmp10 = z5 + wsptr[4];
    202  tmp0 = tmp10 + tmp13;
    203  tmp3 = tmp10 - tmp13;
    218  tmp10 = z5 - z12 * ((FAST_FLOAT)1.082392200); /* 2*(c2-c6) */
    [all …]
|
D | jidctred.c |
    125  JLONG tmp0, tmp2, tmp10, tmp12; variable
    170  tmp10 = tmp0 + tmp2;
    193  (int)DESCALE(tmp10 + tmp2, CONST_BITS - PASS1_BITS + 1);
    195  (int)DESCALE(tmp10 - tmp2, CONST_BITS - PASS1_BITS + 1);
    233  tmp10 = tmp0 + tmp2;
    255  outptr[0] = range_limit[(int)DESCALE(tmp10 + tmp2,
    258  outptr[3] = range_limit[(int)DESCALE(tmp10 - tmp2,
    283  JLONG tmp0, tmp10, z1; in jpeg_idct_2x2() local
    317  tmp10 = LEFT_SHIFT(z1, CONST_BITS + 2); in jpeg_idct_2x2()
    333  (int)DESCALE(tmp10 + tmp0, CONST_BITS - PASS1_BITS + 2); in jpeg_idct_2x2()
    [all …]
|
D | jidctfst.c |
    176  DCTELEM tmp10, tmp11, tmp12, tmp13; variable
    232  tmp10 = tmp0 + tmp2; /* phase 3 */
    238  tmp0 = tmp10 + tmp13; /* phase 2 */
    239  tmp3 = tmp10 - tmp13;
    259  tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; /* 2*(c2-c6) */
    264  tmp4 = tmp10 + tmp5;
    318  tmp10 = ((DCTELEM)wsptr[0] + (DCTELEM)wsptr[4]);
    325  tmp0 = tmp10 + tmp13;
    326  tmp3 = tmp10 - tmp13;
    341  tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; /* 2*(c2-c6) */
    [all …]
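On the inverse side, the jidctflt.c/jidctfst.c hits show the mirror-image recombination: tmp10/tmp11 come from coefficients 0 and 4, tmp13/tmp12 from 2 and 6, and their pairwise sums and differences feed the later output stage. A rough float sketch of just that even part, with the same caveat that names and layout here are invented for illustration:

    /* Sketch of the even-part recombination from jidctflt.c/jidctfst.c.
     * The four results are intermediate sums that the real code later
     * combines with the odd-coefficient part to form all eight outputs. */
    void idct_even_part(const float coef[8], float even[4])
    {
      float tmp10 = coef[0] + coef[4];                          /* phase 3 */
      float tmp11 = coef[0] - coef[4];

      float tmp13 = coef[2] + coef[6];
      float tmp12 = (coef[2] - coef[6]) * 1.414213562f - tmp13; /* 2*c4 */

      even[0] = tmp10 + tmp13;                                  /* phase 2 */
      even[3] = tmp10 - tmp13;
      even[1] = tmp11 + tmp12;
      even[2] = tmp11 - tmp12;
    }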
|
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | lea-recursion.ll |
    51  %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=2]
    52  store i32 %tmp10, i32* getelementptr ([1000 x i32], [1000 x i32]* @g0, i32 0, i32 1)
    54  %tmp9.1 = add i32 %tmp10, 1 ; <i32> [#uses=1]
    55  %tmp10.1 = add i32 %tmp9.1, %tmp8.1 ; <i32> [#uses=2]
    56  store i32 %tmp10.1, i32* getelementptr ([1000 x i32], [1000 x i32]* @g0, i32 0, i32 2)
    58  %tmp9.2 = add i32 %tmp10.1, 1 ; <i32> [#uses=1]
    59  %tmp10.2 = add i32 %tmp9.2, %tmp8.2 ; <i32> [#uses=2]
    60  store i32 %tmp10.2, i32* getelementptr ([1000 x i32], [1000 x i32]* @g0, i32 0, i32 3)
    62  %tmp9.3 = add i32 %tmp10.2, 1 ; <i32> [#uses=1]
    63  %tmp10.3 = add i32 %tmp9.3, %tmp8.3 ; <i32> [#uses=2]
    [all …]
|
D | coalescer-commute2.ll |
    17  %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
    18  ret <2 x i64> %tmp10
    26  %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
    27  ret <2 x i64> %tmp10
|
/external/libjpeg-turbo/simd/arm/ |
D | jidctfst-neon.c |
    132  int16x4_t tmp10 = vadd_s16(tmp0, tmp2); /* phase 3 */ in jsimd_idct_ifast_neon() local
    141  tmp0 = vadd_s16(tmp10, tmp13); /* phase 2 */ in jsimd_idct_ifast_neon()
    142  tmp3 = vsub_s16(tmp10, tmp13); in jsimd_idct_ifast_neon()
    165  tmp10 = vqdmulh_lane_s16(z12, consts, 0); in jsimd_idct_ifast_neon()
    166  tmp10 = vadd_s16(tmp10, z12); in jsimd_idct_ifast_neon()
    167  tmp10 = vsub_s16(tmp10, z5); in jsimd_idct_ifast_neon()
    174  tmp4 = vadd_s16(tmp10, tmp5); in jsimd_idct_ifast_neon()
    207  int16x4_t tmp10 = vadd_s16(tmp0, tmp2); /* phase 3 */ in jsimd_idct_ifast_neon() local
    216  tmp0 = vadd_s16(tmp10, tmp13); /* phase 2 */ in jsimd_idct_ifast_neon()
    217  tmp3 = vsub_s16(tmp10, tmp13); in jsimd_idct_ifast_neon()
    [all …]
|
D | jfdctfst-neon.c |
    99   int16x8_t tmp10 = vaddq_s16(tmp0, tmp3); /* phase 2 */ in jsimd_fdct_ifast_neon() local
    104  col0 = vaddq_s16(tmp10, tmp11); /* phase 3 */ in jsimd_fdct_ifast_neon()
    105  col4 = vsubq_s16(tmp10, tmp11); in jsimd_fdct_ifast_neon()
    112  tmp10 = vaddq_s16(tmp4, tmp5); /* phase 2 */ in jsimd_fdct_ifast_neon()
    116  int16x8_t z5 = vqdmulhq_lane_s16(vsubq_s16(tmp10, tmp12), consts, 0); in jsimd_fdct_ifast_neon()
    117  int16x8_t z2 = vqdmulhq_lane_s16(tmp10, consts, 1); in jsimd_fdct_ifast_neon()
    173  tmp10 = vaddq_s16(tmp0, tmp3); /* phase 2 */ in jsimd_fdct_ifast_neon()
    178  row0 = vaddq_s16(tmp10, tmp11); /* phase 3 */ in jsimd_fdct_ifast_neon()
    179  row4 = vsubq_s16(tmp10, tmp11); in jsimd_fdct_ifast_neon()
    186  tmp10 = vaddq_s16(tmp4, tmp5); /* phase 2 */ in jsimd_fdct_ifast_neon()
    [all …]
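The NEON ports above carry the same structure into vector registers: each int16x8_t holds one sample position across eight rows, so a single vaddq_s16/vsubq_s16 pair performs the phase-2/phase-3 butterfly for all eight rows at once. A minimal sketch of that idea (hypothetical helper, NEON support assumed; not the actual jfdctfst-neon.c code):

    #include <arm_neon.h>

    /* Vectorised "phase 2 / phase 3" butterfly: tmp0..tmp3 each hold the
     * same sample position from eight different rows, so one call covers
     * eight rows of the scalar computation shown in jfdctfst.c. */
    void fdct_even_butterfly_neon(int16x8_t tmp0, int16x8_t tmp1,
                                  int16x8_t tmp2, int16x8_t tmp3,
                                  int16x8_t *col0, int16x8_t *col4)
    {
      int16x8_t tmp10 = vaddq_s16(tmp0, tmp3);  /* phase 2 */
      int16x8_t tmp11 = vaddq_s16(tmp1, tmp2);

      *col0 = vaddq_s16(tmp10, tmp11);          /* phase 3 */
      *col4 = vsubq_s16(tmp10, tmp11);
    }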
|
/external/llvm-project/llvm/test/CodeGen/SystemZ/ |
D | codegenprepare-form-OF-ops.ll |
    14  %tmp10 = icmp ne i32 %arg, 0
    16  %tmp12 = select i1 %tmp10, i32 %tmp11, i32 %arg
    26  %tmp10 = icmp ne i64 %arg, 0
    28  %tmp12 = select i1 %tmp10, i64 %tmp11, i64 %arg
    38  %tmp10 = icmp eq i32 %arg, 0
    40  %tmp12 = select i1 %tmp10, i32 %tmp11, i32 %arg
    50  %tmp10 = icmp eq i64 %arg, 0
    52  %tmp12 = select i1 %tmp10, i64 %tmp11, i64 %arg
|
D | vec-move-22.ll |
    9   %tmp10 = load i64*, i64** %Addr
    10  store i64 %arg, i64* %tmp10
    11  %tmp12 = insertelement <2 x i64*> undef, i64* %tmp10, i32 0
    12  %tmp13 = insertelement <2 x i64*> %tmp12, i64* %tmp10, i32 1
|
/external/libyuv/files/source/ |
D | rotate_mmi.cc |
    28   uint64_t tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13; in TransposeWx8_MMI() local
    141  [tmp9] "=&f"(tmp9), [tmp10] "=&f"(tmp10), [tmp11] "=&f"(tmp11), in TransposeWx8_MMI()
    157  uint64_t tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13; in TransposeUVWx8_MMI() local
    278  [tmp9] "=&f"(tmp9), [tmp10] "=&f"(tmp10), [tmp11] "=&f"(tmp11), in TransposeUVWx8_MMI()
|
/external/llvm/test/Transforms/Inline/ |
D | nested-inline.ll |
    29  %tmp10 = and i32 %X, 4 ; <i32> [#uses=1]
    30  %tmp10.upgrd.3 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
    31  br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
    72  %tmp10 = and i32 %X, 4 ; <i32> [#uses=1]
    73  %tmp10.upgrd.3 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
    74  br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
|
/external/llvm-project/llvm/test/Transforms/Inline/ |
D | nested-inline.ll |
    30  %tmp10 = and i32 %X, 4 ; <i32> [#uses=1]
    31  %tmp10.upgrd.3 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
    32  br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
    73  %tmp10 = and i32 %X, 4 ; <i32> [#uses=1]
    74  %tmp10.upgrd.3 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
    75  br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
|
/external/llvm/test/Transforms/TailCallElim/ |
D | dont_reorder_load.ll |
    25  %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    26  ret i32 %tmp10
    44  %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    45  ret i32 %tmp10
    62  %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    63  ret i32 %tmp10
    80  %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    81  ret i32 %tmp10
|
D | reorder_load.ll |
    33   %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    34   ret i32 %tmp10
    64   %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    65   ret i32 %tmp10
    88   %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    89   ret i32 %tmp10
    122  %tmp10 = add i32 %second, %tmp8 ; <i32> [#uses=1]
    123  ret i32 %tmp10
    145  %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    146  ret i32 %tmp10
|
/external/llvm-project/llvm/test/Transforms/TailCallElim/ |
D | dont_reorder_load.ll |
    25  %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    26  ret i32 %tmp10
    44  %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    45  ret i32 %tmp10
    62  %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    63  ret i32 %tmp10
    80  %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    81  ret i32 %tmp10
|
D | reorder_load.ll |
    34   %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    35   ret i32 %tmp10
    65   %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    66   ret i32 %tmp10
    89   %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    90   ret i32 %tmp10
    123  %tmp10 = add i32 %second, %tmp8 ; <i32> [#uses=1]
    124  ret i32 %tmp10
    146  %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    147  ret i32 %tmp10
    [all …]
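Both reorder_load.ll copies exercise the same shape: the returned value is an add of a loaded value and the recursive call's result (%tmp10 = add i32 %tmp9, %tmp8). Purely for orientation, here is a hypothetical C analogue of that shape; the .ll files themselves are the actual tests, and this C is not taken from them.

    /* Hypothetical C analogue of the pattern in reorder_load.ll: the final
     * add mixes a value loaded from memory with the recursive call's result,
     * so tail-call elimination can only turn this into a loop if it may
     * hoist the load above the call (the dont_reorder_load.ll cases are the
     * ones where it must not). */
    int raise_load(const int *a, int a_len, int start)
    {
      if (start >= a_len)
        return 0;
      int rest = raise_load(a, a_len, start + 1);  /* %tmp8 */
      return a[0] + rest;                          /* %tmp9 + %tmp8 -> %tmp10 */
    }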
|
/external/llvm/test/Analysis/ScalarEvolution/ |
D | pr25369.ll |
    15  %tmp = add i32 %tmp10, -1
    20  %tmp6 = phi i32 [ %tmp10, %bb4 ], [ 0, %bb2 ], [ 0, %bb ]
    24  %tmp10 = add i32 undef, %tmp9
    52  %tmp = add i32 %tmp10, -1
    57  %tmp6 = phi i32 [ %tmp10, %bb4 ], [ 0, %bb2 ], [ 0, %bb ]
    61  %tmp10 = add i32 undef, %tmp9
|
/external/llvm-project/llvm/test/Analysis/ScalarEvolution/ |
D | pr25369.ll |
    16  %tmp = add i32 %tmp10, -1
    21  %tmp6 = phi i32 [ %tmp10, %bb4 ], [ 0, %bb2 ], [ 0, %bb ]
    25  %tmp10 = add i32 undef, %tmp9
    53  %tmp = add i32 %tmp10, -1
    58  %tmp6 = phi i32 [ %tmp10, %bb4 ], [ 0, %bb2 ], [ 0, %bb ]
    62  %tmp10 = add i32 undef, %tmp9
|
/external/llvm/test/Transforms/InstCombine/ |
D | 2010-11-01-lshr-mask.ll |
    38  %tmp10 = lshr i8 %tmp8, 7
    39  %tmp11 = shl i8 %tmp10, 5
    41  ; CHECK: %tmp10 = lshr i8 %tmp8, 7
    42  ; CHECK: %tmp11 = shl nuw nsw i8 %tmp10, 5
|
/external/llvm-project/llvm/test/Transforms/SCCP/ |
D | conditions-iter-order.ll |
    6   ; Make sure we can eliminate `%tmp17 = icmp ult i32 %tmp10, 3`.
    41  %tmp10 = icmp ne i32* %tmp7, null
    42  br i1 %tmp10, label %bb17, label %bb13
    68  %tmp10 = load i32, i32* %tmp7, align 8
    69  %tmp11 = icmp ne i32 %tmp10, 0
    73  %tmp17 = icmp ult i32 %tmp10, 3
|