/external/libjpeg-turbo/ |
D | jidctint.c |
    178  JLONG tmp10, tmp11, tmp12, tmp13;  variable
    247  tmp11 = tmp1 + tmp2;
    286  wsptr[DCTSIZE * 1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS - PASS1_BITS);
    287  wsptr[DCTSIZE * 6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS - PASS1_BITS);
    349  tmp11 = tmp1 + tmp2;
    392  outptr[1] = range_limit[(int)DESCALE(tmp11 + tmp2,
    395  outptr[6] = range_limit[(int)DESCALE(tmp11 - tmp2,
    431  JLONG tmp0, tmp1, tmp2, tmp10, tmp11, tmp12, tmp13;  in jpeg_idct_7x7() local
    461  tmp11 = tmp10 + tmp12 + tmp13 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */  in jpeg_idct_7x7()
    489  wsptr[7 * 1] = (int)RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS - PASS1_BITS);  in jpeg_idct_7x7()
    [all …]
|
D | jfdctflt.c |
    63   FAST_FLOAT tmp10, tmp11, tmp12, tmp13;  variable
    85   tmp11 = tmp1 + tmp2;
    88   dataptr[0] = tmp10 + tmp11; /* phase 3 */
    89   dataptr[4] = tmp10 - tmp11;
    98   tmp11 = tmp5 + tmp6;
    105  z3 = tmp11 * ((FAST_FLOAT)0.707106781); /* c4 */
    135  tmp11 = tmp1 + tmp2;
    138  dataptr[DCTSIZE * 0] = tmp10 + tmp11; /* phase 3 */
    139  dataptr[DCTSIZE * 4] = tmp10 - tmp11;
    148  tmp11 = tmp5 + tmp6;
    [all …]
|
D | jfdctfst.c |
    120  DCTELEM tmp10, tmp11, tmp12, tmp13;  variable
    143  tmp11 = tmp1 + tmp2;
    146  dataptr[0] = tmp10 + tmp11; /* phase 3 */
    147  dataptr[4] = tmp10 - tmp11;
    156  tmp11 = tmp5 + tmp6;
    163  z3 = MULTIPLY(tmp11, FIX_0_707106781); /* c4 */
    193  tmp11 = tmp1 + tmp2;
    196  dataptr[DCTSIZE * 0] = tmp10 + tmp11; /* phase 3 */
    197  dataptr[DCTSIZE * 4] = tmp10 - tmp11;
    206  tmp11 = tmp5 + tmp6;
    [all …]
|
D | jidctflt.c |
    77   FAST_FLOAT tmp10, tmp11, tmp12, tmp13;  variable
    134  tmp11 = tmp0 - tmp2;
    141  tmp1 = tmp11 + tmp12;
    142  tmp2 = tmp11 - tmp12;
    157  tmp11 = (z11 - z13) * ((FAST_FLOAT)1.414213562); /* 2*c4 */
    164  tmp5 = tmp11 - tmp6;
    197  tmp11 = z5 - wsptr[4];
    204  tmp1 = tmp11 + tmp12;
    205  tmp2 = tmp11 - tmp12;
    215  tmp11 = (z11 - z13) * ((FAST_FLOAT)1.414213562);
    [all …]
|
D | jidctfst.c |
    176  DCTELEM tmp10, tmp11, tmp12, tmp13;  variable
    233  tmp11 = tmp0 - tmp2;
    240  tmp1 = tmp11 + tmp12;
    241  tmp2 = tmp11 - tmp12;
    256  tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); /* 2*c4 */
    263  tmp5 = tmp11 - tmp6;
    319  tmp11 = ((DCTELEM)wsptr[0] - (DCTELEM)wsptr[4]);
    327  tmp1 = tmp11 + tmp12;
    328  tmp2 = tmp11 - tmp12;
    338  tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); /* 2*c4 */
    [all …]
|
D | jfdctint.c |
    146  JLONG tmp10, tmp11, tmp12, tmp13;  variable
    173  tmp11 = tmp1 + tmp2;
    176  dataptr[0] = (DCTELEM)LEFT_SHIFT(tmp10 + tmp11, PASS1_BITS);
    177  dataptr[4] = (DCTELEM)LEFT_SHIFT(tmp10 - tmp11, PASS1_BITS);
    238  tmp11 = tmp1 + tmp2;
    241  dataptr[DCTSIZE * 0] = (DCTELEM)DESCALE(tmp10 + tmp11, PASS1_BITS);
    242  dataptr[DCTSIZE * 4] = (DCTELEM)DESCALE(tmp10 - tmp11, PASS1_BITS);
|
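Note: the libjpeg-turbo hits above all come from the 8x8 DCT/IDCT kernels, where tmp10..tmp13 hold the even-part butterfly of the transform; tmp11 is the sum (or, in the inverse transforms, the difference) of the two middle inputs, and the odd part reuses the same name for tmp5 + tmp6. A minimal scalar sketch of the even-part stage of the fast forward DCT, with illustrative names (the real code works in place on dataptr/wsptr rather than separate in/out arrays):

    /* Even-part butterfly of the fast (AAN-style) forward DCT, matching the
       tmp10/tmp11 lines listed above; in[] is one row of eight samples. */
    static void fdct_even_part(const float in[8], float out[8]) {
      float tmp0 = in[0] + in[7];
      float tmp1 = in[1] + in[6];
      float tmp2 = in[2] + in[5];
      float tmp3 = in[3] + in[4];

      float tmp10 = tmp0 + tmp3;                     /* phase 2 */
      float tmp13 = tmp0 - tmp3;
      float tmp11 = tmp1 + tmp2;
      float tmp12 = tmp1 - tmp2;

      out[0] = tmp10 + tmp11;                        /* phase 3 */
      out[4] = tmp10 - tmp11;

      float z1 = (tmp12 + tmp13) * 0.707106781f;     /* c4 rotation */
      out[2] = tmp13 + z1;
      out[6] = tmp13 - z1;
    }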
/external/libjpeg-turbo/simd/arm/ |
D | jidctfst-neon.c |
    133  int16x4_t tmp11 = vsub_s16(tmp0, tmp2);  in jsimd_idct_ifast_neon() local
    143  tmp1 = vadd_s16(tmp11, tmp12);  in jsimd_idct_ifast_neon()
    144  tmp2 = vsub_s16(tmp11, tmp12);  in jsimd_idct_ifast_neon()
    159  tmp11 = vqdmulh_lane_s16(z11_sub_z13, consts, 1);  in jsimd_idct_ifast_neon()
    160  tmp11 = vadd_s16(tmp11, z11_sub_z13);  in jsimd_idct_ifast_neon()
    173  tmp5 = vsub_s16(tmp11, tmp6);  in jsimd_idct_ifast_neon()
    208  int16x4_t tmp11 = vsub_s16(tmp0, tmp2);  in jsimd_idct_ifast_neon() local
    218  tmp1 = vadd_s16(tmp11, tmp12);  in jsimd_idct_ifast_neon()
    219  tmp2 = vsub_s16(tmp11, tmp12);  in jsimd_idct_ifast_neon()
    234  tmp11 = vqdmulh_lane_s16(z11_sub_z13, consts, 1);  in jsimd_idct_ifast_neon()
    [all …]
|
D | jfdctfst-neon.c |
    101  int16x8_t tmp11 = vaddq_s16(tmp1, tmp2);  in jsimd_fdct_ifast_neon() local
    104  col0 = vaddq_s16(tmp10, tmp11); /* phase 3 */  in jsimd_fdct_ifast_neon()
    105  col4 = vsubq_s16(tmp10, tmp11);  in jsimd_fdct_ifast_neon()
    113  tmp11 = vaddq_s16(tmp5, tmp6);  in jsimd_fdct_ifast_neon()
    122  int16x8_t z3 = vqdmulhq_lane_s16(tmp11, consts, 2);  in jsimd_fdct_ifast_neon()
    175  tmp11 = vaddq_s16(tmp1, tmp2);  in jsimd_fdct_ifast_neon()
    178  row0 = vaddq_s16(tmp10, tmp11); /* phase 3 */  in jsimd_fdct_ifast_neon()
    179  row4 = vsubq_s16(tmp10, tmp11);  in jsimd_fdct_ifast_neon()
    187  tmp11 = vaddq_s16(tmp5, tmp6);  in jsimd_fdct_ifast_neon()
    196  z3 = vqdmulhq_lane_s16(tmp11, consts, 2);  in jsimd_fdct_ifast_neon()
|
D | jidctint-neon.c |
    384  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);  in jsimd_idct_islow_pass1_regular() local
    446  vrshrn_n_s32(vaddq_s32(tmp11, tmp2), DESCALE_P1),  in jsimd_idct_islow_pass1_regular()
    453  vrshrn_n_s32(vsubq_s32(tmp11, tmp2), DESCALE_P1),  in jsimd_idct_islow_pass1_regular()
    506  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);  in jsimd_idct_islow_pass1_sparse() local
    529  vrshrn_n_s32(vaddq_s32(tmp11, tmp2), DESCALE_P1),  in jsimd_idct_islow_pass1_sparse()
    536  vrshrn_n_s32(vsubq_s32(tmp11, tmp2), DESCALE_P1),  in jsimd_idct_islow_pass1_sparse()
    595  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);  in jsimd_idct_islow_pass2_regular() local
    657  int16x8_t cols_13_s16 = vcombine_s16(vaddhn_s32(tmp11, tmp2),  in jsimd_idct_islow_pass2_regular()
    660  vsubhn_s32(tmp11, tmp2));  in jsimd_idct_islow_pass2_regular()
    737  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);  in jsimd_idct_islow_pass2_sparse() local
    [all …]
|
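The NEON versions under simd/arm/ perform the same butterflies on whole vectors of coefficients at once, so tmp11 there is an int16x4_t, int16x8_t, or int32x4_t depending on the kernel rather than a scalar, and the constant multiplies go through vqdmulh*_lane_s16 against precomputed fixed-point constants (the consts/z3 hits above). A minimal sketch of the vectorized add/subtract stage, with illustrative names, assuming eight 16-bit columns per register:

    #include <arm_neon.h>

    /* Vectorized even-part butterfly: each lane is one column of the 8x8
       block, mirroring the tmp10/tmp11 pattern in jfdctfst-neon.c. */
    static inline void fdct_even_part_neon(int16x8_t tmp0, int16x8_t tmp1,
                                           int16x8_t tmp2, int16x8_t tmp3,
                                           int16x8_t *col0, int16x8_t *col4) {
      int16x8_t tmp10 = vaddq_s16(tmp0, tmp3);
      int16x8_t tmp11 = vaddq_s16(tmp1, tmp2);

      *col0 = vaddq_s16(tmp10, tmp11);   /* phase 3 */
      *col4 = vsubq_s16(tmp10, tmp11);
    }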
/external/libyuv/files/source/ |
D | rotate_mmi.cc |
    28   uint64_t tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13;  in TransposeWx8_MMI() local
    141  [tmp9] "=&f"(tmp9), [tmp10] "=&f"(tmp10), [tmp11] "=&f"(tmp11),  in TransposeWx8_MMI()
    157  uint64_t tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13;  in TransposeUVWx8_MMI() local
    278  [tmp9] "=&f"(tmp9), [tmp10] "=&f"(tmp10), [tmp11] "=&f"(tmp11),  in TransposeUVWx8_MMI()
|
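In the libyuv hits, tmp7..tmp13 are the 64-bit temporaries bound to the "=&f" outputs of the MMI (Loongson SIMD) inline assembly in the transpose routines used for rotation; tmp11 carries no arithmetic meaning of its own there. As a rough plain-C reference for what an 8x8 tile transpose computes (my own sketch, not libyuv's API):

    #include <stdint.h>

    /* Reference 8x8 byte-tile transpose: element (i, j) of the source tile
       becomes element (j, i) of the destination tile. */
    static void transpose_tile_8x8(const uint8_t *src, int src_stride,
                                   uint8_t *dst, int dst_stride) {
      for (int i = 0; i < 8; ++i) {
        for (int j = 0; j < 8; ++j) {
          dst[j * dst_stride + i] = src[i * src_stride + j];
        }
      }
    }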
/external/llvm/test/Analysis/ScalarEvolution/ |
D | pr25369.ll |
    19  %tmp5 = phi i64 [ %tmp11, %bb4 ], [ 1, %bb2 ], [ 1, %bb ]
    25  %tmp11 = add nsw i64 %tmp5, 3
    26  %tmp12 = icmp eq i64 %tmp11, 64
    56  %tmp5 = phi i64 [ %tmp11, %bb4 ], [ 1, %bb2 ], [ 3, %bb ]
    62  %tmp11 = add nsw i64 %tmp5, 3
    63  %tmp12 = icmp eq i64 %tmp11, 64
|
D | pr3909.ll |
    13  %i.0 = phi i64 [ 0, %entry ], [ %tmp11, %endwhile5 ] ; <i64> [#uses=1]
    14  %m.0 = phi i64 [ 0, %entry ], [ %tmp11, %endwhile5 ] ; <i64> [#uses=2]
    28  %tmp11 = add i64 %i.0, 1 ; <i64> [#uses=2]
|
/external/llvm/test/Transforms/DeadStoreElimination/ |
D | cs-cs-aliasing.ll |
    33  %tmp11 = bitcast %union.anon* %tmp10 to i8*
    36  %tmp14 = getelementptr inbounds i8, i8* %tmp11, i64 1
    53  store i8* %tmp11, i8** %tmp12, align 8
    54  store i8 125, i8* %tmp11, align 8
    58  ; CHECK: store i8* %tmp11, i8** %tmp12, align 8
    59  ; CHECK: store i8 125, i8* %tmp11, align 8
    64  call void @llvm.memset.p0i8.i64(i8* %tmp11, i8 -51, i64 16, i32 8, i1 false) #0
|
/external/llvm/test/Transforms/ObjCARC/ |
D | pointer-types.ll |
    11  ; CHECK-NEXT: %tmp11 = bitcast void ()* %otherBlock to i8*
    12  ; CHECK-NEXT: call void @objc_release(i8* %tmp11)
    24  %tmp11 = bitcast void ()* %otherBlock to i8*
    25  call void @objc_release(i8* %tmp11) nounwind
|
/external/llvm/test/CodeGen/X86/ |
D | 2006-08-07-CycleInDAG.ll |
    14  %tmp11.s = load i32, i32* null ; <i32> [#uses=1]
    15  %tmp11.i = bitcast i32 %tmp11.s to i32 ; <i32> [#uses=1]
    17  %tmp13.i7 = mul i32 %tmp11.i, %n.i ; <i32> [#uses=1]
|
D | vec_logical.ll |
    35  %tmp11 = xor <4 x i32> %tmp9, %tmp10
    36  %tmp13 = bitcast <4 x i32> %tmp11 to <4 x float>
    53  %tmp11 = and <2 x i64> %tmp9, %tmp10
    54  %tmp13 = bitcast <2 x i64> %tmp11 to <2 x double>
    78  %tmp11 = bitcast <4 x float> %a to <4 x i32>
    80  %tmp13 = xor <4 x i32> %tmp11, < i32 -1, i32 -1, i32 -1, i32 -1 >
|
/external/llvm/test/CodeGen/Generic/ |
D | i128-addsub.ll |
    11  %tmp11 = shl i128 %tmp89, 64 ; <i128> [#uses=1]
    12  %tmp12 = or i128 %tmp11, %tmp67 ; <i128> [#uses=1]
    30  %tmp11 = shl i128 %tmp89, 64 ; <i128> [#uses=1]
    31  %tmp12 = or i128 %tmp11, %tmp67 ; <i128> [#uses=1]
|
D | 2006-05-06-GEP-Cast-Sink-Crash.ll |
    12  …%tmp11 = getelementptr %struct.SYMBOL_TABLE_ENTRY, %struct.SYMBOL_TABLE_ENTRY* %SYM_TAB, i32 0, i3…
    13  %tmp.i = bitcast i8* %tmp11 to i8* ; <i8*> [#uses=1]
    21  call void (i32, ...) @fprintf( i32 0, i8* %tmp11, i8* null )
|
/external/libvpx/vpx_dsp/mips/ |
D | intrapred16_dspr2.c |
    17   int32_t tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;  in vpx_h_predictor_16x16_dspr2() local
    153  [tmp10] "=&r"(tmp10), [tmp11] "=&r"(tmp11), [tmp12] "=&r"(tmp12),  in vpx_h_predictor_16x16_dspr2()
|
/external/llvm/test/Transforms/InstCombine/ |
D | 2010-11-01-lshr-mask.ll |
    39  %tmp11 = shl i8 %tmp10, 5
    42  ; CHECK: %tmp11 = shl nuw nsw i8 %tmp10, 5
    44  %tmp12 = xor i8 %tmp11, %tmp9
|
D | 2007-05-10-icmp-or.ll |
    4  %tmp11.not = icmp sgt i32 %tmp9, 255 ; <i1> [#uses=1]
    5  %bothcond = or i1 %tmp20, %tmp11.not ; <i1> [#uses=1]
|
D | vec_demanded_elts.ll |
    14   %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
    15   %tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
    33   %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2
    34   %tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, i32 3
    80   %tmp11 = sext i32 %tmp10 to i64
    84   %tmp15 = add i64 %tmp11, %tmp14
    157  %tmp11 = insertelement <4 x double> %tmp10, double 0.000000e+00, i32 2
    158  %tmp12 = insertelement <4 x double> %tmp11, double 0.000000e+00, i32 3
    171  %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2
    172  %tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, i32 3
|
/external/llvm/test/Transforms/Reassociate/ |
D | repeats.ll |
    180  %tmp11 = mul i4 %tmp10, %x
    181  ret i4 %tmp11
    201  %tmp11 = mul i4 %tmp10, %x
    202  %tmp12 = mul i4 %tmp11, %x
    223  %tmp11 = mul i4 %tmp10, %x
    224  %tmp12 = mul i4 %tmp11, %x
    247  %tmp11 = mul i4 %tmp10, %x
    248  %tmp12 = mul i4 %tmp11, %x
|
/external/llvm/test/CodeGen/PowerPC/ |
D | ppcf128-4.ll |
    8  %tmp11 = fadd ppc_fp128 %tmp789, %tmp6
    9  ret ppc_fp128 %tmp11
|
/external/llvm/test/CodeGen/WebAssembly/ |
D | irreducible-cfg.ll |
    31  %tmp11 = load double, double* %tmp10, align 4
    32  %tmp12 = fmul double %tmp11, 2.300000e+00
    72  %tmp11 = load double, double* %tmp10, align 4
    73  %tmp12 = fmul double %tmp11, 2.300000e+00
|