/external/libjpeg-turbo/

jidctint.c
   178  JLONG tmp10, tmp11, tmp12, tmp13;  variable
   247  tmp11 = tmp1 + tmp2;
   286  wsptr[DCTSIZE * 1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS - PASS1_BITS);
   287  wsptr[DCTSIZE * 6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS - PASS1_BITS);
   349  tmp11 = tmp1 + tmp2;
   392  outptr[1] = range_limit[(int)DESCALE(tmp11 + tmp2,
   395  outptr[6] = range_limit[(int)DESCALE(tmp11 - tmp2,
   431  JLONG tmp0, tmp1, tmp2, tmp10, tmp11, tmp12, tmp13;  in jpeg_idct_7x7() local
   461  tmp11 = tmp10 + tmp12 + tmp13 - MULTIPLY(z2, FIX(1.841218003));  /* c2+c4-c6 */  in jpeg_idct_7x7()
   489  wsptr[7 * 1] = (int)RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS - PASS1_BITS);  in jpeg_idct_7x7()
   [all …]

jfdctflt.c
    63  FAST_FLOAT tmp10, tmp11, tmp12, tmp13;  variable
    85  tmp11 = tmp1 + tmp2;
    88  dataptr[0] = tmp10 + tmp11;  /* phase 3 */
    89  dataptr[4] = tmp10 - tmp11;
    98  tmp11 = tmp5 + tmp6;
   105  z3 = tmp11 * ((FAST_FLOAT)0.707106781);  /* c4 */
   135  tmp11 = tmp1 + tmp2;
   138  dataptr[DCTSIZE * 0] = tmp10 + tmp11;  /* phase 3 */
   139  dataptr[DCTSIZE * 4] = tmp10 - tmp11;
   148  tmp11 = tmp5 + tmp6;
   [all …]

jfdctfst.c
   120  DCTELEM tmp10, tmp11, tmp12, tmp13;  variable
   143  tmp11 = tmp1 + tmp2;
   146  dataptr[0] = tmp10 + tmp11;  /* phase 3 */
   147  dataptr[4] = tmp10 - tmp11;
   156  tmp11 = tmp5 + tmp6;
   163  z3 = MULTIPLY(tmp11, FIX_0_707106781);  /* c4 */
   193  tmp11 = tmp1 + tmp2;
   196  dataptr[DCTSIZE * 0] = tmp10 + tmp11;  /* phase 3 */
   197  dataptr[DCTSIZE * 4] = tmp10 - tmp11;
   206  tmp11 = tmp5 + tmp6;
   [all …]

jidctflt.c
    77  FAST_FLOAT tmp10, tmp11, tmp12, tmp13;  variable
   134  tmp11 = tmp0 - tmp2;
   141  tmp1 = tmp11 + tmp12;
   142  tmp2 = tmp11 - tmp12;
   157  tmp11 = (z11 - z13) * ((FAST_FLOAT)1.414213562);  /* 2*c4 */
   164  tmp5 = tmp11 - tmp6;
   197  tmp11 = z5 - wsptr[4];
   204  tmp1 = tmp11 + tmp12;
   205  tmp2 = tmp11 - tmp12;
   215  tmp11 = (z11 - z13) * ((FAST_FLOAT)1.414213562);
   [all …]

jidctfst.c
   176  DCTELEM tmp10, tmp11, tmp12, tmp13;  variable
   233  tmp11 = tmp0 - tmp2;
   240  tmp1 = tmp11 + tmp12;
   241  tmp2 = tmp11 - tmp12;
   256  tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562);  /* 2*c4 */
   263  tmp5 = tmp11 - tmp6;
   319  tmp11 = ((DCTELEM)wsptr[0] - (DCTELEM)wsptr[4]);
   327  tmp1 = tmp11 + tmp12;
   328  tmp2 = tmp11 - tmp12;
   338  tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562);  /* 2*c4 */
   [all …]

jfdctint.c
   146  JLONG tmp10, tmp11, tmp12, tmp13;  variable
   173  tmp11 = tmp1 + tmp2;
   176  dataptr[0] = (DCTELEM)LEFT_SHIFT(tmp10 + tmp11, PASS1_BITS);
   177  dataptr[4] = (DCTELEM)LEFT_SHIFT(tmp10 - tmp11, PASS1_BITS);
   238  tmp11 = tmp1 + tmp2;
   241  dataptr[DCTSIZE * 0] = (DCTELEM)DESCALE(tmp10 + tmp11, PASS1_BITS);
   242  dataptr[DCTSIZE * 4] = (DCTELEM)DESCALE(tmp10 - tmp11, PASS1_BITS);

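In all six of these files, tmp10 and tmp11 carry the even half of the 8-point DCT/IDCT butterfly: coefficients 0 and 4 are produced from them with nothing but additions and subtractions, which is why the same two statements recur in the float, fast-integer, and slow-integer variants (only the scaling macro around them changes). A minimal standalone sketch of that shared forward-DCT pattern (my reconstruction for illustration, not libjpeg-turbo code):

/* Even-part butterfly of an 8-point DCT row: the role tmp10/tmp11 play
 * in every jfdct* hit above.  Coefficients 0 and 4 need no multiplies,
 * so these lines are identical across all precision variants. */
#include <stdio.h>

static void even_part_butterfly(const float d[8], float *out0, float *out4)
{
  float tmp0 = d[0] + d[7];
  float tmp1 = d[1] + d[6];
  float tmp2 = d[2] + d[5];
  float tmp3 = d[3] + d[4];

  float tmp10 = tmp0 + tmp3;   /* phase 2 */
  float tmp11 = tmp1 + tmp2;

  *out0 = tmp10 + tmp11;       /* phase 3: DC term (unscaled) */
  *out4 = tmp10 - tmp11;       /* coefficient 4 */
}

int main(void)
{
  const float row[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
  float c0, c4;
  even_part_butterfly(row, &c0, &c4);
  printf("c0=%g c4=%g\n", c0, c4);  /* c0 is the row sum, 36 */
  return 0;
}

The jidct* hits run the same butterfly in reverse: tmp11 = tmp0 - tmp2 splits coefficients 0 and 4 back into sums and differences of samples.
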
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/

buffer-schedule.ll
    22  %tmp11 = shl i32 %tmp10, 2
    25  %tmp14 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %tmp13, i32 0, i32 %tmp11, i1 false, i1…
    27  call void @llvm.amdgcn.buffer.store.f32(float %tmp14, <4 x i32> %tmp17, i32 0, i32 %tmp11, i1 fals…
    29  %tmp21 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %tmp20, i32 0, i32 %tmp11, i1 false, i1…
    31  call void @llvm.amdgcn.buffer.store.f32(float %tmp22, <4 x i32> %tmp20, i32 0, i32 %tmp11, i1 fals…
    33  %tmp26 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %tmp25, i32 0, i32 %tmp11, i1 false, i1…
    35  call void @llvm.amdgcn.buffer.store.f32(float %tmp27, <4 x i32> %tmp25, i32 0, i32 %tmp11, i1 fals…
    37  %tmp31 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %tmp30, i32 0, i32 %tmp11, i1 false, i1…
    39  call void @llvm.amdgcn.buffer.store.f32(float %tmp32, <4 x i32> %tmp30, i32 0, i32 %tmp11, i1 fals…
    41  %tmp36 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %tmp35, i32 0, i32 %tmp11, i1 false, i1…
    [all …]

wait.ll
    19  %tmp11 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp10.cast, i32 %arg6, …
    20  %tmp12 = extractelement <4 x float> %tmp11, i32 0
    21  %tmp13 = extractelement <4 x float> %tmp11, i32 1
    23  %tmp14 = extractelement <4 x float> %tmp11, i32 2
    52  %tmp11 = load <16 x i8>, <16 x i8> addrspace(4)* %tmp, align 16, !tbaa !0
    54  %tmp11.cast = bitcast <16 x i8> %tmp11 to <4 x i32>
    55  %tmp13 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp11.cast, i32 %tmp12,…

/external/libjpeg-turbo/simd/powerpc/

jfdctfst-altivec.c
    50  tmp11 = vec_add(tmp1, tmp2); \
    53  out0 = vec_add(tmp10, tmp11); \
    54  out4 = vec_sub(tmp10, tmp11); \
    66  tmp11 = vec_add(tmp5, tmp6); \
    77  tmp11 = vec_sl(tmp11, pre_multiply_scale_bits); \
    78  z3 = vec_madds(tmp11, pw_0707, pw_zero); \
    94  tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp10, tmp11, tmp12, tmp13,  in jsimd_fdct_ifast_altivec() local

jidctfst-altivec.c
    51  tmp11 = vec_sub(in##0, in##4); \
    61  tmp1 = vec_add(tmp11, tmp12); \
    62  tmp2 = vec_sub(tmp11, tmp12); \
    73  tmp11 = vec_sub(z11, z13); \
    74  tmp11 = vec_sl(tmp11, pre_multiply_scale_bits); \
    75  tmp11 = vec_madds(tmp11, pw_F1414, pw_zero); \
    98  tmp5 = vec_sub(tmp11, tmp6); \
   121  tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp10, tmp11, tmp12, tmp13,  in jsimd_idct_ifast_altivec() local

jfdctint-altivec.c
   152  tmp11 = vec_add(tmp1, tmp2); \
   155  out0 = vec_add(tmp10, tmp11); \
   157  out4 = vec_sub(tmp10, tmp11); \
   168  tmp11 = vec_add(tmp1, tmp2); \
   171  out0 = vec_add(tmp10, tmp11); \
   174  out4 = vec_sub(tmp10, tmp11); \
   186  tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp10, tmp11, tmp12, tmp13,  in jsimd_fdct_islow_altivec() local

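The AltiVec ports keep the same tmp10/tmp11 dataflow but hold eight 16-bit samples per register, so each vec_add/vec_sub advances the butterfly for a whole row of the 8x8 block at once. A minimal sketch of the vectorized even part (my illustration, assuming a PowerPC target compiled with -maltivec; the library itself wraps this in the macros matched above):

#include <altivec.h>

typedef __vector signed short vec16;   /* 8 samples per register */

/* Even-part butterfly for all 8 columns at once; tmp0..tmp3 are the
 * pairwise sums already formed by the caller, as in the scalar code. */
static void even_butterfly_vec(vec16 tmp0, vec16 tmp1, vec16 tmp2, vec16 tmp3,
                               vec16 *out0, vec16 *out4)
{
  vec16 tmp10 = vec_add(tmp0, tmp3);
  vec16 tmp11 = vec_add(tmp1, tmp2);

  *out0 = vec_add(tmp10, tmp11);   /* row of coefficient 0 */
  *out4 = vec_sub(tmp10, tmp11);   /* row of coefficient 4 */
}
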
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/

vec_logical.ll
    16  %tmp11 = xor <4 x i32> %tmp9, %tmp10  ; <<4 x i32>> [#uses=1]
    17  %tmp13 = bitcast <4 x i32> %tmp11 to <4 x float>  ; <<4 x float>> [#uses=1]
    25  %tmp11 = and <2 x i64> %tmp9, %tmp10  ; <<2 x i64>> [#uses=1]
    26  %tmp13 = bitcast <2 x i64> %tmp11 to <2 x double>  ; <<2 x double>> [#uses=1]
    33  %tmp11 = bitcast <4 x float> %a to <4 x i32>  ; <<4 x i32>> [#uses=1]
    35  %tmp13 = xor <4 x i32> %tmp11, < i32 -1, i32 -1, i32 -1, i32 -1 >  ; <<4 x i32>> [#uses=1]

2006-08-07-CycleInDAG.ll
    14  %tmp11.s = load i32* null  ; <i32> [#uses=1]
    15  %tmp11.i = bitcast i32 %tmp11.s to i32  ; <i32> [#uses=1]
    17  %tmp13.i7 = mul i32 %tmp11.i, %n.i  ; <i32> [#uses=1]

/external/llvm/test/Transforms/DeadStoreElimination/

cs-cs-aliasing.ll
    33  %tmp11 = bitcast %union.anon* %tmp10 to i8*
    36  %tmp14 = getelementptr inbounds i8, i8* %tmp11, i64 1
    53  store i8* %tmp11, i8** %tmp12, align 8
    54  store i8 125, i8* %tmp11, align 8
    58  ; CHECK: store i8* %tmp11, i8** %tmp12, align 8
    59  ; CHECK: store i8 125, i8* %tmp11, align 8
    64  call void @llvm.memset.p0i8.i64(i8* %tmp11, i8 -51, i64 16, i32 8, i1 false) #0

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/DeadStoreElimination/

cs-cs-aliasing.ll
    33  %tmp11 = bitcast %union.anon* %tmp10 to i8*
    36  %tmp14 = getelementptr inbounds i8, i8* %tmp11, i64 1
    53  store i8* %tmp11, i8** %tmp12, align 8
    54  store i8 125, i8* %tmp11, align 8
    58  ; CHECK: store i8* %tmp11, i8** %tmp12, align 8
    59  ; CHECK: store i8 125, i8* %tmp11, align 8
    64  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp11, i8 -51, i64 16, i1 false) #0

/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/ScalarEvolution/

pr25369.ll
    19  %tmp5 = phi i64 [ %tmp11, %bb4 ], [ 1, %bb2 ], [ 1, %bb ]
    25  %tmp11 = add nsw i64 %tmp5, 3
    26  %tmp12 = icmp eq i64 %tmp11, 64
    56  %tmp5 = phi i64 [ %tmp11, %bb4 ], [ 1, %bb2 ], [ 3, %bb ]
    62  %tmp11 = add nsw i64 %tmp5, 3
    63  %tmp12 = icmp eq i64 %tmp11, 64

/external/llvm/test/Analysis/ScalarEvolution/

pr25369.ll
    19  %tmp5 = phi i64 [ %tmp11, %bb4 ], [ 1, %bb2 ], [ 1, %bb ]
    25  %tmp11 = add nsw i64 %tmp5, 3
    26  %tmp12 = icmp eq i64 %tmp11, 64
    56  %tmp5 = phi i64 [ %tmp11, %bb4 ], [ 1, %bb2 ], [ 3, %bb ]
    62  %tmp11 = add nsw i64 %tmp5, 3
    63  %tmp12 = icmp eq i64 %tmp11, 64

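Both copies of this ScalarEvolution test exercise trip-count computation for an induction variable that starts at 1, steps by 3, and leaves the loop when the post-increment value compares equal to 64 (the second function differs only in one phi incoming value of 3). A rough C equivalent of the first loop, hand-reconstructed from the phi/add/icmp lines above (the actual test body differs):

#include <stdio.h>

int main(void)
{
  long i = 1, trips = 0;
  for (;;) {
    long next = i + 3;        /* %tmp11 = add nsw i64 %tmp5, 3 */
    trips++;
    if (next == 64) break;    /* %tmp12 = icmp eq i64 %tmp11, 64 */
    i = next;
  }
  /* 64 - 1 is divisible by the stride 3, so the eq exit is reachable
   * and SCEV can derive an exact trip count: (64 - 1) / 3 = 21. */
  printf("trip count = %ld\n", trips);
  return 0;
}
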
/external/llvm/test/Transforms/ObjCARC/

pointer-types.ll
    11  ; CHECK-NEXT: %tmp11 = bitcast void ()* %otherBlock to i8*
    12  ; CHECK-NEXT: call void @objc_release(i8* %tmp11)
    24  %tmp11 = bitcast void ()* %otherBlock to i8*
    25  call void @objc_release(i8* %tmp11) nounwind

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/ObjCARC/

pointer-types.ll
    11  ; CHECK-NEXT: %tmp11 = bitcast void ()* %otherBlock to i8*
    12  ; CHECK-NEXT: call void @objc_release(i8* %tmp11)
    24  %tmp11 = bitcast void ()* %otherBlock to i8*
    25  call void @objc_release(i8* %tmp11) nounwind

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/

2006-08-07-CycleInDAG.ll
    14  %tmp11.s = load i32, i32* null  ; <i32> [#uses=1]
    15  %tmp11.i = bitcast i32 %tmp11.s to i32  ; <i32> [#uses=1]
    17  %tmp13.i7 = mul i32 %tmp11.i, %n.i  ; <i32> [#uses=1]

vec_logical.ll
    35  %tmp11 = xor <4 x i32> %tmp9, %tmp10
    36  %tmp13 = bitcast <4 x i32> %tmp11 to <4 x float>
    53  %tmp11 = and <2 x i64> %tmp9, %tmp10
    54  %tmp13 = bitcast <2 x i64> %tmp11 to <2 x double>
    78  %tmp11 = bitcast <4 x float> %a to <4 x i32>
    80  %tmp13 = xor <4 x i32> %tmp11, < i32 -1, i32 -1, i32 -1, i32 -1 >

/external/llvm/test/CodeGen/X86/

2006-08-07-CycleInDAG.ll
    14  %tmp11.s = load i32, i32* null  ; <i32> [#uses=1]
    15  %tmp11.i = bitcast i32 %tmp11.s to i32  ; <i32> [#uses=1]
    17  %tmp13.i7 = mul i32 %tmp11.i, %n.i  ; <i32> [#uses=1]

vec_logical.ll
    35  %tmp11 = xor <4 x i32> %tmp9, %tmp10
    36  %tmp13 = bitcast <4 x i32> %tmp11 to <4 x float>
    53  %tmp11 = and <2 x i64> %tmp9, %tmp10
    54  %tmp13 = bitcast <2 x i64> %tmp11 to <2 x double>
    78  %tmp11 = bitcast <4 x float> %a to <4 x i32>
    80  %tmp13 = xor <4 x i32> %tmp11, < i32 -1, i32 -1, i32 -1, i32 -1 >

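The vec_logical.ll hits show the standard IR idiom for bitwise logic on floating-point vectors: bitcast to an integer vector, apply xor/and, bitcast back, which the X86 backend folds into a single XORPS/ANDPS-style instruction. The same computation written with SSE intrinsics (my illustration, not the test itself):

#include <stdio.h>
#include <xmmintrin.h>

int main(void)
{
  /* Sign-flip four floats by XORing their bit patterns with 0x80000000;
   * this is exactly the bitcast/xor/bitcast sequence in the IR above,
   * and the backend selects one xorps for it. */
  __m128 a    = _mm_set_ps(4.0f, -3.0f, 2.0f, -1.0f);
  __m128 sign = _mm_set1_ps(-0.0f);   /* 0x80000000 in each lane */
  __m128 neg  = _mm_xor_ps(a, sign);  /* flips each sign bit */

  float out[4];
  _mm_storeu_ps(out, neg);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* 1 -2 3 -4 */
  return 0;
}
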
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoopStrengthReduce/X86/

lsr-filtering-scaledreg.ll
    32  %tmp11 = phi i8* [ %tmp8, %bb ], [ %tmp17, %bb12 ]
    40  %tmp17 = getelementptr inbounds i8, i8* %tmp11, i64 16
    44  %tmp19 = icmp ugt i8* %tmp11, null
    46  %tmp21 = getelementptr inbounds i8, i8* %tmp11, i64 8
    48  %tmp23 = select i1 %tmp19, i8* %tmp11, i8* %tmp21

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LowerExpectIntrinsic/

phi_tern.ll
    20  %tmp11 = sext i32 %tmp10 to i64
    21  %expect = call i64 @llvm.expect.i64(i64 %tmp11, i64 0)
    41  %tmp11 = sext i32 %tmp10 to i64
    42  %expect = call i64 @llvm.expect.i64(i64 %tmp11, i64 0)

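The file name phi_tern and the sext-then-expect pair suggest __builtin_expect applied to a C ternary whose value reaches the intrinsic through a phi; LowerExpectIntrinsic rewrites this into branch-weight metadata on the branch feeding the phi. A guessed C shape that produces this IR (an assumption on my part; the actual test source may differ):

#include <stdio.h>

long choose(int c, int a, int b)
{
  /* The ternary becomes a phi across two blocks, and the int result is
   * sign-extended to i64 because __builtin_expect takes a long: that
   * matches the "sext i32 ... to i64" feeding llvm.expect.i64 above. */
  return __builtin_expect(c ? a : b, 0);
}

int main(void)
{
  printf("%ld\n", choose(1, 7, 9));  /* prints 7; 0 was only "expected" */
  return 0;
}
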