Home
last modified time | relevance | path

Searched refs: tmp0 (Results 1 – 25 of 283) sorted by relevance

Pages: 1 2 3 4 5 6 7 8 9 10 >> ... 12

/external/webrtc/webrtc/common_audio/signal_processing/
Dresample_by_2_internal.c34 int32_t tmp0, tmp1, diff; in WebRtcSpl_DownBy2IntToShort() local
42 tmp0 = in[i << 1]; in WebRtcSpl_DownBy2IntToShort()
43 diff = tmp0 - state[1]; in WebRtcSpl_DownBy2IntToShort()
47 state[0] = tmp0; in WebRtcSpl_DownBy2IntToShort()
53 tmp0 = state[1] + diff * kResampleAllpass[1][1]; in WebRtcSpl_DownBy2IntToShort()
55 diff = tmp0 - state[3]; in WebRtcSpl_DownBy2IntToShort()
61 state[2] = tmp0; in WebRtcSpl_DownBy2IntToShort()
72 tmp0 = in[i << 1]; in WebRtcSpl_DownBy2IntToShort()
73 diff = tmp0 - state[5]; in WebRtcSpl_DownBy2IntToShort()
77 state[4] = tmp0; in WebRtcSpl_DownBy2IntToShort()
[all …]
/external/aac/libFDK/src/arm/
Dscale_arm.cpp116 FIXP_DBL tmp0 = mySpec[0]; in scaleValuesWithFactor() local
120 tmp0 = fMultDiv2(tmp0, factor); in scaleValuesWithFactor()
124 tmp0 <<= shift; in scaleValuesWithFactor()
128 *mySpec++ = tmp0; in scaleValuesWithFactor()
135 FIXP_DBL tmp0 = mySpec[0]; in scaleValuesWithFactor() local
136 tmp0 = fMultDiv2(tmp0, factor); in scaleValuesWithFactor()
137 tmp0 <<= shift; in scaleValuesWithFactor()
138 *mySpec++ = tmp0; in scaleValuesWithFactor()
146 FIXP_DBL tmp0 = mySpec[0]; in scaleValuesWithFactor() local
150 tmp0 = fMultDiv2(tmp0, factor); in scaleValuesWithFactor()
[all …]
/external/libjpeg-turbo/
Djidctred.c125 JLONG tmp0, tmp2, tmp10, tmp12; variable
162 tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
163 tmp0 = LEFT_SHIFT(tmp0, CONST_BITS+1);
170 tmp10 = tmp0 + tmp2;
171 tmp12 = tmp0 - tmp2;
180 tmp0 = MULTIPLY(z1, - FIX_0_211164243) /* sqrt(2) * (c3-c1) */
194 wsptr[DCTSIZE*1] = (int) DESCALE(tmp12 + tmp0, CONST_BITS-PASS1_BITS+1);
195 wsptr[DCTSIZE*2] = (int) DESCALE(tmp12 - tmp0, CONST_BITS-PASS1_BITS+1);
224 tmp0 = LEFT_SHIFT((JLONG) wsptr[0], CONST_BITS+1);
229 tmp10 = tmp0 + tmp2;
[all …]
Djidctint.c177 JLONG tmp0, tmp1, tmp2, tmp3; variable
242 tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS);
245 tmp10 = tmp0 + tmp3;
246 tmp13 = tmp0 - tmp3;
254 tmp0 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
259 z1 = tmp0 + tmp3;
261 z3 = tmp0 + tmp2;
265 tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
277 tmp0 += z1 + z3;
290 wsptr[DCTSIZE*3] = (int) DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS);
[all …]
/external/mesa3d/src/gallium/auxiliary/tgsi/
Dtgsi_aa_point.c102 unsigned tmp0; in aa_prolog() local
148 tmp0 = ts->tmp; in aa_prolog()
152 TGSI_FILE_TEMPORARY, tmp0, TGSI_WRITEMASK_XY, in aa_prolog()
158 TGSI_FILE_TEMPORARY, tmp0, TGSI_WRITEMASK_X, in aa_prolog()
159 TGSI_FILE_TEMPORARY, tmp0, in aa_prolog()
160 TGSI_FILE_TEMPORARY, tmp0, false); in aa_prolog()
164 TGSI_FILE_TEMPORARY, tmp0, TGSI_WRITEMASK_X, in aa_prolog()
165 TGSI_FILE_TEMPORARY, tmp0); in aa_prolog()
171 TGSI_FILE_TEMPORARY, tmp0, TGSI_WRITEMASK_W, in aa_prolog()
177 TGSI_FILE_TEMPORARY, tmp0, TGSI_WRITEMASK_Y, in aa_prolog()
[all …]
/external/pdfium/third_party/libjpeg/
Dfpdfapi_jidctred.c122 INT32 tmp0, tmp2, tmp10, tmp12; variable
158 tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
159 tmp0 <<= (CONST_BITS+1);
166 tmp10 = tmp0 + tmp2;
167 tmp12 = tmp0 - tmp2;
176 tmp0 = MULTIPLY(z1, - FIX_0_211164243) /* sqrt(2) * (c3-c1) */
190 wsptr[DCTSIZE*1] = (int) DESCALE(tmp12 + tmp0, CONST_BITS-PASS1_BITS+1);
191 wsptr[DCTSIZE*2] = (int) DESCALE(tmp12 - tmp0, CONST_BITS-PASS1_BITS+1);
220 tmp0 = ((INT32) wsptr[0]) << (CONST_BITS+1);
225 tmp10 = tmp0 + tmp2;
[all …]
Dfpdfapi_jidctint.c152 INT32 tmp0, tmp1, tmp2, tmp3; variable
216 tmp0 = (z2 + z3) << CONST_BITS;
219 tmp10 = tmp0 + tmp3;
220 tmp13 = tmp0 - tmp3;
228 tmp0 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
233 z1 = tmp0 + tmp3;
235 z3 = tmp0 + tmp2;
239 tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
251 tmp0 += z1 + z3;
264 wsptr[DCTSIZE*3] = (int) DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS);
[all …]
/external/llvm/test/CodeGen/AMDGPU/
Dflat_atomics_i64.ll9 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
19 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
20 store i64 %tmp0, i64 addrspace(4)* %out2
30 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
41 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
42 store i64 %tmp0, i64 addrspace(4)* %out2
50 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %out, i64 %in seq_cst
59 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %out, i64 %in seq_cst
60 store i64 %tmp0, i64 addrspace(4)* %out2
69 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %ptr, i64 %in seq_cst
[all …]
Doperand-folding.ll7 %tmp0 = icmp ne i32 %fold, 0
8 br i1 %tmp0, label %if, label %endif
26 %tmp0 = icmp ne i32 %cmp, 0
27 br i1 %tmp0, label %if, label %endif
51 %tmp0 = add i64 %val, 1
52 store i64 %tmp0, i64 addrspace(1)* %out
66 %tmp0 = call i32 @llvm.amdgcn.workitem.id.x()
67 %tmp1 = add i32 %tmp0, 1
68 %tmp2 = add i32 %tmp0, 2
69 %tmp3 = add i32 %tmp0, 3
[all …]
Dglobal_atomics_i64.ll9 %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
19 %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
20 store i64 %tmp0, i64 addrspace(1)* %out2
31 %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
43 %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
44 store i64 %tmp0, i64 addrspace(1)* %out2
52 %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
61 %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
62 store i64 %tmp0, i64 addrspace(1)* %out2
72 %tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst
[all …]
Dcaptured-frame-index.ll31 %tmp0 = alloca float
33 store float 4.0, float* %tmp0
35 store volatile float* %tmp0, float* addrspace(3)* %ptr
67 %tmp0 = alloca [512 x i32]
71 %tmp0.cast = bitcast [512 x i32]* %tmp0 to i32*
72 store volatile i32 32, i32* %tmp0.cast
93 %tmp0 = alloca i32*
96 store volatile i32* inttoptr (i32 1234 to i32*), i32** %tmp0
131 %tmp0 = alloca float
134 store volatile float 0.0, float *%tmp0
[all …]
Dload-local-f32.ll12 %tmp0 = load float, float addrspace(3)* %in
13 store float %tmp0, float addrspace(1)* %out
25 %tmp0 = load <2 x float>, <2 x float> addrspace(3)* %in
26 store <2 x float> %tmp0, <2 x float> addrspace(1)* %out
43 %tmp0 = load <3 x float>, <3 x float> addrspace(3)* %in
44 store <3 x float> %tmp0, <3 x float> addrspace(3)* %out
57 %tmp0 = load <4 x float>, <4 x float> addrspace(3)* %in
58 store <4 x float> %tmp0, <4 x float> addrspace(3)* %out
76 %tmp0 = load <8 x float>, <8 x float> addrspace(3)* %in
77 store <8 x float> %tmp0, <8 x float> addrspace(3)* %out
[all …]
/external/sonivox/arm-wt-22k/lib_src/
Deas_wtengine.c83 EAS_I32 tmp0; in WT_VoiceGain() local
117 tmp0 = *pInputBuffer++; in WT_VoiceGain()
123 tmp2 *= tmp0; in WT_VoiceGain()
135 tmp0 = tmp2 * gainLeft; in WT_VoiceGain()
137 tmp0 = tmp0 >> NUM_MIXER_GUARD_BITS; in WT_VoiceGain()
138 tmp1 += tmp0; in WT_VoiceGain()
145 tmp0 = tmp2 * gainRight; in WT_VoiceGain()
147 tmp0 = tmp0 >> NUM_MIXER_GUARD_BITS; in WT_VoiceGain()
148 tmp1 += tmp0; in WT_VoiceGain()
445 EAS_I32 tmp0; in WT_NoiseGenerator() local
[all …]
DARM-E_interpolate_loop_gnu.s45 tmp0 .req r1 @reuse register label
81 SUBS tmp0, pPhaseAccum, pLoopEnd @ check for loop end
82 ADDGE pPhaseAccum, pLoopStart, tmp0 @ loop back to start
85 LDRSB tmp0, [pPhaseAccum] @ tmp0 = x0
88 LDRSH tmp0, [pPhaseAccum] @ tmp0 = x0
94 SUB tmp1, tmp1, tmp0 @ tmp1 = x1 - x0
105 MOV tmp0, tmp0, LSL #6 @ boost 8-bit signal by 36dB
107 MOV tmp0, tmp0, ASR #2 @ reduce 16-bit signal by 12dB
110 ADD tmp1, tmp0, tmp1, ASR #(NUM_EG1_FRAC_BITS-6) @ tmp1 = tmp0 + (tmp1 >> (15-6))
DARM-E_voice_gain_gnu.s45 tmp0 .req r4 label
85 LDR tmp0, [pWTFrame, #m_prevGain]
92 SMULBB gainLeft, tmp0, gainLeft
100 SMULBB gainRight, tmp0, gainRight
107 LDRSH tmp0, [pInputBuffer], #2
114 SMLAWB tmp1, gainLeft, tmp0, tmp1
122 SMLAWB tmp2, gainRight, tmp0, tmp2
126 LDRGTSH tmp0, [pInputBuffer], #2
146 LDRSH tmp0, [pInputBuffer], #NEXT_OUTPUT_PCM @ fetch voice output
150 SMULWB tmp0, gain, tmp0 @ sample * local gain
[all …]
/external/mesa3d/src/gallium/auxiliary/draw/
Ddraw_pipe_aapoint.c123 int tmp0, colorTemp; /**< temp registers */ member
173 int tmp0; in aa_transform_prolog() local
180 if (aactx->tmp0 < 0) in aa_transform_prolog()
181 aactx->tmp0 = i; in aa_transform_prolog()
189 assert(aactx->colorTemp != aactx->tmp0); in aa_transform_prolog()
191 tmp0 = aactx->tmp0; in aa_transform_prolog()
199 tgsi_transform_temp_decl(ctx, tmp0); in aa_transform_prolog()
214 TGSI_FILE_TEMPORARY, tmp0, TGSI_WRITEMASK_XY, in aa_transform_prolog()
220 TGSI_FILE_TEMPORARY, tmp0, TGSI_WRITEMASK_X, in aa_transform_prolog()
221 TGSI_FILE_TEMPORARY, tmp0, TGSI_SWIZZLE_X, in aa_transform_prolog()
[all …]
/external/llvm/test/CodeGen/AArch64/
Dnontemporal.ll128 %tmp0 = getelementptr <2 x double>, <2 x double>* %p, i32 1
129 store <2 x double> %v, <2 x double>* %tmp0, align 1, !nontemporal !0
138 %tmp0 = getelementptr <2 x double>, <2 x double>* %p, i32 -1
139 store <2 x double> %v, <2 x double>* %tmp0, align 1, !nontemporal !0
148 %tmp0 = getelementptr <2 x float>, <2 x float>* %p, i32 1
149 store <2 x float> %v, <2 x float>* %tmp0, align 1, !nontemporal !0
158 %tmp0 = getelementptr <2 x float>, <2 x float>* %p, i32 -1
159 store <2 x float> %v, <2 x float>* %tmp0, align 1, !nontemporal !0
168 %tmp0 = getelementptr i64, i64* %p, i32 1
169 store i64 %v, i64* %tmp0, align 1, !nontemporal !0
[all …]
/external/libmpeg2/common/x86/
Dimpeg2_inter_pred_sse42_intr.c480 __m128i tmp0, tmp1; in impeg2_mc_halfx_halfy_8x8_sse42() local
498 tmp0 = _mm_add_epi16(src_r0, src_r0_1); //Row 0 horizontal interpolation in impeg2_mc_halfx_halfy_8x8_sse42()
500 tmp0 = _mm_add_epi16(tmp0, tmp1); //Row 0 vertical interpolation in impeg2_mc_halfx_halfy_8x8_sse42()
501 tmp0 = _mm_add_epi16(tmp0, value_2); in impeg2_mc_halfx_halfy_8x8_sse42()
502 tmp0 = _mm_srli_epi16(tmp0, 2); in impeg2_mc_halfx_halfy_8x8_sse42()
503 tmp0 = _mm_packus_epi16(tmp0, value_2); in impeg2_mc_halfx_halfy_8x8_sse42()
505 _mm_storel_epi64((__m128i *)out, tmp0); in impeg2_mc_halfx_halfy_8x8_sse42()
518 tmp0 = _mm_add_epi16(src_r0, src_r0_1); //Row 2 horizontal interpolation in impeg2_mc_halfx_halfy_8x8_sse42()
519 tmp1 = _mm_add_epi16(tmp0, tmp1); //Row 1 vertical interpolation in impeg2_mc_halfx_halfy_8x8_sse42()
539 tmp0 = _mm_add_epi16(tmp0, tmp1); //Row 2 vertical interpolation in impeg2_mc_halfx_halfy_8x8_sse42()
[all …]
/external/llvm/test/CodeGen/X86/
Datomic-eflags-reuse.ll12 %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
13 %tmp1 = icmp slt i64 %tmp0, 0
26 %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
27 %tmp1 = icmp sge i64 %tmp0, 0
40 %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
41 %tmp1 = icmp sle i64 %tmp0, 0
54 %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
55 %tmp1 = icmp sgt i64 %tmp0, 0
70 %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
71 %tmp1 = icmp slt i64 %tmp0, 0
[all …]
/external/libvpx/libvpx/vpx_dsp/mips/
Dvpx_convolve8_vert_msa.c73 v16u8 tmp0, tmp1; in common_vt_8t_8w_msa() local
105 tmp0 = PCKEV_XORI128_UB(out0_r, out1_r); in common_vt_8t_8w_msa()
107 ST8x4_UB(tmp0, tmp1, dst, dst_stride); in common_vt_8t_8w_msa()
129 v16u8 tmp0, tmp1, tmp2, tmp3; in common_vt_8t_16w_msa() local
177 tmp0, tmp1, tmp2, tmp3); in common_vt_8t_16w_msa()
178 XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3); in common_vt_8t_16w_msa()
179 ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride); in common_vt_8t_16w_msa()
210 v16u8 tmp0, tmp1, tmp2, tmp3; in common_vt_8t_16w_mult_msa() local
261 out3_r, tmp0, tmp1, tmp2, tmp3); in common_vt_8t_16w_mult_msa()
262 XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3); in common_vt_8t_16w_mult_msa()
[all …]
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/
Dvst1.ll15 %tmp0 = bitcast i16* %A to i8*
17 call void @llvm.arm.neon.vst1.v4i16(i8* %tmp0, <4 x i16> %tmp1, i32 1)
24 %tmp0 = bitcast i32* %A to i8*
26 call void @llvm.arm.neon.vst1.v2i32(i8* %tmp0, <2 x i32> %tmp1, i32 1)
33 %tmp0 = bitcast float* %A to i8*
35 call void @llvm.arm.neon.vst1.v2f32(i8* %tmp0, <2 x float> %tmp1, i32 1)
44 %tmp0 = bitcast float* %A to i8*
46 call void @llvm.arm.neon.vst1.v2f32(i8* %tmp0, <2 x float> %tmp1, i32 1)
55 %tmp0 = bitcast i64* %A to i8*
57 call void @llvm.arm.neon.vst1.v1i64(i8* %tmp0, <1 x i64> %tmp1, i32 1)
[all …]
Dvld1.ll15 %tmp0 = bitcast i16* %A to i8*
16 %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 1)
25 %tmp0 = bitcast i16* %A to i8*
26 %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 1)
35 %tmp0 = bitcast i32* %A to i8*
36 %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 1)
45 %tmp0 = bitcast i32* %A to i8*
46 %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 1)
55 %tmp0 = bitcast float* %A to i8*
56 %tmp1 = call <2 x float> @llvm.arm.neon.vld1.v2f32(i8* %tmp0, i32 1)
[all …]
Dvlddup.ll36 %tmp0 = load float* %A
37 %tmp1 = insertelement <2 x float> undef, float %tmp0, i32 0
55 %tmp0 = load float* %A
56 %tmp1 = insertelement <4 x float> undef, float %tmp0, i32 0
69 …%tmp0 = tail call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> undef, <…
70 %tmp1 = extractvalue %struct.__neon_int8x8x2_t %tmp0, 0
72 %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp0, 1
83 …%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i16* %A, <4 x i16> unde…
84 %tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0
86 %tmp3 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 1
[all …]
/external/llvm/test/CodeGen/ARM/
Dvld1.ll17 %tmp0 = bitcast i16* %A to i8*
18 %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16.p0i8(i8* %tmp0, i32 1)
27 %tmp0 = bitcast i16* %A to i8*
28 %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16.p0i8(i8* %tmp0, i32 1)
37 %tmp0 = bitcast i32* %A to i8*
38 %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32.p0i8(i8* %tmp0, i32 1)
47 %tmp0 = bitcast i32* %A to i8*
48 %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32.p0i8(i8* %tmp0, i32 1)
57 %tmp0 = bitcast float* %A to i8*
58 %tmp1 = call <2 x float> @llvm.arm.neon.vld1.v2f32.p0i8(i8* %tmp0, i32 1)
[all …]
Dvst1.ll15 %tmp0 = bitcast i16* %A to i8*
17 call void @llvm.arm.neon.vst1.p0i8.v4i16(i8* %tmp0, <4 x i16> %tmp1, i32 1)
24 %tmp0 = bitcast i32* %A to i8*
26 call void @llvm.arm.neon.vst1.p0i8.v2i32(i8* %tmp0, <2 x i32> %tmp1, i32 1)
33 %tmp0 = bitcast float* %A to i8*
35 call void @llvm.arm.neon.vst1.p0i8.v2f32(i8* %tmp0, <2 x float> %tmp1, i32 1)
44 %tmp0 = bitcast float* %A to i8*
46 call void @llvm.arm.neon.vst1.p0i8.v2f32(i8* %tmp0, <2 x float> %tmp1, i32 1)
55 %tmp0 = bitcast i64* %A to i8*
57 call void @llvm.arm.neon.vst1.p0i8.v1i64(i8* %tmp0, <1 x i64> %tmp1, i32 1)
[all …]

Pages: 1 2 3 4 5 6 7 8 9 10 >> ... 12