
Searched refs:tmp64 (Results 1 – 25 of 42) sorted by relevance


/external/libopus/silk/
LPC_inv_pred_gain.c
80 opus_int64 tmp64; in LPC_inverse_pred_gain_QA_c() local
83 tmp64 = silk_RSHIFT_ROUND64( silk_SMULL( silk_SUB_SAT32(tmp1, in LPC_inverse_pred_gain_QA_c()
85 if( tmp64 > silk_int32_MAX || tmp64 < silk_int32_MIN ) { in LPC_inverse_pred_gain_QA_c()
88 A_QA[ n ] = ( opus_int32 )tmp64; in LPC_inverse_pred_gain_QA_c()
89 tmp64 = silk_RSHIFT_ROUND64( silk_SMULL( silk_SUB_SAT32(tmp2, in LPC_inverse_pred_gain_QA_c()
91 if( tmp64 > silk_int32_MAX || tmp64 < silk_int32_MIN ) { in LPC_inverse_pred_gain_QA_c()
94 A_QA[ k - n - 1 ] = ( opus_int32 )tmp64; in LPC_inverse_pred_gain_QA_c()
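
Every SILK hit above is the same overflow guard: do the 32x32 multiply in 64 bits (silk_SMULL), round-shift the product down (silk_RSHIFT_ROUND64), and reject the filter if the result no longer fits in 32 bits. A minimal plain-C sketch of that pattern, assuming those macro semantics; rshift_round64 and update_coeff are hypothetical names, not from the source:

    #include <stdint.h>

    /* Rounding right shift, as silk_RSHIFT_ROUND64 is assumed to do (shift > 0). */
    static int64_t rshift_round64(int64_t x, int shift) {
        return (x + ((int64_t)1 << (shift - 1))) >> shift;
    }

    /* Mirrors the guard at lines 83-88: returns 0 (unstable) on overflow. */
    static int update_coeff(int32_t tmp1, int32_t rc_mult2, int mult2Q,
                            int32_t *coeff_out) {
        int64_t tmp64 = rshift_round64((int64_t)tmp1 * rc_mult2, mult2Q);
        if (tmp64 > INT32_MAX || tmp64 < INT32_MIN) {
            return 0;
        }
        *coeff_out = (int32_t)tmp64;
        return 1;
    }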
/external/webrtc/modules/audio_processing/agc/legacy/
digital_agc.cc
529 int64_t tmp64 = ((int64_t)(out[i][k * L + n])) * (gain32 >> 4); in WebRtcAgc_ApplyDigitalGains() local
530 tmp64 = tmp64 >> 16; in WebRtcAgc_ApplyDigitalGains()
531 if (tmp64 > 32767) { in WebRtcAgc_ApplyDigitalGains()
533 } else if (tmp64 < -32768) { in WebRtcAgc_ApplyDigitalGains()
536 out[i][k * L + n] = (int16_t)(tmp64); in WebRtcAgc_ApplyDigitalGains()
583 int64_t tmp64; in WebRtcAgc_ProcessVad() local
689 tmp64 = tmp32; in WebRtcAgc_ProcessVad()
690 tmp64 += tmp32b >> 10; in WebRtcAgc_ProcessVad()
691 tmp64 >>= 6; in WebRtcAgc_ProcessVad()
694 if (tmp64 > 2048) { in WebRtcAgc_ProcessVad()
[all …]
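
The AGC code applies the same widen-then-clamp idea in the other direction: promote the 16-bit sample to int64 before multiplying by the gain, shift the fixed-point scaling back out, and saturate to the int16 range. A hedged sketch of lines 529-536 (apply_gain is a hypothetical wrapper; the shift amounts are taken from the hit lines, not from documented Q-formats):

    #include <stdint.h>

    static int16_t apply_gain(int16_t sample, int32_t gain32) {
        /* Widen before multiplying so the product cannot overflow. */
        int64_t tmp64 = (int64_t)sample * (gain32 >> 4);
        tmp64 >>= 16;               /* drop the fixed-point fraction */
        if (tmp64 > 32767) {        /* saturate to the int16 range */
            tmp64 = 32767;
        } else if (tmp64 < -32768) {
            tmp64 = -32768;
        }
        return (int16_t)tmp64;
    }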
/external/libopus/silk/arm/
LPC_inv_pred_gain_neon_intr.c
138 opus_int64 tmp64; in LPC_inverse_pred_gain_QA_neon() local
141 tmp64 = silk_RSHIFT_ROUND64( silk_SMULL( silk_SUB_SAT32(tmp1, in LPC_inverse_pred_gain_QA_neon()
143 if( tmp64 > silk_int32_MAX || tmp64 < silk_int32_MIN ) { in LPC_inverse_pred_gain_QA_neon()
146 A_QA[ n ] = ( opus_int32 )tmp64; in LPC_inverse_pred_gain_QA_neon()
147 tmp64 = silk_RSHIFT_ROUND64( silk_SMULL( silk_SUB_SAT32(tmp2, in LPC_inverse_pred_gain_QA_neon()
149 if( tmp64 > silk_int32_MAX || tmp64 < silk_int32_MIN ) { in LPC_inverse_pred_gain_QA_neon()
152 A_QA[ k - n - 1 ] = ( opus_int32 )tmp64; in LPC_inverse_pred_gain_QA_neon()
/external/llvm/test/CodeGen/ARM/
2010-04-14-SplitVector.ll
10 %tmp64 = trunc i128 %tmp63 to i32
14 %0 = phi i32 [ %tmp64, %bb9 ], [ undef, %bb ]
2007-04-30-CombinerCrash.ll
21 %tmp64 = or i64 %tmp63, 0 ; <i64> [#uses=1]
23 %tmp66 = and i64 %tmp65, %tmp64 ; <i64> [#uses=1]
2007-03-13-InstrSched.ll
24 %d2.1 = phi i32 [ %tmp64, %bb26 ], [ 8192, %newFuncRoot ] ; <i32> [#uses=2]
46 %tmp64 = add i32 %tmp62, %d2.1 ; <i32> [#uses=1]
2012-01-26-CopyPropKills.ll
82 %tmp64 = bitcast <4 x float> %tmp54 to i128
88 %tmp70 = lshr i128 %tmp64, 64
2012-01-23-PostRA-LICM.ll
81 %tmp64 = lshr i128 %tmp63, 64
82 %tmp65 = trunc i128 %tmp64 to i64
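
The recurring tmp64 idiom in this group is the i128 split: an lshr by 64 plus a trunc to peel off the high half of a 128-bit value (see 2012-01-23-PostRA-LICM.ll above). In C, assuming a compiler with unsigned __int128 as on these targets, the same operation is:

    #include <stdint.h>

    /* Clang lowers this to the lshr-i128 + trunc pair seen in the tests. */
    static uint64_t high64(unsigned __int128 x) {
        return (uint64_t)(x >> 64);
    }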
/external/llvm/test/Transforms/InstCombine/
2006-12-01-BadFPVectorXform.ll
11 %tmp64 = fadd <4 x float> %tmp26, %tmp53
12 %tmp75 = fsub <4 x float> %tmp64, %tmp53
2012-06-06-LoadOfPHIs.ll
131 %tmp64 = fdiv double %tmp60, %tmp63
132 %tmp65 = fadd double 2.000000e+00, %tmp64
/external/llvm/test/CodeGen/X86/
2006-05-25-CycleInDAG.ll
14 %tmp64.i = fadd double %tmp62.i.upgrd.2, %tmp44.i ; <double> [#uses=1]
15 %tmp68.i = call double @foo( double %tmp64.i, i32 0 ) ; <double> [#uses=0]
mmx-arith.ll
49 %tmp64 = or <8 x i8> %tmp58, %tmp63a
50 %tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx
144 %tmp64 = load x86_mmx, x86_mmx* %B
145 %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx %tmp60, x86_mmx %tmp64)
2012-01-10-UndefExceptionEdge.ll
129 %tmp64 = getelementptr i32, i32* %tmp13, i32 %tmp63
130 store i32 0, i32* %tmp64, align 4
misched-balance.ll
70 %tmp64 = load i32, i32* %arrayidx12.us.i61.3, align 4
71 %mul.us.i.3 = mul nsw i32 %tmp64, %tmp63
176 %tmp64 = load i32, i32* %arrayidx12.us.i61.3, align 4
206 %mul.us.i.3 = mul nsw i32 %tmp64, %tmp63
nancvt.ll
120 %tmp64 = load i64, i64* %tmp6263, align 8 ; <i64> [#uses=1]
121 %tmp6465 = trunc i64 %tmp64 to i32 ; <i32> [#uses=1]
pr24139.ll
73 …%tmp64 = fadd <4 x float> %tmp63, <float 0x3F81106840000000, float 0x3F81106840000000, float 0x3F8…
76 %tmp67 = fmul <4 x float> %tmp59, %tmp64
/external/llvm/test/Object/Inputs/
shared.ll
10 ; llc -mtriple=x86_64-linux-gnu shared.ll -filetype=obj -o tmp64.o -relocation-model=pic
11 ; ld -melf_x86_64 -shared tmp64.o -o shared-object-test.elf-x86-64 $LDARGS
/external/tensorflow/tensorflow/core/kernels/
decode_proto_op.cc
421 protobuf_uint64 tmp64; in SkipValue() local
424 return input->ReadLittleEndian64(&tmp64); in SkipValue()
428 return input->ReadVarint64(&tmp64); in SkipValue()
430 return input->ReadVarint64(&tmp64); in SkipValue()
434 return input->ReadLittleEndian64(&tmp64); in SkipValue()
456 return input->ReadLittleEndian64(&tmp64); in SkipValue()
460 return input->ReadVarint64(&tmp64); in SkipValue()
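
In SkipValue, tmp64 is pure scratch: unknown fields are consumed by reading their payload into the temporary and discarding it, via ReadVarint64 for varint-coded fields and ReadLittleEndian64 for fixed64 ones. For reference, a self-contained C sketch of what those two wire reads decode (read_fixed64 and read_varint64 are hypothetical stand-ins, not the protobuf API):

    #include <stddef.h>
    #include <stdint.h>

    /* fixed64: eight payload bytes, little-endian. */
    static int read_fixed64(const uint8_t *p, size_t len, uint64_t *out) {
        if (len < 8) return 0;
        uint64_t v = 0;
        for (int i = 0; i < 8; i++) v |= (uint64_t)p[i] << (8 * i);
        *out = v;
        return 1;
    }

    /* varint: 7 payload bits per byte, continuation bit in the MSB;
     * a 64-bit value takes at most 10 bytes. */
    static int read_varint64(const uint8_t *p, size_t len, uint64_t *out) {
        uint64_t v = 0;
        for (size_t i = 0; i < len && i < 10; i++) {
            v |= (uint64_t)(p[i] & 0x7f) << (7 * i);
            if ((p[i] & 0x80) == 0) {
                *out = v;
                return 1;
            }
        }
        return 0; /* truncated or overlong encoding */
    }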
/external/libopus/doc/
opus_update.patch
61 + opus_int64 tmp64;
63 + tmp64 = silk_RSHIFT_ROUND64( silk_SMULL( tmp_QA, rc_mult2 ), mult2Q);
64 + if( tmp64 > silk_int32_MAX || tmp64 < silk_int32_MIN ) {
67 + Anew_QA[ n ] = ( opus_int32 )tmp64;
/external/llvm/test/Transforms/LoopStrengthReduce/
2013-01-14-ReuseCast.ll
75 %tmp64 = getelementptr inbounds i8, i8* %tmp3, i64 %i.0.i
76 %tmp65 = load i8, i8* %tmp64, align 1
/external/libvpx/third_party/libyuv/source/
scale_neon64.cc
878 int64_t tmp64; in ScaleARGBCols_NEON() local
899 "=&r"(tmp64), // %5 in ScaleARGBCols_NEON()
/external/libaom/third_party/libyuv/source/
scale_neon64.cc
903 int64_t tmp64; in ScaleARGBCols_NEON() local
925 "=&r"(tmp64), // %5 in ScaleARGBCols_NEON()
/external/llvm/test/Transforms/ObjCARC/
move-and-merge-autorelease.ll
80 %tmp64 = icmp eq i8 %tmp62, 0
81 br i1 %tmp64, label %bb76, label %bb65
move-and-form-retain-autorelease.ll
162 …%tmp64 = tail call i8* %tmp63(i8* %tmp61, %1* bitcast (%0* @"\01l_objc_msgSend_fixup_objectAtIndex…
164 …%tmp66 = tail call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %tmp64,…
/external/libyuv/files/source/
scale_neon64.cc
1385 int64_t tmp64; in ScaleARGBCols_NEON() local
1407 "=&r"(tmp64), // %5 in ScaleARGBCols_NEON()
