/external/llvm/test/CodeGen/ARM/

dyn-stackalloc.ll:
  22  %tmp6 = load i32, i32* null
  23  %tmp8 = alloca float, i32 %tmp6
  50  %tmp6 = alloca i8, i32 %tmp5
  51  %tmp9 = call i8* @strcpy(i8* %tmp6, i8* %tag)
  52  %tmp6.len = call i32 @strlen(i8* %tmp6)
  53  %tmp6.indexed = getelementptr i8, i8* %tmp6, i32 %tmp6.len
  54  …call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp6.indexed, i8* getelementptr inbounds ([2 x i8], [2 x…
  55  %tmp15 = call i8* @strcat(i8* %tmp6, i8* %contents)
  56  call fastcc void @comment_add(%struct.comment* %vc, i8* %tmp6)
vbsl-constant.ll:
  12  %tmp6 = and <8 x i8> %tmp3, <i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4>
  13  %tmp7 = or <8 x i8> %tmp4, %tmp6
  26  %tmp6 = and <4 x i16> %tmp3, <i16 -4, i16 -4, i16 -4, i16 -4>
  27  %tmp7 = or <4 x i16> %tmp4, %tmp6
  40  %tmp6 = and <2 x i32> %tmp3, <i32 -4, i32 -4>
  41  %tmp7 = or <2 x i32> %tmp4, %tmp6
  55  %tmp6 = and <1 x i64> %tmp3, <i64 -4>
  56  %tmp7 = or <1 x i64> %tmp4, %tmp6
  69  …%tmp6 = and <16 x i8> %tmp3, <i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4…
  70  %tmp7 = or <16 x i8> %tmp4, %tmp6
  [all …]
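A gloss on the hits above: the and/or pair with the splatted -4 (i.e. ~3) mask is the constant bitwise-select idiom, (x & ~3) | (y & 3), which this test expects the ARM backend to fold into a single VBSL instruction. A minimal scalar C sketch of the identity being exercised; the names are mine, not the test's:

    #include <assert.h>
    #include <stdint.h>

    /* Bitwise select: each result bit comes from x where mask is 1 and
       from y where mask is 0 -- the scalar analogue of NEON VBSL. */
    static uint8_t bit_select(uint8_t mask, uint8_t x, uint8_t y) {
      return (uint8_t)((x & mask) | (y & (uint8_t)~mask));
    }

    int main(void) {
      /* Mask 0xFC is the i8 -4 splat above: low two bits from y, rest from x. */
      assert(bit_select(0xFC, 0xAB, 0xFF) == 0xAB);
      assert(bit_select(0xFC, 0x00, 0x03) == 0x03);
      return 0;
    }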
uxtb.ll:
  23  %tmp6 = and i32 %tmp1, 16711935   ; <i32> [#uses=1]
  24  ret i32 %tmp6
  38  %tmp6 = or i32 %tmp2, %tmp5       ; <i32> [#uses=1]
  39  ret i32 %tmp6
  47  %tmp6 = or i32 %tmp2, %tmp5       ; <i32> [#uses=1]
  48  ret i32 %tmp6
  55  %tmp6 = or i32 %tmp2, %tmp5       ; <i32> [#uses=1]
  56  ret i32 %tmp6
  63  %tmp6 = or i32 %tmp5, %tmp1       ; <i32> [#uses=1]
  64  ret i32 %tmp6
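Side note on the magic constant in these hits: 16711935 is 0x00FF00FF, so the and keeps bytes 0 and 2 of the word. Together with the rotate or shift that produces %tmp1 earlier in each function, this is the pattern the test expects to become ARM's UXTB16. A hedged C rendering, with a helper name of my own:

    #include <stdint.h>

    /* What the and-with-16711935 computes: zero-extend two bytes of the
       (possibly rotated) word into two 16-bit lanes, as UXTB16 does. */
    static uint32_t uxtb16_like(uint32_t x) {
      return x & 0x00FF00FFu; /* 16711935 in the IR above */
    }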
/external/libjpeg-turbo/

jfdctint.c:
  145  JLONG tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  161  tmp6 = dataptr[1] - dataptr[6];
  191  z2 = tmp5 + tmp6;
  192  z3 = tmp4 + tmp6;
  198  tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
  210  dataptr[3] = (DCTELEM)DESCALE(tmp6 + z2 + z3, CONST_BITS - PASS1_BITS);
  226  tmp6 = dataptr[DCTSIZE * 1] - dataptr[DCTSIZE * 6];
  258  z2 = tmp5 + tmp6;
  259  z3 = tmp4 + tmp6;
  265  tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
  [all …]
jidctflt.c:
  76   FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  148  tmp6 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5] * _0_125);
  151  z13 = tmp6 + tmp5; /* phase 6 */
  152  z10 = tmp6 - tmp5;
  163  tmp6 = tmp12 - tmp7; /* phase 2 */
  164  tmp5 = tmp11 - tmp6;
  169  wsptr[DCTSIZE * 1] = tmp1 + tmp6;
  170  wsptr[DCTSIZE * 6] = tmp1 - tmp6;
  221  tmp6 = tmp12 - tmp7;
  222  tmp5 = tmp11 - tmp6;
  [all …]
jidctfst.c:
  175  DCTELEM tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  247  tmp6 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5]);
  250  z13 = tmp6 + tmp5; /* phase 6 */
  251  z10 = tmp6 - tmp5;
  262  tmp6 = tmp12 - tmp7; /* phase 2 */
  263  tmp5 = tmp11 - tmp6;
  268  wsptr[DCTSIZE * 1] = (int)(tmp1 + tmp6);
  269  wsptr[DCTSIZE * 6] = (int)(tmp1 - tmp6);
  344  tmp6 = tmp12 - tmp7; /* phase 2 */
  345  tmp5 = tmp11 - tmp6;
  [all …]
jfdctflt.c:
  62   FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  75   tmp6 = dataptr[1] - dataptr[6];
  98   tmp11 = tmp5 + tmp6;
  99   tmp12 = tmp6 + tmp7;
  125  tmp6 = dataptr[DCTSIZE * 1] - dataptr[DCTSIZE * 6];
  148  tmp11 = tmp5 + tmp6;
  149  tmp12 = tmp6 + tmp7;
jfdctfst.c:
  119  DCTELEM tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  133  tmp6 = dataptr[1] - dataptr[6];
  156  tmp11 = tmp5 + tmp6;
  157  tmp12 = tmp6 + tmp7;
  183  tmp6 = dataptr[DCTSIZE * 1] - dataptr[DCTSIZE * 6];
  206  tmp11 = tmp5 + tmp6;
  207  tmp12 = tmp6 + tmp7;
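One gloss for the three forward-DCT files above (jfdctint.c, jfdctflt.c, jfdctfst.c): tmp6 is always produced by the first butterfly stage of the 1-D 8-point transform, which pairs mirrored inputs into sums and differences, hence tmp6 = dataptr[1] - dataptr[6]. A minimal float sketch of just that stage; the real routines fuse it with the later rotation and scaling stages:

    /* First butterfly stage of a 1-D 8-point DCT: sums feed the even
       half, differences feed the odd half.  diff[1] is the tmp6 seen in
       the forward-DCT hits above (dataptr[1] - dataptr[6]). */
    static void dct_butterfly_stage(const float in[8],
                                    float sum[4], float diff[4]) {
      for (int k = 0; k < 4; k++) {
        sum[k] = in[k] + in[7 - k];  /* tmp0, tmp1, tmp2, tmp3 */
        diff[k] = in[k] - in[7 - k]; /* tmp7, tmp6, tmp5, tmp4 */
      }
    }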
/external/llvm/test/CodeGen/Thumb/

dyn-stackalloc.ll:
  15  %tmp6 = load i32, i32* null
  16  %tmp8 = alloca float, i32 %tmp6
  60  %tmp6 = alloca i8, i32 %tmp5
  61  %tmp9 = call i8* @strcpy( i8* %tmp6, i8* %tag )
  62  %tmp6.len = call i32 @strlen( i8* %tmp6 )
  63  %tmp6.indexed = getelementptr i8, i8* %tmp6, i32 %tmp6.len
  64  …call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp6.indexed, i8* getelementptr inbounds ([2 x i8], [2 x…
  65  %tmp15 = call i8* @strcat( i8* %tmp6, i8* %contents )
  66  call fastcc void @comment_add( %struct.comment* %vc, i8* %tmp6 )
/external/llvm/test/CodeGen/PowerPC/

2006-01-20-ShiftPartsCrash.ll:
  10  %tmp6.u = add i32 %tmp5, 32                    ; <i32> [#uses=1]
  11  %tmp6 = bitcast i32 %tmp6.u to i32             ; <i32> [#uses=1]
  13  %tmp6.upgrd.1 = trunc i32 %tmp6 to i8          ; <i8> [#uses=1]
  14  %shift.upgrd.2 = zext i8 %tmp6.upgrd.1 to i64  ; <i64> [#uses=1]
/external/libjpeg-turbo/simd/arm/

jidctfst-neon.c (all hits in jsimd_idct_ifast_neon()):
  149  int16x4_t tmp6 = vmul_s16(vget_high_s16(row5), quant_row5);
  152  int16x4_t z13 = vadd_s16(tmp6, tmp5); /* phase 6 */
  153  int16x4_t neg_z10 = vsub_s16(tmp5, tmp6);
  172  tmp6 = vsub_s16(tmp12, tmp7); /* phase 2 */
  173  tmp5 = vsub_s16(tmp11, tmp6);
  178  row1 = vcombine_s16(dcval, vadd_s16(tmp1, tmp6));
  179  row6 = vcombine_s16(dcval, vsub_s16(tmp1, tmp6));
  224  int16x4_t tmp6 = vmul_s16(vget_low_s16(row5), quant_row5);
  227  int16x4_t z13 = vadd_s16(tmp6, tmp5); /* phase 6 */
  228  int16x4_t neg_z10 = vsub_s16(tmp5, tmp6);
  [all …]
/external/llvm/test/Transforms/IndVarSimplify/

iv-fold.ll:
  17  %tmp6 = load i32, i32* %arrayidx, align 4
  21  %tmp6.1 = load i32, i32* %arrayidx.1, align 4
  27  %r = add i32 %tmp6, %tmp6.1
  44  %tmp6 = load i32, i32* %arrayidx, align 4
  48  %tmp6.1 = load i32, i32* %arrayidx.1, align 4
  54  %r = add i32 %tmp6, %tmp6.1
/external/llvm/test/Transforms/InstCombine/

and-or-not.ll:
  10  %tmp6 = and i32 %b, %a                ; <i32> [#uses=1]
  11  %tmp7 = or i32 %tmp6, %tmp3not        ; <i32> [#uses=1]
  22  %tmp6 = and i32 %b, %a                ; <i32> [#uses=1]
  23  %tmp6not = xor i32 %tmp6, -1          ; <i32> [#uses=1]
  35  %tmp6 = and <4 x i32> %a, %b          ; <<4 x i32>> [#uses=1]
  36  %tmp7 = or <4 x i32> %tmp6, %tmp3not  ; <<4 x i32>> [#uses=1]
  47  %tmp6 = and <4 x i32> %a, %b          ; <<4 x i32>> [#uses=1]
  48  …%tmp6not = xor <4 x i32> %tmp6, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#us…
and-xor-merge.ll:
  10  %tmp6 = and i32 %z, %y
  11  %tmp7 = xor i32 %tmp3, %tmp6
  21  %tmp6 = or i32 %y, %x
  22  %tmp7 = xor i32 %tmp3, %tmp6
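As I read these hits against the rest of the file, the two functions pin down two folds: (x & z) ^ (y & z) becomes (x ^ y) & z, and (x & y) ^ (x | y) becomes x ^ y. A brute-force C check of both identities over 8-bit values, just to make the algebra concrete:

    #include <assert.h>

    int main(void) {
      /* Exhaustively verify both InstCombine folds on 8-bit values. */
      for (unsigned x = 0; x < 256; x++)
        for (unsigned y = 0; y < 256; y++) {
          for (unsigned z = 0; z < 256; z++)
            assert(((x & z) ^ (y & z)) == ((x ^ y) & z));
          assert(((x & y) ^ (x | y)) == (x ^ y));
        }
      return 0;
    }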
onehot_merge.ll:
  12  %tmp6 = icmp eq i32 %tmp5, 0
  13  %or = or i1 %tmp2, %tmp6
  31  %tmp6 = icmp eq i32 %tmp5, 0
  32  %or = or i1 %tmp2, %tmp6
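The fold guarded here, as the file name suggests, merges two single-bit tests: for one-hot masks A and B, ((x & A) == 0) || ((x & B) == 0) is equivalent to (x & (A | B)) != (A | B), so two and/icmp pairs plus an or collapse into one and plus one compare. A small C check of that equivalence (a sketch; the .ll file is the authority on the exact pattern):

    #include <assert.h>

    int main(void) {
      /* "Some selected bit is clear" equals "not all selected bits are
         set" when each mask selects exactly one bit. */
      for (unsigned x = 0; x < 256; x++)
        for (int a = 0; a < 8; a++)
          for (int b = 0; b < 8; b++) {
            unsigned A = 1u << a, B = 1u << b;
            int two_tests = ((x & A) == 0) || ((x & B) == 0);
            int one_test = (x & (A | B)) != (A | B);
            assert(two_tests == one_test);
          }
      return 0;
    }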
/external/llvm/test/Transforms/Reassociate/

repeats.ll:
  86   %tmp6 = mul i3 %tmp5, %x
  87   ret i3 %tmp6
  101  %tmp6 = mul i4 %tmp5, %x
  102  %tmp7 = mul i4 %tmp6, %x
  118  %tmp6 = mul i4 %tmp5, %x
  119  %tmp7 = mul i4 %tmp6, %x
  136  %tmp6 = mul i4 %tmp5, %x
  137  %tmp7 = mul i4 %tmp6, %x
  156  %tmp6 = mul i4 %tmp5, %x
  157  %tmp7 = mul i4 %tmp6, %x
  [all …]
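These chains of identical multiplies (note the deliberately tiny i3/i4 types) exercise Reassociate's handling of repeated factors: a chain computing x**n can be rebalanced so only O(log n) multiplies remain. A sketch of the square-and-multiply shape the pass aims for, not the pass's actual code:

    #include <stdint.h>

    /* Compute x**n with O(log n) multiplies instead of the n-1 in a
       left-leaning mul chain. */
    static uint32_t pow_sq(uint32_t x, unsigned n) {
      uint32_t r = 1;
      for (; n != 0; n >>= 1) {
        if (n & 1) r *= x;
        x *= x;
      }
      return r;
    }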
/external/libyuv/files/source/

rotate_mmi.cc (lines 27, 140 in TransposeWx8_MMI(); lines 156, 277 in TransposeUVWx8_MMI()):
  27   uint64_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
  140  [tmp6] "=&f"(tmp6), [tmp7] "=&f"(tmp7), [tmp8] "=&f"(tmp8),
  156  uint64_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
  277  [tmp6] "=&f"(tmp6), [tmp7] "=&f"(tmp7), [tmp8] "=&f"(tmp8),
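The MMI inline assembly is hard to follow from search hits alone, so here is a plain-C statement of what a TransposeWx8-style kernel computes, modeled on libyuv's generic C path (treat the exact signature as an assumption): eight source rows become eight-byte columns in the destination.

    #include <stdint.h>

    /* Reference semantics for TransposeWx8-style kernels: element (y, x)
       of the 8-row source block lands at (x, y) in the destination. */
    static void transpose_wx8_ref(const uint8_t* src, int src_stride,
                                  uint8_t* dst, int dst_stride, int width) {
      for (int x = 0; x < width; x++)
        for (int y = 0; y < 8; y++)
          dst[x * dst_stride + y] = src[y * src_stride + x];
    }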
/external/webrtc/common_audio/signal_processing/

complex_fft_mips.c (lines 35, 137 in WebRtcSpl_ComplexFFT(); lines 157, 314 in WebRtcSpl_ComplexIFFT()):
  35   int32_t tmp6 = 0;
  137  [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
  157  int32_t tmp5 = 0, tmp6 = 0, tmp = 0, tempMax = 0, round2 = 0;
  314  [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
/external/llvm/test/CodeGen/X86/

avx1-logical-load-folding.ll:
  13  %tmp6 = extractelement <8 x float> %tmp5, i32 0
  14  store float %tmp6, float* %C
  27  %tmp6 = extractelement <8 x float> %tmp5, i32 0
  28  store float %tmp6, float* %C
  41  %tmp6 = extractelement <8 x float> %tmp5, i32 0
  42  store float %tmp6, float* %C
  54  %tmp6 = bitcast <8 x i32> %tmp5 to <8 x float>
  55  %tmp7 = extractelement <8 x float> %tmp6, i32 0
/external/webrtc/modules/audio_coding/codecs/isac/fix/source/

pitch_estimator_mips.c (all hits in WebRtcIsacfix_PCorr2Q32()):
  34   int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
  79   [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
  105  int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
  171  [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
/external/llvm/test/CodeGen/Thumb2/

thumb2-uxtb.ll:
  47   %tmp6 = and i32 %tmp1, 16711935  ; <i32> [#uses=1]
  48   ret i32 %tmp6
  74   %tmp6 = or i32 %tmp2, %tmp5      ; <i32> [#uses=1]
  75   ret i32 %tmp6
  89   %tmp6 = or i32 %tmp2, %tmp5      ; <i32> [#uses=1]
  90   ret i32 %tmp6
  103  %tmp6 = or i32 %tmp2, %tmp5      ; <i32> [#uses=1]
  104  ret i32 %tmp6
  117  %tmp6 = or i32 %tmp5, %tmp1      ; <i32> [#uses=1]
  118  ret i32 %tmp6
/external/llvm/test/CodeGen/AArch64/

aarch64-smull.ll:
  77   %tmp6 = mul <8 x i16> %tmp4, %tmp5
  78   %tmp7 = add <8 x i16> %tmp1, %tmp6
  90   %tmp6 = mul <4 x i32> %tmp4, %tmp5
  91   %tmp7 = add <4 x i32> %tmp1, %tmp6
  103  %tmp6 = mul <2 x i64> %tmp4, %tmp5
  104  %tmp7 = add <2 x i64> %tmp1, %tmp6
  116  %tmp6 = mul <8 x i16> %tmp4, %tmp5
  117  %tmp7 = add <8 x i16> %tmp1, %tmp6
  129  %tmp6 = mul <4 x i32> %tmp4, %tmp5
  130  %tmp7 = add <4 x i32> %tmp1, %tmp6
  [all …]
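Per the surrounding test file, %tmp4 and %tmp5 in these hits are sign- or zero-extended from narrower vectors, so the mul-of-extends plus the trailing add is the shape the AArch64 backend matches to smull/umull and smlal/umlal. A scalar C analogue of the accumulate form, illustrative only:

    #include <stdint.h>

    /* Widening multiply-accumulate: acc + sext(a) * sext(b), the scalar
       shape of the vector pattern expected to become a single smlal. */
    static int32_t widening_mla(int32_t acc, int16_t a, int16_t b) {
      return acc + (int32_t)a * (int32_t)b;
    }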
arm64-vext.ll:
  68   %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
  70   %vext = shufflevector <4 x i16> %tmp6, <4 x i16> %tmp7, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
  89   %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
  91   %vext = shufflevector <4 x i16> %tmp6, <4 x i16> %tmp7, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
  110  %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
  112  %vext = shufflevector <4 x i16> %tmp6, <4 x i16> %tmp7, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
  131  %tmp6 = bitcast <8 x i8> %tmp3 to <2 x i32>
  133  %vext = shufflevector <2 x i32> %tmp6, <2 x i32> %tmp7, <2 x i32> <i32 1, i32 2>
  152  %tmp6 = bitcast <8 x i8> %tmp3 to <2 x i32>
  154  %vext = shufflevector <2 x i32> %tmp6, <2 x i32> %tmp7, <2 x i32> <i32 1, i32 2>
  [all …]
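The shufflevector masks here (<1,2,3,4>, <2,3,4,5>, and so on) each take a sliding window out of the concatenation of the two input vectors, which is exactly what a single AArch64 EXT instruction produces. A reference C sketch of that semantics:

    #include <stdint.h>

    /* EXT semantics: concatenate a and b, then take four lanes starting
       at `shift` -- the window the shuffle masks above select. */
    static void vext_ref(const int16_t a[4], const int16_t b[4],
                         int shift, int16_t out[4]) {
      int16_t cat[8];
      for (int i = 0; i < 4; i++) { cat[i] = a[i]; cat[i + 4] = b[i]; }
      for (int i = 0; i < 4; i++) out[i] = cat[i + shift];
    }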
/external/llvm/test/Analysis/ScalarEvolution/

2007-08-06-MisinterpretBranch.ll:
  12  %x_addr.0 = phi i32 [ %tmp6, %bb ], [ %x, %entry ]  ; <i32> [#uses=1]
  13  %tmp6 = add i32 %x_addr.0, 1                        ; <i32> [#uses=3]
  14  %tmp9 = icmp slt i32 %tmp6, %y                      ; <i1> [#uses=1]
  18  %x_addr.1 = phi i32 [ %x, %entry ], [ %tmp6, %bb ]  ; <i32> [#uses=1]
/external/libvpx/libvpx/vpx_dsp/mips/

avg_msa.c (lines 64-81 in vpx_hadamard_8x8_msa(); line 90 in vpx_hadamard_16x16_msa()):
  64  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  68  tmp6, tmp7, tmp5, tmp3, tmp1);
  69  BUTTERFLY_8(tmp0, tmp1, tmp4, tmp5, tmp7, tmp6, tmp3, tmp2, src0, src1, src4,
  72  tmp4, tmp5, tmp1, tmp6, tmp2);
  73  TRANSPOSE8x8_SH_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, src0, src1,
  76  tmp6, tmp7, tmp5, tmp3, tmp1);
  77  BUTTERFLY_8(tmp0, tmp1, tmp4, tmp5, tmp7, tmp6, tmp3, tmp2, src0, src1, src4,
  80  tmp4, tmp5, tmp1, tmp6, tmp2);
  81  TRANSPOSE8x8_SH_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, src0, src1,
  90  v8i16 tmp6, tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  [all …]
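For orientation on the BUTTERFLY_8 + TRANSPOSE8x8 structure above: an 8x8 Hadamard is the 1-D 8-point transform applied to rows and then to columns, and the 1-D transform itself is three rounds of add/subtract butterflies. A scalar sketch of the 1-D part, in natural ordering; the MSA version permutes lanes differently:

    #include <stdint.h>

    /* 1-D 8-point Walsh-Hadamard transform as three butterfly rounds,
       the scalar analogue of the BUTTERFLY_8 stages above. */
    static void hadamard8_1d(int16_t v[8]) {
      for (int half = 1; half < 8; half <<= 1)
        for (int i = 0; i < 8; i += 2 * half)
          for (int j = i; j < i + half; j++) {
            int16_t a = v[j], b = v[j + half];
            v[j] = (int16_t)(a + b);
            v[j + half] = (int16_t)(a - b);
          }
    }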