/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | vec_splat-4.ll |
  11  …%tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 0, i32 undef, i32 undef, i3…
  12  ret <16 x i8 > %tmp6
  17  …%tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 1, i32 1, i32 undef, i32 un…
  18  ret <16 x i8 > %tmp6
  23  …%tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 2, i32 undef, i32 undef, i3…
  24  ret <16 x i8 > %tmp6
  29  …%tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 3, i32 undef, i32 undef, i3…
  30  ret <16 x i8 > %tmp6
  36  …%tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 4, i32 undef, i32 undef, i3…
  37  ret <16 x i8 > %tmp6
  [all …]
|
D | vec_splat-3.ll |
  10  …%tmp6 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 0, i32 undef, i32 undef, i32 0…
  11  ret <8 x i16> %tmp6
  16  …%tmp6 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 1, i32 1, i32 undef, i32 undef…
  17  ret <8 x i16> %tmp6
  22  …%tmp6 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 2, i32 undef, i32 undef, i32 2…
  23  ret <8 x i16> %tmp6
  28  …%tmp6 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 3, i32 3, i32 undef, i32 undef…
  29  ret <8 x i16> %tmp6
  34  …%tmp6 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 4, i32 undef, i32 undef, i32 u…
  35  ret <8 x i16> %tmp6
  [all …]
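Both vec_splat tests above use shufflevector masks that repeat a single defined index (with undef don't-care lanes), i.e. vector splats the X86 backend should lower to one shuffle. A minimal scalar model of that semantics in C (the helper name is ours, not from the tests):

    #include <stdint.h>

    /* Scalar model of a shufflevector splat: every defined mask lane is
     * the same index, so each output byte copies one lane of the source.
     * Hypothetical helper, not part of the .ll tests. */
    static void splat16_i8(const int8_t src[16], int lane, int8_t dst[16])
    {
        for (int i = 0; i < 16; i++)
            dst[i] = src[lane];   /* mask <lane, lane, ..., lane> */
    }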
|
D | vec_shuffle-14.ll |
  10  …%tmp6 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %tmp, <4 x i32> < i32 4, i32 1, i32 2, …
  11  ret <4 x i32> %tmp6
  17  …%tmp6 = shufflevector <2 x i64> zeroinitializer, <2 x i64> %tmp, <2 x i32> < i32 2, i32 1 > ; <<4…
  18  ret <2 x i64> %tmp6
  24  %tmp6 = bitcast <2 x i64> %tmp4 to <4 x i32> ; <<4 x i32>> [#uses=1]
  25  …%tmp7 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %tmp6, <4 x i32> < i32 4, i32 5, i32 2,…
  33  …%tmp6 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %tmp5, <4 x i32> < i32 4, i32 5, i32 2,…
  34  %tmp7 = bitcast <4 x i32> %tmp6 to <2 x i64> ; <<2 x i64>> [#uses=1]
  40  …%tmp6 = shufflevector <2 x i64> zeroinitializer, <2 x i64> %a, <2 x i32> < i32 2, i32 1 > ; <<4 x…
  41  ret <2 x i64> %tmp6
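vec_shuffle-14 shuffles zeroinitializer against a live vector, so lanes taken from the first operand become zeros (a movq-style zero-extending move). For the <2 x i64> case with mask <i32 2, i32 1>, indices 0-1 address the zero vector and 2-3 address %a; a scalar sketch:

    #include <stdint.h>

    /* shufflevector <2 x i64> zeroinitializer, <2 x i64> %a, <i32 2, i32 1>:
     * the result is {a[0], 0}, i.e. a zero-extending 64-bit move. */
    static void zext_low_i64(const int64_t a[2], int64_t out[2])
    {
        out[0] = a[0];   /* index 2 -> second operand, lane 0 */
        out[1] = 0;      /* index 1 -> zero vector, lane 1   */
    }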
|
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/ |
D | dyn-stackalloc.ll |
  10  %tmp6 = load i32* null
  11  %tmp8 = alloca float, i32 %tmp6
  38  %tmp6 = alloca i8, i32 %tmp5
  39  %tmp9 = call i8* @strcpy(i8* %tmp6, i8* %tag)
  40  %tmp6.len = call i32 @strlen(i8* %tmp6)
  41  %tmp6.indexed = getelementptr i8* %tmp6, i32 %tmp6.len
  42  …call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp6.indexed, i8* getelementptr inbounds ([2 x i8]* @str…
  43  %tmp15 = call i8* @strcat(i8* %tmp6, i8* %contents)
  44  call fastcc void @comment_add(%struct.comment* %vc, i8* %tmp6)
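dyn-stackalloc (this copy and the Thumb/newer-LLVM copies below) exercises alloca with a runtime element count, so the frame size is unknown at compile time. A rough C equivalent of the string-building block; the buffer sizing and the appended two-byte literal are our assumptions, since the snippet elides them:

    #include <alloca.h>
    #include <string.h>
    #include <stdio.h>

    /* Sketch of the IR above: a dynamically sized stack buffer built
     * with strcpy/strcat. Separator literal and size computation are
     * guesses; the test only shows the calls. */
    static void comment_build(const char *tag, const char *contents)
    {
        char *buf = alloca(strlen(tag) + strlen(contents) + 2);
        strcpy(buf, tag);
        strcat(buf, ":");        /* the IR memcpy's a 2-byte literal here */
        strcat(buf, contents);
        puts(buf);               /* stand-in for comment_add() */
    }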
|
D | vbsl.ll |
  11  %tmp6 = and <8 x i8> %tmp5, %tmp3
  12  %tmp7 = or <8 x i8> %tmp4, %tmp6
  24  %tmp6 = and <4 x i16> %tmp5, %tmp3
  25  %tmp7 = or <4 x i16> %tmp4, %tmp6
  37  %tmp6 = and <2 x i32> %tmp5, %tmp3
  38  %tmp7 = or <2 x i32> %tmp4, %tmp6
  50  %tmp6 = and <1 x i64> %tmp5, %tmp3
  51  %tmp7 = or <1 x i64> %tmp4, %tmp6
  63  %tmp6 = and <16 x i8> %tmp5, %tmp3
  64  %tmp7 = or <16 x i8> %tmp4, %tmp6
  [all …]
|
D | vbsl-constant.ll |
  12  %tmp6 = and <8 x i8> %tmp3, <i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4>
  13  %tmp7 = or <8 x i8> %tmp4, %tmp6
  26  %tmp6 = and <4 x i16> %tmp3, <i16 -4, i16 -4, i16 -4, i16 -4>
  27  %tmp7 = or <4 x i16> %tmp4, %tmp6
  40  %tmp6 = and <2 x i32> %tmp3, <i32 -4, i32 -4>
  41  %tmp7 = or <2 x i32> %tmp4, %tmp6
  55  %tmp6 = and <1 x i64> %tmp3, <i64 -4>
  56  %tmp7 = or <1 x i64> %tmp4, %tmp6
  69  …%tmp6 = and <16 x i8> %tmp3, <i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4…
  70  %tmp7 = or <16 x i8> %tmp4, %tmp6
  [all …]
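vbsl.ll and vbsl-constant.ll both match the bitwise-select idiom (m & a) | (~m & b), here with the constant mask -4 (all ones except the low two bits), which the ARM backend should fold into a single NEON VBSL. The idiom in scalar C:

    #include <stdint.h>

    /* Bitwise select: each result bit comes from a where mask is 1 and
     * from b where mask is 0. NEON's VBSL computes this in one
     * instruction; the tests check the and/or expansion is matched. */
    static uint32_t bit_select(uint32_t mask, uint32_t a, uint32_t b)
    {
        return (mask & a) | (~mask & b);
    }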
|
D | uxtb.ll |
  23  %tmp6 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
  24  ret i32 %tmp6
  38  %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
  39  ret i32 %tmp6
  47  %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
  48  ret i32 %tmp6
  55  %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
  56  ret i32 %tmp6
  63  %tmp6 = or i32 %tmp5, %tmp1 ; <i32> [#uses=1]
  64  ret i32 %tmp6
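The magic constant in uxtb.ll, 16711935, is 0x00FF00FF: it keeps bytes 0 and 2 of the word, which is what ARM's UXTB16 (optionally after a rotate) computes. In C:

    #include <stdint.h>

    /* 16711935 == 0x00FF00FF. Masking with it zero-extends the two even
     * bytes in place; with a prior rotation this is ARM's UXTB16. */
    static uint32_t uxtb16_model(uint32_t x)
    {
        return x & 0x00FF00FFu;
    }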
|
/external/llvm/test/CodeGen/ARM/ |
D | dyn-stackalloc.ll |
  22  %tmp6 = load i32, i32* null
  23  %tmp8 = alloca float, i32 %tmp6
  50  %tmp6 = alloca i8, i32 %tmp5
  51  %tmp9 = call i8* @strcpy(i8* %tmp6, i8* %tag)
  52  %tmp6.len = call i32 @strlen(i8* %tmp6)
  53  %tmp6.indexed = getelementptr i8, i8* %tmp6, i32 %tmp6.len
  54  …call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp6.indexed, i8* getelementptr inbounds ([2 x i8], [2 x…
  55  %tmp15 = call i8* @strcat(i8* %tmp6, i8* %contents)
  56  call fastcc void @comment_add(%struct.comment* %vc, i8* %tmp6)
|
D | vbsl-constant.ll |
  12  %tmp6 = and <8 x i8> %tmp3, <i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4>
  13  %tmp7 = or <8 x i8> %tmp4, %tmp6
  26  %tmp6 = and <4 x i16> %tmp3, <i16 -4, i16 -4, i16 -4, i16 -4>
  27  %tmp7 = or <4 x i16> %tmp4, %tmp6
  40  %tmp6 = and <2 x i32> %tmp3, <i32 -4, i32 -4>
  41  %tmp7 = or <2 x i32> %tmp4, %tmp6
  55  %tmp6 = and <1 x i64> %tmp3, <i64 -4>
  56  %tmp7 = or <1 x i64> %tmp4, %tmp6
  69  …%tmp6 = and <16 x i8> %tmp3, <i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4…
  70  %tmp7 = or <16 x i8> %tmp4, %tmp6
  [all …]
|
D | uxtb.ll |
  23  %tmp6 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
  24  ret i32 %tmp6
  38  %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
  39  ret i32 %tmp6
  47  %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
  48  ret i32 %tmp6
  55  %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
  56  ret i32 %tmp6
  63  %tmp6 = or i32 %tmp5, %tmp1 ; <i32> [#uses=1]
  64  ret i32 %tmp6
|
/external/syslinux/com32/lib/jpeg/ |
D | jidctflt.c |
  125  FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in tinyjpeg_idct_float() local
  194  tmp6 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); in tinyjpeg_idct_float()
  197  z13 = tmp6 + tmp5; /* phase 6 */ in tinyjpeg_idct_float()
  198  z10 = tmp6 - tmp5; in tinyjpeg_idct_float()
  209  tmp6 = tmp12 - tmp7; /* phase 2 */ in tinyjpeg_idct_float()
  210  tmp5 = tmp11 - tmp6; in tinyjpeg_idct_float()
  215  wsptr[DCTSIZE*1] = tmp1 + tmp6; in tinyjpeg_idct_float()
  216  wsptr[DCTSIZE*6] = tmp1 - tmp6; in tinyjpeg_idct_float()
  266  tmp6 = tmp12 - tmp7; in tinyjpeg_idct_float()
  267  tmp5 = tmp11 - tmp6; in tinyjpeg_idct_float()
  [all …]
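The tmp5/tmp6 traffic in jidctflt.c is the odd-part butterfly network of the AAN float IDCT: each phase produces a sum/difference pair (z13/z10 from tmp6/tmp5, and the final rows write tmp1 ± tmp6). The recurring primitive, as a sketch (the helper is ours; the real code inlines it, and the integer variants jidctfst.c/jfdctint.c below repeat the same shape):

    /* One IDCT butterfly stage: two inputs become their sum and
     * difference. */
    static void butterfly(float a, float b, float *sum, float *diff)
    {
        *sum  = a + b;   /* e.g. z13 = tmp6 + tmp5   ("phase 6") */
        *diff = a - b;   /* e.g. z10 = tmp6 - tmp5 */
    }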
|
/external/swiftshader/third_party/LLVM/test/CodeGen/Thumb/ |
D | dyn-stackalloc.ll |
  15  %tmp6 = load i32* null
  16  %tmp8 = alloca float, i32 %tmp6
  59  %tmp6 = alloca i8, i32 %tmp5
  60  %tmp9 = call i8* @strcpy( i8* %tmp6, i8* %tag )
  61  %tmp6.len = call i32 @strlen( i8* %tmp6 )
  62  %tmp6.indexed = getelementptr i8* %tmp6, i32 %tmp6.len
  63  …call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp6.indexed, i8* getelementptr inbounds ([2 x i8]* @str…
  64  %tmp15 = call i8* @strcat( i8* %tmp6, i8* %contents )
  65  call fastcc void @comment_add( %struct.comment* %vc, i8* %tmp6 )
|
/external/libjpeg-turbo/ |
D | jfdctint.c |
  145  JLONG tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; variable
  161  tmp6 = dataptr[1] - dataptr[6];
  191  z2 = tmp5 + tmp6;
  192  z3 = tmp4 + tmp6;
  198  tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
  210  dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS);
  226  tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6];
  256  z2 = tmp5 + tmp6;
  257  z3 = tmp4 + tmp6;
  263  tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
  [all …]
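jfdctint.c runs the same butterflies in fixed point: FIX_3_072711026 is 3.072711026 scaled by 2^CONST_BITS, MULTIPLY forms the scaled product, and DESCALE shifts back with rounding. A sketch of the idiom, assuming libjpeg's CONST_BITS = 13; the macro shapes follow libjpeg but the wrapper is ours:

    #include <stdint.h>

    #define CONST_BITS 13                       /* libjpeg's scale factor */
    #define FIX(x)  ((int32_t)((x) * (1L << CONST_BITS) + 0.5))
    #define DESCALE(x, n)  (((x) + ((int32_t)1 << ((n) - 1))) >> (n))

    /* v * c in fixed point: multiply by the pre-scaled constant, shift
     * back with rounding. Real libjpeg keeps operands narrow enough
     * that the 32-bit product cannot overflow. */
    static int32_t fixmul(int32_t v, int32_t fixed_c)
    {
        return DESCALE(v * fixed_c, CONST_BITS);
    }
    /* usage: fixmul(tmp6, FIX(3.072711026)) ~= tmp6 * 3.072711026 */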
|
D | jidctflt.c |
  76  FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; variable
  148  tmp6 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5] * _0_125);
  151  z13 = tmp6 + tmp5; /* phase 6 */
  152  z10 = tmp6 - tmp5;
  163  tmp6 = tmp12 - tmp7; /* phase 2 */
  164  tmp5 = tmp11 - tmp6;
  169  wsptr[DCTSIZE*1] = tmp1 + tmp6;
  170  wsptr[DCTSIZE*6] = tmp1 - tmp6;
  221  tmp6 = tmp12 - tmp7;
  222  tmp5 = tmp11 - tmp6;
  [all …]
|
D | jidctfst.c |
  175  DCTELEM tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; variable
  247  tmp6 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  250  z13 = tmp6 + tmp5; /* phase 6 */
  251  z10 = tmp6 - tmp5;
  262  tmp6 = tmp12 - tmp7; /* phase 2 */
  263  tmp5 = tmp11 - tmp6;
  268  wsptr[DCTSIZE*1] = (int) (tmp1 + tmp6);
  269  wsptr[DCTSIZE*6] = (int) (tmp1 - tmp6);
  344  tmp6 = tmp12 - tmp7; /* phase 2 */
  345  tmp5 = tmp11 - tmp6;
  [all …]
|
/external/llvm/test/CodeGen/Thumb/ |
D | dyn-stackalloc.ll |
  15  %tmp6 = load i32, i32* null
  16  %tmp8 = alloca float, i32 %tmp6
  60  %tmp6 = alloca i8, i32 %tmp5
  61  %tmp9 = call i8* @strcpy( i8* %tmp6, i8* %tag )
  62  %tmp6.len = call i32 @strlen( i8* %tmp6 )
  63  %tmp6.indexed = getelementptr i8, i8* %tmp6, i32 %tmp6.len
  64  …call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp6.indexed, i8* getelementptr inbounds ([2 x i8], [2 x…
  65  %tmp15 = call i8* @strcat( i8* %tmp6, i8* %contents )
  66  call fastcc void @comment_add( %struct.comment* %vc, i8* %tmp6 )
|
/external/llvm/test/CodeGen/PowerPC/ |
D | 2006-01-20-ShiftPartsCrash.ll |
  10  %tmp6.u = add i32 %tmp5, 32 ; <i32> [#uses=1]
  11  %tmp6 = bitcast i32 %tmp6.u to i32 ; <i32> [#uses=1]
  13  %tmp6.upgrd.1 = trunc i32 %tmp6 to i8 ; <i8> [#uses=1]
  14  %shift.upgrd.2 = zext i8 %tmp6.upgrd.1 to i64 ; <i64> [#uses=1]
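Both copies of this regression test exercise "shift parts" lowering: a 64-bit shift on a 32-bit target is split into word-sized shifts, and the add of 32 feeding the shift amount is what drives the cross-word case. A sketch of that lowering in C (our reconstruction of the general technique, not of this exact test; assumes 0 < n < 64 so every sub-shift stays in range):

    #include <stdint.h>

    /* 64-bit left shift "by parts" on a 32-bit machine; the n >= 32 arm
     * is where an amount adjusted by +/-32 shows up. */
    static uint64_t shl64_parts(uint32_t lo, uint32_t hi, unsigned n)
    {
        uint32_t out_lo, out_hi;
        if (n >= 32) {
            out_hi = lo << (n - 32);
            out_lo = 0;
        } else {
            out_hi = (hi << n) | (lo >> (32 - n));
            out_lo = lo << n;
        }
        return ((uint64_t)out_hi << 32) | out_lo;
    }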
|
/external/swiftshader/third_party/LLVM/test/CodeGen/PowerPC/ |
D | 2006-01-20-ShiftPartsCrash.ll |
  9  %tmp6.u = add i32 %tmp5, 32 ; <i32> [#uses=1]
  10  %tmp6 = bitcast i32 %tmp6.u to i32 ; <i32> [#uses=1]
  12  %tmp6.upgrd.1 = trunc i32 %tmp6 to i8 ; <i8> [#uses=1]
  13  %shift.upgrd.2 = zext i8 %tmp6.upgrd.1 to i64 ; <i64> [#uses=1]
|
/external/swiftshader/third_party/LLVM/test/Transforms/InstCombine/ |
D | and-or-not.ll |
  13  %tmp6 = and i32 %b, %a ; <i32> [#uses=1]
  14  %tmp7 = or i32 %tmp6, %tmp3not ; <i32> [#uses=1]
  22  %tmp6 = and i32 %b, %a ; <i32> [#uses=1]
  23  %tmp6not = xor i32 %tmp6, -1 ; <i32> [#uses=1]
  32  %tmp6 = and <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
  33  %tmp7 = or <4 x i32> %tmp6, %tmp3not ; <<4 x i32>> [#uses=1]
  41  %tmp6 = and <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
  42  …%tmp6not = xor <4 x i32> %tmp6, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#us…
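and-or-not.ll checks InstCombine's De Morgan-style fold: with %tmp3not (elided in the snippet, presumably ~(a | b) given the file's purpose), (a & b) | ~(a | b) simplifies to ~(a ^ b). A quick check of the identity in C:

    #include <stdint.h>
    #include <assert.h>

    /* (a & b) | ~(a | b) == ~(a ^ b): both sides are 1 exactly in the
     * bit positions where a and b agree. */
    static uint32_t xnor_long(uint32_t a, uint32_t b)  { return (a & b) | ~(a | b); }
    static uint32_t xnor_short(uint32_t a, uint32_t b) { return ~(a ^ b); }

    int main(void)
    {
        assert(xnor_long(0x12345678u, 0x0FF00FF0u) ==
               xnor_short(0x12345678u, 0x0FF00FF0u));
        return 0;
    }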
|
/external/swiftshader/third_party/LLVM/test/Transforms/IndVarSimplify/ |
D | iv-fold.ll |
  17  %tmp6 = load i32* %arrayidx, align 4
  21  %tmp6.1 = load i32* %arrayidx.1, align 4
  27  %r = add i32 %tmp6, %tmp6.1
  44  %tmp6 = load i32* %arrayidx, align 4
  48  %tmp6.1 = load i32* %arrayidx.1, align 4
  54  %r = add i32 %tmp6, %tmp6.1
|
/external/llvm/test/Transforms/IndVarSimplify/ |
D | iv-fold.ll |
  17  %tmp6 = load i32, i32* %arrayidx, align 4
  21  %tmp6.1 = load i32, i32* %arrayidx.1, align 4
  27  %r = add i32 %tmp6, %tmp6.1
  44  %tmp6 = load i32, i32* %arrayidx, align 4
  48  %tmp6.1 = load i32, i32* %arrayidx.1, align 4
  54  %r = add i32 %tmp6, %tmp6.1
|
/external/llvm/test/Transforms/Reassociate/ |
D | repeats.ll |
  86  %tmp6 = mul i3 %tmp5, %x
  87  ret i3 %tmp6
  101  %tmp6 = mul i4 %tmp5, %x
  102  %tmp7 = mul i4 %tmp6, %x
  118  %tmp6 = mul i4 %tmp5, %x
  119  %tmp7 = mul i4 %tmp6, %x
  136  %tmp6 = mul i4 %tmp5, %x
  137  %tmp7 = mul i4 %tmp6, %x
  156  %tmp6 = mul i4 %tmp5, %x
  157  %tmp7 = mul i4 %tmp6, %x
  [all …]
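repeats.ll feeds Reassociate long chains of mul %x at odd bit widths (i3, i4) to check how repeated factors are rebalanced; the payoff shape for such chains is square-and-multiply. Illustration in C (the exact output form is up to the pass; this just shows the multiply count dropping):

    /* x^6 via squaring: three multiplies instead of the five in the
     * naive chain x*x*x*x*x*x. */
    static unsigned pow6(unsigned x)
    {
        unsigned x2 = x * x;    /* x^2 */
        unsigned x3 = x2 * x;   /* x^3 */
        return x3 * x3;         /* x^6 */
    }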
|
/external/llvm/test/Transforms/InstCombine/ |
D | and-or-not.ll |
  10  %tmp6 = and i32 %b, %a ; <i32> [#uses=1]
  11  %tmp7 = or i32 %tmp6, %tmp3not ; <i32> [#uses=1]
  22  %tmp6 = and i32 %b, %a ; <i32> [#uses=1]
  23  %tmp6not = xor i32 %tmp6, -1 ; <i32> [#uses=1]
  35  %tmp6 = and <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
  36  %tmp7 = or <4 x i32> %tmp6, %tmp3not ; <<4 x i32>> [#uses=1]
  47  %tmp6 = and <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
  48  …%tmp6not = xor <4 x i32> %tmp6, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#us…
|
/external/llvm/test/CodeGen/X86/ |
D | avx1-logical-load-folding.ll |
  13  %tmp6 = extractelement <8 x float> %tmp5, i32 0
  14  store float %tmp6, float* %C
  27  %tmp6 = extractelement <8 x float> %tmp5, i32 0
  28  store float %tmp6, float* %C
  41  %tmp6 = extractelement <8 x float> %tmp5, i32 0
  42  store float %tmp6, float* %C
  54  %tmp6 = bitcast <8 x i32> %tmp5 to <8 x float>
  55  %tmp7 = extractelement <8 x float> %tmp6, i32 0
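avx1-logical-load-folding.ll stores lane 0 of a logical op applied to a loaded 256-bit vector, checking that the load folds into the AVX instruction's memory operand (vandps/vorps/vxorps with an address). A hypothetical C reduction using intrinsics (compile with -mavx):

    #include <immintrin.h>

    /* Bitwise AND of a loaded 256-bit float vector, then extract
     * lane 0, mirroring the IR's extractelement ... i32 0. With AVX
     * enabled the compiler can fold the load into vandps. */
    float and_lane0(const float *a, __m256 mask)
    {
        __m256 v = _mm256_and_ps(_mm256_loadu_ps(a), mask);
        return _mm256_cvtss_f32(v);
    }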
|
/external/webrtc/webrtc/common_audio/signal_processing/ |
D | complex_fft_mips.c |
  35  int32_t tmp6 = 0; in WebRtcSpl_ComplexFFT() local
  137  [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6), in WebRtcSpl_ComplexFFT()
  157  int32_t tmp5 = 0, tmp6 = 0, tmp = 0, tempMax = 0, round2 = 0; in WebRtcSpl_ComplexIFFT() local
  314  [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6), in WebRtcSpl_ComplexIFFT()
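The "=&r"(tmp6) entries in complex_fft_mips.c are GCC extended-asm output constraints: = marks a write-only output, and & marks an early clobber, so the allocator will not assign it a register that still holds an input. A standalone example of the same constraint style (not the FFT assembly itself; the fallback keeps it compilable off MIPS):

    /* Minimal MIPS extended-asm example with an early-clobber output,
     * mirroring the constraint style above. */
    static int add_earlyclobber(int a, int b)
    {
    #if defined(__GNUC__) && defined(__mips__)
        int out;
        __asm__("addu %0, %1, %2"
                : "=&r"(out)            /* write-only, early clobber */
                : "r"(a), "r"(b));
        return out;
    #else
        return a + b;                   /* portable fallback */
    #endif
    }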
|