/external/valgrind/none/tests/mips64/

extract_insert_bit_field.stdout.exp-mips64r2:
     2: ins :: in 0x0, in1 0x0, out 0x0, pos: 0, size: 1
     3: ins :: in 0x0, in1 0xffffffffffffffff, out 0xfffffffffffffffe, pos: 0, size: 1
     4: ins :: in 0x0, in1 0x98765432, out 0xffffffff98765432, pos: 0, size: 1
     5: ins :: in 0x0, in1 0xffffffffff865421, out 0xffffffffff865420, pos: 0, size: 1
     6: ins :: in 0xffffffffffffffff, in1 0x0, out 0x1, pos: 0, size: 1
     7: ins :: in 0xffffffffffffffff, in1 0xffffffffffffffff, out 0xffffffffffffffff, pos: 0, size: 1
     8: ins :: in 0xffffffffffffffff, in1 0x98765432, out 0xffffffff98765433, pos: 0, size: 1
     9: ins :: in 0xffffffffffffffff, in1 0xffffffffff865421, out 0xffffffffff865421, pos: 0, size: 1
    10: ins :: in 0x98765432, in1 0x0, out 0x0, pos: 0, size: 1
    11: ins :: in 0x98765432, in1 0xffffffffffffffff, out 0xfffffffffffffffe, pos: 0, size: 1
    [all …]
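These expected-output rows pin down the semantics under test: bits [size-1:0] of `in` replace bits [pos+size-1:pos] of `in1`, and the 32-bit result is sign-extended to 64 bits, which is why 0x98765432 comes back as 0xffffffff98765432. A minimal C reference model under exactly that assumption (the function is illustrative, not part of the valgrind test):

    #include <stdint.h>
    #include <stdio.h>

    /* Reference model for the "ins" rows above: bits [size-1:0] of `in`
     * replace bits [pos+size-1:pos] of `in1`, and the 32-bit result is
     * sign-extended to 64 bits (a mips64r2 convention). */
    static uint64_t ins_model(uint64_t in, uint64_t in1, int pos, int size) {
        uint32_t mask = (size == 32 ? 0xffffffffu : ((1u << size) - 1u)) << pos;
        uint32_t res = ((uint32_t)in1 & ~mask) | (((uint32_t)in << pos) & mask);
        return (uint64_t)(int64_t)(int32_t)res;   /* sign-extend as MIPS64 does */
    }

    int main(void) {
        /* Reproduces the row: in 0xffffffffffffffff, in1 0x98765432 */
        printf("0x%llx\n", (unsigned long long)
               ins_model(0xffffffffffffffffULL, 0x98765432ULL, 0, 1));
        return 0;                                 /* prints 0xffffffff98765433 */
    }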
/external/libvpx/libvpx/vp9/encoder/mips/msa/

vp9_fdct4x4_msa.c (in vp9_fwht4x4_msa):
    18: v8i16 in0, in1, in2, in3, in4;
    20: LD_SH4(input, src_stride, in0, in1, in2, in3);
    22: in0 += in1;
    25: SUB2(in4, in1, in4, in2, in1, in2);
    27: in3 += in1;
    29: TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1);
    32: in1 -= in3;
    33: in4 = (in0 - in1) >> 1;
    36: in1 += in2;
    38: SLLI_4V(in0, in1, in2, in3, 2);
    [all …]
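The scattered adds, subtracts, and the halved difference above are the 4-point Walsh-Hadamard butterfly that the MSA code runs on whole v8i16 vectors at once. A scalar sketch of one such butterfly, assuming the structure of libvpx's C reference (variable names are illustrative):

    #include <stdint.h>

    /* One 4-point forward WHT butterfly, as used by VP9's lossless
     * transform; the MSA version runs several in parallel across
     * v8i16 lanes.  A sketch, not libvpx's exact code. */
    static void wht4_1d(const int16_t in[4], int16_t out[4]) {
        int16_t a = in[0], b = in[1], c = in[2], d = in[3];
        int16_t e;

        a += b;             /* mirrors "in0 += in1"             */
        d -= c;             /* mirrors the SUB2 step            */
        e = (a - d) >> 1;   /* mirrors "in4 = (in0 - in1) >> 1" */
        b = e - b;
        c = e - c;
        a -= c;
        d += b;

        out[0] = a; out[1] = c; out[2] = d; out[3] = b;
    }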
vp9_fdct8x8_msa.c (in vp9_fht8x8_msa):
    18: v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    20: LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);
    21: SLLI_4V(in0, in1, in2, in3, 2);
    26: VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
    27:           in0, in1, in2, in3, in4, in5, in6, in7);
    28: TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
    29:                    in0, in1, in2, in3, in4, in5, in6, in7);
    30: VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
    31:           in0, in1, in2, in3, in4, in5, in6, in7);
    34: VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
    [all …]
/external/libvpx/libvpx/vp9/common/mips/msa/

vp9_idct4x4_msa.c (in vp9_iht4x4_16_add_msa):
    18: v8i16 in0, in1, in2, in3;
    21: LD4x4_SH(input, in0, in1, in2, in3);
    22: TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    27: VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    29: TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    30: VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    34: VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    36: TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    37: VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    41: VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    [all …]
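`vp9_iht4x4_16_add_msa` is a hybrid transform: `tx_type` selects DCT or ADST independently for each 1-D pass, which is why the snippet repeats VP9_IDCT4x4/VP9_IADST4x4 in different pairings around transposes. A hedged C skeleton of that dispatch; which kernel feeds which pass is illustrative here:

    #include <stdint.h>

    /* Skeleton of the tx_type dispatch behind the hybrid 4x4 inverse
     * transform: DCT or ADST is chosen per axis, with a transpose
     * between the two 1-D passes.  idct4/iadst4 stand in for the
     * real kernels. */
    typedef void (*tx1d_fn)(const int16_t in[4][4], int16_t out[4][4]);

    enum TxType { DCT_DCT, ADST_DCT, DCT_ADST, ADST_ADST };

    static void pick_kernels(enum TxType tx_type, tx1d_fn idct4, tx1d_fn iadst4,
                             tx1d_fn *pass1, tx1d_fn *pass2) {
        switch (tx_type) {
            case DCT_DCT:   *pass1 = idct4;  *pass2 = idct4;  break;
            case ADST_DCT:  *pass1 = idct4;  *pass2 = iadst4; break;
            case DCT_ADST:  *pass1 = iadst4; *pass2 = idct4;  break;
            case ADST_ADST: *pass1 = iadst4; *pass2 = iadst4; break;
        }
    }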
vp9_idct8x8_msa.c (in vp9_iht8x8_64_add_msa):
    18: v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    21: LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
    23: TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
    24:                    in0, in1, in2, in3, in4, in5, in6, in7);
    29: VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
    30:                in0, in1, in2, in3, in4, in5, in6, in7);
    32: TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
    33:                    in0, in1, in2, in3, in4, in5, in6, in7);
    34: VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
    35:                in0, in1, in2, in3, in4, in5, in6, in7);
    [all …]
/external/skia/src/gpu/glsl/

GrGLSL_impl.h (in GrGLSLExpr<Self>::Mul and ::Add):
    25: inline Self GrGLSLExpr<Self>::Mul(T0 in0, T1 in1) {
    26:     if (in0.isZeros() || in1.isZeros()) {
    30:     return Self::VectorCast(in1);
    32:     if (in1.isOnes()) {
    35:     return Self("(%s * %s)", in0.c_str(), in1.c_str());
    40: inline Self GrGLSLExpr<Self>::Add(T0 in0, T1 in1) {
    41:     if (in1.isZeros()) {
    45:     return Self::VectorCast(in1);
    47:     if (in0.isOnes() && in1.isOnes()) {
    50:     return Self("(%s + %s)", in0.c_str(), in1.c_str());
    [all …]
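`Mul` and `Add` fold constants while the shader string is being built: zeros annihilate, ones are identities, and only the general case emits a real `(%s * %s)` expression. A self-contained C sketch of the same idea on plain strings (an illustration of the technique, not Skia's API):

    #include <stdio.h>
    #include <string.h>

    /* Build "(a * b)" unless an operand lets us fold: 0 * x == 0 and
     * 1 * x == x, so trivially known inputs never reach the GLSL
     * compiler as real expressions. */
    static void expr_mul(char *out, size_t n, const char *a, const char *b) {
        if (strcmp(a, "0") == 0 || strcmp(b, "0") == 0) {
            snprintf(out, n, "0");            /* zeros annihilate  */
        } else if (strcmp(a, "1") == 0) {
            snprintf(out, n, "%s", b);        /* ones are identity */
        } else if (strcmp(b, "1") == 0) {
            snprintf(out, n, "%s", a);
        } else {
            snprintf(out, n, "(%s * %s)", a, b);
        }
    }

    int main(void) {
        char buf[64];
        expr_mul(buf, sizeof buf, "1", "coverage");
        printf("%s\n", buf);                  /* prints: coverage */
        return 0;
    }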
GrGLSL.h:
    205: GrGLSLExpr(const char format[], const char in0[], const char in1[])
    207:     fExpr.appendf(format, in0, in1);
    224: static Self Mul(T0 in0, T1 in1);
    231: static Self Add(T0 in0, T1 in1);
    238: static Self Sub(T0 in0, T1 in1);
    283: GrGLSLExpr1(const char format[], const char in0[], const char in1[])
    284:     : INHERITED(format, in0, in1) {
    292: friend GrGLSLExpr1 operator*(const GrGLSLExpr1& in0, const GrGLSLExpr1& in1);
    293: friend GrGLSLExpr1 operator+(const GrGLSLExpr1& in0, const GrGLSLExpr1& in1);
    294: friend GrGLSLExpr1 operator-(const GrGLSLExpr1& in0, const GrGLSLExpr1& in1);
    [all …]
/external/libvpx/libvpx/vpx_dsp/x86/

fwd_txfm_sse2.c (in vpx_fdct4x4_1_sse2):
    18: __m128i in0, in1;
    22: in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
    23: in1 = _mm_unpacklo_epi64(in1, _mm_loadl_epi64((const __m128i *)
    28: tmp = _mm_add_epi16(in0, in1);
    30: in1 = _mm_unpackhi_epi16(zero, tmp);
    32: in1 = _mm_srai_epi32(in1, 16);
    34: tmp = _mm_add_epi32(in0, in1);
    36: in1 = _mm_unpackhi_epi32(tmp, zero);
    38: tmp = _mm_add_epi32(in0, in1);
    41: in1 = _mm_add_epi32(tmp, in0);
    [all …]
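`vpx_fdct4x4_1_sse2` computes only the DC coefficient; the unpack/add ladder is a horizontal reduction of all sixteen samples. A scalar model, assuming the usual libvpx convention for this path that DC is twice the block sum (names are illustrative):

    #include <stdint.h>

    /* Scalar model of the DC-only 4x4 forward transform: sum the block,
     * then scale.  The SSE2 version reaches the same sum with an
     * unpack/add reduction tree instead of a loop. */
    static void fdct4x4_1_model(const int16_t *input, int16_t *output,
                                int stride) {
        int32_t sum = 0;
        for (int r = 0; r < 4; ++r)
            for (int c = 0; c < 4; ++c)
                sum += input[r * stride + c];
        output[0] = (int16_t)(sum << 1);  /* assumed scale, per vpx_fdct4x4_1_c */
        output[1] = 0;
    }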
/external/clang/test/CodeGen/

mult-alt-generic.c:
     43: register int in1 = 1;                          // in single_lt()
     45: asm("foo %1,%0" : "=r" (out0) : "<r" (in1));
     47: asm("foo %1,%0" : "=r" (out0) : "r<" (in1));
     54: register int in1 = 1;                          // in single_gt()
     56: asm("foo %1,%0" : "=r" (out0) : ">r" (in1));
     58: asm("foo %1,%0" : "=r" (out0) : "r>" (in1));
     65: register int in1 = 1;                          // in single_r()
     67: asm("foo %1,%0" : "=r" (out0) : "r" (in1));
    113: register int in1 = 1;                          // in single_g()
    115: asm("foo %1,%0" : "=r" (out0) : "g" (in1));
    [all …]
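The test walks one input operand through constraint letters: `<` (autodecrement memory), `>` (autoincrement memory), `r` (register), `g` (register, memory, or immediate), in both orders to exercise alternative parsing. A compilable sketch of the same shape; `foo` is a placeholder mnemonic exactly as in the test, so build with `-S` only:

    /* Multi-alternative inline-asm constraints: each string lists the
     * forms the operand may take, and the compiler picks one.  "foo"
     * is not a real instruction, so compile with -S, as the original
     * CodeGen test does. */
    void single_constraints_example(void) {
        int out0 = 0;
        register int in1 = 1;
        asm("foo %1,%0" : "=r"(out0) : "r"(in1));  /* register only        */
        asm("foo %1,%0" : "=r"(out0) : "g"(in1));  /* reg, mem, or imm     */
        (void)out0;
    }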
/external/libvpx/libvpx/vpx_dsp/mips/

idct4x4_msa.c:
    15: v8i16 in0, in1, in2, in3;                       // in vpx_iwht4x4_16_add_msa()
    19: LD4x4_SH(input, in0, in2, in3, in1);
    20: TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1);
    24: UNPCK_R_SH_SW(in1, in1_r);
    46:              in0, in1, in2, in3);
    47: ADDBLK_ST4x4_UB(in0, in3, in1, in2, dst, dst_stride);
    53: v8i16 in1, in0 = { 0 };                         // in vpx_iwht4x4_1_add_msa()
    64: in1 = in0 >> 1;
    65: in0 -= in1;
    67: ADDBLK_ST4x4_UB(in0, in1, in1, in1, dst, dst_stride);
    [all …]
macros_msa.h:
    268: #define SW4(in0, in1, in2, in3, pdst, stride) {            \
    270:     SW(in1, (pdst) + stride);                               \
    282: #define SD4(in0, in1, in2, in3, pdst, stride) {            \
    284:     SD(in1, (pdst) + stride);                               \
    400: #define ST_B2(RTYPE, in0, in1, pdst, stride) {             \
    402:     ST_B(RTYPE, in1, (pdst) + stride);                      \
    406: #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) {    \
    407:     ST_B2(RTYPE, in0, in1, (pdst), stride);                 \
    412: #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
    414:     ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride);         \
    [all …]
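Each store macro fans a short list of vectors out at a fixed stride, and the wider ones are built from the narrower ones (`ST_B8` is two `ST_B4`s, each two `ST_B2`s). A hedged scalar sketch of what the `SW4` pattern does for plain words; the real macros store 128-bit MSA registers:

    #include <stdint.h>
    #include <string.h>

    /* Scalar equivalent of the SW4 pattern: store four words at
     * pdst, pdst+stride, pdst+2*stride, pdst+3*stride. */
    static void sw4_model(uint32_t in0, uint32_t in1, uint32_t in2, uint32_t in3,
                          uint8_t *pdst, int stride) {
        memcpy(pdst + 0 * stride, &in0, sizeof in0);
        memcpy(pdst + 1 * stride, &in1, sizeof in1);
        memcpy(pdst + 2 * stride, &in2, sizeof in2);
        memcpy(pdst + 3 * stride, &in3, sizeof in3);
    }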
idct8x8_msa.c (in vpx_idct8x8_64_add_msa):
    15: v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    18: LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
    21: TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
    22:                    in0, in1, in2, in3, in4, in5, in6, in7);
    24: VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
    25:                in0, in1, in2, in3, in4, in5, in6, in7);
    27: TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
    28:                    in0, in1, in2, in3, in4, in5, in6, in7);
    30: VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
    31:                in0, in1, in2, in3, in4, in5, in6, in7);
    [all …]
fwd_txfm_msa.c:
     16: v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  // in fdct8x16_1d_column()
     29:     in0, in1, in2, in3, in4, in5, in6, in7,
     31: SLLI_4V(in0, in1, in2, in3, 2);
     35: ADD4(in0, in15, in1, in14, in2, in13, in3, in12, tmp0, tmp1, tmp2, tmp3);
     40: SUB4(in0, in15, in1, in14, in2, in13, in3, in12, in15, in14, in13, in12);
    135: v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  // in fdct16x8_1d_row()
    138: LD_SH8(input, 16, in0, in1, in2, in3, in4, in5, in6, in7);
    140: TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
    141:                    in0, in1, in2, in3, in4, in5, in6, in7);
    144: ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);
    [all …]
/external/libvpx/libvpx/vp8/common/mips/msa/

vp8_macros_msa.h:
    256: #define SW4(in0, in1, in2, in3, pdst, stride)              \
    259:     SW(in1, (pdst) + stride);                               \
    271: #define SD4(in0, in1, in2, in3, pdst, stride)              \
    274:     SD(in1, (pdst) + stride);                               \
    363: #define ST_B2(RTYPE, in0, in1, pdst, stride)                \
    366:     ST_B(RTYPE, in1, (pdst) + stride);                      \
    370: #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride)      \
    372:     ST_B2(RTYPE, in0, in1, (pdst), stride);                 \
    378: #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
    381:     ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride);         \
    [all …]
/external/libvpx/libvpx/vp8/encoder/mips/msa/

dct_msa.c:
    14: #define TRANSPOSE4x4_H(in0, in1, in2, in3, out0, out1, out2, out3)  \
    18:     ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m);                     \
    20:     ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m);                     \
    71: v8i16 in0, in1, in2, in3;                       // in vp8_short_fdct4x4_msa()
    78: LD_SH4(input, pitch / 2, in0, in1, in2, in3);
    79: TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    81: BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3);
    82: SLLI_4V(temp0, temp1, in1, in3, 3);
    86: temp0 = __msa_ilvr_h(in3, in1);
    87: in1 = __msa_splati_h(coeff, 3);
    [all …]
/external/boringssl/src/crypto/aes/asm/

aesv8-armx.pl:
     69: my ($zero,$rcon,$mask,$in0,$in1,$tmp,$key)=
    170:     vld1.8  {$in1},[$inp],#8
    176:     vtbl.8  $key,{$in1},$mask
    178:     vst1.32 {$in1},[$out],#8
    189:     veor    $tmp,$tmp,$in1
    191:     vext.8  $in1,$zero,$in1,#12
    193:     veor    $in1,$in1,$tmp
    195:     veor    $in1,$in1,$key
    205:     vld1.8  {$in1},[$inp]
    211:     vtbl.8  $key,{$in1},$mask
    [all …]
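The `vtbl`/`vext`/`veor` sequence is one round of the AES key schedule: rotate and S-box the last word of the previous round key, fold in the round constant, then xor down the chain. A plain C sketch of one AES-128 expansion round, with big-endian words and a standard `aes_sbox` table assumed to be defined elsewhere:

    #include <stdint.h>

    extern const uint8_t aes_sbox[256];   /* standard AES S-box, assumed */

    /* SubWord: apply the S-box to each byte of a (big-endian) word. */
    static uint32_t sub_word(uint32_t w) {
        return (uint32_t)aes_sbox[(w >> 24) & 0xff] << 24 |
               (uint32_t)aes_sbox[(w >> 16) & 0xff] << 16 |
               (uint32_t)aes_sbox[(w >>  8) & 0xff] <<  8 |
               (uint32_t)aes_sbox[w & 0xff];
    }

    /* One AES-128 key-schedule round: prev[0..3] are the previous round
     * key words, next[0..3] receive the new ones.  The NEON code above
     * does RotWord+SubWord via vtbl and the xor chain via vext/veor. */
    static void aes128_expand_round(const uint32_t prev[4], uint32_t next[4],
                                    uint8_t rcon) {
        uint32_t t = sub_word((prev[3] << 8) | (prev[3] >> 24)); /* RotWord+SubWord */
        t ^= (uint32_t)rcon << 24;                               /* round constant  */
        next[0] = prev[0] ^ t;
        next[1] = prev[1] ^ next[0];
        next[2] = prev[2] ^ next[1];
        next[3] = prev[3] ^ next[2];
    }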
/external/llvm/test/CodeGen/SPARC/

mult-alt-generic-sparc.ll:
    33: %in1 = alloca i32, align 4
    35: store i32 1, i32* %in1, align 4
    36: %tmp = load i32, i32* %in1, align 4
    39: %tmp1 = load i32, i32* %in1, align 4
    48: %in1 = alloca i32, align 4
    50: store i32 1, i32* %in1, align 4
    51: %tmp = load i32, i32* %in1, align 4
    54: %tmp1 = load i32, i32* %in1, align 4
    63: %in1 = alloca i32, align 4
    65: store i32 1, i32* %in1, align 4
    [all …]
/external/llvm/test/CodeGen/ARM/

mult-alt-generic-arm.ll:
    33: %in1 = alloca i32, align 4
    35: store i32 1, i32* %in1, align 4
    36: %tmp = load i32, i32* %in1, align 4
    39: %tmp1 = load i32, i32* %in1, align 4
    48: %in1 = alloca i32, align 4
    50: store i32 1, i32* %in1, align 4
    51: %tmp = load i32, i32* %in1, align 4
    54: %tmp1 = load i32, i32* %in1, align 4
    63: %in1 = alloca i32, align 4
    65: store i32 1, i32* %in1, align 4
    [all …]
/external/llvm/test/CodeGen/MSP430/

mult-alt-generic-msp430.ll:
    33: %in1 = alloca i16, align 2
    35: store i16 1, i16* %in1, align 2
    36: %tmp = load i16, i16* %in1, align 2
    39: %tmp1 = load i16, i16* %in1, align 2
    48: %in1 = alloca i16, align 2
    50: store i16 1, i16* %in1, align 2
    51: %tmp = load i16, i16* %in1, align 2
    54: %tmp1 = load i16, i16* %in1, align 2
    63: %in1 = alloca i16, align 2
    65: store i16 1, i16* %in1, align 2
    [all …]
/external/llvm/test/CodeGen/PowerPC/

mult-alt-generic-powerpc.ll:
    33: %in1 = alloca i32, align 4
    35: store i32 1, i32* %in1, align 4
    36: %tmp = load i32, i32* %in1, align 4
    39: %tmp1 = load i32, i32* %in1, align 4
    48: %in1 = alloca i32, align 4
    50: store i32 1, i32* %in1, align 4
    51: %tmp = load i32, i32* %in1, align 4
    54: %tmp1 = load i32, i32* %in1, align 4
    63: %in1 = alloca i32, align 4
    65: store i32 1, i32* %in1, align 4
    [all …]
mult-alt-generic-powerpc64.ll:
    33: %in1 = alloca i32, align 4
    35: store i32 1, i32* %in1, align 4
    36: %tmp = load i32, i32* %in1, align 4
    39: %tmp1 = load i32, i32* %in1, align 4
    48: %in1 = alloca i32, align 4
    50: store i32 1, i32* %in1, align 4
    51: %tmp = load i32, i32* %in1, align 4
    54: %tmp1 = load i32, i32* %in1, align 4
    63: %in1 = alloca i32, align 4
    65: store i32 1, i32* %in1, align 4
    [all …]
/external/llvm/test/CodeGen/AMDGPU/

fcmp64.ll:
     6: define void @flt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
     8:   %r0 = load double, double addrspace(1)* %in1
    18: define void @fle_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
    20:   %r0 = load double, double addrspace(1)* %in1
    30: define void @fgt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
    32:   %r0 = load double, double addrspace(1)* %in1
    42: define void @fge_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
    44:   %r0 = load double, double addrspace(1)* %in1
    54: define void @fne_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
    56:   %r0 = load double, double addrspace(1)* %in1
    [all …]
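Every function in the file has the same skeleton with a different predicate: load two doubles, `fcmp`, widen the boolean, store it. Roughly the C it corresponds to (address spaces dropped, NaN-ordering details of the specific fcmp predicates glossed over, names illustrative):

    #include <stdint.h>

    /* Roughly what @flt_f64 lowers from: compare two loaded doubles
     * and store the widened boolean.  The other defines swap in <=,
     * >, >=, and != comparisons. */
    void flt_f64_model(int32_t *out, const double *in1, const double *in2) {
        double r0 = *in1;
        double r1 = *in2;
        *out = (r0 < r1) ? 1 : 0;   /* fcmp + widen in the IR */
    }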
/external/webrtc/webrtc/common_audio/signal_processing/

resample.c (in WebRtcSpl_DotProdIntToInt):
    312: static void WebRtcSpl_DotProdIntToInt(const int32_t* in1, const int32_t* in2,
    321:     tmp1 += coef * in1[0];
    325:     tmp1 += coef * in1[1];
    329:     tmp1 += coef * in1[2];
    333:     tmp1 += coef * in1[3];
    337:     tmp1 += coef * in1[4];
    341:     tmp1 += coef * in1[5];
    345:     tmp1 += coef * in1[6];
    349:     tmp1 += coef * in1[7];
    353:     *out1 = tmp1 + coef * in1[8];
    [all …]
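The routine is an unrolled 9-tap dot product: each step reloads a coefficient and accumulates `coef * in1[k]`; a parallel accumulator over `in2` and any final scaling are elided by the search hits. A compact model of just the visible arithmetic (illustrative, not WebRTC's exact code):

    #include <stdint.h>

    /* Compact model of the unrolled loop above: a 9-tap dot product
     * between a coefficient table and an input window. */
    static int32_t dot_prod_9(const int32_t *coefs, const int32_t *in1) {
        int32_t acc = 0;
        for (int k = 0; k < 9; ++k)
            acc += coefs[k] * in1[k];
        return acc;
    }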
/external/llvm/test/CodeGen/X86/

mult-alt-generic-x86_64.ll:
    33: %in1 = alloca i32, align 4
    35: store i32 1, i32* %in1, align 4
    36: %tmp = load i32, i32* %in1, align 4
    39: %tmp1 = load i32, i32* %in1, align 4
    48: %in1 = alloca i32, align 4
    50: store i32 1, i32* %in1, align 4
    51: %tmp = load i32, i32* %in1, align 4
    54: %tmp1 = load i32, i32* %in1, align 4
    63: %in1 = alloca i32, align 4
    65: store i32 1, i32* %in1, align 4
    [all …]
mult-alt-generic-i686.ll:
    33: %in1 = alloca i32, align 4
    35: store i32 1, i32* %in1, align 4
    36: %tmp = load i32, i32* %in1, align 4
    39: %tmp1 = load i32, i32* %in1, align 4
    48: %in1 = alloca i32, align 4
    50: store i32 1, i32* %in1, align 4
    51: %tmp = load i32, i32* %in1, align 4
    54: %tmp1 = load i32, i32* %in1, align 4
    63: %in1 = alloca i32, align 4
    65: store i32 1, i32* %in1, align 4
    [all …]