Searched refs: in2 (Results 1 – 25 of 266), sorted by relevance


/external/libvpx/libvpx/vp9/encoder/mips/msa/
vp9_fdct4x4_msa.c
18 v8i16 in0, in1, in2, in3, in4; in vp9_fwht4x4_msa() local
20 LD_SH4(input, src_stride, in0, in1, in2, in3); in vp9_fwht4x4_msa()
23 in3 -= in2; in vp9_fwht4x4_msa()
25 SUB2(in4, in1, in4, in2, in1, in2); in vp9_fwht4x4_msa()
26 in0 -= in2; in vp9_fwht4x4_msa()
29 TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1); in vp9_fwht4x4_msa()
31 in0 += in2; in vp9_fwht4x4_msa()
34 SUB2(in4, in2, in4, in3, in2, in3); in vp9_fwht4x4_msa()
36 in1 += in2; in vp9_fwht4x4_msa()
38 SLLI_4V(in0, in1, in2, in3, 2); in vp9_fwht4x4_msa()
[all …]
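
Both fwht4x4 entries (this one and the av1 copy below) vectorize the same 4-point Walsh-Hadamard butterfly. For orientation, a scalar C++ sketch of one 1-D pass, reconstructed from the add/subtract steps visible in the snippet — the in4 midpoint and the output ordering are inferred from the SUB2 and TRANSPOSE4x4_SH_SH argument lists, so treat this as illustrative rather than the library's reference code:

#include <cstdint>

// One 1-D pass of the 4-point forward Walsh-Hadamard transform (sketch).
static void fwht4_1d(int16_t io[4]) {
  int16_t in0 = io[0], in1 = io[1], in2 = io[2], in3 = io[3];
  in0 += in1;                            // pair the first two samples
  in3 -= in2;                            // difference the last two
  const int16_t in4 = (in0 - in3) >> 1;  // midpoint of the partial results
  in1 = in4 - in1;                       // matches SUB2(in4, in1, in4, in2, in1, in2)
  in2 = in4 - in2;
  in0 -= in2;                            // fold the differences back in
  in3 += in1;
  io[0] = in0; io[1] = in2; io[2] = in3; io[3] = in1;  // order per the transpose call
}

The full 2-D transform runs this pass on columns, transposes, runs it on rows, then scales every coefficient with SLLI_4V(..., 2), i.e. a left shift by two.
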
vp9_fdct8x8_msa.c
18 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in vp9_fht8x8_msa() local
20 LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7); in vp9_fht8x8_msa()
21 SLLI_4V(in0, in1, in2, in3, 2); in vp9_fht8x8_msa()
26 VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in vp9_fht8x8_msa()
28 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in vp9_fht8x8_msa()
30 VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in vp9_fht8x8_msa()
34 VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in vp9_fht8x8_msa()
36 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in vp9_fht8x8_msa()
38 VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in vp9_fht8x8_msa()
42 VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in vp9_fht8x8_msa()
[all …]
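
vp9_fht8x8_msa dispatches on tx_type: each 2-D hybrid transform is two 1-D passes (VP9_FDCT8 or VP9_ADST8 per pass) separated by TRANSPOSE8x8_SH_SH. A minimal C++ sketch of that row/column structure, with the 1-D kernels left as parameters (helper names are illustrative):

#include <algorithm>
#include <cstdint>

using Kernel1D = void (*)(int16_t col[8]);  // stands in for VP9_FDCT8 / VP9_ADST8

static void transpose8x8(int16_t m[8][8]) {
  for (int r = 0; r < 8; ++r)
    for (int c = r + 1; c < 8; ++c) std::swap(m[r][c], m[c][r]);
}

// Two 1-D passes with a transpose between them; the kernel pair encodes
// the tx_type (DCT_DCT, ADST_DCT, DCT_ADST, ADST_ADST).
static void fht8x8_structure(int16_t blk[8][8], Kernel1D pass1, Kernel1D pass2) {
  int16_t col[8];
  for (int c = 0; c < 8; ++c) {   // pass 1: transform each column
    for (int r = 0; r < 8; ++r) col[r] = blk[r][c];
    pass1(col);
    for (int r = 0; r < 8; ++r) blk[r][c] = col[r];
  }
  transpose8x8(blk);              // rows become columns for pass 2
  for (int c = 0; c < 8; ++c) {
    for (int r = 0; r < 8; ++r) col[r] = blk[r][c];
    pass2(col);
    for (int r = 0; r < 8; ++r) blk[r][c] = col[r];
  }
}

The MSA version keeps the whole 8x8 block in eight v8i16 registers, so its transpose is register shuffling rather than memory traffic.
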
/external/libvpx/libvpx/vp9/common/mips/msa/
vp9_idct4x4_msa.c
18 v8i16 in0, in1, in2, in3; in vp9_iht4x4_16_add_msa() local
21 LD4x4_SH(input, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
22 TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
27 VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
29 TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
30 VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
34 VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
36 TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
37 VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
41 VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
[all …]
vp9_idct8x8_msa.c
18 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in vp9_iht8x8_64_add_msa() local
21 LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7); in vp9_iht8x8_64_add_msa()
23 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in vp9_iht8x8_64_add_msa()
29 VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in vp9_iht8x8_64_add_msa()
32 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in vp9_iht8x8_64_add_msa()
34 VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in vp9_iht8x8_64_add_msa()
39 VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in vp9_iht8x8_64_add_msa()
42 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in vp9_iht8x8_64_add_msa()
44 VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in vp9_iht8x8_64_add_msa()
49 VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in vp9_iht8x8_64_add_msa()
[all …]
/external/libaom/libaom/av1/encoder/mips/msa/
fdct4x4_msa.c
18 v8i16 in0, in1, in2, in3, in4; in av1_fwht4x4_msa() local
20 LD_SH4(input, src_stride, in0, in1, in2, in3); in av1_fwht4x4_msa()
23 in3 -= in2; in av1_fwht4x4_msa()
25 SUB2(in4, in1, in4, in2, in1, in2); in av1_fwht4x4_msa()
26 in0 -= in2; in av1_fwht4x4_msa()
29 TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1); in av1_fwht4x4_msa()
31 in0 += in2; in av1_fwht4x4_msa()
34 SUB2(in4, in2, in4, in3, in2, in3); in av1_fwht4x4_msa()
36 in1 += in2; in av1_fwht4x4_msa()
38 SLLI_4V(in0, in1, in2, in3, 2); in av1_fwht4x4_msa()
[all …]
/external/libvpx/libvpx/vpx_dsp/mips/
fwd_txfm_msa.c
15 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in vpx_fdct8x8_1_msa() local
18 LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7); in vpx_fdct8x8_1_msa()
19 ADD4(in0, in1, in2, in3, in4, in5, in6, in7, in0, in2, in4, in6); in vpx_fdct8x8_1_msa()
20 ADD2(in0, in2, in4, in6, in0, in4); in vpx_fdct8x8_1_msa()
31 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in fdct8x16_1d_column() local
44 LD_SH16(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in fdct8x16_1d_column()
46 SLLI_4V(in0, in1, in2, in3, 2); in fdct8x16_1d_column()
50 ADD4(in0, in15, in1, in14, in2, in13, in3, in12, tmp0, tmp1, tmp2, tmp3); in fdct8x16_1d_column()
55 SUB4(in0, in15, in1, in14, in2, in13, in3, in12, in15, in14, in13, in12); in fdct8x16_1d_column()
150 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in fdct16x8_1d_row() local
[all …]
idct4x4_msa.c
15 v8i16 in0, in1, in2, in3; in vpx_iwht4x4_16_add_msa() local
19 LD4x4_SH(input, in0, in2, in3, in1); in vpx_iwht4x4_16_add_msa()
20 TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1); in vpx_iwht4x4_16_add_msa()
22 UNPCK_R_SH_SW(in2, in2_r); in vpx_iwht4x4_16_add_msa()
46 in2, in3); in vpx_iwht4x4_16_add_msa()
47 ADDBLK_ST4x4_UB(in0, in3, in1, in2, dst, dst_stride); in vpx_iwht4x4_16_add_msa()
72 v8i16 in0, in1, in2, in3; in vpx_idct4x4_16_add_msa() local
75 LD4x4_SH(input, in0, in1, in2, in3); in vpx_idct4x4_16_add_msa()
77 TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); in vpx_idct4x4_16_add_msa()
78 VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); in vpx_idct4x4_16_add_msa()
[all …]
idct8x8_msa.c
15 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in vpx_idct8x8_64_add_msa() local
18 LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7); in vpx_idct8x8_64_add_msa()
21 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in vpx_idct8x8_64_add_msa()
24 VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in vpx_idct8x8_64_add_msa()
27 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in vpx_idct8x8_64_add_msa()
30 VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in vpx_idct8x8_64_add_msa()
33 SRARI_H4_SH(in0, in1, in2, in3, 5); in vpx_idct8x8_64_add_msa()
36 VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3); in vpx_idct8x8_64_add_msa()
43 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in vpx_idct8x8_12_add_msa() local
49 LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7); in vpx_idct8x8_12_add_msa()
[all …]
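
The tail of each idct path here is the same: SRARI_H4_SH applies a rounding arithmetic right shift (by 5 for 8x8), then VP9_ADDBLK_ST8x4_UB adds the residual to the prediction in dst with unsigned-8-bit saturation. A scalar sketch of that add-back step (illustrative, not the macro bodies):

#include <algorithm>
#include <cstdint>

// Rounding shift, then saturating add of the residual onto the prediction.
static void add_residual_store(const int16_t *residual, uint8_t *dst,
                               int dst_stride, int rows, int cols, int shift) {
  for (int r = 0; r < rows; ++r)
    for (int c = 0; c < cols; ++c) {
      const int res = (residual[r * cols + c] + (1 << (shift - 1))) >> shift;
      const int v = dst[r * dst_stride + c] + res;
      dst[r * dst_stride + c] = static_cast<uint8_t>(std::clamp(v, 0, 255));
    }
}
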
macros_msa.h
205 #define SW4(in0, in1, in2, in3, pdst, stride) \ argument
209 SW(in2, (pdst) + 2 * stride); \
220 #define SD4(in0, in1, in2, in3, pdst, stride) \ argument
224 SD(in2, (pdst) + 2 * stride); \
323 #define ST_V4(RTYPE, in0, in1, in2, in3, pdst, stride) \ argument
326 ST_V2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
331 #define ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
333 ST_V4(RTYPE, in0, in1, in2, in3, pdst, stride); \
481 #define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) \ argument
484 out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3); \
[all …]
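
The store macros in this header (and its aom/webp/vp8 siblings below) are thin wrappers over strided stores; SW4, for instance, writes four 32-bit words at consecutive stride offsets, and ST_V4 is simply two ST_V2 calls. A plain-C++ equivalent of the SW4 expansion (sketch; the real macro emits MSA stores):

#include <cstdint>
#include <cstring>

// Store four 32-bit words, one per stride step (memcpy = unaligned-safe).
static void sw4(uint32_t in0, uint32_t in1, uint32_t in2, uint32_t in3,
                uint8_t *pdst, int stride) {
  std::memcpy(pdst + 0 * stride, &in0, sizeof(in0));
  std::memcpy(pdst + 1 * stride, &in1, sizeof(in1));
  std::memcpy(pdst + 2 * stride, &in2, sizeof(in2));
  std::memcpy(pdst + 3 * stride, &in3, sizeof(in3));
}
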
fwd_dct32x32_msa.c
16 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in fdct8x32_1d_column_load_butterfly() local
22 LD_SH4(input, src_stride, in0, in1, in2, in3); in fdct8x32_1d_column_load_butterfly()
26 SLLI_4V(in0, in1, in2, in3, 2); in fdct8x32_1d_column_load_butterfly()
30 BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, step0, step1, step2, in fdct8x32_1d_column_load_butterfly()
40 LD_SH4(input + (8 * src_stride), src_stride, in0, in1, in2, in3); in fdct8x32_1d_column_load_butterfly()
44 SLLI_4V(in0, in1, in2, in3, 2); in fdct8x32_1d_column_load_butterfly()
48 BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, step0, step1, step2, in fdct8x32_1d_column_load_butterfly()
59 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in fdct8x32_1d_column_even_store() local
65 LD_SH4(input, 8, in0, in1, in2, in3); in fdct8x32_1d_column_even_store()
67 BUTTERFLY_8(in0, in1, in2, in3, in12, in13, in14, in15, vec0, vec1, vec2, in fdct8x32_1d_column_even_store()
[all …]
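
BUTTERFLY_8 is the first stage of the 32-point forward DCT: it pairs mirrored inputs into sums (step0..step3) and differences (step4..step7). A scalar sketch of that pairing, reconstructed from the usual libvpx macro pattern (treat as illustrative):

#include <cstdint>

// out[0..3] = mirrored sums, out[4..7] = mirrored differences.
static void butterfly8(const int16_t in[8], int16_t out[8]) {
  for (int i = 0; i < 4; ++i) {
    out[i] = in[i] + in[7 - i];
    out[7 - i] = in[i] - in[7 - i];
  }
}
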
/external/libvpx/libvpx/vp8/encoder/mips/msa/
dct_msa.c
14 #define TRANSPOSE4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \ argument
18 ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
20 ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
70 v8i16 in0, in1, in2, in3; in vp8_short_fdct4x4_msa() local
77 LD_SH4(input, pitch / 2, in0, in1, in2, in3); in vp8_short_fdct4x4_msa()
78 TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); in vp8_short_fdct4x4_msa()
80 BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3); in vp8_short_fdct4x4_msa()
83 in2 = temp0 - temp1; in vp8_short_fdct4x4_msa()
94 TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); in vp8_short_fdct4x4_msa()
96 BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3); in vp8_short_fdct4x4_msa()
[all …]
/external/libaom/libaom/aom_dsp/mips/
macros_msa.h
265 #define SW4(in0, in1, in2, in3, pdst, stride) \ argument
269 SW(in2, (pdst) + 2 * stride); \
280 #define SD4(in0, in1, in2, in3, pdst, stride) \ argument
284 SD(in2, (pdst) + 2 * stride); \
416 #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \ argument
419 ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
423 #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
425 ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
442 #define ST_H4(RTYPE, in0, in1, in2, in3, pdst, stride) \ argument
445 ST_H2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
[all …]
/external/webp/src/dsp/
msa_macro.h
168 #define SW4(in0, in1, in2, in3, pdst, stride) do { \ argument
174 SW(in2, ptmp); \
179 #define SW3(in0, in1, in2, pdst, stride) do { \ argument
185 SW(in2, ptmp); \
202 #define SD4(in0, in1, in2, in3, pdst, stride) do { \ argument
208 SD(in2, ptmp); \
303 #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) do { \ argument
305 ST_B2(RTYPE, in2, in3, pdst + 2 * stride, stride); \
310 #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
312 ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
[all …]
/external/libvpx/libvpx/vp8/common/mips/msa/
vp8_macros_msa.h
238 #define SW4(in0, in1, in2, in3, pdst, stride) \ argument
242 SW(in2, (pdst) + 2 * stride); \
253 #define SD4(in0, in1, in2, in3, pdst, stride) \ argument
257 SD(in2, (pdst) + 2 * stride); \
352 #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \ argument
355 ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
360 #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
362 ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
549 #define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \ argument
552 out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \
[all …]
/external/tensorflow/tensorflow/core/kernels/
aggregate_ops_cpu.h
39 typename TTypes<T>::ConstFlat in2) {
40 Add2EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2);
47 typename TTypes<T>::ConstFlat in2,
49 Add3EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3);
56 typename TTypes<T>::ConstFlat in2,
59 Add4EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4);
66 typename TTypes<T>::ConstFlat in2,
70 Add5EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5);
77 typename TTypes<T>::ConstFlat in2,
82 Add6EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6);
[all …]
aggregate_ops_gpu.cu.cc
36 typename TTypes<T>::ConstFlat in2) { in operator ()()
37 Add2EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2); in operator ()()
45 typename TTypes<T>::ConstFlat in2, in operator ()()
47 Add3EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3); in operator ()()
55 typename TTypes<T>::ConstFlat in2, in operator ()()
58 Add4EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4); in operator ()()
66 typename TTypes<T>::ConstFlat in2, in operator ()()
70 Add5EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5); in operator ()()
78 typename TTypes<T>::ConstFlat in2, in operator ()()
83 Add6EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6); in operator ()()
[all …]
aggregate_ops.h
31 typename TTypes<T>::ConstFlat in2);
38 typename TTypes<T>::ConstFlat in2) { in Compute()
39 out.device(d) = in1 + in2; in Compute()
47 typename TTypes<T>::ConstFlat in2,
55 typename TTypes<T>::ConstFlat in2, in Compute()
57 out.device(d) = in1 + in2 + in3; in Compute()
65 typename TTypes<T>::ConstFlat in2,
74 typename TTypes<T>::ConstFlat in2, in Compute()
77 out.device(d) = in1 + in2 + in3 + in4; in Compute()
85 typename TTypes<T>::ConstFlat in2,
[all …]
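
All of the AddN functors above reduce to one fused Eigen expression: out.device(d) = in1 + in2 + ... evaluates element-wise in a single pass on whichever device d names, which is why the CPU and GPU specializations share the same body. A minimal host-side sketch of the pattern (assumes Eigen's unsupported Tensor module; DefaultDevice stands in for CPUDevice/GPUDevice):

#include <unsupported/Eigen/CXX11/Tensor>

// Element-wise add of two flat tensors, evaluated through a device object.
void add2_flat(Eigen::TensorMap<Eigen::Tensor<float, 1>> out,
               Eigen::TensorMap<Eigen::Tensor<const float, 1>> in1,
               Eigen::TensorMap<Eigen::Tensor<const float, 1>> in2) {
  Eigen::DefaultDevice d;
  out.device(d) = in1 + in2;  // one fused kernel, no temporaries
}
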
/external/libvpx/libvpx/vpx_dsp/x86/
fwd_txfm_sse2.c
50 __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); in vpx_fdct8x8_1_sse2() local
55 u1 = _mm_add_epi16(in2, in3); in vpx_fdct8x8_1_sse2()
59 in2 = _mm_load_si128((const __m128i *)(input + 6 * stride)); in vpx_fdct8x8_1_sse2()
65 in2 = _mm_add_epi16(in2, in3); in vpx_fdct8x8_1_sse2()
69 sum = _mm_add_epi16(sum, in2); in vpx_fdct8x8_1_sse2()
89 __m128i in0, in1, in2, in3; in vpx_fdct16x16_1_sse2() local
97 in2 = _mm_load_si128((const __m128i *)(input + 1 * stride + 0)); in vpx_fdct16x16_1_sse2()
101 u1 = _mm_add_epi16(in2, in3); in vpx_fdct16x16_1_sse2()
106 in2 = _mm_load_si128((const __m128i *)(input + 3 * stride + 0)); in vpx_fdct16x16_1_sse2()
111 u1 = _mm_add_epi16(in2, in3); in vpx_fdct16x16_1_sse2()
[all …]
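
vpx_fdct8x8_1 computes only the DC term, which is just the sum of the 64 input samples; the SSE2 code accumulates row pairs with _mm_add_epi16 and reduces at the end. Scalar equivalent of what the loads and adds above compute (sketch; the final scaling and store are omitted):

#include <cstdint>

// Sum all samples of an 8x8 block; the DC coefficient derives from this.
static int32_t block_sum_8x8(const int16_t *input, int stride) {
  int32_t sum = 0;
  for (int r = 0; r < 8; ++r)
    for (int c = 0; c < 8; ++c) sum += input[r * stride + c];
  return sum;
}
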
/external/eigen/unsupported/test/
cxx11_tensor_device.cu
27 …Tensor<float, 3>& in1, Eigen::Tensor<float, 3>& in2, Eigen::Tensor<float, 3>& out) : in1_(in1), in… in CPUContext()
49 const Eigen::Tensor<float, 3>& in2() const { return in2_; } in in2() function
70 …::TensorMap<Eigen::Tensor<float, 3> >& in2, Eigen::TensorMap<Eigen::Tensor<float, 3> >& out) : in1… in GPUContext()
92 const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2() const { return in2_; } in in2() function
116 …context->out().device(context->device()) = context->in1() + context->in2() * 3.14f + context->in1(… in test_contextual_eval()
122 …context->out().device(context->device()) = (context->in1() + context->in2()).eval() * 3.14f + cont… in test_forced_contextual_eval()
129 context->out().device(context->device()) += context->in1() + context->in2() * 3.14f; in test_compound_assignment()
145 …e).slice(indices, sizes).device(context->device()) = context->in1().contract(context->in2(), dims); in test_contraction()
182 Eigen::Tensor<float, 3> in2(40,50,70); in test_cpu() local
186 in2 = in2.random() + in2.constant(10.0f); in test_cpu()
[all …]
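
The point of the CPUContext/GPUContext pair is that each test body is written once against an abstract context, and only context->device() changes where the expression runs. A trimmed, hypothetical context in the same shape (the real test wraps TensorMaps and a GpuDevice on the GPU side):

#include <unsupported/Eigen/CXX11/Tensor>

struct CpuContext {
  Eigen::Tensor<float, 3> &in1_, &in2_, &out_;
  Eigen::DefaultDevice device_;
  Eigen::Tensor<float, 3> &in1() { return in1_; }
  Eigen::Tensor<float, 3> &in2() { return in2_; }
  Eigen::Tensor<float, 3> &out() { return out_; }
  const Eigen::DefaultDevice &device() const { return device_; }
};

// Same expression body for any context; mirrors test_contextual_eval above.
template <typename Context>
void contextual_eval(Context *context) {
  context->out().device(context->device()) =
      context->in1() + context->in2() * 3.14f;
}
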
cxx11_tensor_forced_eval_sycl.cpp
32 Eigen::Tensor<float, 3> in2(tensorRange); in test_forced_eval_sycl() local
36 …float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*size… in test_forced_eval_sycl()
40 in2 = in2.random() + in2.constant(10.0f); in test_forced_eval_sycl()
47 …sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in1.dimensions().TotalSize())*sizeof(floa… in test_forced_eval_sycl()
55 (in1(i, j, k) + in2(i, j, k)) * in2(i, j, k)); in test_forced_eval_sycl()
/external/llvm/test/CodeGen/AMDGPU/
fcmp64.ll
7 double addrspace(1)* %in2) {
9 %r1 = load double, double addrspace(1)* %in2
19 double addrspace(1)* %in2) {
21 %r1 = load double, double addrspace(1)* %in2
31 double addrspace(1)* %in2) {
33 %r1 = load double, double addrspace(1)* %in2
43 double addrspace(1)* %in2) {
45 %r1 = load double, double addrspace(1)* %in2
55 double addrspace(1)* %in2) {
57 %r1 = load double, double addrspace(1)* %in2
[all …]
frem.ll
19 float addrspace(1)* %in2) #0 {
20 %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
38 float addrspace(1)* %in2) #1 {
39 %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
59 double addrspace(1)* %in2) #0 {
61 %r1 = load double, double addrspace(1)* %in2, align 8
75 double addrspace(1)* %in2) #1 {
77 %r1 = load double, double addrspace(1)* %in2, align 8
84 <2 x float> addrspace(1)* %in2) #0 {
85 %gep2 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in2, i32 4
[all …]
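
These frem tests exercise targets without a native floating-point remainder instruction: LLVM's frem has fmod semantics (the result keeps the dividend's sign), and the typical expansion is division, truncation, and a multiply-subtract. Roughly, as a C++ sketch:

#include <cmath>

// frem(a, b) expands to a - b * trunc(a / b); for finite inputs this
// matches std::fmod(a, b) up to rounding of the intermediate division.
double frem_expand(double a, double b) {
  return a - b * std::trunc(a / b);
}
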
/external/webrtc/webrtc/common_audio/signal_processing/
resample.c
312 static void WebRtcSpl_DotProdIntToInt(const int32_t* in1, const int32_t* in2, in WebRtcSpl_DotProdIntToInt() argument
322 tmp2 += coef * in2[-0]; in WebRtcSpl_DotProdIntToInt()
326 tmp2 += coef * in2[-1]; in WebRtcSpl_DotProdIntToInt()
330 tmp2 += coef * in2[-2]; in WebRtcSpl_DotProdIntToInt()
334 tmp2 += coef * in2[-3]; in WebRtcSpl_DotProdIntToInt()
338 tmp2 += coef * in2[-4]; in WebRtcSpl_DotProdIntToInt()
342 tmp2 += coef * in2[-5]; in WebRtcSpl_DotProdIntToInt()
346 tmp2 += coef * in2[-6]; in WebRtcSpl_DotProdIntToInt()
350 tmp2 += coef * in2[-7]; in WebRtcSpl_DotProdIntToInt()
354 *out2 = tmp2 + coef * in2[-8]; in WebRtcSpl_DotProdIntToInt()
[all …]
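
WebRtcSpl_DotProdIntToInt evaluates two FIR dot products at once; the negative indices (in2[-k]) walk backwards from the newest sample, the usual convolution orientation, and the body above is that loop unrolled across nine taps. Loop form of the in2 half (sketch; the coefficient source is illustrative):

#include <cstdint>

// FIR dot product over `taps` coefficients, newest sample first.
int32_t fir_dot_backward(const int32_t *in2, const int32_t *coefs, int taps) {
  int32_t acc = 0;
  for (int k = 0; k < taps; ++k)
    acc += coefs[k] * in2[-k];  // in2 points at the current (newest) sample
  return acc;
}
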
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
fcmp64.ll
7 double addrspace(1)* %in2) {
9 %r1 = load double, double addrspace(1)* %in2
19 double addrspace(1)* %in2) {
21 %r1 = load double, double addrspace(1)* %in2
31 double addrspace(1)* %in2) {
33 %r1 = load double, double addrspace(1)* %in2
43 double addrspace(1)* %in2) {
45 %r1 = load double, double addrspace(1)* %in2
55 double addrspace(1)* %in2) {
57 %r1 = load double, double addrspace(1)* %in2
[all …]
/external/libchrome/ipc/
ipc_sync_message_unittest.cc
57 void On_2_1(int in1, bool in2, bool* out1) { in On_2_1() argument
59 DCHECK(!in2); in On_2_1()
63 void On_2_2(bool in1, int in2, bool* out1, int* out2) { in On_2_2() argument
65 DCHECK_EQ(2, in2); in On_2_2()
70 void On_2_3(int in1, bool in2, std::string* out1, int* out2, bool* out3) { in On_2_3() argument
72 DCHECK(in2); in On_2_3()
78 void On_3_1(int in1, bool in2, const std::string& in3, bool* out1) { in On_3_1() argument
80 DCHECK(!in2); in On_3_1()
86 bool in2, in On_3_2() argument
91 DCHECK(!in2); in On_3_2()
[all …]
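
These handlers follow one naming convention: On_<num_inputs>_<num_outputs>, with inputs passed by value or const reference and outputs as pointers the handler fills before the synchronous reply is sent. A hypothetical handler in the same shape:

// Two inputs, two outputs: reply fields are written through the pointers.
void On_2_2_example(bool in1, int in2, bool *out1, int *out2) {
  *out1 = !in1;
  *out2 = in2 * 2;
}
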
