/external/boringssl/src/crypto/poly1305/
D | poly1305_vec.c
  194  p->S21.v = _mm_mul_epu32(p->R21.v, FIVE);  in poly1305_first_block()
  195  p->S22.v = _mm_mul_epu32(p->R22.v, FIVE);  in poly1305_first_block()
  196  p->S23.v = _mm_mul_epu32(p->R23.v, FIVE);  in poly1305_first_block()
  197  p->S24.v = _mm_mul_epu32(p->R24.v, FIVE);  in poly1305_first_block()
  248  T0 = _mm_mul_epu32(H0, p->R20.v);  in poly1305_blocks()
  249  T1 = _mm_mul_epu32(H0, p->R21.v);  in poly1305_blocks()
  250  T2 = _mm_mul_epu32(H0, p->R22.v);  in poly1305_blocks()
  251  T3 = _mm_mul_epu32(H0, p->R23.v);  in poly1305_blocks()
  252  T4 = _mm_mul_epu32(H0, p->R24.v);  in poly1305_blocks()
  253  T5 = _mm_mul_epu32(H1, p->S24.v);  in poly1305_blocks()
  [all …]
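Note: in Poly1305's radix-2^26 representation, reduction modulo 2^130 - 5 folds the limbs above bit 130 back in with a factor of 5, which is why the key setup above precomputes S2k = 5 * R2k once. A minimal sketch of that step (standalone, with hypothetical names rather than BoringSSL's internals):

    #include <emmintrin.h> /* SSE2 */

    /* Sketch: compute s = 5*r for the two 26-bit limbs held in the even
     * 32-bit lanes of r. _mm_mul_epu32 multiplies the even lanes as
     * unsigned 32-bit values into full 64-bit products, so 5*r cannot
     * overflow its lane. */
    static __m128i poly1305_mul_by_five(__m128i r) {
      const __m128i FIVE = _mm_set_epi32(0, 5, 0, 5); /* even lanes = 5 */
      return _mm_mul_epu32(r, FIVE);
    }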
/external/libvpx/libvpx/vpx_dsp/x86/
D | highbd_idct4x4_add_sse2.c
  43  temp1[0] = _mm_mul_epu32(temp1[0], cospi_p16_p16);  // ([0] + [2])*cospi_16_64  in highbd_idct4_small_sse2()
  44  temp1[1] = _mm_mul_epu32(temp1[1], cospi_p16_p16);  // ([0] + [2])*cospi_16_64  in highbd_idct4_small_sse2()
  45  temp2[0] = _mm_mul_epu32(temp2[0], cospi_p16_p16);  // ([0] - [2])*cospi_16_64  in highbd_idct4_small_sse2()
  46  temp2[1] = _mm_mul_epu32(temp2[1], cospi_p16_p16);  // ([0] - [2])*cospi_16_64  in highbd_idct4_small_sse2()
  52  temp1[0] = _mm_mul_epu32(io[1], cospi_p24_p24);  // input[1] * cospi_24_64  in highbd_idct4_small_sse2()
  53  temp1[1] = _mm_mul_epu32(temp1[3], cospi_p24_p24);  // input[1] * cospi_24_64  in highbd_idct4_small_sse2()
  54  temp2[0] = _mm_mul_epu32(io[1], cospi_p08_p08);  // input[1] * cospi_8_64  in highbd_idct4_small_sse2()
  55  temp2[1] = _mm_mul_epu32(temp1[3], cospi_p08_p08);  // input[1] * cospi_8_64  in highbd_idct4_small_sse2()
  56  temp1[2] = _mm_mul_epu32(io[3], cospi_p08_p08);  // input[3] * cospi_8_64  in highbd_idct4_small_sse2()
  57  temp1[3] = _mm_mul_epu32(temp2[3], cospi_p08_p08);  // input[3] * cospi_8_64  in highbd_idct4_small_sse2()
  [all …]
D | fwd_txfm_sse2.h
  23  buf0 = _mm_mul_epu32(a, b);  in k_madd_epi32()
  26  buf1 = _mm_mul_epu32(a, b);  in k_madd_epi32()
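Note: k_madd_epi32 in libvpx (and its libaom twin further down) builds a 32-bit multiply-accumulate out of _mm_mul_epu32, which only multiplies the even dwords: multiply once, shift both operands right by 32 so the odd dwords land in even position, multiply again, and add the 64-bit products. A self-contained sketch of that pattern:

    #include <emmintrin.h>

    /* Sketch of the k_madd_epi32 pattern: treats a and b as two pairs of
     * 32-bit values and returns the two 64-bit sums
     *   { a0*b0 + a1*b1, a2*b2 + a3*b3 }.
     * Valid for non-negative values, since _mm_mul_epu32 is unsigned. */
    static __m128i madd_epi32_sse2(__m128i a, __m128i b) {
      const __m128i even = _mm_mul_epu32(a, b);                 /* a0*b0, a2*b2 */
      const __m128i odd  = _mm_mul_epu32(_mm_srli_epi64(a, 32), /* a1*b1, a3*b3 */
                                         _mm_srli_epi64(b, 32));
      return _mm_add_epi64(even, odd);
    }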
/external/XNNPACK/src/requantization/
D | precise-sse4.c
  63  const __m128i x_absmul02 = _mm_mul_epu32(x_abs0123, vmultiplier);  in xnn_requantize_precise__sse4()
  64  const __m128i y_absmul02 = _mm_mul_epu32(y_abs0123, vmultiplier);  in xnn_requantize_precise__sse4()
  65  const __m128i z_absmul02 = _mm_mul_epu32(z_abs0123, vmultiplier);  in xnn_requantize_precise__sse4()
  66  const __m128i w_absmul02 = _mm_mul_epu32(w_abs0123, vmultiplier);  in xnn_requantize_precise__sse4()
  68  const __m128i x_absmul13 = _mm_mul_epu32(x_abs1032, vmultiplier);  in xnn_requantize_precise__sse4()
  69  const __m128i y_absmul13 = _mm_mul_epu32(y_abs1032, vmultiplier);  in xnn_requantize_precise__sse4()
  70  const __m128i z_absmul13 = _mm_mul_epu32(z_abs1032, vmultiplier);  in xnn_requantize_precise__sse4()
  71  const __m128i w_absmul13 = _mm_mul_epu32(w_abs1032, vmultiplier);  in xnn_requantize_precise__sse4()
D | precise-ssse3.c
  62  const __m128i x_absmul02 = _mm_mul_epu32(x_abs0123, vmultiplier);  in xnn_requantize_precise__ssse3()
  63  const __m128i y_absmul02 = _mm_mul_epu32(y_abs0123, vmultiplier);  in xnn_requantize_precise__ssse3()
  64  const __m128i z_absmul02 = _mm_mul_epu32(z_abs0123, vmultiplier);  in xnn_requantize_precise__ssse3()
  65  const __m128i w_absmul02 = _mm_mul_epu32(w_abs0123, vmultiplier);  in xnn_requantize_precise__ssse3()
  67  const __m128i x_absmul13 = _mm_mul_epu32(x_abs1032, vmultiplier);  in xnn_requantize_precise__ssse3()
  68  const __m128i y_absmul13 = _mm_mul_epu32(y_abs1032, vmultiplier);  in xnn_requantize_precise__ssse3()
  69  const __m128i z_absmul13 = _mm_mul_epu32(z_abs1032, vmultiplier);  in xnn_requantize_precise__ssse3()
  70  const __m128i w_absmul13 = _mm_mul_epu32(w_abs1032, vmultiplier);  in xnn_requantize_precise__ssse3()
D | precise-sse2.c
  67  const __m128i x_absmul02 = _mm_mul_epu32(x_abs0123, vmultiplier);  in xnn_requantize_precise__sse2()
  68  const __m128i y_absmul02 = _mm_mul_epu32(y_abs0123, vmultiplier);  in xnn_requantize_precise__sse2()
  69  const __m128i z_absmul02 = _mm_mul_epu32(z_abs0123, vmultiplier);  in xnn_requantize_precise__sse2()
  70  const __m128i w_absmul02 = _mm_mul_epu32(w_abs0123, vmultiplier);  in xnn_requantize_precise__sse2()
  72  const __m128i x_absmul13 = _mm_mul_epu32(x_abs1032, vmultiplier);  in xnn_requantize_precise__sse2()
  73  const __m128i y_absmul13 = _mm_mul_epu32(y_abs1032, vmultiplier);  in xnn_requantize_precise__sse2()
  74  const __m128i z_absmul13 = _mm_mul_epu32(z_abs1032, vmultiplier);  in xnn_requantize_precise__sse2()
  75  const __m128i w_absmul13 = _mm_mul_epu32(w_abs1032, vmultiplier);  in xnn_requantize_precise__sse2()
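Note: all three precise-*.c kernels use the same trick to get four 32x32->64-bit products out of _mm_mul_epu32, which only multiplies lanes 0 and 2: multiply once for the even lanes, swap adjacent dwords with a shuffle (the *_1032 values above), and multiply again for lanes 1 and 3. A minimal sketch of that step, with hypothetical identifiers:

    #include <emmintrin.h>

    /* Sketch: widen four unsigned 32-bit lanes to four 64-bit products.
     * prod02 receives lanes {0,2}, prod13 receives lanes {1,3}. The
     * multiplier is assumed to be broadcast to (at least) the even lanes. */
    static void widen_mul_4x32(__m128i x, __m128i vmultiplier,
                               __m128i* prod02, __m128i* prod13) {
      /* Swap adjacent dwords: {x1, x0, x3, x2}. */
      const __m128i x1032 = _mm_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1));
      *prod02 = _mm_mul_epu32(x, vmultiplier);     /* x0*m, x2*m */
      *prod13 = _mm_mul_epu32(x1032, vmultiplier); /* x1*m, x3*m */
    }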
D | q31-ssse3.c
  76  const __m128i x_abs_product_even = _mm_mul_epu32(x_abs, vmultiplier);  in xnn_requantize_q31__ssse3()
  77  const __m128i y_abs_product_even = _mm_mul_epu32(y_abs, vmultiplier);  in xnn_requantize_q31__ssse3()
  78  const __m128i z_abs_product_even = _mm_mul_epu32(z_abs, vmultiplier);  in xnn_requantize_q31__ssse3()
  79  const __m128i w_abs_product_even = _mm_mul_epu32(w_abs, vmultiplier);  in xnn_requantize_q31__ssse3()
  96  const __m128i x_abs_product_odd = _mm_mul_epu32(x_abs_rev, vmultiplier);  in xnn_requantize_q31__ssse3()
  97  const __m128i y_abs_product_odd = _mm_mul_epu32(y_abs_rev, vmultiplier);  in xnn_requantize_q31__ssse3()
  98  const __m128i z_abs_product_odd = _mm_mul_epu32(z_abs_rev, vmultiplier);  in xnn_requantize_q31__ssse3()
  99  const __m128i w_abs_product_odd = _mm_mul_epu32(w_abs_rev, vmultiplier);  in xnn_requantize_q31__ssse3()
D | q31-sse2.c
  76  const __m128i x_abs_product_even = _mm_mul_epu32(x_abs, vmultiplier);  in xnn_requantize_q31__sse2()
  77  const __m128i y_abs_product_even = _mm_mul_epu32(y_abs, vmultiplier);  in xnn_requantize_q31__sse2()
  78  const __m128i z_abs_product_even = _mm_mul_epu32(z_abs, vmultiplier);  in xnn_requantize_q31__sse2()
  79  const __m128i w_abs_product_even = _mm_mul_epu32(w_abs, vmultiplier);  in xnn_requantize_q31__sse2()
  96  const __m128i x_abs_product_odd = _mm_mul_epu32(x_abs_rev, vmultiplier);  in xnn_requantize_q31__sse2()
  97  const __m128i y_abs_product_odd = _mm_mul_epu32(y_abs_rev, vmultiplier);  in xnn_requantize_q31__sse2()
  98  const __m128i z_abs_product_odd = _mm_mul_epu32(z_abs_rev, vmultiplier);  in xnn_requantize_q31__sse2()
  99  const __m128i w_abs_product_odd = _mm_mul_epu32(w_abs_rev, vmultiplier);  in xnn_requantize_q31__sse2()
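Note: the q31-*.c kernels feed those 64-bit products into a rounding high multiply on absolute values, in the spirit of ARM's VQRDMULH. A scalar model of the per-lane arithmetic (a sketch of the idea, not XNNPACK's exact code):

    #include <stdint.h>

    /* Scalar sketch: multiply by a Q31 multiplier and keep the high bits
     * with round-to-nearest; the SIMD kernels apply this to |x| and fix
     * the sign up afterwards. */
    static inline uint32_t q31_rdmulh_abs(uint32_t abs_x, uint32_t multiplier) {
      const uint64_t product = (uint64_t) abs_x * (uint64_t) multiplier;
      return (uint32_t) ((product + UINT64_C(0x40000000)) >> 31);
    }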
D | gemmlowp-sse.h
  64  mul_us = _mm_mul_epu32(a_neg, b_neg);  // uses 0 and 2nd data lanes, (abs), the  in gemmlowp_sse_mul_s32()
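Note: plain SSE2 has no signed 32x32->64 multiply (_mm_mul_epi32 arrived with SSE4.1), so gemmlowp_sse_mul_s32 multiplies absolute values with _mm_mul_epu32 and restores the sign afterwards. A hedged sketch of that idea (identifiers illustrative, not gemmlowp's; |INT32_MIN| overflows abs, as in any such scheme):

    #include <emmintrin.h>

    /* Sketch: signed 32x32->64 multiply of the even lanes on SSE2. */
    static __m128i mul_epi32_sse2(__m128i a, __m128i b) {
      /* abs via sign mask: (x ^ s) - s where s = x >> 31 (arithmetic). */
      const __m128i sa = _mm_srai_epi32(a, 31);
      const __m128i sb = _mm_srai_epi32(b, 31);
      const __m128i abs_a = _mm_sub_epi32(_mm_xor_si128(a, sa), sa);
      const __m128i abs_b = _mm_sub_epi32(_mm_xor_si128(b, sb), sb);
      const __m128i prod = _mm_mul_epu32(abs_a, abs_b); /* |a0|*|b0|, |a2|*|b2| */
      /* 64-bit sign mask per product: sign(a) ^ sign(b) from lanes 0 and 2. */
      const __m128i sign = _mm_shuffle_epi32(_mm_xor_si128(sa, sb),
                                             _MM_SHUFFLE(2, 2, 0, 0));
      /* Conditional 64-bit negate: (p ^ mask) - mask. */
      return _mm_sub_epi64(_mm_xor_si128(prod, sign), sign);
    }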
/external/flac/libFLAC/
D | lpc_intrin_sse2.c
  443  …xmm7 = _mm_mul_epu32(xmm7, xmm5); /* we use _unsigned_ multiplication and discard high dword of th…  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2()
  449  xmm6 = _mm_mul_epu32(xmm6, xmm4);  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2()
  456  xmm6 = _mm_mul_epu32(xmm6, xmm3);  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2()
  463  xmm6 = _mm_mul_epu32(xmm6, xmm2);  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2()
  470  xmm6 = _mm_mul_epu32(xmm6, xmm1);  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2()
  477  xmm6 = _mm_mul_epu32(xmm6, xmm0);  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2()
  503  xmm7 = _mm_mul_epu32(xmm7, xmm5);  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2()
  509  xmm6 = _mm_mul_epu32(xmm6, xmm4);  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2()
  516  xmm6 = _mm_mul_epu32(xmm6, xmm3);  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2()
  523  xmm6 = _mm_mul_epu32(xmm6, xmm2);  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2()
  [all …]
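Note: the truncated FLAC comment above appeals to a standard identity: the low 32 bits of a product do not depend on whether the operands are treated as signed or unsigned, so an unsigned widening multiply whose high dword is discarded yields correct signed 32-bit products. As a scalar check (a sketch):

    #include <assert.h>
    #include <stdint.h>

    /* Low dwords of signed and unsigned multiplication agree (two's
     * complement arithmetic is modulo 2^32). */
    static void low_dword_identity(int32_t a, int32_t b) {
      const uint64_t wide = (uint64_t)(uint32_t) a * (uint32_t) b;
      const int32_t via_unsigned = (int32_t)(uint32_t) wide; /* low dword */
      const int32_t direct = (int32_t)((uint32_t) a * (uint32_t) b);
      assert(via_unsigned == direct);
    }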
/external/XNNPACK/src/q8-gavgpool/
D | up7-sse2.c
  98  const __m128i vabsmul_lo02 = _mm_mul_epu32(vabs_lo0123, vmultiplier);  in xnn_q8_gavgpool_ukernel_up7__sse2()
  99  const __m128i vabsmul_hi02 = _mm_mul_epu32(vabs_hi0123, vmultiplier);  in xnn_q8_gavgpool_ukernel_up7__sse2()
  101  const __m128i vabsmul_lo13 = _mm_mul_epu32(vabs_lo1032, vmultiplier);  in xnn_q8_gavgpool_ukernel_up7__sse2()
  102  const __m128i vabsmul_hi13 = _mm_mul_epu32(vabs_hi1032, vmultiplier);  in xnn_q8_gavgpool_ukernel_up7__sse2()
  167  const __m128i vabsmul_lo02 = _mm_mul_epu32(vabs_lo0123, vmultiplier);  in xnn_q8_gavgpool_ukernel_up7__sse2()
  168  const __m128i vabsmul_hi02 = _mm_mul_epu32(vabs_hi0123, vmultiplier);  in xnn_q8_gavgpool_ukernel_up7__sse2()
  170  const __m128i vabsmul_lo13 = _mm_mul_epu32(vabs_lo1032, vmultiplier);  in xnn_q8_gavgpool_ukernel_up7__sse2()
  171  const __m128i vabsmul_hi13 = _mm_mul_epu32(vabs_hi1032, vmultiplier);  in xnn_q8_gavgpool_ukernel_up7__sse2()
D | mp7p7q-sse2.c
  192  const __m128i vabsmul_lo02 = _mm_mul_epu32(vabs_lo0123, vmultiplier);  in xnn_q8_gavgpool_ukernel_mp7p7q__sse2()
  193  const __m128i vabsmul_hi02 = _mm_mul_epu32(vabs_hi0123, vmultiplier);  in xnn_q8_gavgpool_ukernel_mp7p7q__sse2()
  195  const __m128i vabsmul_lo13 = _mm_mul_epu32(vabs_lo1032, vmultiplier);  in xnn_q8_gavgpool_ukernel_mp7p7q__sse2()
  196  const __m128i vabsmul_hi13 = _mm_mul_epu32(vabs_hi1032, vmultiplier);  in xnn_q8_gavgpool_ukernel_mp7p7q__sse2()
  263  const __m128i vabsmul_lo02 = _mm_mul_epu32(vabs_lo0123, vmultiplier);  in xnn_q8_gavgpool_ukernel_mp7p7q__sse2()
  264  const __m128i vabsmul_hi02 = _mm_mul_epu32(vabs_hi0123, vmultiplier);  in xnn_q8_gavgpool_ukernel_mp7p7q__sse2()
  266  const __m128i vabsmul_lo13 = _mm_mul_epu32(vabs_lo1032, vmultiplier);  in xnn_q8_gavgpool_ukernel_mp7p7q__sse2()
  267  const __m128i vabsmul_hi13 = _mm_mul_epu32(vabs_hi1032, vmultiplier);  in xnn_q8_gavgpool_ukernel_mp7p7q__sse2()
/external/XNNPACK/src/q8-avgpool/
D | up9-sse2.c
  117  const __m128i vabsmul_lo02 = _mm_mul_epu32(vabs_lo0123, vmultiplier);  in xnn_q8_avgpool_ukernel_up9__sse2()
  118  const __m128i vabsmul_hi02 = _mm_mul_epu32(vabs_hi0123, vmultiplier);  in xnn_q8_avgpool_ukernel_up9__sse2()
  120  const __m128i vabsmul_lo13 = _mm_mul_epu32(vabs_lo1032, vmultiplier);  in xnn_q8_avgpool_ukernel_up9__sse2()
  121  const __m128i vabsmul_hi13 = _mm_mul_epu32(vabs_hi1032, vmultiplier);  in xnn_q8_avgpool_ukernel_up9__sse2()
  192  const __m128i vabsmul_lo02 = _mm_mul_epu32(vabs_lo0123, vmultiplier);  in xnn_q8_avgpool_ukernel_up9__sse2()
  193  const __m128i vabsmul_hi02 = _mm_mul_epu32(vabs_hi0123, vmultiplier);  in xnn_q8_avgpool_ukernel_up9__sse2()
  195  const __m128i vabsmul_lo13 = _mm_mul_epu32(vabs_lo1032, vmultiplier);  in xnn_q8_avgpool_ukernel_up9__sse2()
  196  const __m128i vabsmul_hi13 = _mm_mul_epu32(vabs_hi1032, vmultiplier);  in xnn_q8_avgpool_ukernel_up9__sse2()
D | mp9p8q-sse2.c
  218  const __m128i vabsmul_lo02 = _mm_mul_epu32(vabs_lo0123, vmultiplier);  in xnn_q8_avgpool_ukernel_mp9p8q__sse2()
  219  const __m128i vabsmul_hi02 = _mm_mul_epu32(vabs_hi0123, vmultiplier);  in xnn_q8_avgpool_ukernel_mp9p8q__sse2()
  221  const __m128i vabsmul_lo13 = _mm_mul_epu32(vabs_lo1032, vmultiplier);  in xnn_q8_avgpool_ukernel_mp9p8q__sse2()
  222  const __m128i vabsmul_hi13 = _mm_mul_epu32(vabs_hi1032, vmultiplier);  in xnn_q8_avgpool_ukernel_mp9p8q__sse2()
  293  const __m128i vabsmul_lo02 = _mm_mul_epu32(vabs_lo0123, vmultiplier);  in xnn_q8_avgpool_ukernel_mp9p8q__sse2()
  294  const __m128i vabsmul_hi02 = _mm_mul_epu32(vabs_hi0123, vmultiplier);  in xnn_q8_avgpool_ukernel_mp9p8q__sse2()
  296  const __m128i vabsmul_lo13 = _mm_mul_epu32(vabs_lo1032, vmultiplier);  in xnn_q8_avgpool_ukernel_mp9p8q__sse2()
  297  const __m128i vabsmul_hi13 = _mm_mul_epu32(vabs_hi1032, vmultiplier);  in xnn_q8_avgpool_ukernel_mp9p8q__sse2()
/external/webp/src/dsp/
D | rescaler_sse2.c
  153  const __m128i D1 = _mm_mul_epu32(frac, mult1);  // 32b x 16b -> 64b  in RescalerImportRowShrink_SSE2()
  154  const __m128i D2 = _mm_mul_epu32(D0, mult1);  in RescalerImportRowShrink_SSE2()
  182  *out0 = _mm_mul_epu32(A0, *mult);  in LoadDispatchAndMult_SSE2()
  183  *out1 = _mm_mul_epu32(A1, *mult);  in LoadDispatchAndMult_SSE2()
  184  *out2 = _mm_mul_epu32(A2, *mult);  in LoadDispatchAndMult_SSE2()
  185  *out3 = _mm_mul_epu32(A3, *mult);  in LoadDispatchAndMult_SSE2()
  202  const __m128i B0 = _mm_mul_epu32(*A0, *mult);  in ProcessRow_SSE2()
  203  const __m128i B1 = _mm_mul_epu32(*A1, *mult);  in ProcessRow_SSE2()
  204  const __m128i B2 = _mm_mul_epu32(*A2, *mult);  in ProcessRow_SSE2()
  205  const __m128i B3 = _mm_mul_epu32(*A3, *mult);  in ProcessRow_SSE2()
/external/mesa3d/src/gallium/auxiliary/util/
D | u_sse.h
  198  mul02 = _mm_mul_epu32(a, b);  in mm_mullohi_epi32()
  199  mul13 = _mm_mul_epu32(a13, b13);  in mm_mullohi_epi32()
  221  __m128i ba = _mm_mul_epu32(b, a);  /* multiply dwords 0, 2 */  in mm_mullo_epi32()
  222  __m128i b4a4 = _mm_mul_epu32(b4, a4);  /* multiply dwords 1, 3 */  in mm_mullo_epi32()
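Note: mm_mullo_epi32 above is the classic SSE2 fallback for SSE4.1's _mm_mullo_epi32: multiply lanes {0,2} and {1,3} separately, then interleave the low dwords of the 64-bit products. A self-contained sketch of the same pattern:

    #include <emmintrin.h>

    /* Sketch: per-lane low 32 bits of a 32x32 multiply on SSE2. */
    static __m128i mullo_epi32_sse2(__m128i a, __m128i b) {
      __m128i even = _mm_mul_epu32(a, b);                /* a0*b0, a2*b2 */
      __m128i odd  = _mm_mul_epu32(_mm_srli_si128(a, 4), /* a1*b1, a3*b3 */
                                   _mm_srli_si128(b, 4));
      /* Keep dword 0 of each 64-bit product: {p0, p2} and {p1, p3}. */
      even = _mm_shuffle_epi32(even, _MM_SHUFFLE(0, 0, 2, 0));
      odd  = _mm_shuffle_epi32(odd, _MM_SHUFFLE(0, 0, 2, 0));
      return _mm_unpacklo_epi32(even, odd);              /* p0, p1, p2, p3 */
    }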
/external/XNNPACK/src/q8-gemm/
D | 4x4c2-sse2.c
  221  const __m128i vabsprod0x02 = _mm_mul_epu32(vabsacc0x0123, vmultiplier);  in xnn_q8_gemm_ukernel_4x4c2__sse2()
  222  const __m128i vabsprod1x02 = _mm_mul_epu32(vabsacc1x0123, vmultiplier);  in xnn_q8_gemm_ukernel_4x4c2__sse2()
  223  const __m128i vabsprod2x02 = _mm_mul_epu32(vabsacc2x0123, vmultiplier);  in xnn_q8_gemm_ukernel_4x4c2__sse2()
  224  const __m128i vabsprod3x02 = _mm_mul_epu32(vabsacc3x0123, vmultiplier);  in xnn_q8_gemm_ukernel_4x4c2__sse2()
  241  const __m128i vabsprod0x13 = _mm_mul_epu32(vabsacc0x1032, vmultiplier);  in xnn_q8_gemm_ukernel_4x4c2__sse2()
  242  const __m128i vabsprod1x13 = _mm_mul_epu32(vabsacc1x1032, vmultiplier);  in xnn_q8_gemm_ukernel_4x4c2__sse2()
  243  const __m128i vabsprod2x13 = _mm_mul_epu32(vabsacc2x1032, vmultiplier);  in xnn_q8_gemm_ukernel_4x4c2__sse2()
  244  const __m128i vabsprod3x13 = _mm_mul_epu32(vabsacc3x1032, vmultiplier);  in xnn_q8_gemm_ukernel_4x4c2__sse2()
D | 2x4c8-sse2.c
  119  const __m128i vabsprod0x02 = _mm_mul_epu32(vabsacc0x0123, vmultiplier);  in xnn_q8_gemm_ukernel_2x4c8__sse2()
  120  const __m128i vabsprod1x02 = _mm_mul_epu32(vabsacc1x0123, vmultiplier);  in xnn_q8_gemm_ukernel_2x4c8__sse2()
  131  const __m128i vabsprod0x13 = _mm_mul_epu32(vabsacc0x1032, vmultiplier);  in xnn_q8_gemm_ukernel_2x4c8__sse2()
  132  const __m128i vabsprod1x13 = _mm_mul_epu32(vabsacc1x1032, vmultiplier);  in xnn_q8_gemm_ukernel_2x4c8__sse2()
/external/XNNPACK/src/q8-igemm/
D | 4x4c2-sse2.c
  201  const __m128i vabsprod0x02 = _mm_mul_epu32(vabsacc0x0123, vmultiplier);  in xnn_q8_igemm_ukernel_4x4c2__sse2()
  202  const __m128i vabsprod1x02 = _mm_mul_epu32(vabsacc1x0123, vmultiplier);  in xnn_q8_igemm_ukernel_4x4c2__sse2()
  203  const __m128i vabsprod2x02 = _mm_mul_epu32(vabsacc2x0123, vmultiplier);  in xnn_q8_igemm_ukernel_4x4c2__sse2()
  204  const __m128i vabsprod3x02 = _mm_mul_epu32(vabsacc3x0123, vmultiplier);  in xnn_q8_igemm_ukernel_4x4c2__sse2()
  221  const __m128i vabsprod0x13 = _mm_mul_epu32(vabsacc0x1032, vmultiplier);  in xnn_q8_igemm_ukernel_4x4c2__sse2()
  222  const __m128i vabsprod1x13 = _mm_mul_epu32(vabsacc1x1032, vmultiplier);  in xnn_q8_igemm_ukernel_4x4c2__sse2()
  223  const __m128i vabsprod2x13 = _mm_mul_epu32(vabsacc2x1032, vmultiplier);  in xnn_q8_igemm_ukernel_4x4c2__sse2()
  224  const __m128i vabsprod3x13 = _mm_mul_epu32(vabsacc3x1032, vmultiplier);  in xnn_q8_igemm_ukernel_4x4c2__sse2()
/external/XNNPACK/src/q8-dwconv/
D | up8x9-sse2.c
  141  const __m128i vabsprod_lo02 = _mm_mul_epu32(vabsacc_lo0123, vmultiplier);  in xnn_q8_dwconv_ukernel_up8x9__sse2()
  142  const __m128i vabsprod_hi02 = _mm_mul_epu32(vabsacc_hi0123, vmultiplier);  in xnn_q8_dwconv_ukernel_up8x9__sse2()
  153  const __m128i vabsprod_lo13 = _mm_mul_epu32(vabsacc_lo1032, vmultiplier);  in xnn_q8_dwconv_ukernel_up8x9__sse2()
  154  const __m128i vabsprod_hi13 = _mm_mul_epu32(vabsacc_hi1032, vmultiplier);  in xnn_q8_dwconv_ukernel_up8x9__sse2()
  291  const __m128i vabsprod_lo02 = _mm_mul_epu32(vabsacc_lo0123, vmultiplier);  in xnn_q8_dwconv_ukernel_up8x9__sse2()
  292  const __m128i vabsprod_hi02 = _mm_mul_epu32(vabsacc_hi0123, vmultiplier);  in xnn_q8_dwconv_ukernel_up8x9__sse2()
  303  const __m128i vabsprod_lo13 = _mm_mul_epu32(vabsacc_lo1032, vmultiplier);  in xnn_q8_dwconv_ukernel_up8x9__sse2()
  304  const __m128i vabsprod_hi13 = _mm_mul_epu32(vabsacc_hi1032, vmultiplier);  in xnn_q8_dwconv_ukernel_up8x9__sse2()
/external/libaom/libaom/aom_dsp/x86/
D | fwd_txfm_sse2.h
  21  buf0 = _mm_mul_epu32(a, b);  in k_madd_epi32()
  24  buf1 = _mm_mul_epu32(a, b);  in k_madd_epi32()
D | highbd_adaptive_quantize_sse2.c
  30  __m128i prod_lo = _mm_mul_epu32(*x, abs_y);  in highbd_mul_shift_sse2()
  33  prod_hi = _mm_mul_epu32(prod_hi, mult_hi);  in highbd_mul_shift_sse2()
/external/skia/include/private/
D | SkNx_sse.h
  236  __m128i mul20 = _mm_mul_epu32(a, b),  in mullo32()
  237  mul31 = _mm_mul_epu32(_mm_srli_si128(a, 4), _mm_srli_si128(b, 4));  in mullo32()
  404  SkNx v20{_mm_mul_epu32(m.fVec, fVec)};  in mulHi()
  405  SkNx v31{_mm_mul_epu32(_mm_srli_si128(m.fVec, 4), _mm_srli_si128(fVec, 4))};  in mulHi()
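Note: mulHi is the companion trick to mullo32: the same even/odd multiplies, but keeping dword 1 of each 64-bit product, i.e. the upper halves of the unsigned products. A sketch in the same spirit (not Skia's SkNx code):

    #include <emmintrin.h>

    /* Sketch: per-lane upper 32 bits of an unsigned 32x32 multiply. */
    static __m128i mulhi_epu32_sse2(__m128i a, __m128i b) {
      __m128i even = _mm_mul_epu32(a, b);                /* a0*b0, a2*b2 */
      __m128i odd  = _mm_mul_epu32(_mm_srli_si128(a, 4), /* a1*b1, a3*b3 */
                                   _mm_srli_si128(b, 4));
      /* Keep dword 1 (the high half) of each product: {h0, h2}, {h1, h3}. */
      even = _mm_shuffle_epi32(even, _MM_SHUFFLE(0, 0, 3, 1));
      odd  = _mm_shuffle_epi32(odd, _MM_SHUFFLE(0, 0, 3, 1));
      return _mm_unpacklo_epi32(even, odd);              /* h0, h1, h2, h3 */
    }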
/external/skqp/include/private/
D | SkNx_sse.h
  236  __m128i mul20 = _mm_mul_epu32(a, b),  in mullo32()
  237  mul31 = _mm_mul_epu32(_mm_srli_si128(a, 4), _mm_srli_si128(b, 4));  in mullo32()
  404  SkNx v20{_mm_mul_epu32(m.fVec, fVec)};  in mulHi()
  405  SkNx v31{_mm_mul_epu32(_mm_srli_si128(m.fVec, 4), _mm_srli_si128(fVec, 4))};  in mulHi()
/external/libaom/libaom/aom_dsp/simd/
D | v64_intrinsics_x86.h
  366  _mm_mul_epu32(a, b),  in v64_mullo_s32()
  367  _mm_mul_epu32(_mm_srli_si128(a, 4), _mm_srli_si128(b, 4)));  in v64_mullo_s32()