
Searched refs:_mm_xor_si128 (Results 1 – 25 of 180) sorted by relevance


/external/angle/third_party/zlib/
crc32_simd.c
48 x1 = _mm_xor_si128(x1, _mm_cvtsi32_si128(crc)); in crc32_sse42_simd_()
75 x1 = _mm_xor_si128(x1, x5); in crc32_sse42_simd_()
76 x2 = _mm_xor_si128(x2, x6); in crc32_sse42_simd_()
77 x3 = _mm_xor_si128(x3, x7); in crc32_sse42_simd_()
78 x4 = _mm_xor_si128(x4, x8); in crc32_sse42_simd_()
80 x1 = _mm_xor_si128(x1, y5); in crc32_sse42_simd_()
81 x2 = _mm_xor_si128(x2, y6); in crc32_sse42_simd_()
82 x3 = _mm_xor_si128(x3, y7); in crc32_sse42_simd_()
83 x4 = _mm_xor_si128(x4, y8); in crc32_sse42_simd_()
96 x1 = _mm_xor_si128(x1, x2); in crc32_sse42_simd_()
[all …]
crc_folding.c
241 xmm_shr = _mm_xor_si128(xmm_shr, xmm_mask3); in partial_fold()
314 xmm_crc0 = _mm_xor_si128(xmm_crc0, xmm_t0); in crc_fold_copy()
315 xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t1); in crc_fold_copy()
316 xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t2); in crc_fold_copy()
317 xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t3); in crc_fold_copy()
339 xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t0); in crc_fold_copy()
340 xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t1); in crc_fold_copy()
341 xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t2); in crc_fold_copy()
359 xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t0); in crc_fold_copy()
360 xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t1); in crc_fold_copy()
[all …]
/external/zlib/
crc32_simd.c
48 x1 = _mm_xor_si128(x1, _mm_cvtsi32_si128(crc)); in crc32_sse42_simd_()
75 x1 = _mm_xor_si128(x1, x5); in crc32_sse42_simd_()
76 x2 = _mm_xor_si128(x2, x6); in crc32_sse42_simd_()
77 x3 = _mm_xor_si128(x3, x7); in crc32_sse42_simd_()
78 x4 = _mm_xor_si128(x4, x8); in crc32_sse42_simd_()
80 x1 = _mm_xor_si128(x1, y5); in crc32_sse42_simd_()
81 x2 = _mm_xor_si128(x2, y6); in crc32_sse42_simd_()
82 x3 = _mm_xor_si128(x3, y7); in crc32_sse42_simd_()
83 x4 = _mm_xor_si128(x4, y8); in crc32_sse42_simd_()
96 x1 = _mm_xor_si128(x1, x2); in crc32_sse42_simd_()
[all …]
crc_folding.c
241 xmm_shr = _mm_xor_si128(xmm_shr, xmm_mask3); in partial_fold()
314 xmm_crc0 = _mm_xor_si128(xmm_crc0, xmm_t0); in crc_fold_copy()
315 xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t1); in crc_fold_copy()
316 xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t2); in crc_fold_copy()
317 xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t3); in crc_fold_copy()
339 xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t0); in crc_fold_copy()
340 xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t1); in crc_fold_copy()
341 xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t2); in crc_fold_copy()
359 xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t0); in crc_fold_copy()
360 xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t1); in crc_fold_copy()
[all …]
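The zlib hits above are all from the PCLMULQDQ-based CRC-32 folding kernels, where _mm_xor_si128 adds partial products in GF(2) and mixes in the next block of input. Below is a minimal sketch of one fold step, assuming SSE2 plus PCLMULQDQ; fold_k and the imm8 selectors are placeholders rather than zlib's actual folding constants.

```c
#include <emmintrin.h>   /* SSE2: _mm_xor_si128 */
#include <wmmintrin.h>   /* PCLMULQDQ: _mm_clmulepi64_si128 */

/* One CRC folding step (sketch): carry-less multiply the running 128-bit
 * remainder by the folding constants, then XOR in 16 new input bytes.
 * fold_k stands in for the two 64-bit constants derived from the CRC
 * polynomial; their values are omitted here. */
static inline __m128i crc_fold_step(__m128i crc, __m128i data, __m128i fold_k) {
    __m128i lo = _mm_clmulepi64_si128(crc, fold_k, 0x00); /* low halves  */
    __m128i hi = _mm_clmulepi64_si128(crc, fold_k, 0x11); /* high halves */
    /* addition in GF(2) is XOR, so the partial products and the new data
     * are all combined with _mm_xor_si128 */
    return _mm_xor_si128(_mm_xor_si128(lo, hi), data);
}
```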
/external/lzma/C/
AesOpt.c
25 m = _mm_xor_si128(m, *data); in AesCbc_Encode_Intel()
26 m = _mm_xor_si128(m, p[2]); in AesCbc_Encode_Intel()
65 m0 = _mm_xor_si128(t, data[0]); in AesCbc_Decode_Intel()
66 m1 = _mm_xor_si128(t, data[1]); in AesCbc_Decode_Intel()
67 m2 = _mm_xor_si128(t, data[2]); in AesCbc_Decode_Intel()
82 t = _mm_xor_si128(m0, iv); iv = data[0]; data[0] = t; in AesCbc_Decode_Intel()
83 t = _mm_xor_si128(m1, iv); iv = data[1]; data[1] = t; in AesCbc_Decode_Intel()
84 t = _mm_xor_si128(m2, iv); iv = data[2]; data[2] = t; in AesCbc_Decode_Intel()
91 __m128i m = _mm_xor_si128(w[2], *data); in AesCbc_Decode_Intel()
103 m = _mm_xor_si128(m, iv); in AesCbc_Decode_Intel()
[all …]
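The LZMA SDK hits are the CBC chaining XORs around the AES-NI rounds: on encryption the plaintext block is XORed with the previous ciphertext before the rounds, and on decryption the decrypted block is XORed with it afterwards. A minimal AES-128 CBC encryption step is sketched below, assuming an already-expanded key schedule rk[0..10]; the names are illustrative, not the SDK's.

```c
#include <wmmintrin.h>   /* AES-NI: _mm_aesenc_si128, _mm_aesenclast_si128 */

/* Encrypt one 16-byte block in CBC mode with AES-128 (sketch).
 * rk[0..10] is an expanded AES-128 key schedule; prev is the previous
 * ciphertext block (or the IV for the first block). */
static inline __m128i aes128_cbc_encrypt_block(__m128i plain, __m128i prev,
                                               const __m128i rk[11]) {
    __m128i m = _mm_xor_si128(plain, prev);    /* CBC chaining XOR */
    m = _mm_xor_si128(m, rk[0]);               /* initial AddRoundKey */
    for (int r = 1; r < 10; r++)
        m = _mm_aesenc_si128(m, rk[r]);        /* rounds 1..9 */
    return _mm_aesenclast_si128(m, rk[10]);    /* final round */
}
```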
/external/scrypt/lib/crypto/
crypto_scrypt-sse.c
77 D[i] = _mm_xor_si128(D[i], S[i]); in blkxor()
99 X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 7)); in salsa20_8()
100 X1 = _mm_xor_si128(X1, _mm_srli_epi32(T, 25)); in salsa20_8()
102 X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9)); in salsa20_8()
103 X2 = _mm_xor_si128(X2, _mm_srli_epi32(T, 23)); in salsa20_8()
105 X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 13)); in salsa20_8()
106 X3 = _mm_xor_si128(X3, _mm_srli_epi32(T, 19)); in salsa20_8()
108 X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18)); in salsa20_8()
109 X0 = _mm_xor_si128(X0, _mm_srli_epi32(T, 14)); in salsa20_8()
118 X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 7)); in salsa20_8()
[all …]
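In the scrypt kernel, blkxor XORs whole 128-bit blocks, while salsa20_8 pairs _mm_slli_epi32/_mm_srli_epi32 with _mm_xor_si128 to emulate the per-lane rotates that SSE2 lacks. A sketch of one such quarter-round step, with variable names that follow the snippet but are otherwise illustrative:

```c
#include <emmintrin.h>

/* One Salsa20 quarter-round step, X1 ^= (X0 + X3) <<< 7, vectorized.
 * SSE2 has no per-lane rotate, so it is built from two shifts and two XORs. */
static inline __m128i salsa20_step(__m128i X0, __m128i X1, __m128i X3) {
    const __m128i T = _mm_add_epi32(X0, X3);
    X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 7));
    X1 = _mm_xor_si128(X1, _mm_srli_epi32(T, 25));
    return X1;
}
```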
/external/rust/crates/libz-sys/src/zlib-ng/arch/x86/
crc_folding.c
197 xmm_shr = _mm_xor_si128(xmm_shr, xmm_mask3); in partial_fold()
281 xmm_crc0 = _mm_xor_si128(xmm_crc0, xmm_t0); in crc_fold_copy()
282 xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t1); in crc_fold_copy()
283 xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t2); in crc_fold_copy()
284 xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t3); in crc_fold_copy()
306 xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t0); in crc_fold_copy()
307 xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t1); in crc_fold_copy()
308 xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t2); in crc_fold_copy()
326 xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t0); in crc_fold_copy()
327 xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t1); in crc_fold_copy()
[all …]
/external/python/cpython3/Modules/_blake2/impl/
blake2b-round.h
36 : (-(c) == 63) ? _mm_xor_si128(_mm_srli_epi64((x), -(c)), _mm_add_epi64((x), (x))) \
37 : _mm_xor_si128(_mm_srli_epi64((x), -(c)), _mm_slli_epi64((x), 64-(-(c))))
39 #define _mm_roti_epi64(r, c) _mm_xor_si128(_mm_srli_epi64( (r), -(c) ),_mm_slli_epi64( (r), 64-(-(c…
51 row4l = _mm_xor_si128(row4l, row1l); \
52 row4h = _mm_xor_si128(row4h, row1h); \
60 row2l = _mm_xor_si128(row2l, row3l); \
61 row2h = _mm_xor_si128(row2h, row3h); \
70 row4l = _mm_xor_si128(row4l, row1l); \
71 row4h = _mm_xor_si128(row4h, row1h); \
79 row2l = _mm_xor_si128(row2l, row3l); \
[all …]
blake2s-round.h
35 : _mm_xor_si128(_mm_srli_epi32( (r), -(c) ),_mm_slli_epi32( (r), 32-(-(c)) )) )
37 #define _mm_roti_epi32(r, c) _mm_xor_si128(_mm_srli_epi32( (r), -(c) ),_mm_slli_epi32( (r), 32-(-(c…
46 row4 = _mm_xor_si128( row4, row1 ); \
49 row2 = _mm_xor_si128( row2, row3 ); \
54 row4 = _mm_xor_si128( row4, row1 ); \
57 row2 = _mm_xor_si128( row2, row3 ); \
blake2b.c
325 row4l = _mm_xor_si128( LOADU( &blake2b_IV[4] ), LOADU( &S->t[0] ) ); in blake2b_compress()
326 row4h = _mm_xor_si128( LOADU( &blake2b_IV[6] ), LOADU( &S->f[0] ) ); in blake2b_compress()
339 row1l = _mm_xor_si128( row3l, row1l ); in blake2b_compress()
340 row1h = _mm_xor_si128( row3h, row1h ); in blake2b_compress()
341 STOREU( &S->h[0], _mm_xor_si128( LOADU( &S->h[0] ), row1l ) ); in blake2b_compress()
342 STOREU( &S->h[2], _mm_xor_si128( LOADU( &S->h[2] ), row1h ) ); in blake2b_compress()
343 row2l = _mm_xor_si128( row4l, row2l ); in blake2b_compress()
344 row2h = _mm_xor_si128( row4h, row2h ); in blake2b_compress()
345 STOREU( &S->h[4], _mm_xor_si128( LOADU( &S->h[4] ), row2l ) ); in blake2b_compress()
346 STOREU( &S->h[6], _mm_xor_si128( LOADU( &S->h[6] ), row2h ) ); in blake2b_compress()
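The BLAKE2 headers define _mm_roti_epi64/_mm_roti_epi32 fallbacks that build a rotate from a right shift, a left shift, and an XOR, and the compression function XORs rows together before each rotation. A sketch of the 64-bit fallback and one G-function step, assuming plain SSE2 (the real macros special-case some rotation amounts):

```c
#include <emmintrin.h>

/* SSE2 fallback rotate: rotate each 64-bit lane right by a constant c. */
#define ROTR64(x, c) \
    _mm_xor_si128(_mm_srli_epi64((x), (c)), _mm_slli_epi64((x), 64 - (c)))

/* One BLAKE2b G-function step: d = (d ^ a) >>> 32, applied to a row pair. */
static inline __m128i g_step(__m128i row1, __m128i row4) {
    return ROTR64(_mm_xor_si128(row4, row1), 32);
}
```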
/external/XNNPACK/src/qs8-requantization/
q31-sse2.c
67 const __m128i x_abs = _mm_sub_epi32(_mm_xor_si128(x, x_neg_mask), x_neg_mask); in xnn_qs8_requantize_q31__sse2()
68 const __m128i y_abs = _mm_sub_epi32(_mm_xor_si128(y, y_neg_mask), y_neg_mask); in xnn_qs8_requantize_q31__sse2()
69 const __m128i z_abs = _mm_sub_epi32(_mm_xor_si128(z, z_neg_mask), z_neg_mask); in xnn_qs8_requantize_q31__sse2()
70 const __m128i w_abs = _mm_sub_epi32(_mm_xor_si128(w, w_neg_mask), w_neg_mask); in xnn_qs8_requantize_q31__sse2()
87 …const __m128i x_product_even = _mm_sub_epi64(_mm_xor_si128(x_abs_product_even, x_neg_mask_even), x… in xnn_qs8_requantize_q31__sse2()
88 …const __m128i y_product_even = _mm_sub_epi64(_mm_xor_si128(y_abs_product_even, y_neg_mask_even), y… in xnn_qs8_requantize_q31__sse2()
89 …const __m128i z_product_even = _mm_sub_epi64(_mm_xor_si128(z_abs_product_even, z_neg_mask_even), z… in xnn_qs8_requantize_q31__sse2()
90 …const __m128i w_product_even = _mm_sub_epi64(_mm_xor_si128(w_abs_product_even, w_neg_mask_even), w… in xnn_qs8_requantize_q31__sse2()
107 …const __m128i x_product_odd = _mm_sub_epi64(_mm_xor_si128(x_abs_product_odd, x_neg_mask_odd), x_ne… in xnn_qs8_requantize_q31__sse2()
108 …const __m128i y_product_odd = _mm_sub_epi64(_mm_xor_si128(y_abs_product_odd, y_neg_mask_odd), y_ne… in xnn_qs8_requantize_q31__sse2()
[all …]
precise-sse2.c
58 const __m128i x_abs0123 = _mm_sub_epi32(_mm_xor_si128(x, x_neg_mask), x_neg_mask); in xnn_qs8_requantize_precise__sse2()
59 const __m128i y_abs0123 = _mm_sub_epi32(_mm_xor_si128(y, y_neg_mask), y_neg_mask); in xnn_qs8_requantize_precise__sse2()
60 const __m128i z_abs0123 = _mm_sub_epi32(_mm_xor_si128(z, z_neg_mask), z_neg_mask); in xnn_qs8_requantize_precise__sse2()
61 const __m128i w_abs0123 = _mm_sub_epi32(_mm_xor_si128(w, w_neg_mask), w_neg_mask); in xnn_qs8_requantize_precise__sse2()
101 const __m128i x_scaled = _mm_sub_epi32(_mm_xor_si128(x_abs_scaled, x_neg_mask), x_neg_mask); in xnn_qs8_requantize_precise__sse2()
102 const __m128i y_scaled = _mm_sub_epi32(_mm_xor_si128(y_abs_scaled, y_neg_mask), y_neg_mask); in xnn_qs8_requantize_precise__sse2()
103 const __m128i z_scaled = _mm_sub_epi32(_mm_xor_si128(z_abs_scaled, z_neg_mask), z_neg_mask); in xnn_qs8_requantize_precise__sse2()
104 const __m128i w_scaled = _mm_sub_epi32(_mm_xor_si128(w_abs_scaled, w_neg_mask), w_neg_mask); in xnn_qs8_requantize_precise__sse2()
q31-ssse3.c
87 …const __m128i x_product_even = _mm_sub_epi64(_mm_xor_si128(x_abs_product_even, x_neg_mask_even), x… in xnn_qs8_requantize_q31__ssse3()
88 …const __m128i y_product_even = _mm_sub_epi64(_mm_xor_si128(y_abs_product_even, y_neg_mask_even), y… in xnn_qs8_requantize_q31__ssse3()
89 …const __m128i z_product_even = _mm_sub_epi64(_mm_xor_si128(z_abs_product_even, z_neg_mask_even), z… in xnn_qs8_requantize_q31__ssse3()
90 …const __m128i w_product_even = _mm_sub_epi64(_mm_xor_si128(w_abs_product_even, w_neg_mask_even), w… in xnn_qs8_requantize_q31__ssse3()
107 …const __m128i x_product_odd = _mm_sub_epi64(_mm_xor_si128(x_abs_product_odd, x_neg_mask_odd), x_ne… in xnn_qs8_requantize_q31__ssse3()
108 …const __m128i y_product_odd = _mm_sub_epi64(_mm_xor_si128(y_abs_product_odd, y_neg_mask_odd), y_ne… in xnn_qs8_requantize_q31__ssse3()
109 …const __m128i z_product_odd = _mm_sub_epi64(_mm_xor_si128(z_abs_product_odd, z_neg_mask_odd), z_ne… in xnn_qs8_requantize_q31__ssse3()
110 …const __m128i w_product_odd = _mm_sub_epi64(_mm_xor_si128(w_abs_product_odd, w_neg_mask_odd), w_ne… in xnn_qs8_requantize_q31__ssse3()
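The XNNPACK requantization kernels (and the qu8, gavgpool, and libvpx quantizer hits below) rely on the same SSE2 sign-magnitude idiom: with mask holding all-ones in negative lanes, (x ^ mask) - mask yields |x|, and applying the identical transform again restores the sign. A minimal sketch:

```c
#include <emmintrin.h>

/* Per-lane |x| for signed 32-bit integers on plain SSE2 (no SSSE3
 * _mm_abs_epi32): (x ^ mask) - mask, where mask is all-ones in negative
 * lanes. Running the same transform again with the saved mask restores
 * the original signs. */
static inline __m128i abs_epi32_sse2(__m128i x, __m128i *neg_mask_out) {
    const __m128i neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), x);
    *neg_mask_out = neg_mask;
    return _mm_sub_epi32(_mm_xor_si128(x, neg_mask), neg_mask);
}
```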
/external/XNNPACK/src/qu8-requantization/
q31-sse2.c
67 const __m128i x_abs = _mm_sub_epi32(_mm_xor_si128(x, x_neg_mask), x_neg_mask); in xnn_qu8_requantize_q31__sse2()
68 const __m128i y_abs = _mm_sub_epi32(_mm_xor_si128(y, y_neg_mask), y_neg_mask); in xnn_qu8_requantize_q31__sse2()
69 const __m128i z_abs = _mm_sub_epi32(_mm_xor_si128(z, z_neg_mask), z_neg_mask); in xnn_qu8_requantize_q31__sse2()
70 const __m128i w_abs = _mm_sub_epi32(_mm_xor_si128(w, w_neg_mask), w_neg_mask); in xnn_qu8_requantize_q31__sse2()
87 …const __m128i x_product_even = _mm_sub_epi64(_mm_xor_si128(x_abs_product_even, x_neg_mask_even), x… in xnn_qu8_requantize_q31__sse2()
88 …const __m128i y_product_even = _mm_sub_epi64(_mm_xor_si128(y_abs_product_even, y_neg_mask_even), y… in xnn_qu8_requantize_q31__sse2()
89 …const __m128i z_product_even = _mm_sub_epi64(_mm_xor_si128(z_abs_product_even, z_neg_mask_even), z… in xnn_qu8_requantize_q31__sse2()
90 …const __m128i w_product_even = _mm_sub_epi64(_mm_xor_si128(w_abs_product_even, w_neg_mask_even), w… in xnn_qu8_requantize_q31__sse2()
107 …const __m128i x_product_odd = _mm_sub_epi64(_mm_xor_si128(x_abs_product_odd, x_neg_mask_odd), x_ne… in xnn_qu8_requantize_q31__sse2()
108 …const __m128i y_product_odd = _mm_sub_epi64(_mm_xor_si128(y_abs_product_odd, y_neg_mask_odd), y_ne… in xnn_qu8_requantize_q31__sse2()
[all …]
precise-sse2.c
58 const __m128i x_abs0123 = _mm_sub_epi32(_mm_xor_si128(x, x_neg_mask), x_neg_mask); in xnn_qu8_requantize_precise__sse2()
59 const __m128i y_abs0123 = _mm_sub_epi32(_mm_xor_si128(y, y_neg_mask), y_neg_mask); in xnn_qu8_requantize_precise__sse2()
60 const __m128i z_abs0123 = _mm_sub_epi32(_mm_xor_si128(z, z_neg_mask), z_neg_mask); in xnn_qu8_requantize_precise__sse2()
61 const __m128i w_abs0123 = _mm_sub_epi32(_mm_xor_si128(w, w_neg_mask), w_neg_mask); in xnn_qu8_requantize_precise__sse2()
101 const __m128i x_scaled = _mm_sub_epi32(_mm_xor_si128(x_abs_scaled, x_neg_mask), x_neg_mask); in xnn_qu8_requantize_precise__sse2()
102 const __m128i y_scaled = _mm_sub_epi32(_mm_xor_si128(y_abs_scaled, y_neg_mask), y_neg_mask); in xnn_qu8_requantize_precise__sse2()
103 const __m128i z_scaled = _mm_sub_epi32(_mm_xor_si128(z_abs_scaled, z_neg_mask), z_neg_mask); in xnn_qu8_requantize_precise__sse2()
104 const __m128i w_scaled = _mm_sub_epi32(_mm_xor_si128(w_abs_scaled, w_neg_mask), w_neg_mask); in xnn_qu8_requantize_precise__sse2()
q31-ssse3.c
87 …const __m128i x_product_even = _mm_sub_epi64(_mm_xor_si128(x_abs_product_even, x_neg_mask_even), x… in xnn_qu8_requantize_q31__ssse3()
88 …const __m128i y_product_even = _mm_sub_epi64(_mm_xor_si128(y_abs_product_even, y_neg_mask_even), y… in xnn_qu8_requantize_q31__ssse3()
89 …const __m128i z_product_even = _mm_sub_epi64(_mm_xor_si128(z_abs_product_even, z_neg_mask_even), z… in xnn_qu8_requantize_q31__ssse3()
90 …const __m128i w_product_even = _mm_sub_epi64(_mm_xor_si128(w_abs_product_even, w_neg_mask_even), w… in xnn_qu8_requantize_q31__ssse3()
107 …const __m128i x_product_odd = _mm_sub_epi64(_mm_xor_si128(x_abs_product_odd, x_neg_mask_odd), x_ne… in xnn_qu8_requantize_q31__ssse3()
108 …const __m128i y_product_odd = _mm_sub_epi64(_mm_xor_si128(y_abs_product_odd, y_neg_mask_odd), y_ne… in xnn_qu8_requantize_q31__ssse3()
109 …const __m128i z_product_odd = _mm_sub_epi64(_mm_xor_si128(z_abs_product_odd, z_neg_mask_odd), z_ne… in xnn_qu8_requantize_q31__ssse3()
110 …const __m128i w_product_odd = _mm_sub_epi64(_mm_xor_si128(w_abs_product_odd, w_neg_mask_odd), w_ne… in xnn_qu8_requantize_q31__ssse3()
/external/libvpx/libvpx/vp8/encoder/x86/
vp8_quantize_sse2.c
66 x0 = _mm_xor_si128(z0, sz0); in vp8_regular_quantize_b_sse2()
67 x1 = _mm_xor_si128(z1, sz1); in vp8_regular_quantize_b_sse2()
101 y0 = _mm_xor_si128(y0, sz0); in vp8_regular_quantize_b_sse2()
102 y1 = _mm_xor_si128(y1, sz1); in vp8_regular_quantize_b_sse2()
163 x0 = _mm_xor_si128(z0, sz0); in vp8_fast_quantize_b_sse2()
164 x1 = _mm_xor_si128(z1, sz1); in vp8_fast_quantize_b_sse2()
177 y0 = _mm_xor_si128(y0, sz0); in vp8_fast_quantize_b_sse2()
178 y1 = _mm_xor_si128(y1, sz1); in vp8_fast_quantize_b_sse2()
202 x0 = _mm_xor_si128(x0, ones); in vp8_fast_quantize_b_sse2()
203 x1 = _mm_xor_si128(x1, ones); in vp8_fast_quantize_b_sse2()
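The libvpx VP8 (and, below, VP9) quantizers use the 16-bit flavor of that idiom, stripping the coefficient sign before the unsigned quantization math and XOR/subtracting it back onto the result afterwards. A sketch with illustrative names, not libvpx's:

```c
#include <emmintrin.h>

/* Strip the sign of 16-bit coefficients before unsigned quantization. */
static inline __m128i strip_sign_epi16(__m128i z, __m128i *sign_out) {
    const __m128i sign = _mm_srai_epi16(z, 15);          /* all-ones if negative */
    *sign_out = sign;
    return _mm_sub_epi16(_mm_xor_si128(z, sign), sign);  /* |z| */
}

/* Put the saved sign back onto the quantized magnitude. */
static inline __m128i restore_sign_epi16(__m128i y, __m128i sign) {
    return _mm_sub_epi16(_mm_xor_si128(y, sign), sign);
}
```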
/external/rust/crates/quiche/deps/boringssl/src/crypto/fipsmodule/modes/
gcm_nohw.c
100 _mm_xor_si128(_mm_mul_epu32(a0a0, b0b1), _mm_mul_epu32(a2a2, b2b3)); in gcm_mul32_nohw()
102 _mm_xor_si128(_mm_mul_epu32(a2a2, b0b1), _mm_mul_epu32(a0a0, b2b3)); in gcm_mul32_nohw()
113 c0c1 = _mm_xor_si128(c0c1, _mm_mul_epu32(a1a1, b3b0)); in gcm_mul32_nohw()
114 c0c1 = _mm_xor_si128(c0c1, _mm_mul_epu32(a3a3, b1b2)); in gcm_mul32_nohw()
115 c2c3 = _mm_xor_si128(c2c3, _mm_mul_epu32(a3a3, b3b0)); in gcm_mul32_nohw()
116 c2c3 = _mm_xor_si128(c2c3, _mm_mul_epu32(a1a1, b1b2)); in gcm_mul32_nohw()
123 c0c1 = _mm_xor_si128(c0c1, c2c3); in gcm_mul32_nohw()
125 c0c1 = _mm_xor_si128(c0c1, _mm_srli_si128(c0c1, 8)); in gcm_mul32_nohw()
139 mid = _mm_xor_si128(mid, lo); in gcm_mul64_nohw()
140 mid = _mm_xor_si128(mid, hi); in gcm_mul64_nohw()
[all …]
/external/boringssl/src/crypto/fipsmodule/modes/
gcm_nohw.c
100 _mm_xor_si128(_mm_mul_epu32(a0a0, b0b1), _mm_mul_epu32(a2a2, b2b3)); in gcm_mul32_nohw()
102 _mm_xor_si128(_mm_mul_epu32(a2a2, b0b1), _mm_mul_epu32(a0a0, b2b3)); in gcm_mul32_nohw()
113 c0c1 = _mm_xor_si128(c0c1, _mm_mul_epu32(a1a1, b3b0)); in gcm_mul32_nohw()
114 c0c1 = _mm_xor_si128(c0c1, _mm_mul_epu32(a3a3, b1b2)); in gcm_mul32_nohw()
115 c2c3 = _mm_xor_si128(c2c3, _mm_mul_epu32(a3a3, b3b0)); in gcm_mul32_nohw()
116 c2c3 = _mm_xor_si128(c2c3, _mm_mul_epu32(a1a1, b1b2)); in gcm_mul32_nohw()
123 c0c1 = _mm_xor_si128(c0c1, c2c3); in gcm_mul32_nohw()
125 c0c1 = _mm_xor_si128(c0c1, _mm_srli_si128(c0c1, 8)); in gcm_mul32_nohw()
139 mid = _mm_xor_si128(mid, lo); in gcm_mul64_nohw()
140 mid = _mm_xor_si128(mid, hi); in gcm_mul64_nohw()
[all …]
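gcm_nohw.c forms GHASH products without PCLMULQDQ by splitting the operands, multiplying pieces with _mm_mul_epu32, and combining the partial products with _mm_xor_si128, because addition in GF(2) is XOR. Below is a scalar reference for the underlying carry-less multiplication; it sketches the arithmetic only, not BoringSSL's operand-masking scheme.

```c
#include <stdint.h>

/* Carry-less (GF(2)[x]) multiplication of two 32-bit polynomials: partial
 * products are combined with XOR instead of integer addition, which is the
 * role _mm_xor_si128 plays in the vectorized code above. */
static uint64_t clmul32(uint32_t a, uint32_t b) {
    uint64_t acc = 0;
    for (int i = 0; i < 32; i++) {
        if ((b >> i) & 1)
            acc ^= (uint64_t)a << i;
    }
    return acc;
}
```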
/external/libvpx/libvpx/vp9/encoder/x86/
vp9_quantize_sse2.c
64 qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign); in vp9_quantize_fp_sse2()
65 qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign); in vp9_quantize_fp_sse2()
77 qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign); in vp9_quantize_fp_sse2()
78 qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign); in vp9_quantize_fp_sse2()
131 qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign); in vp9_quantize_fp_sse2()
132 qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign); in vp9_quantize_fp_sse2()
146 qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign); in vp9_quantize_fp_sse2()
147 qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign); in vp9_quantize_fp_sse2()
/external/OpenCL-CTS/test_common/harness/
mt19937.cpp
159 _mm_xor_si128(_mm_loadu_si128((__m128i *)(mt + kk + M)), in genrand_int32()
162 vr = _mm_xor_si128(vr, vmag01); in genrand_int32()
196 _mm_xor_si128(_mm_loadu_si128((__m128i *)(mt + kk + M - N)), in genrand_int32()
199 vr = _mm_xor_si128(vr, vmag01); in genrand_int32()
219 vy = _mm_xor_si128(vy, _mm_srli_epi32(vy, 11)); in genrand_int32()
221 vy = _mm_xor_si128(vy, _mm_and_si128(_mm_slli_epi32(vy, 7), c0.v)); in genrand_int32()
223 vy = _mm_xor_si128(vy, _mm_and_si128(_mm_slli_epi32(vy, 15), c1.v)); in genrand_int32()
225 vy = _mm_xor_si128(vy, _mm_srli_epi32(vy, 18)); in genrand_int32()
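The OpenCL CTS Mersenne Twister vectorizes the standard MT19937 tempering chain, a sequence of XORs with shifted and masked copies of each state word. A sketch over four outputs at once; the masks are the published MT19937 tempering constants:

```c
#include <emmintrin.h>

/* MT19937 tempering applied to four 32-bit outputs at once (sketch). */
static inline __m128i mt19937_temper4(__m128i y) {
    const __m128i mask_b = _mm_set1_epi32((int)0x9d2c5680u); /* TEMPERING_MASK_B */
    const __m128i mask_c = _mm_set1_epi32((int)0xefc60000u); /* TEMPERING_MASK_C */
    y = _mm_xor_si128(y, _mm_srli_epi32(y, 11));
    y = _mm_xor_si128(y, _mm_and_si128(_mm_slli_epi32(y, 7),  mask_b));
    y = _mm_xor_si128(y, _mm_and_si128(_mm_slli_epi32(y, 15), mask_c));
    y = _mm_xor_si128(y, _mm_srli_epi32(y, 18));
    return y;
}
```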
/external/XNNPACK/src/qs8-gavgpool/gen/
7x-minmax-sse2-c24-acc2.c
151 const __m128i vabsacc0123 = _mm_sub_epi32(_mm_xor_si128(vacc0123, vsgnacc0123), vsgnacc0123); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c24_acc2()
152 const __m128i vabsacc4567 = _mm_sub_epi32(_mm_xor_si128(vacc4567, vsgnacc4567), vsgnacc4567); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c24_acc2()
153 const __m128i vabsacc89AB = _mm_sub_epi32(_mm_xor_si128(vacc89AB, vsgnacc89AB), vsgnacc89AB); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c24_acc2()
154 const __m128i vabsaccCDEF = _mm_sub_epi32(_mm_xor_si128(vaccCDEF, vsgnaccCDEF), vsgnaccCDEF); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c24_acc2()
155 const __m128i vabsaccGHIJ = _mm_sub_epi32(_mm_xor_si128(vaccGHIJ, vsgnaccGHIJ), vsgnaccGHIJ); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c24_acc2()
156 const __m128i vabsaccKLMN = _mm_sub_epi32(_mm_xor_si128(vaccKLMN, vsgnaccKLMN), vsgnaccKLMN); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c24_acc2()
211 const __m128i vout0123 = _mm_sub_epi32(_mm_xor_si128(vabsout0123, vsgnacc0123), vsgnacc0123); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c24_acc2()
212 const __m128i vout4567 = _mm_sub_epi32(_mm_xor_si128(vabsout4567, vsgnacc4567), vsgnacc4567); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c24_acc2()
213 const __m128i vout89AB = _mm_sub_epi32(_mm_xor_si128(vabsout89AB, vsgnacc89AB), vsgnacc89AB); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c24_acc2()
214 const __m128i voutCDEF = _mm_sub_epi32(_mm_xor_si128(vabsoutCDEF, vsgnaccCDEF), vsgnaccCDEF); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c24_acc2()
[all …]
7x-minmax-sse2-c16-acc2.c
126 const __m128i vabsacc0123 = _mm_sub_epi32(_mm_xor_si128(vacc0123, vsgnacc0123), vsgnacc0123); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c16_acc2()
127 const __m128i vabsacc4567 = _mm_sub_epi32(_mm_xor_si128(vacc4567, vsgnacc4567), vsgnacc4567); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c16_acc2()
128 const __m128i vabsacc89AB = _mm_sub_epi32(_mm_xor_si128(vacc89AB, vsgnacc89AB), vsgnacc89AB); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c16_acc2()
129 const __m128i vabsaccCDEF = _mm_sub_epi32(_mm_xor_si128(vaccCDEF, vsgnaccCDEF), vsgnaccCDEF); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c16_acc2()
168 const __m128i vout0123 = _mm_sub_epi32(_mm_xor_si128(vabsout0123, vsgnacc0123), vsgnacc0123); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c16_acc2()
169 const __m128i vout4567 = _mm_sub_epi32(_mm_xor_si128(vabsout4567, vsgnacc4567), vsgnacc4567); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c16_acc2()
170 const __m128i vout89AB = _mm_sub_epi32(_mm_xor_si128(vabsout89AB, vsgnacc89AB), vsgnacc89AB); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c16_acc2()
171 const __m128i voutCDEF = _mm_sub_epi32(_mm_xor_si128(vabsoutCDEF, vsgnaccCDEF), vsgnaccCDEF); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c16_acc2()
231 const __m128i vabsacc0123 = _mm_sub_epi32(_mm_xor_si128(vacc0123, vsgnacc0123), vsgnacc0123); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c16_acc2()
232 const __m128i vabsacc4567 = _mm_sub_epi32(_mm_xor_si128(vacc4567, vsgnacc4567), vsgnacc4567); in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c16_acc2()
[all …]
/external/fec/
viterbi615_sse2.c
143 …m0 = _mm_add_epi16(_mm_xor_si128(Branchtab615[0].v[i],sym0v),_mm_xor_si128(Branchtab615[1].v[i],sy… in update_viterbi615_blk_sse2()
144 …m1 = _mm_add_epi16(_mm_xor_si128(Branchtab615[2].v[i],sym2v),_mm_xor_si128(Branchtab615[3].v[i],sy… in update_viterbi615_blk_sse2()
145 …m2 = _mm_add_epi16(_mm_xor_si128(Branchtab615[4].v[i],sym4v),_mm_xor_si128(Branchtab615[5].v[i],sy… in update_viterbi615_blk_sse2()
/external/rust/crates/crc32fast/src/specialized/
pclmulqdq.rs
107 x3 = arch::_mm_xor_si128(x3, arch::_mm_cvtsi32_si128(!crc as i32)); in calculate()
148 let x = arch::_mm_xor_si128( in calculate()
152 let x = arch::_mm_xor_si128( in calculate()
183 let c = arch::_mm_extract_epi32(arch::_mm_xor_si128(x, t2), 1) as u32; in calculate()
195 arch::_mm_xor_si128(arch::_mm_xor_si128(b, t1), t2) in reduce128()
