
Searched refs: _mm_or_si128 (Results 1 – 25 of 147) sorted by relevance

/external/oboe/samples/RhythmGame/third_party/glm/simd/
integer.h
27 Reg1 = _mm_or_si128(Reg2, Reg1); in glm_i128_interleave()
33 Reg1 = _mm_or_si128(Reg2, Reg1); in glm_i128_interleave()
39 Reg1 = _mm_or_si128(Reg2, Reg1); in glm_i128_interleave()
45 Reg1 = _mm_or_si128(Reg2, Reg1); in glm_i128_interleave()
51 Reg1 = _mm_or_si128(Reg2, Reg1); in glm_i128_interleave()
57 Reg1 = _mm_or_si128(Reg1, Reg2); in glm_i128_interleave()
80 Reg1 = _mm_or_si128(Reg2, Reg1); in glm_i128_interleave2()
86 Reg1 = _mm_or_si128(Reg2, Reg1); in glm_i128_interleave2()
92 Reg1 = _mm_or_si128(Reg2, Reg1); in glm_i128_interleave2()
98 Reg1 = _mm_or_si128(Reg2, Reg1); in glm_i128_interleave2()
[all …]
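
The glm hits above are the classic bit-interleave (Morton code) staircase: each step shifts a copy of the register, masks it, and ORs it back in. A minimal SSE2 sketch of the same technique, using the standard 32-to-64-bit spread masks rather than glm's exact constants:

#include <emmintrin.h>  /* SSE2 */

/* Spread the low 32 bits of each 64-bit lane so that a zero bit separates
   every source bit (the shift/AND/OR staircase seen in glm_i128_interleave). */
static __m128i spread_bits(__m128i x) {
    x = _mm_and_si128(_mm_or_si128(_mm_slli_epi64(x, 16), x),
                      _mm_set1_epi64x(0x0000FFFF0000FFFFLL));
    x = _mm_and_si128(_mm_or_si128(_mm_slli_epi64(x, 8), x),
                      _mm_set1_epi64x(0x00FF00FF00FF00FFLL));
    x = _mm_and_si128(_mm_or_si128(_mm_slli_epi64(x, 4), x),
                      _mm_set1_epi64x(0x0F0F0F0F0F0F0F0FLL));
    x = _mm_and_si128(_mm_or_si128(_mm_slli_epi64(x, 2), x),
                      _mm_set1_epi64x(0x3333333333333333LL));
    x = _mm_and_si128(_mm_or_si128(_mm_slli_epi64(x, 1), x),
                      _mm_set1_epi64x(0x5555555555555555LL));
    return x;
}

/* Interleave: bits of x land in even positions, bits of y in odd ones. */
static __m128i morton_interleave(__m128i x, __m128i y) {
    return _mm_or_si128(spread_bits(x), _mm_slli_epi64(spread_bits(y), 1));
}
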
/external/webp/src/dsp/
common_sse41.h
82 const __m128i RG0 = _mm_or_si128(R0, G0); in VP8PlanarTo24b_SSE41()
83 const __m128i RG1 = _mm_or_si128(R1, G1); in VP8PlanarTo24b_SSE41()
84 const __m128i RG2 = _mm_or_si128(R2, G2); in VP8PlanarTo24b_SSE41()
85 const __m128i RG3 = _mm_or_si128(R3, G3); in VP8PlanarTo24b_SSE41()
86 const __m128i RG4 = _mm_or_si128(R4, G4); in VP8PlanarTo24b_SSE41()
87 const __m128i RG5 = _mm_or_si128(R5, G5); in VP8PlanarTo24b_SSE41()
88 *in0 = _mm_or_si128(RG0, B0); in VP8PlanarTo24b_SSE41()
89 *in1 = _mm_or_si128(RG1, B1); in VP8PlanarTo24b_SSE41()
90 *in2 = _mm_or_si128(RG2, B2); in VP8PlanarTo24b_SSE41()
91 *in3 = _mm_or_si128(RG3, B3); in VP8PlanarTo24b_SSE41()
[all …]
alpha_processing_sse41.c
56 const __m128i c0 = _mm_or_si128(b0, b1); in ExtractAlpha_SSE41()
57 const __m128i c1 = _mm_or_si128(b2, b3); in ExtractAlpha_SSE41()
58 const __m128i d0 = _mm_or_si128(c0, c1); in ExtractAlpha_SSE41()
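
In both webp hits the operands of _mm_or_si128 have been arranged so their nonzero byte lanes never overlap, which makes OR a free interleave/merge. A small sketch of the idea (assumed layout, not libwebp's exact shuffles):

#include <emmintrin.h>

/* Widen 8 R bytes and 8 G bytes into R0 G0 R1 G1 ...: unpacking against
   zero puts R in the even byte lanes and G in the odd ones, so a single OR
   merges them (equivalent to _mm_unpacklo_epi8(r8, g8), written this way
   to show the disjoint-lane OR idiom). */
static __m128i interleave_rg(__m128i r8, __m128i g8) {
    const __m128i zero = _mm_setzero_si128();
    const __m128i r16 = _mm_unpacklo_epi8(r8, zero);  /* R in even bytes */
    const __m128i g16 = _mm_unpacklo_epi8(zero, g8);  /* G in odd bytes  */
    return _mm_or_si128(r16, g16);
}
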
/external/XNNPACK/src/f32-argmaxpool/
9p8x-sse2-c4.c
80 vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
84 vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
88 vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
92 vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, _mm_set1_epi32(4))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
96 vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, _mm_set1_epi32(5))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
100 vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, _mm_set1_epi32(6))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
104 vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, _mm_set1_epi32(7))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
108 vidx = _mm_or_si128(_mm_andnot_si128(vm8, vidx), _mm_and_si128(vm8, _mm_set1_epi32(8))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
165 vidx = _mm_or_si128(_mm_andnot_si128(vm0, vidx), _mm_and_si128(vm0, vidx0)); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
170 vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, vidx1)); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
[all …]
9x-sse2-c4.c
99 vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
103 vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
107 vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
111 vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, _mm_set1_epi32(4))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
115 vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, _mm_set1_epi32(5))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
119 vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, _mm_set1_epi32(6))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
123 vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, _mm_set1_epi32(7))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
127 vidx = _mm_or_si128(_mm_andnot_si128(vm8, vidx), _mm_and_si128(vm8, _mm_set1_epi32(8))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
150 vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
154 vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
[all …]
4x-sse2-c4.c
64 vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1))); in xnn_f32_argmaxpool_ukernel_4x__sse2_c4()
68 vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2))); in xnn_f32_argmaxpool_ukernel_4x__sse2_c4()
72 vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3))); in xnn_f32_argmaxpool_ukernel_4x__sse2_c4()
90 vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1))); in xnn_f32_argmaxpool_ukernel_4x__sse2_c4()
94 vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2))); in xnn_f32_argmaxpool_ukernel_4x__sse2_c4()
98 vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3))); in xnn_f32_argmaxpool_ukernel_4x__sse2_c4()
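
Every vidx update above is the standard SSE2 select idiom: SSE2 has no _mm_blendv_epi8 (that arrived with SSE4.1), so a blend under a mask is spelled andnot/and/or. A sketch, with a hypothetical argmax step around it in the style of these kernels:

#include <emmintrin.h>

/* Bitwise select: take b where mask bits are 1, a where they are 0. */
static __m128i select_si128(__m128i mask, __m128i a, __m128i b) {
    return _mm_or_si128(_mm_andnot_si128(mask, a), _mm_and_si128(mask, b));
}

/* Hypothetical argmax step: where the candidate value beats the running
   max, adopt the candidate index, then update the max. */
static void argmax_step(__m128 *vmax, __m128i *vidx, __m128 v, __m128i idx) {
    const __m128i vm = _mm_castps_si128(_mm_cmpgt_ps(v, *vmax));
    *vidx = select_si128(vm, *vidx, idx);
    *vmax = _mm_max_ps(*vmax, v);
}
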
/external/libvpx/libvpx/vpx_dsp/x86/
loopfilter_avx2.c
53 _mm_or_si128(_mm_subs_epu8(q1p1, q0p0), _mm_subs_epu8(q0p0, q1p1)); in vpx_lpf_horizontal_16_avx2()
58 _mm_or_si128(_mm_subs_epu8(q0p0, p0q0), _mm_subs_epu8(p0q0, q0p0)); in vpx_lpf_horizontal_16_avx2()
60 _mm_or_si128(_mm_subs_epu8(q1p1, p1q1), _mm_subs_epu8(p1q1, q1p1)); in vpx_lpf_horizontal_16_avx2()
75 _mm_or_si128(_mm_subs_epu8(q2p2, q1p1), _mm_subs_epu8(q1p1, q2p2)), in vpx_lpf_horizontal_16_avx2()
76 _mm_or_si128(_mm_subs_epu8(q3p3, q2p2), _mm_subs_epu8(q2p2, q3p3))); in vpx_lpf_horizontal_16_avx2()
131 _mm_or_si128(_mm_subs_epu8(q2p2, q0p0), _mm_subs_epu8(q0p0, q2p2)), in vpx_lpf_horizontal_16_avx2()
132 _mm_or_si128(_mm_subs_epu8(q3p3, q0p0), _mm_subs_epu8(q0p0, q3p3))); in vpx_lpf_horizontal_16_avx2()
148 _mm_or_si128(_mm_subs_epu8(q4p4, q0p0), _mm_subs_epu8(q0p0, q4p4)), in vpx_lpf_horizontal_16_avx2()
149 _mm_or_si128(_mm_subs_epu8(q5p5, q0p0), _mm_subs_epu8(q0p0, q5p5))); in vpx_lpf_horizontal_16_avx2()
156 _mm_or_si128(_mm_subs_epu8(q6p6, q0p0), _mm_subs_epu8(q0p0, q6p6)), in vpx_lpf_horizontal_16_avx2()
[all …]
highbd_loopfilter_sse2.c
40 retval = _mm_andnot_si128(_mm_or_si128(ubounded, lbounded), value); in signed_char_clamp_bd_sse2()
43 retval = _mm_or_si128(retval, ubounded); in signed_char_clamp_bd_sse2()
44 retval = _mm_or_si128(retval, lbounded); in signed_char_clamp_bd_sse2()
104 abs_p1p0 = _mm_or_si128(_mm_subs_epu16(p1, p0), _mm_subs_epu16(p0, p1)); in vpx_highbd_lpf_horizontal_16_sse2()
105 abs_q1q0 = _mm_or_si128(_mm_subs_epu16(q1, q0), _mm_subs_epu16(q0, q1)); in vpx_highbd_lpf_horizontal_16_sse2()
109 abs_p0q0 = _mm_or_si128(_mm_subs_epu16(p0, q0), _mm_subs_epu16(q0, p0)); in vpx_highbd_lpf_horizontal_16_sse2()
110 abs_p1q1 = _mm_or_si128(_mm_subs_epu16(p1, q1), _mm_subs_epu16(q1, p1)); in vpx_highbd_lpf_horizontal_16_sse2()
123 _mm_or_si128(_mm_subs_epu16(p1, p0), _mm_subs_epu16(p0, p1)), in vpx_highbd_lpf_horizontal_16_sse2()
124 _mm_or_si128(_mm_subs_epu16(q1, q0), _mm_subs_epu16(q0, q1))); in vpx_highbd_lpf_horizontal_16_sse2()
127 _mm_or_si128(_mm_subs_epu16(p2, p1), _mm_subs_epu16(p1, p2)), in vpx_highbd_lpf_horizontal_16_sse2()
[all …]
loopfilter_sse2.c
19 return _mm_or_si128(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a)); in abs_diff()
509 q2p2 = _mm_or_si128(q2p2, flat_q2p2); in vpx_lpf_horizontal_16_sse2()
513 q1p1 = _mm_or_si128(qs1ps1, flat_q1p1); in vpx_lpf_horizontal_16_sse2()
517 q0p0 = _mm_or_si128(qs0ps0, flat_q0p0); in vpx_lpf_horizontal_16_sse2()
521 q6p6 = _mm_or_si128(q6p6, flat2_q6p6); in vpx_lpf_horizontal_16_sse2()
527 q5p5 = _mm_or_si128(q5p5, flat2_q5p5); in vpx_lpf_horizontal_16_sse2()
533 q4p4 = _mm_or_si128(q4p4, flat2_q4p4); in vpx_lpf_horizontal_16_sse2()
539 q3p3 = _mm_or_si128(q3p3, flat2_q3p3); in vpx_lpf_horizontal_16_sse2()
545 q2p2 = _mm_or_si128(q2p2, flat2_q2p2); in vpx_lpf_horizontal_16_sse2()
551 q1p1 = _mm_or_si128(q1p1, flat2_q1p1); in vpx_lpf_horizontal_16_sse2()
[all …]
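
The loopfilter hits all lean on one helper, shown at line 19 of loopfilter_sse2.c above: SSE2 has no unsigned absolute difference, but for unsigned lanes one of the two saturating subtractions is zero and the other is |a - b|, so OR combines them losslessly. The highbd variants do the same with _mm_subs_epu16, and the vp8 denoiser further down feeds the result through _mm_min_epu8 to clamp it.

#include <emmintrin.h>

/* |a - b| per unsigned byte lane via two saturating subtractions. */
static __m128i abs_diff_u8(__m128i a, __m128i b) {
    return _mm_or_si128(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a));
}
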
fwd_txfm_sse2.h
40 __m128i cmp0 = _mm_or_si128(_mm_cmpeq_epi16(*preg0, max_overflow), in check_epi16_overflow_x2()
42 __m128i cmp1 = _mm_or_si128(_mm_cmpeq_epi16(*preg1, max_overflow), in check_epi16_overflow_x2()
44 cmp0 = _mm_or_si128(cmp0, cmp1); in check_epi16_overflow_x2()
54 __m128i cmp0 = _mm_or_si128(_mm_cmpeq_epi16(*preg0, max_overflow), in check_epi16_overflow_x4()
56 __m128i cmp1 = _mm_or_si128(_mm_cmpeq_epi16(*preg1, max_overflow), in check_epi16_overflow_x4()
58 __m128i cmp2 = _mm_or_si128(_mm_cmpeq_epi16(*preg2, max_overflow), in check_epi16_overflow_x4()
60 __m128i cmp3 = _mm_or_si128(_mm_cmpeq_epi16(*preg3, max_overflow), in check_epi16_overflow_x4()
62 cmp0 = _mm_or_si128(_mm_or_si128(cmp0, cmp1), _mm_or_si128(cmp2, cmp3)); in check_epi16_overflow_x4()
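
check_epi16_overflow_xN flags any 16-bit lane sitting exactly at the saturation rails; ORing all the equality masks lets a single movemask decide for every register at once. A two-register sketch of the pattern:

#include <emmintrin.h>

/* Nonzero if any 16-bit lane of a or b equals INT16_MAX or INT16_MIN,
   i.e. may have saturated during the transform. */
static int any_epi16_saturated(__m128i a, __m128i b) {
    const __m128i max16 = _mm_set1_epi16(0x7FFF);
    const __m128i min16 = _mm_set1_epi16((short)0x8000);
    const __m128i cmp0 = _mm_or_si128(_mm_cmpeq_epi16(a, max16),
                                      _mm_cmpeq_epi16(a, min16));
    const __m128i cmp1 = _mm_or_si128(_mm_cmpeq_epi16(b, max16),
                                      _mm_cmpeq_epi16(b, min16));
    return _mm_movemask_epi8(_mm_or_si128(cmp0, cmp1));
}

The same helper appears again in /external/libaom/libaom/aom_dsp/x86/fwd_txfm_sse2.h below, and the highbd_block_error kernels use the same OR-reduce-then-test shape for their comparison results.
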
/external/XNNPACK/src/x8-zip/
x3-sse2.c
39 const __m128i vxeye = _mm_or_si128(_mm_and_si128(vx, vmask0x00FF00FF), _mm_slli_epi16(vy, 8)); in xnn_x8_zip_x3_ukernel__sse2()
41 … const __m128i vyozo = _mm_or_si128(_mm_andnot_si128(vmask0x00FF00FF, vz), _mm_srli_epi16(vy, 8)); in xnn_x8_zip_x3_ukernel__sse2()
43 …const __m128i vzexo = _mm_or_si128(_mm_and_si128(vz, vmask0x00FF00FF), _mm_andnot_si128(vmask0x00F… in xnn_x8_zip_x3_ukernel__sse2()
46 …const __m128i vxeyezexo = _mm_or_si128(_mm_and_si128(vxeye, vmask0x0000FFFF), _mm_slli_epi32(vzexo… in xnn_x8_zip_x3_ukernel__sse2()
48 …const __m128i vyozoxeye = _mm_or_si128(_mm_and_si128(vyozo, vmask0x0000FFFF), _mm_andnot_si128(vma… in xnn_x8_zip_x3_ukernel__sse2()
50 …const __m128i vzexoyozo = _mm_or_si128(_mm_andnot_si128(vmask0x0000FFFF, vyozo), _mm_srli_epi32(vz… in xnn_x8_zip_x3_ukernel__sse2()
88 const __m128i vxeye = _mm_or_si128(_mm_and_si128(vx, vmask0x00FF00FF), _mm_slli_epi16(vy, 8)); in xnn_x8_zip_x3_ukernel__sse2()
90 … const __m128i vyozo = _mm_or_si128(_mm_andnot_si128(vmask0x00FF00FF, vz), _mm_srli_epi16(vy, 8)); in xnn_x8_zip_x3_ukernel__sse2()
92 …const __m128i vzexo = _mm_or_si128(_mm_and_si128(vz, vmask0x00FF00FF), _mm_andnot_si128(vmask0x00F… in xnn_x8_zip_x3_ukernel__sse2()
95 …const __m128i vxeyezexo = _mm_or_si128(_mm_and_si128(vxeye, vmask0x0000FFFF), _mm_slli_epi32(vzexo… in xnn_x8_zip_x3_ukernel__sse2()
[all …]
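
The zip kernel builds its interleaves the same disjoint-lane way: mask one stream to the even byte lanes, shift the other into the odd lanes, then OR. The first stage from the hits above, reconstructed as a standalone sketch:

#include <emmintrin.h>

/* vxeye from the kernel above: combine the even-indexed bytes of x and y
   into x0 y0 x2 y2 ...  The AND keeps x's even bytes, the 16-bit left
   shift moves y's even bytes into the odd positions, and OR merges the
   two disjoint halves. */
static __m128i zip_even_bytes(__m128i vx, __m128i vy) {
    const __m128i vmask0x00FF00FF = _mm_set1_epi32(0x00FF00FF);
    return _mm_or_si128(_mm_and_si128(vx, vmask0x00FF00FF),
                        _mm_slli_epi16(vy, 8));
}
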
/external/rust/crates/memchr/src/x86/
sse2.rs
144 let or1 = _mm_or_si128(eqa, eqb); in memchr()
145 let or2 = _mm_or_si128(eqc, eqd); in memchr()
146 let or3 = _mm_or_si128(or1, or2); in memchr()
226 let or1 = _mm_or_si128(eqa1, eqb1); in memchr2()
227 let or2 = _mm_or_si128(eqa2, eqb2); in memchr2()
228 let or3 = _mm_or_si128(or1, or2); in memchr2()
303 let or1 = _mm_or_si128(eqa1, eqb1); in memchr3()
304 let or2 = _mm_or_si128(eqa2, eqb2); in memchr3()
305 let or3 = _mm_or_si128(eqa3, eqb3); in memchr3()
306 let or4 = _mm_or_si128(or1, or2); in memchr3()
[all …]
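
memchr's or1/or2/or3 chain ORs the byte-equality masks of four 16-byte blocks so a single movemask (and a single branch) covers 64 bytes per iteration. The source here is Rust, but the same reduction in C looks like this (a sketch; p must have 64 readable bytes):

#include <emmintrin.h>

/* One branch per 64 bytes: compare four blocks against the needle,
   OR the equality masks together, movemask once. */
static int contains_byte_64(const unsigned char *p, unsigned char needle) {
    const __m128i vn  = _mm_set1_epi8((char)needle);
    const __m128i eqa = _mm_cmpeq_epi8(_mm_loadu_si128((const __m128i *)(p +  0)), vn);
    const __m128i eqb = _mm_cmpeq_epi8(_mm_loadu_si128((const __m128i *)(p + 16)), vn);
    const __m128i eqc = _mm_cmpeq_epi8(_mm_loadu_si128((const __m128i *)(p + 32)), vn);
    const __m128i eqd = _mm_cmpeq_epi8(_mm_loadu_si128((const __m128i *)(p + 48)), vn);
    const __m128i or1 = _mm_or_si128(eqa, eqb);
    const __m128i or2 = _mm_or_si128(eqc, eqd);
    return _mm_movemask_epi8(_mm_or_si128(or1, or2)) != 0;
}

On a hit, the real implementation re-tests the individual masks to locate the first matching byte; the OR only serves as the cheap existence check.
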
/external/libgav1/libgav1/src/dsp/x86/
intrapred_filter_sse4.cc
116 pixels = _mm_or_si128(left, pixels); in Filter4xH()
130 pixels = _mm_or_si128(left, pixels); in Filter4xH()
157 pixels = _mm_or_si128(left, pixels); in Filter4xH()
185 pixels = _mm_or_si128(left, pixels); in Filter4xH()
200 left = _mm_or_si128(left, keep_top_left); in Filter4xH()
207 pixels = _mm_or_si128(left, pixels); in Filter4xH()
225 pixels = _mm_or_si128(left, pixels); in Filter4xH()
248 pixels = _mm_or_si128(left, pixels); in Filter4xH()
264 pixels = _mm_or_si128(left, pixels); in Filter4xH()
313 pixels = _mm_or_si128(pixels, left); in FilterIntraPredictor_SSE4_1()
[all …]
/external/libaom/libaom/aom_dsp/x86/
fwd_txfm_sse2.h
38 __m128i cmp0 = _mm_or_si128(_mm_cmpeq_epi16(*preg0, max_overflow), in check_epi16_overflow_x2()
40 __m128i cmp1 = _mm_or_si128(_mm_cmpeq_epi16(*preg1, max_overflow), in check_epi16_overflow_x2()
42 cmp0 = _mm_or_si128(cmp0, cmp1); in check_epi16_overflow_x2()
52 __m128i cmp0 = _mm_or_si128(_mm_cmpeq_epi16(*preg0, max_overflow), in check_epi16_overflow_x4()
54 __m128i cmp1 = _mm_or_si128(_mm_cmpeq_epi16(*preg1, max_overflow), in check_epi16_overflow_x4()
56 __m128i cmp2 = _mm_or_si128(_mm_cmpeq_epi16(*preg2, max_overflow), in check_epi16_overflow_x4()
58 __m128i cmp3 = _mm_or_si128(_mm_cmpeq_epi16(*preg3, max_overflow), in check_epi16_overflow_x4()
60 cmp0 = _mm_or_si128(_mm_or_si128(cmp0, cmp1), _mm_or_si128(cmp2, cmp3)); in check_epi16_overflow_x4()
/external/skqp/src/opts/
SkBlitRow_opts.h
20 __m128i s = _mm_or_si128(_mm_slli_epi32(scale, 16), scale); in SkPMSrcOver_SSE2()
33 return _mm_or_si128(rb, ag); in SkPMSrcOver_SSE2()
97 auto ORed = _mm_or_si128(s3, _mm_or_si128(s2, _mm_or_si128(s1, s0))); in blit_row_s32a_opaque()
145 auto ORed = _mm_or_si128(s3, _mm_or_si128(s2, _mm_or_si128(s1, s0))); in blit_row_s32a_opaque()
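
The ORed accumulator in blit_row_s32a_opaque is a fast-path probe: OR 16 source pixels together, and if the result has no alpha bits set then every source pixel was fully transparent and the whole run can be skipped. A sketch of that test (assumed ARGB layout with alpha in the top byte):

#include <emmintrin.h>

/* True if all 16 pixels in s0..s3 have zero alpha. */
static int all_transparent(__m128i s0, __m128i s1, __m128i s2, __m128i s3) {
    const __m128i alpha_mask = _mm_set1_epi32((int)0xFF000000);
    const __m128i ored = _mm_or_si128(s3, _mm_or_si128(s2, _mm_or_si128(s1, s0)));
    return _mm_movemask_epi8(_mm_cmpeq_epi8(_mm_and_si128(ored, alpha_mask),
                                            _mm_setzero_si128())) == 0xFFFF;
}
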
/external/XNNPACK/src/f32-vrnd/gen/
vrndz-sse2-x8.c
37 …const __m128 vrndmask0123 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx0123, vmagi… in xnn_f32_vrndz_ukernel__sse2_x8()
38 …const __m128 vrndmask4567 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx4567, vmagi… in xnn_f32_vrndz_ukernel__sse2_x8()
55 const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic))); in xnn_f32_vrndz_ukernel__sse2_x8()
65 const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic))); in xnn_f32_vrndz_ukernel__sse2_x8()
vrndne-sse2-x8.c
37 …const __m128 vrndmask0123 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx0123, vmagi… in xnn_f32_vrndne_ukernel__sse2_x8()
38 …const __m128 vrndmask4567 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx4567, vmagi… in xnn_f32_vrndne_ukernel__sse2_x8()
55 const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic))); in xnn_f32_vrndne_ukernel__sse2_x8()
65 const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic))); in xnn_f32_vrndne_ukernel__sse2_x8()
vrndd-sse2-x8.c
38 …const __m128 vrndmask0123 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx0123, vmagi… in xnn_f32_vrndd_ukernel__sse2_x8()
39 …const __m128 vrndmask4567 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx4567, vmagi… in xnn_f32_vrndd_ukernel__sse2_x8()
59 const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic))); in xnn_f32_vrndd_ukernel__sse2_x8()
70 const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic))); in xnn_f32_vrndd_ukernel__sse2_x8()
vrndu-sse2-x8.c
38 …const __m128 vrndmask0123 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx0123, vmagi… in xnn_f32_vrndu_ukernel__sse2_x8()
39 …const __m128 vrndmask4567 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx4567, vmagi… in xnn_f32_vrndu_ukernel__sse2_x8()
65 const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic))); in xnn_f32_vrndu_ukernel__sse2_x8()
78 const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic))); in xnn_f32_vrndu_ukernel__sse2_x8()
vrndz-sse2-x4.c
35 …const __m128 vrndmask0123 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx0123, vmagi… in xnn_f32_vrndz_ukernel__sse2_x4()
47 const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic))); in xnn_f32_vrndz_ukernel__sse2_x4()
vrndne-sse2-x4.c
35 …const __m128 vrndmask0123 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx0123, vmagi… in xnn_f32_vrndne_ukernel__sse2_x4()
47 const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic))); in xnn_f32_vrndne_ukernel__sse2_x4()
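
All of the vrnd kernels share one guard. _mm_cvttps_epi32 returns the bit pattern 0x80000000 whenever its input is outside int32 range (or NaN), and such inputs are necessarily already integral. Comparing the converted value against that magic constant and ORing the result onto 0x80000000 yields a mask that is all ones for unconvertible lanes and just the sign bit otherwise, so the final select keeps the original value where conversion failed and elsewhere splices the original sign bit onto the rounded magnitude (preserving -0.0f). A reconstruction of the truncation variant under those assumptions (XNNPACK's exact select order may differ):

#include <emmintrin.h>

static __m128 vrndz_sketch(__m128 vx) {
    const __m128i vmagic = _mm_set1_epi32((int)0x80000000);
    const __m128i vintx = _mm_cvttps_epi32(vx);  /* 0x80000000 on overflow/NaN */
    const __m128 vrndmask = _mm_castsi128_ps(
        _mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vrndx = _mm_cvtepi32_ps(vintx);
    /* mask = sign bit only: sign from vx, magnitude from vrndx;
       mask = all ones: keep vx unchanged. */
    return _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vrndx));
}
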
/external/mesa3d/src/gallium/drivers/llvmpipe/
lp_rast_tri.c
335 __m128i c_0 = _mm_or_si128(_mm_or_si128(c0_0, c1_0), c2_0); in lp_rast_triangle_32_3_16()
341 __m128i c_1 = _mm_or_si128(_mm_or_si128(c0_1, c1_1), c2_1); in lp_rast_triangle_32_3_16()
348 __m128i c_2 = _mm_or_si128(_mm_or_si128(c0_2, c1_2), c2_2); in lp_rast_triangle_32_3_16()
354 __m128i c_3 = _mm_or_si128(_mm_or_si128(c0_3, c1_3), c2_3); in lp_rast_triangle_32_3_16()
428 __m128i c_0 = _mm_or_si128(_mm_or_si128(c0_0, c1_0), c2_0); in lp_rast_triangle_32_3_4()
434 __m128i c_1 = _mm_or_si128(_mm_or_si128(c0_1, c1_1), c2_1); in lp_rast_triangle_32_3_4()
441 __m128i c_2 = _mm_or_si128(_mm_or_si128(c0_2, c1_2), c2_2); in lp_rast_triangle_32_3_4()
447 __m128i c_3 = _mm_or_si128(_mm_or_si128(c0_3, c1_3), c2_3); in lp_rast_triangle_32_3_4()
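
llvmpipe evaluates three edge functions per pixel; a pixel is inside the triangle only if none of them is negative. ORing the three value vectors accumulates the sign bits, so one movemask yields the outside mask for four pixels at a time. A hedged sketch of that coverage test (the real rasterizer works on larger blocks):

#include <emmintrin.h>

/* c0..c2: one 32-bit edge-function value per pixel, sign bit = outside. */
static int coverage_mask4(__m128i c0, __m128i c1, __m128i c2) {
    const __m128i c = _mm_or_si128(_mm_or_si128(c0, c1), c2);
    const int outside = _mm_movemask_ps(_mm_castsi128_ps(c));
    return ~outside & 0xF;  /* one bit per covered pixel */
}
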
/external/libvpx/libvpx/vp9/encoder/x86/
vp9_highbd_block_error_intrin_sse2.c
45 _mm_or_si128(_mm_or_si128(cmp0, cmp1), _mm_or_si128(cmp2, cmp3))); in vp9_highbd_block_error_sse2()
/external/libaom/libaom/av1/encoder/x86/
highbd_block_error_intrin_sse2.c
46 _mm_or_si128(_mm_or_si128(cmp0, cmp1), _mm_or_si128(cmp2, cmp3))); in av1_highbd_block_error_sse2()
/external/libvpx/libvpx/vp8/encoder/x86/
denoising_sse2.c
77 _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_16); in vp8_denoiser_filter_sse2()
92 adj = _mm_or_si128(adj, adj0); in vp8_denoiser_filter_sse2()
150 const __m128i adj = _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta); in vp8_denoiser_filter_sse2()
252 _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_16); in vp8_denoiser_filter_uv_sse2()
268 adj = _mm_or_si128(adj, adj0); in vp8_denoiser_filter_uv_sse2()
332 const __m128i adj = _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta); in vp8_denoiser_filter_uv_sse2()
