Searched refs:_mm_or_si128 (Results 1 – 25 of 100) sorted by relevance

/external/webp/src/dsp/
common_sse41.h 82 const __m128i RG0 = _mm_or_si128(R0, G0); in VP8PlanarTo24b_SSE41()
83 const __m128i RG1 = _mm_or_si128(R1, G1); in VP8PlanarTo24b_SSE41()
84 const __m128i RG2 = _mm_or_si128(R2, G2); in VP8PlanarTo24b_SSE41()
85 const __m128i RG3 = _mm_or_si128(R3, G3); in VP8PlanarTo24b_SSE41()
86 const __m128i RG4 = _mm_or_si128(R4, G4); in VP8PlanarTo24b_SSE41()
87 const __m128i RG5 = _mm_or_si128(R5, G5); in VP8PlanarTo24b_SSE41()
88 *in0 = _mm_or_si128(RG0, B0); in VP8PlanarTo24b_SSE41()
89 *in1 = _mm_or_si128(RG1, B1); in VP8PlanarTo24b_SSE41()
90 *in2 = _mm_or_si128(RG2, B2); in VP8PlanarTo24b_SSE41()
91 *in3 = _mm_or_si128(RG3, B3); in VP8PlanarTo24b_SSE41()
[all …]
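The matches above follow one pattern: once the R, G and B planes have been shuffled so their bytes occupy disjoint positions, two ORs fuse them into packed pixels. A minimal sketch of that merge step (hypothetical helper, not the libwebp source):

#include <emmintrin.h>  /* SSE2 */

/* Merge three channel vectors whose non-zero bytes are disjoint: the
   two OR layers seen in the VP8PlanarTo24b matches above. */
static __m128i merge_disjoint_channels(__m128i R, __m128i G, __m128i B) {
  const __m128i RG = _mm_or_si128(R, G);  /* R and G bytes never overlap */
  return _mm_or_si128(RG, B);             /* fold in B the same way */
}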
alpha_processing_sse41.c 56 const __m128i c0 = _mm_or_si128(b0, b1); in ExtractAlpha_SSE41()
57 const __m128i c1 = _mm_or_si128(b2, b3); in ExtractAlpha_SSE41()
58 const __m128i d0 = _mm_or_si128(c0, c1); in ExtractAlpha_SSE41()
lossless_sse2.c 70 const __m128i AC = _mm_or_si128(AC0, CA0); in Select_SSE2()
71 const __m128i BC = _mm_or_si128(BC0, CB0); in Select_SSE2()
329 const __m128i pred = _mm_or_si128(A, B); /* pred = (pa > b)? L : T*/ \
477 const __m128i out = _mm_or_si128(J, A); in TransformColorInverse_SSE2()
541 const __m128i F1 = _mm_or_si128(E1, C1); in ConvertBGRAToRGBA_SSE2()
542 const __m128i F2 = _mm_or_si128(E2, C2); in ConvertBGRAToRGBA_SSE2()
573 const __m128i rgba0 = _mm_or_si128(ga2, rb1); // rg0..rg7 | ba0..ba7 in ConvertBGRAToRGBA4444_SSE2()
613 const __m128i rg1 = _mm_or_si128(rb1, g_lo2); // gr0...gr7|xx in ConvertBGRAToRGB565_SSE2()
615 const __m128i gb1 = _mm_or_si128(b1, g_hi2); // bg0...bg7|xx in ConvertBGRAToRGB565_SSE2()
646 const __m128i c0 = _mm_or_si128(a0l, b0h); // rgbrgb00|rgbrgb00 in ConvertBGRAToBGR_SSE2()
[all …]
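In the conversion matches the OR is the final step of a channel swap: G and A stay in place while B and R are masked out and shifted past each other. A sketch of the BGRA-to-RGBA shape, assuming little-endian BGRA in each 32-bit lane (hypothetical helper, not the libwebp code):

#include <emmintrin.h>

/* Swap R and B in four BGRA pixels: keep G and A, mask out B and R,
   shift them past each other, then OR everything back together. */
static __m128i bgra_to_rgba(__m128i bgra) {
  const __m128i ga = _mm_and_si128(bgra, _mm_set1_epi32((int)0xff00ff00u));
  const __m128i b  = _mm_and_si128(bgra, _mm_set1_epi32(0x000000ff));
  const __m128i r  = _mm_and_si128(bgra, _mm_set1_epi32(0x00ff0000));
  return _mm_or_si128(ga, _mm_or_si128(_mm_slli_epi32(b, 16),
                                       _mm_srli_epi32(r, 16)));
}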
alpha_processing_sse2.c 53 const __m128i b2_lo = _mm_or_si128(b1_lo, a2_lo); in DispatchAlpha_SSE2()
54 const __m128i b2_hi = _mm_or_si128(b1_hi, a2_hi); in DispatchAlpha_SSE2()
160 const __m128i alpha0_lo = _mm_or_si128(argb1_lo, kMask); \
161 const __m128i alpha0_hi = _mm_or_si128(argb1_hi, kMask); \
284 const __m128i A2 = _mm_or_si128(A1, kMask); in MultARGBRow_SSE2()
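Here the second operand is a constant: OR-ing pixels with an all-ones alpha mask forces every pixel opaque, the role kMask plays in the matches above. A minimal sketch, assuming alpha lives in the high byte of each 32-bit lane:

#include <emmintrin.h>

/* Force the alpha byte of four 32-bit pixels to 0xFF by OR-ing with a
   constant mask. Sketch, not the libwebp implementation. */
static __m128i force_opaque(__m128i argb) {
  const __m128i kAlphaMask = _mm_set1_epi32((int)0xff000000u);
  return _mm_or_si128(argb, kAlphaMask);
}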
/external/libvpx/libvpx/vpx_dsp/x86/
loopfilter_avx2.c 53 _mm_or_si128(_mm_subs_epu8(q1p1, q0p0), _mm_subs_epu8(q0p0, q1p1)); in vpx_lpf_horizontal_16_avx2()
58 _mm_or_si128(_mm_subs_epu8(q0p0, p0q0), _mm_subs_epu8(p0q0, q0p0)); in vpx_lpf_horizontal_16_avx2()
60 _mm_or_si128(_mm_subs_epu8(q1p1, p1q1), _mm_subs_epu8(p1q1, q1p1)); in vpx_lpf_horizontal_16_avx2()
75 _mm_or_si128(_mm_subs_epu8(q2p2, q1p1), _mm_subs_epu8(q1p1, q2p2)), in vpx_lpf_horizontal_16_avx2()
76 _mm_or_si128(_mm_subs_epu8(q3p3, q2p2), _mm_subs_epu8(q2p2, q3p3))); in vpx_lpf_horizontal_16_avx2()
131 _mm_or_si128(_mm_subs_epu8(q2p2, q0p0), _mm_subs_epu8(q0p0, q2p2)), in vpx_lpf_horizontal_16_avx2()
132 _mm_or_si128(_mm_subs_epu8(q3p3, q0p0), _mm_subs_epu8(q0p0, q3p3))); in vpx_lpf_horizontal_16_avx2()
148 _mm_or_si128(_mm_subs_epu8(q4p4, q0p0), _mm_subs_epu8(q0p0, q4p4)), in vpx_lpf_horizontal_16_avx2()
149 _mm_or_si128(_mm_subs_epu8(q5p5, q0p0), _mm_subs_epu8(q0p0, q5p5))); in vpx_lpf_horizontal_16_avx2()
156 _mm_or_si128(_mm_subs_epu8(q6p6, q0p0), _mm_subs_epu8(q0p0, q6p6)), in vpx_lpf_horizontal_16_avx2()
[all …]
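Nearly every loop-filter match is the same two-instruction idiom: for unsigned bytes, _mm_subs_epu8 saturates the negative direction to zero, so exactly one of the two subtractions holds |a - b| and the other is zero, and their OR is the absolute difference. This helper appears verbatim as abs_diff() in the loopfilter_sse2.c results below:

#include <emmintrin.h>

/* |a - b| per unsigned byte: one saturating subtraction is zero, the
   other carries the magnitude, so their OR is the absolute difference. */
static __m128i abs_diff_u8(__m128i a, __m128i b) {
  return _mm_or_si128(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a));
}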
highbd_loopfilter_sse2.c 40 retval = _mm_andnot_si128(_mm_or_si128(ubounded, lbounded), value); in signed_char_clamp_bd_sse2()
43 retval = _mm_or_si128(retval, ubounded); in signed_char_clamp_bd_sse2()
44 retval = _mm_or_si128(retval, lbounded); in signed_char_clamp_bd_sse2()
104 abs_p1p0 = _mm_or_si128(_mm_subs_epu16(p1, p0), _mm_subs_epu16(p0, p1)); in vpx_highbd_lpf_horizontal_16_sse2()
105 abs_q1q0 = _mm_or_si128(_mm_subs_epu16(q1, q0), _mm_subs_epu16(q0, q1)); in vpx_highbd_lpf_horizontal_16_sse2()
109 abs_p0q0 = _mm_or_si128(_mm_subs_epu16(p0, q0), _mm_subs_epu16(q0, p0)); in vpx_highbd_lpf_horizontal_16_sse2()
110 abs_p1q1 = _mm_or_si128(_mm_subs_epu16(p1, q1), _mm_subs_epu16(q1, p1)); in vpx_highbd_lpf_horizontal_16_sse2()
123 _mm_or_si128(_mm_subs_epu16(p1, p0), _mm_subs_epu16(p0, p1)), in vpx_highbd_lpf_horizontal_16_sse2()
124 _mm_or_si128(_mm_subs_epu16(q1, q0), _mm_subs_epu16(q0, q1))); in vpx_highbd_lpf_horizontal_16_sse2()
127 _mm_or_si128(_mm_subs_epu16(p2, p1), _mm_subs_epu16(p1, p2)), in vpx_highbd_lpf_horizontal_16_sse2()
[all …]
loopfilter_sse2.c 19 return _mm_or_si128(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a)); in abs_diff()
509 q2p2 = _mm_or_si128(q2p2, flat_q2p2); in vpx_lpf_horizontal_16_sse2()
513 q1p1 = _mm_or_si128(qs1ps1, flat_q1p1); in vpx_lpf_horizontal_16_sse2()
517 q0p0 = _mm_or_si128(qs0ps0, flat_q0p0); in vpx_lpf_horizontal_16_sse2()
521 q6p6 = _mm_or_si128(q6p6, flat2_q6p6); in vpx_lpf_horizontal_16_sse2()
527 q5p5 = _mm_or_si128(q5p5, flat2_q5p5); in vpx_lpf_horizontal_16_sse2()
533 q4p4 = _mm_or_si128(q4p4, flat2_q4p4); in vpx_lpf_horizontal_16_sse2()
539 q3p3 = _mm_or_si128(q3p3, flat2_q3p3); in vpx_lpf_horizontal_16_sse2()
545 q2p2 = _mm_or_si128(q2p2, flat2_q2p2); in vpx_lpf_horizontal_16_sse2()
551 q1p1 = _mm_or_si128(q1p1, flat2_q1p1); in vpx_lpf_horizontal_16_sse2()
[all …]
fwd_txfm_sse2.h 40 __m128i cmp0 = _mm_or_si128(_mm_cmpeq_epi16(*preg0, max_overflow), in check_epi16_overflow_x2()
42 __m128i cmp1 = _mm_or_si128(_mm_cmpeq_epi16(*preg1, max_overflow), in check_epi16_overflow_x2()
44 cmp0 = _mm_or_si128(cmp0, cmp1); in check_epi16_overflow_x2()
54 __m128i cmp0 = _mm_or_si128(_mm_cmpeq_epi16(*preg0, max_overflow), in check_epi16_overflow_x4()
56 __m128i cmp1 = _mm_or_si128(_mm_cmpeq_epi16(*preg1, max_overflow), in check_epi16_overflow_x4()
58 __m128i cmp2 = _mm_or_si128(_mm_cmpeq_epi16(*preg2, max_overflow), in check_epi16_overflow_x4()
60 __m128i cmp3 = _mm_or_si128(_mm_cmpeq_epi16(*preg3, max_overflow), in check_epi16_overflow_x4()
62 cmp0 = _mm_or_si128(_mm_or_si128(cmp0, cmp1), _mm_or_si128(cmp2, cmp3)); in check_epi16_overflow_x4()
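check_epi16_overflow_* uses OR twice: once to combine "equals INT16_MAX" with "equals INT16_MIN" per register, and once to fold the per-register flags so a single test covers all inputs. A sketch of the two-register case, assuming 0x7fff/0x8000 are the saturation sentinels and that callers reduce the result with a movemask:

#include <emmintrin.h>

/* Return non-zero if any int16 lane of reg0/reg1 hit the extremes. */
static int any_epi16_overflow(__m128i reg0, __m128i reg1) {
  const __m128i max_overflow = _mm_set1_epi16(0x7fff);
  const __m128i min_overflow = _mm_set1_epi16((short)0x8000);
  const __m128i cmp0 = _mm_or_si128(_mm_cmpeq_epi16(reg0, max_overflow),
                                    _mm_cmpeq_epi16(reg0, min_overflow));
  const __m128i cmp1 = _mm_or_si128(_mm_cmpeq_epi16(reg1, max_overflow),
                                    _mm_cmpeq_epi16(reg1, min_overflow));
  return _mm_movemask_epi8(_mm_or_si128(cmp0, cmp1)) != 0;
}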
quantize_avx.c 61 all_zero = _mm_or_si128(cmp_mask0, cmp_mask1); in vpx_quantize_b_avx()
113 all_zero = _mm_or_si128(cmp_mask0, cmp_mask1); in vpx_quantize_b_avx()
202 all_zero = _mm_or_si128(cmp_mask0, cmp_mask1); in vpx_quantize_b_32x32_avx()
252 all_zero = _mm_or_si128(cmp_mask0, cmp_mask1); in vpx_quantize_b_32x32_avx()
/external/XNNPACK/src/f32-argmaxpool/
9p8x-sse2-c4.c 83 vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
87 vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
91 vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
95 vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, _mm_set1_epi32(4))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
99 vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, _mm_set1_epi32(5))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
103 vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, _mm_set1_epi32(6))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
107 vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, _mm_set1_epi32(7))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
111 vidx = _mm_or_si128(_mm_andnot_si128(vm8, vidx), _mm_and_si128(vm8, _mm_set1_epi32(8))); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
168 vidx = _mm_or_si128(_mm_andnot_si128(vm0, vidx), _mm_and_si128(vm0, vidx0)); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
173 vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, vidx1)); in xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4()
[all …]
9x-sse2-c4.c 105 vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
109 vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
113 vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
117 vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, _mm_set1_epi32(4))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
121 vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, _mm_set1_epi32(5))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
125 vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, _mm_set1_epi32(6))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
129 vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, _mm_set1_epi32(7))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
133 vidx = _mm_or_si128(_mm_andnot_si128(vm8, vidx), _mm_and_si128(vm8, _mm_set1_epi32(8))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
158 vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
162 vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2))); in xnn_f32_argmaxpool_ukernel_9x__sse2_c4()
[all …]
4x-sse2-c4.c 67 vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1))); in xnn_f32_argmaxpool_ukernel_4x__sse2_c4()
71 vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2))); in xnn_f32_argmaxpool_ukernel_4x__sse2_c4()
75 vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3))); in xnn_f32_argmaxpool_ukernel_4x__sse2_c4()
95 vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1))); in xnn_f32_argmaxpool_ukernel_4x__sse2_c4()
99 vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2))); in xnn_f32_argmaxpool_ukernel_4x__sse2_c4()
103 vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3))); in xnn_f32_argmaxpool_ukernel_4x__sse2_c4()
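All three argmaxpool kernels update their index vector with the classic SSE2 select: (old & ~mask) | (new & mask). There is no variable byte blend below SSE4.1 (_mm_blendv_epi8), so the andnot/and/or triple stands in for it:

#include <emmintrin.h>

/* Branchless per-lane select: keep a where mask is zero, take b where
   mask is all-ones. */
static __m128i select_si128(__m128i mask, __m128i a, __m128i b) {
  return _mm_or_si128(_mm_andnot_si128(mask, a), _mm_and_si128(mask, b));
}

In the matches above, a is the running index vector vidx, mask comes from a per-lane comparison of the pooled values, and b is _mm_set1_epi32(k) for pool input k.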
/external/XNNPACK/src/x8-zip/
x3-sse2.c 39 const __m128i vxeye = _mm_or_si128(_mm_and_si128(vx, vmask0x00FF00FF), _mm_slli_epi16(vy, 8)); in xnn_x8_zip_x3_ukernel__sse2()
41 … const __m128i vyozo = _mm_or_si128(_mm_andnot_si128(vmask0x00FF00FF, vz), _mm_srli_epi16(vy, 8)); in xnn_x8_zip_x3_ukernel__sse2()
43 …const __m128i vzexo = _mm_or_si128(_mm_and_si128(vz, vmask0x00FF00FF), _mm_andnot_si128(vmask0x00F… in xnn_x8_zip_x3_ukernel__sse2()
46 …const __m128i vxeyezexo = _mm_or_si128(_mm_and_si128(vxeye, vmask0x0000FFFF), _mm_slli_epi32(vzexo… in xnn_x8_zip_x3_ukernel__sse2()
48 …const __m128i vyozoxeye = _mm_or_si128(_mm_and_si128(vyozo, vmask0x0000FFFF), _mm_andnot_si128(vma… in xnn_x8_zip_x3_ukernel__sse2()
50 …const __m128i vzexoyozo = _mm_or_si128(_mm_andnot_si128(vmask0x0000FFFF, vyozo), _mm_srli_epi32(vz… in xnn_x8_zip_x3_ukernel__sse2()
88 const __m128i vxeye = _mm_or_si128(_mm_and_si128(vx, vmask0x00FF00FF), _mm_slli_epi16(vy, 8)); in xnn_x8_zip_x3_ukernel__sse2()
90 … const __m128i vyozo = _mm_or_si128(_mm_andnot_si128(vmask0x00FF00FF, vz), _mm_srli_epi16(vy, 8)); in xnn_x8_zip_x3_ukernel__sse2()
92 …const __m128i vzexo = _mm_or_si128(_mm_and_si128(vz, vmask0x00FF00FF), _mm_andnot_si128(vmask0x00F… in xnn_x8_zip_x3_ukernel__sse2()
95 …const __m128i vxeyezexo = _mm_or_si128(_mm_and_si128(vxeye, vmask0x0000FFFF), _mm_slli_epi32(vzexo… in xnn_x8_zip_x3_ukernel__sse2()
[all …]
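The zip kernels build interleavings with mask/shift/OR pairs. The first step, vxeye, keeps the even bytes of x and ORs in y's even bytes shifted into the odd positions. A sketch of that one step (hypothetical helper, not the XNNPACK kernel itself):

#include <emmintrin.h>

/* Interleave the even bytes of x with the even bytes of y: keep x's
   low byte of each 16-bit lane, OR in y's low byte shifted high. */
static __m128i zip_even_bytes(__m128i x, __m128i y) {
  const __m128i vmask0x00FF00FF = _mm_set1_epi32(0x00FF00FF);
  return _mm_or_si128(_mm_and_si128(x, vmask0x00FF00FF),
                      _mm_slli_epi16(y, 8));
}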
/external/libaom/libaom/aom_dsp/x86/
fwd_txfm_sse2.h 38 __m128i cmp0 = _mm_or_si128(_mm_cmpeq_epi16(*preg0, max_overflow), in check_epi16_overflow_x2()
40 __m128i cmp1 = _mm_or_si128(_mm_cmpeq_epi16(*preg1, max_overflow), in check_epi16_overflow_x2()
42 cmp0 = _mm_or_si128(cmp0, cmp1); in check_epi16_overflow_x2()
52 __m128i cmp0 = _mm_or_si128(_mm_cmpeq_epi16(*preg0, max_overflow), in check_epi16_overflow_x4()
54 __m128i cmp1 = _mm_or_si128(_mm_cmpeq_epi16(*preg1, max_overflow), in check_epi16_overflow_x4()
56 __m128i cmp2 = _mm_or_si128(_mm_cmpeq_epi16(*preg2, max_overflow), in check_epi16_overflow_x4()
58 __m128i cmp3 = _mm_or_si128(_mm_cmpeq_epi16(*preg3, max_overflow), in check_epi16_overflow_x4()
60 cmp0 = _mm_or_si128(_mm_or_si128(cmp0, cmp1), _mm_or_si128(cmp2, cmp3)); in check_epi16_overflow_x4()
highbd_loopfilter_sse2.c 25 return _mm_or_si128(_mm_subs_epu16(a, b), _mm_subs_epu16(b, a)); in abs_diff16()
481 pq[i] = _mm_or_si128(pq[i], flat_pq[i]); in highbd_lpf_internal_14_sse2()
490 pq[i] = _mm_or_si128(pq[i], flat2_pq[i]); // full list of pq values in highbd_lpf_internal_14_sse2()
676 p[i] = _mm_or_si128(ps[i], flat_p[i]); in highbd_lpf_internal_14_dual_sse2()
679 q[i] = _mm_or_si128(qs[i], flat_q[i]); in highbd_lpf_internal_14_dual_sse2()
685 p[2] = _mm_or_si128(p[2], flat_p[2]); // full list of p2 values in highbd_lpf_internal_14_dual_sse2()
688 q[2] = _mm_or_si128(q[2], flat_q[2]); // full list of q2 values in highbd_lpf_internal_14_dual_sse2()
693 p[i] = _mm_or_si128(ps[i], flat_p[i]); in highbd_lpf_internal_14_dual_sse2()
696 q[i] = _mm_or_si128(qs[i], flat_q[i]); in highbd_lpf_internal_14_dual_sse2()
705 p[i] = _mm_or_si128(p[i], flat2_p[i]); // full list of p values in highbd_lpf_internal_14_dual_sse2()
[all …]
loopfilter_sse2.c 22 return _mm_or_si128(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a)); in abs_diff()
573 *q2p2 = _mm_or_si128(*q2p2, flat_q2p2); in lpf_internal_14_dual_sse2()
577 *q1p1 = _mm_or_si128(qs1ps1, flat_q1p1); in lpf_internal_14_dual_sse2()
581 *q0p0 = _mm_or_si128(qs0ps0, flat_q0p0); in lpf_internal_14_dual_sse2()
669 *q5p5 = _mm_or_si128(*q5p5, flat2_q5p5); in lpf_internal_14_dual_sse2()
673 *q4p4 = _mm_or_si128(*q4p4, flat2_q4p4); in lpf_internal_14_dual_sse2()
677 *q3p3 = _mm_or_si128(*q3p3, flat2_q3p3); in lpf_internal_14_dual_sse2()
681 *q2p2 = _mm_or_si128(*q2p2, flat2_q2p2); in lpf_internal_14_dual_sse2()
685 *q1p1 = _mm_or_si128(*q1p1, flat2_q1p1); in lpf_internal_14_dual_sse2()
689 *q0p0 = _mm_or_si128(*q0p0, flat2_q0p0); in lpf_internal_14_dual_sse2()
[all …]
/external/skqp/src/opts/
SkBlitRow_opts.h 20 __m128i s = _mm_or_si128(_mm_slli_epi32(scale, 16), scale); in SkPMSrcOver_SSE2()
33 return _mm_or_si128(rb, ag); in SkPMSrcOver_SSE2()
97 auto ORed = _mm_or_si128(s3, _mm_or_si128(s2, _mm_or_si128(s1, s0))); in blit_row_s32a_opaque()
145 auto ORed = _mm_or_si128(s3, _mm_or_si128(s2, _mm_or_si128(s1, s0))); in blit_row_s32a_opaque()
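Both blit_row_s32a_opaque hits OR four source vectors into one so a single alpha test covers 16 pixels, e.g. to skip a span whose pixels are all fully transparent. A sketch of that reduction, assuming 32-bit pixels with alpha in the high byte (Skia's actual fast-path test may differ):

#include <emmintrin.h>

/* If no alpha bit survives the OR of all 16 pixels, the whole span is
   transparent and the blit can skip it. */
static int all_transparent(__m128i s0, __m128i s1, __m128i s2, __m128i s3) {
  const __m128i alpha_mask = _mm_set1_epi32((int)0xff000000u);
  const __m128i ored = _mm_or_si128(s3, _mm_or_si128(s2, _mm_or_si128(s1, s0)));
  const __m128i a = _mm_and_si128(ored, alpha_mask);
  return _mm_movemask_epi8(_mm_cmpeq_epi8(a, _mm_setzero_si128())) == 0xffff;
}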
SkSwizzler_opts.h 522 rg = _mm_or_si128(r, _mm_slli_epi16(g, 8)); // rgrgrgrg RGRGRGRG in premul_should_swapRB()
523 ba = _mm_or_si128(b, _mm_slli_epi16(a, 8)); // babababa BABABABA in premul_should_swapRB()
602 __m128i rgba = _mm_or_si128(_mm_shuffle_epi8(rgb, expand), alphaMask); in insert_alpha_should_swaprb()
657 __m128i gg = _mm_or_si128(_mm_and_si128(ga, _mm_set1_epi16(0x00FF)), in grayA_to_RGBA()
684 __m128i gg = _mm_or_si128(g0, _mm_slli_epi16(g0, 8)); in grayA_to_rgbA()
685 __m128i ga = _mm_or_si128(g0, _mm_slli_epi16(a0, 8)); in grayA_to_rgbA()
732 __m128i rg = _mm_or_si128(r, _mm_slli_epi16(g, 8)), // rgrgrgrg RGRGRGRG in inverted_cmyk_to()
733 ba = _mm_or_si128(b, _mm_set1_epi16((uint16_t) 0xFF00)); // b1b1b1b1 B1B1B1B1 in inverted_cmyk_to()
/external/skia/src/opts/
SkBlitRow_opts.h 68 __m128i s = _mm_or_si128(_mm_slli_epi32(scale, 16), scale); in SkPMSrcOver_SSE2()
81 return _mm_or_si128(rb, ag); in SkPMSrcOver_SSE2()
225 auto ORed = _mm_or_si128(s3, _mm_or_si128(s2, _mm_or_si128(s1, s0))); in blit_row_s32a_opaque()
273 auto ORed = _mm_or_si128(s3, _mm_or_si128(s2, _mm_or_si128(s1, s0))); in blit_row_s32a_opaque()
/external/mesa3d/src/gallium/drivers/llvmpipe/
lp_rast_tri.c 335 __m128i c_0 = _mm_or_si128(_mm_or_si128(c0_0, c1_0), c2_0); in lp_rast_triangle_32_3_16()
341 __m128i c_1 = _mm_or_si128(_mm_or_si128(c0_1, c1_1), c2_1); in lp_rast_triangle_32_3_16()
348 __m128i c_2 = _mm_or_si128(_mm_or_si128(c0_2, c1_2), c2_2); in lp_rast_triangle_32_3_16()
354 __m128i c_3 = _mm_or_si128(_mm_or_si128(c0_3, c1_3), c2_3); in lp_rast_triangle_32_3_16()
428 __m128i c_0 = _mm_or_si128(_mm_or_si128(c0_0, c1_0), c2_0); in lp_rast_triangle_32_3_4()
434 __m128i c_1 = _mm_or_si128(_mm_or_si128(c0_1, c1_1), c2_1); in lp_rast_triangle_32_3_4()
441 __m128i c_2 = _mm_or_si128(_mm_or_si128(c0_2, c1_2), c2_2); in lp_rast_triangle_32_3_4()
447 __m128i c_3 = _mm_or_si128(_mm_or_si128(c0_3, c1_3), c2_3); in lp_rast_triangle_32_3_4()
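In the rasterizer, each cN holds one edge function's values for four pixels; OR-ing the three edges flags any pixel that is outside any edge. A sketch of that coverage test, assuming the sign bit of each 32-bit lane marks "outside" (not the mesa code):

#include <emmintrin.h>

/* OR three edge-function vectors and extract one sign bit per pixel. */
static int outside_mask(__m128i c0, __m128i c1, __m128i c2) {
  const __m128i c = _mm_or_si128(_mm_or_si128(c0, c1), c2);
  return _mm_movemask_ps(_mm_castsi128_ps(c));
}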
/external/libvpx/libvpx/vp9/encoder/x86/
vp9_highbd_block_error_intrin_sse2.c 45 _mm_or_si128(_mm_or_si128(cmp0, cmp1), _mm_or_si128(cmp2, cmp3))); in vp9_highbd_block_error_sse2()
/external/libaom/libaom/av1/encoder/x86/
highbd_block_error_intrin_sse2.c 46 _mm_or_si128(_mm_or_si128(cmp0, cmp1), _mm_or_si128(cmp2, cmp3))); in av1_highbd_block_error_sse2()
/external/libvpx/libvpx/vp8/encoder/x86/
denoising_sse2.c 77 _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_16); in vp8_denoiser_filter_sse2()
92 adj = _mm_or_si128(adj, adj0); in vp8_denoiser_filter_sse2()
150 const __m128i adj = _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta); in vp8_denoiser_filter_sse2()
252 _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_16); in vp8_denoiser_filter_uv_sse2()
268 adj = _mm_or_si128(adj, adj0); in vp8_denoiser_filter_uv_sse2()
332 const __m128i adj = _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta); in vp8_denoiser_filter_uv_sse2()
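The denoiser pairs the absolute-difference idiom with a cap: pdiff and ndiff are the two saturating differences (per byte, at most one is non-zero), so their OR is |a - b|, which _mm_min_epu8 then clamps to the adjustment limit. A compact sketch:

#include <emmintrin.h>

/* min(|a - b|, k) per unsigned byte, the adj computation above. */
static __m128i capped_abs_diff(__m128i a, __m128i b, __m128i k) {
  const __m128i pdiff = _mm_subs_epu8(a, b);
  const __m128i ndiff = _mm_subs_epu8(b, a);
  return _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k);
}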
/external/libhevc/common/x86/
ihevc_sao_ssse3_intr.c 208 band_table3_8x16b = _mm_or_si128(band_table3_8x16b, tmp_set_128i_2); in ihevc_sao_band_offset_luma_ssse3()
212 band_table2_8x16b = _mm_or_si128(band_table2_8x16b, tmp_set_128i_2); in ihevc_sao_band_offset_luma_ssse3()
218 band_table1_8x16b = _mm_or_si128(band_table1_8x16b, tmp_set_128i_2); in ihevc_sao_band_offset_luma_ssse3()
224 band_table0_8x16b = _mm_or_si128(band_table0_8x16b, tmp_set_128i_2); in ihevc_sao_band_offset_luma_ssse3()
274 tmp_set_128i_1 = _mm_or_si128(tmp_set_128i_1, tmp_set_128i_2); in ihevc_sao_band_offset_luma_ssse3()
275 tmp_set_128i_3 = _mm_or_si128(tmp_set_128i_3, tmp_set_128i_4); in ihevc_sao_band_offset_luma_ssse3()
279 tmp_set_128i_1 = _mm_or_si128(tmp_set_128i_1, tmp_set_128i_2); in ihevc_sao_band_offset_luma_ssse3()
280 tmp_set_128i_3 = _mm_or_si128(tmp_set_128i_3, tmp_set_128i_4); in ihevc_sao_band_offset_luma_ssse3()
289 tmp_set_128i_1 = _mm_or_si128(tmp_set_128i_1, cmp_store); in ihevc_sao_band_offset_luma_ssse3()
292 tmp_set_128i_2 = _mm_or_si128(tmp_set_128i_2, cmp_store); in ihevc_sao_band_offset_luma_ssse3()
[all …]
/external/XNNPACK/src/requantization/
gemmlowp-sse.h 69 return _mm_or_si128(mul_us, mul_us_neg); in gemmlowp_sse_mul_s32()
114 const __m128i saturated_result = _mm_or_si128( in gemmlowp_sse_vqrdmulh_s32()
