/external/skia/src/opts/ |
D | SkColor_opts_SSE2.h |
    49 __m128i rb = _mm_and_si128(mask, c); in SkAlphaMulQ_SSE2()
    55 ag = _mm_and_si128(ag, mask); in SkAlphaMulQ_SSE2()
    59 rb = _mm_and_si128(mask, rb); in SkAlphaMulQ_SSE2()
    118 r = _mm_and_si128(r, _mm_set1_epi32(SK_R16_MASK)); in SkPacked16ToR32_SSE2()
    127 g = _mm_and_si128(g, _mm_set1_epi32(SK_G16_MASK)); in SkPacked16ToG32_SSE2()
    136 b = _mm_and_si128(b, _mm_set1_epi32(SK_B16_MASK)); in SkPacked16ToB32_SSE2()
    156 r1 = _mm_and_si128(r1, _mm_set1_epi32(SK_R16_MASK)); in SkPixel32ToPixel16_ToU16_SSE2()
    159 r2 = _mm_and_si128(r2, _mm_set1_epi32(SK_R16_MASK)); in SkPixel32ToPixel16_ToU16_SSE2()
    165 g1 = _mm_and_si128(g1, _mm_set1_epi32(SK_G16_MASK)); in SkPixel32ToPixel16_ToU16_SSE2()
    168 g2 = _mm_and_si128(g2, _mm_set1_epi32(SK_G16_MASK)); in SkPixel32ToPixel16_ToU16_SSE2()
    [all …]
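The SkAlphaMulQ_SSE2() hits above are the classic 0x00FF00FF masking trick: the AND splits each packed ARGB pixel into its red/blue and alpha/green byte pairs so a single 16-bit multiply scales two channels at once. A minimal sketch of that pattern, not Skia's exact routine, assuming a scale in [0, 256] and the usual 0x00FF00FF mask:

    #include <emmintrin.h>

    /* Scale all four channels of four packed ARGB pixels by `scale` (0..256).
     * After the ANDs, every 16-bit lane holds one 8-bit channel, so one
     * multiply per half covers two channels of each pixel. */
    static inline __m128i alpha_mul_q_sketch(__m128i c, unsigned scale) {
        const __m128i mask = _mm_set1_epi32(0x00FF00FF);
        const __m128i s    = _mm_set1_epi16((short)scale);
        __m128i rb = _mm_and_si128(mask, c);                     /* 00RR00BB per pixel */
        __m128i ag = _mm_and_si128(mask, _mm_srli_epi32(c, 8));  /* 00AA00GG per pixel */
        rb = _mm_srli_epi16(_mm_mullo_epi16(rb, s), 8);          /* (rb * scale) >> 8 */
        ag = _mm_andnot_si128(mask, _mm_mullo_epi16(ag, s));     /* keep ((ag * scale) >> 8) << 8 */
        return _mm_or_si128(rb, ag);                             /* reassemble AARRGGBB */
    }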
|
D | SkBlitRow_opts_SSE2.cpp |
    60 __m128i src_rb = _mm_and_si128(rb_mask, src_pixel); in S32_Blend_BlitRow32_SSE2()
    70 __m128i src_ag = _mm_and_si128(ag_mask, src_pixel); in S32_Blend_BlitRow32_SSE2()
    78 src_ag = _mm_and_si128(src_ag, ag_mask); in S32_Blend_BlitRow32_SSE2()
    82 __m128i dst_rb = _mm_and_si128(rb_mask, dst_pixel); in S32_Blend_BlitRow32_SSE2()
    84 __m128i dst_ag = _mm_and_si128(ag_mask, dst_pixel); in S32_Blend_BlitRow32_SSE2()
    86 dst_ag = _mm_and_si128(dst_ag, ag_mask); in S32_Blend_BlitRow32_SSE2()
    140 __m128i dst_rb = _mm_and_si128(rb_mask, dst_pixel); in S32A_Opaque_BlitRow32_SSE2()
    188 __m128i dst_rb = _mm_and_si128(rb_mask, dst_pixel); in S32A_Opaque_BlitRow32_SSE2()
    266 __m128i dst_rb = _mm_and_si128(rb_mask, dst_pixel); in S32A_Blend_BlitRow32_SSE2()
    267 __m128i src_rb = _mm_and_si128(rb_mask, src_pixel); in S32A_Blend_BlitRow32_SSE2()
    [all …]
|
D | SkXfermode_opts_SSE2.cpp |
    32 sum = _mm_or_si128(_mm_and_si128(cmp, _mm_set1_epi32(255)), in saturated_add_SSE2()
    40 __m128i ret = _mm_and_si128(cmp2, _mm_set1_epi32(255)); in clamp_signed_byte_SSE2()
    43 ret = _mm_or_si128(_mm_and_si128(cmp, ret), _mm_andnot_si128(cmp, n)); in clamp_signed_byte_SSE2()
    62 __m128i cmp = _mm_and_si128(cmp1, cmp2); in clamp_div255round_SSE2()
    64 ret = _mm_or_si128(_mm_and_si128(cmp, div), _mm_andnot_si128(cmp, ret)); in clamp_div255round_SSE2()
    195 return _mm_or_si128(_mm_and_si128(cmp, a), _mm_andnot_si128(cmp, b)); in SkMin32_SSE2()
    275 _mm_and_si128(cmp, rc2)); in overlay_byte_SSE2()
    303 __m128i ret = _mm_or_si128(_mm_and_si128(cmp, ret1), in darken_byte_SSE2()
    332 __m128i ret = _mm_or_si128(_mm_and_si128(cmp, ret1), in lighten_byte_SSE2()
    359 __m128i rc1 = _mm_and_si128(cmp1, SkAlphaMulAlpha_SSE2(sc, ida)); in colordodge_byte_SSE2()
    [all …]
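Nearly every hit in SkXfermode_opts_SSE2.cpp is the same branchless select idiom: a compare produces an all-ones or all-zero lane mask, and (cmp & a) | (~cmp & b) then picks one of two values per lane, as SkMin32_SSE2() and the clamp helpers show. A small sketch of the idiom, with hypothetical helper names:

    #include <emmintrin.h>

    /* Per-lane select: where cmp is all-ones take a, otherwise take b. */
    static inline __m128i select_si128(__m128i cmp, __m128i a, __m128i b) {
        return _mm_or_si128(_mm_and_si128(cmp, a), _mm_andnot_si128(cmp, b));
    }

    /* 32-bit per-lane minimum built from a signed compare plus the select;
     * adequate for the xfermode math, which stays far below INT32_MAX. */
    static inline __m128i min32_sketch(__m128i a, __m128i b) {
        return select_si128(_mm_cmplt_epi32(a, b), a, b);
    }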
|
D | SkMath_opts_SSE2.h |
    39 __m128i remHi1 = _mm_and_si128(cmp, remHi); in SkSqrtBits_SSE2()
    40 __m128i root1 = _mm_and_si128(cmp, root); in SkSqrtBits_SSE2()
|
D | SkBitmapProcState_opts_SSSE3.cpp |
    64 const __m128i x1_wide = _mm_and_si128(xx, mask_3FFF); in PrepareConstantsTwoPixelPairs()
    69 __m128i all_x = _mm_and_si128(_mm_srli_epi32(xx, 14), mask_000F); in PrepareConstantsTwoPixelPairs()
    111 __m128i xy1_wide = _mm_and_si128(xy_wide, mask_3FFF); in PrepareConstantsTwoPixelPairsDXDY()
    119 __m128i all_xy = _mm_and_si128(_mm_srli_epi32(xy_wide, 14), mask_000F); in PrepareConstantsTwoPixelPairsDXDY()
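Here the AND is plain bit-field extraction: Skia packs a 14-bit sample index and a 4-bit bilinear fraction into each 32-bit lane, and mask_3FFF / mask_000F peel the two fields apart. A sketch of that split; the field layout is inferred from the masks and the shift in the hits:

    #include <emmintrin.h>

    /* Split packed fixed-point coordinates: the low 14 bits of each 32-bit
     * lane are the sample index, the next 4 bits are the sub-pixel fraction. */
    static inline void split_fixed_coords(__m128i xx, __m128i* index, __m128i* frac) {
        const __m128i mask_3FFF = _mm_set1_epi32(0x3FFF);
        const __m128i mask_000F = _mm_set1_epi32(0x000F);
        *index = _mm_and_si128(xx, mask_3FFF);                      /* bits 13..0  */
        *frac  = _mm_and_si128(_mm_srli_epi32(xx, 14), mask_000F);  /* bits 17..14 */
    }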
|
/external/chromium_org/third_party/skia/src/opts/ |
D | SkColor_opts_SSE2.h |
    49 __m128i rb = _mm_and_si128(mask, c); in SkAlphaMulQ_SSE2()
    55 ag = _mm_and_si128(ag, mask); in SkAlphaMulQ_SSE2()
    59 rb = _mm_and_si128(mask, rb); in SkAlphaMulQ_SSE2()
    118 r = _mm_and_si128(r, _mm_set1_epi32(SK_R16_MASK)); in SkPacked16ToR32_SSE2()
    127 g = _mm_and_si128(g, _mm_set1_epi32(SK_G16_MASK)); in SkPacked16ToG32_SSE2()
    136 b = _mm_and_si128(b, _mm_set1_epi32(SK_B16_MASK)); in SkPacked16ToB32_SSE2()
    156 r1 = _mm_and_si128(r1, _mm_set1_epi32(SK_R16_MASK)); in SkPixel32ToPixel16_ToU16_SSE2()
    159 r2 = _mm_and_si128(r2, _mm_set1_epi32(SK_R16_MASK)); in SkPixel32ToPixel16_ToU16_SSE2()
    165 g1 = _mm_and_si128(g1, _mm_set1_epi32(SK_G16_MASK)); in SkPixel32ToPixel16_ToU16_SSE2()
    168 g2 = _mm_and_si128(g2, _mm_set1_epi32(SK_G16_MASK)); in SkPixel32ToPixel16_ToU16_SSE2()
    [all …]
|
D | SkBlitRow_opts_SSE2.cpp |
    60 __m128i src_rb = _mm_and_si128(rb_mask, src_pixel); in S32_Blend_BlitRow32_SSE2()
    70 __m128i src_ag = _mm_and_si128(ag_mask, src_pixel); in S32_Blend_BlitRow32_SSE2()
    78 src_ag = _mm_and_si128(src_ag, ag_mask); in S32_Blend_BlitRow32_SSE2()
    82 __m128i dst_rb = _mm_and_si128(rb_mask, dst_pixel); in S32_Blend_BlitRow32_SSE2()
    84 __m128i dst_ag = _mm_and_si128(ag_mask, dst_pixel); in S32_Blend_BlitRow32_SSE2()
    86 dst_ag = _mm_and_si128(dst_ag, ag_mask); in S32_Blend_BlitRow32_SSE2()
    140 __m128i dst_rb = _mm_and_si128(rb_mask, dst_pixel); in S32A_Opaque_BlitRow32_SSE2()
    188 __m128i dst_rb = _mm_and_si128(rb_mask, dst_pixel); in S32A_Opaque_BlitRow32_SSE2()
    266 __m128i dst_rb = _mm_and_si128(rb_mask, dst_pixel); in S32A_Blend_BlitRow32_SSE2()
    267 __m128i src_rb = _mm_and_si128(rb_mask, src_pixel); in S32A_Blend_BlitRow32_SSE2()
    [all …]
|
D | SkXfermode_opts_SSE2.cpp |
    32 sum = _mm_or_si128(_mm_and_si128(cmp, _mm_set1_epi32(255)), in saturated_add_SSE2()
    40 __m128i ret = _mm_and_si128(cmp2, _mm_set1_epi32(255)); in clamp_signed_byte_SSE2()
    43 ret = _mm_or_si128(_mm_and_si128(cmp, ret), _mm_andnot_si128(cmp, n)); in clamp_signed_byte_SSE2()
    62 __m128i cmp = _mm_and_si128(cmp1, cmp2); in clamp_div255round_SSE2()
    64 ret = _mm_or_si128(_mm_and_si128(cmp, div), _mm_andnot_si128(cmp, ret)); in clamp_div255round_SSE2()
    195 return _mm_or_si128(_mm_and_si128(cmp, a), _mm_andnot_si128(cmp, b)); in SkMin32_SSE2()
    275 _mm_and_si128(cmp, rc2)); in overlay_byte_SSE2()
    303 __m128i ret = _mm_or_si128(_mm_and_si128(cmp, ret1), in darken_byte_SSE2()
    332 __m128i ret = _mm_or_si128(_mm_and_si128(cmp, ret1), in lighten_byte_SSE2()
    359 __m128i rc1 = _mm_and_si128(cmp1, SkAlphaMulAlpha_SSE2(sc, ida)); in colordodge_byte_SSE2()
    [all …]
|
D | SkMath_opts_SSE2.h |
    39 __m128i remHi1 = _mm_and_si128(cmp, remHi); in SkSqrtBits_SSE2()
    40 __m128i root1 = _mm_and_si128(cmp, root); in SkSqrtBits_SSE2()
|
D | SkBitmapProcState_opts_SSSE3.cpp |
    63 const __m128i x1_wide = _mm_and_si128(xx, mask_3FFF); in PrepareConstantsTwoPixelPairs()
    68 __m128i all_x = _mm_and_si128(_mm_srli_epi32(xx, 14), mask_000F); in PrepareConstantsTwoPixelPairs()
    110 __m128i xy1_wide = _mm_and_si128(xy_wide, mask_3FFF); in PrepareConstantsTwoPixelPairsDXDY()
    118 __m128i all_xy = _mm_and_si128(_mm_srli_epi32(xy_wide, 14), mask_000F); in PrepareConstantsTwoPixelPairsDXDY()
|
/external/chromium_org/third_party/libvpx/source/libvpx/vp9/common/x86/ |
D | vp9_loopfilter_intrin_avx2.c |
    63 abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); in mb_lpf_horizontal_edge_w_avx2_8()
    98 filt = _mm_and_si128(_mm_subs_epi8(qs1ps1, qs1), hev); in mb_lpf_horizontal_edge_w_avx2_8()
    104 filt = _mm_and_si128(filt, mask); in mb_lpf_horizontal_edge_w_avx2_8()
    138 flat = _mm_and_si128(flat, mask); in mb_lpf_horizontal_edge_w_avx2_8()
    171 flat2 = _mm_and_si128(flat2, flat); // flat2 & flat & mask in mb_lpf_horizontal_edge_w_avx2_8()
    340 flat_q2p2 = _mm_and_si128(flat, flat_q2p2); in mb_lpf_horizontal_edge_w_avx2_8()
    344 flat_q1p1 = _mm_and_si128(flat, flat_q1p1); in mb_lpf_horizontal_edge_w_avx2_8()
    348 flat_q0p0 = _mm_and_si128(flat, flat_q0p0); in mb_lpf_horizontal_edge_w_avx2_8()
    352 flat2_q6p6 = _mm_and_si128(flat2, flat2_q6p6); in mb_lpf_horizontal_edge_w_avx2_8()
    358 flat2_q5p5 = _mm_and_si128(flat2, flat2_q5p5); in mb_lpf_horizontal_edge_w_avx2_8()
    [all …]
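In the VP9 loop filter the ANDs serve two purposes: combining per-pixel decision masks (the commented flat2 & flat & mask line) and then gating the wide-filter output so it only replaces pixels where every test passed. A hedged sketch that folds both steps into one hypothetical helper; the real code spreads this across many registers:

    #include <emmintrin.h>

    /* A pixel takes the wide-filter result only where flat2, flat and the edge
     * mask are all 0xFF for that byte; elsewhere the narrower result is kept. */
    static inline __m128i gate_wide_filter(__m128i flat2, __m128i flat, __m128i mask,
                                           __m128i wide, __m128i narrow) {
        const __m128i take_wide = _mm_and_si128(_mm_and_si128(flat2, flat), mask);
        return _mm_or_si128(_mm_and_si128(take_wide, wide),
                            _mm_andnot_si128(take_wide, narrow));
    }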
|
D | vp9_loopfilter_intrin_sse2.c |
    64 abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); in mb_lpf_horizontal_edge_w_sse2_8()
    96 filt = _mm_and_si128(_mm_subs_epi8(qs1ps1, qs1), hev); in mb_lpf_horizontal_edge_w_sse2_8()
    102 filt = _mm_and_si128(filt, mask); in mb_lpf_horizontal_edge_w_sse2_8()
    132 flat = _mm_and_si128(flat, mask); in mb_lpf_horizontal_edge_w_sse2_8()
    151 flat2 = _mm_and_si128(flat2, flat); // flat2 & flat & mask in mb_lpf_horizontal_edge_w_sse2_8()
    299 flat_q2p2 = _mm_and_si128(flat, flat_q2p2); in mb_lpf_horizontal_edge_w_sse2_8()
    303 flat_q1p1 = _mm_and_si128(flat, flat_q1p1); in mb_lpf_horizontal_edge_w_sse2_8()
    307 flat_q0p0 = _mm_and_si128(flat, flat_q0p0); in mb_lpf_horizontal_edge_w_sse2_8()
    311 flat2_q6p6 = _mm_and_si128(flat2, flat2_q6p6); in mb_lpf_horizontal_edge_w_sse2_8()
    317 flat2_q5p5 = _mm_and_si128(flat2, flat2_q5p5); in mb_lpf_horizontal_edge_w_sse2_8()
    [all …]
|
/external/libvpx/libvpx/vp9/common/x86/ |
D | vp9_loopfilter_intrin_avx2.c |
    63 abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); in mb_lpf_horizontal_edge_w_avx2_8()
    98 filt = _mm_and_si128(_mm_subs_epi8(qs1ps1, qs1), hev); in mb_lpf_horizontal_edge_w_avx2_8()
    104 filt = _mm_and_si128(filt, mask); in mb_lpf_horizontal_edge_w_avx2_8()
    138 flat = _mm_and_si128(flat, mask); in mb_lpf_horizontal_edge_w_avx2_8()
    171 flat2 = _mm_and_si128(flat2, flat); // flat2 & flat & mask in mb_lpf_horizontal_edge_w_avx2_8()
    340 flat_q2p2 = _mm_and_si128(flat, flat_q2p2); in mb_lpf_horizontal_edge_w_avx2_8()
    344 flat_q1p1 = _mm_and_si128(flat, flat_q1p1); in mb_lpf_horizontal_edge_w_avx2_8()
    348 flat_q0p0 = _mm_and_si128(flat, flat_q0p0); in mb_lpf_horizontal_edge_w_avx2_8()
    352 flat2_q6p6 = _mm_and_si128(flat2, flat2_q6p6); in mb_lpf_horizontal_edge_w_avx2_8()
    358 flat2_q5p5 = _mm_and_si128(flat2, flat2_q5p5); in mb_lpf_horizontal_edge_w_avx2_8()
    [all …]
|
D | vp9_loopfilter_intrin_sse2.c |
    63 abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); in mb_lpf_horizontal_edge_w_sse2_8()
    97 filt = _mm_and_si128(_mm_subs_epi8(qs1ps1, qs1), hev); in mb_lpf_horizontal_edge_w_sse2_8()
    103 filt = _mm_and_si128(filt, mask); in mb_lpf_horizontal_edge_w_sse2_8()
    136 flat = _mm_and_si128(flat, mask); in mb_lpf_horizontal_edge_w_sse2_8()
    164 flat2 = _mm_and_si128(flat2, flat); // flat2 & flat & mask in mb_lpf_horizontal_edge_w_sse2_8()
    312 flat_q2p2 = _mm_and_si128(flat, flat_q2p2); in mb_lpf_horizontal_edge_w_sse2_8()
    316 flat_q1p1 = _mm_and_si128(flat, flat_q1p1); in mb_lpf_horizontal_edge_w_sse2_8()
    320 flat_q0p0 = _mm_and_si128(flat, flat_q0p0); in mb_lpf_horizontal_edge_w_sse2_8()
    324 flat2_q6p6 = _mm_and_si128(flat2, flat2_q6p6); in mb_lpf_horizontal_edge_w_sse2_8()
    330 flat2_q5p5 = _mm_and_si128(flat2, flat2_q5p5); in mb_lpf_horizontal_edge_w_sse2_8()
    [all …]
|
/external/chromium_org/third_party/boringssl/src/crypto/poly1305/ |
D | poly1305_vec.c |
    272 st->H[0] = _mm_and_si128(MMASK, T5); in poly1305_first_block()
    273 st->H[1] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); in poly1305_first_block()
    275 st->H[2] = _mm_and_si128(MMASK, T5); in poly1305_first_block()
    276 st->H[3] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); in poly1305_first_block()
    352 M0 = _mm_and_si128(MMASK, T5); in poly1305_blocks()
    353 M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); in poly1305_blocks()
    355 M2 = _mm_and_si128(MMASK, T5); in poly1305_blocks()
    356 M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); in poly1305_blocks()
    416 M0 = _mm_and_si128(MMASK, T5); in poly1305_blocks()
    417 M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); in poly1305_blocks()
    [all …]
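poly1305_vec.c stores its accumulator in radix 2^26, and the hits are the limb split: AND with MMASK keeps the low 26 bits of each 64-bit lane, and a 26-bit shift exposes the next limb for the following AND. A sketch of one split step; the MMASK value is inferred from the radix rather than copied from the file:

    #include <emmintrin.h>

    /* Peel two radix-2^26 limbs out of each 64-bit lane of t. */
    static inline void split_limbs(__m128i t, __m128i* limb_lo, __m128i* limb_hi) {
        const __m128i MMASK = _mm_set1_epi64x((1 << 26) - 1);   /* low 26 bits per 64-bit lane */
        *limb_lo = _mm_and_si128(MMASK, t);
        *limb_hi = _mm_and_si128(MMASK, _mm_srli_epi64(t, 26));
    }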
|
/external/webp/src/dsp/ |
D | lossless_sse2.c |
    164 const __m128i in_00g0 = _mm_and_si128(in, mask); // 00g0|00g0|... in SubtractGreenFromBlueAndRed()
    180 const __m128i in_00g0 = _mm_and_si128(in, mask); // 00g0|00g0|... in AddGreenToBlueAndRed()
    223 const __m128i ag = _mm_and_si128(in, alpha_green_mask); // alpha, green in TransformColor()
    224 const __m128i r = _mm_srli_epi32(_mm_and_si128(in, red_mask), 16); in TransformColor()
    225 const __m128i g = _mm_srli_epi32(_mm_and_si128(in, green_mask), 8); in TransformColor()
    230 _mm_and_si128(_mm_sub_epi32(r, r_delta), lower_8bit_mask); in TransformColor()
    237 _mm_and_si128(_mm_sub_epi32(b, b_delta), lower_8bit_mask); in TransformColor()
    262 const __m128i ag = _mm_and_si128(in, alpha_green_mask); // alpha, green in TransformColorInverse()
    263 const __m128i r = _mm_srli_epi32(_mm_and_si128(in, red_mask), 16); in TransformColorInverse()
    264 const __m128i g = _mm_srli_epi32(_mm_and_si128(in, green_mask), 8); in TransformColorInverse()
    [all …]
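The SubtractGreenFromBlueAndRed() hit isolates the green byte of every ARGB pixel and subtracts it from the red and blue bytes, which decorrelates the channels before entropy coding; AddGreenToBlueAndRed() is the inverse. A sketch of the forward transform built around the masked line shown above; the pixel layout and mask value are assumptions:

    #include <emmintrin.h>

    /* Subtract the green channel from red and blue in four packed ARGB pixels. */
    static inline __m128i subtract_green_sketch(__m128i in) {
        const __m128i mask    = _mm_set1_epi32(0x0000ff00);     /* green byte of each pixel */
        const __m128i in_00g0 = _mm_and_si128(in, mask);        /* 00g0|00g0|...            */
        const __m128i in_0g00 = _mm_slli_epi32(in_00g0, 8);     /* green at the red byte    */
        const __m128i in_000g = _mm_srli_epi32(in_00g0, 8);     /* green at the blue byte   */
        const __m128i in_0g0g = _mm_or_si128(in_0g00, in_000g);
        return _mm_sub_epi8(in, in_0g0g);                       /* r -= g, b -= g (mod 256) */
    }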
|
D | upsampling_sse2.c |
    43 const __m128i tmp1 = _mm_and_si128((ij), st); /* (ij) & (s^t) */ \
    46 const __m128i tmp4 = _mm_and_si128(tmp3, one); /* & 1 -> lsb_correction */ \
    77 const __m128i t3 = _mm_and_si128(t2, one); /* (a^d) | (b^c) | (s^t) & 1 */ \
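The upsampling hits are fragments of WebP's fancy-upsampler macros, where the AND with `one` computes an lsb correction so repeated byte averages do not accumulate rounding bias. A minimal sketch of the underlying idea only, not of those macros: _mm_avg_epu8 rounds up, so subtracting (a ^ b) & 1 gives the floor average.

    #include <emmintrin.h>

    /* Unbiased byte average: avg_epu8 computes (a + b + 1) >> 1, so removing
     * the lsb correction ((a ^ b) & 1) yields floor((a + b) / 2). */
    static inline __m128i avg2_floor(__m128i a, __m128i b) {
        const __m128i one = _mm_set1_epi8(1);
        const __m128i lsb = _mm_and_si128(_mm_xor_si128(a, b), one);
        return _mm_sub_epi8(_mm_avg_epu8(a, b), lsb);
    }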
|
/external/chromium_org/third_party/libwebp/dsp/ |
D | lossless_sse2.c |
    164 const __m128i in_00g0 = _mm_and_si128(in, mask); // 00g0|00g0|... in SubtractGreenFromBlueAndRed()
    180 const __m128i in_00g0 = _mm_and_si128(in, mask); // 00g0|00g0|... in AddGreenToBlueAndRed()
    223 const __m128i ag = _mm_and_si128(in, alpha_green_mask); // alpha, green in TransformColor()
    224 const __m128i r = _mm_srli_epi32(_mm_and_si128(in, red_mask), 16); in TransformColor()
    225 const __m128i g = _mm_srli_epi32(_mm_and_si128(in, green_mask), 8); in TransformColor()
    230 _mm_and_si128(_mm_sub_epi32(r, r_delta), lower_8bit_mask); in TransformColor()
    237 _mm_and_si128(_mm_sub_epi32(b, b_delta), lower_8bit_mask); in TransformColor()
    262 const __m128i ag = _mm_and_si128(in, alpha_green_mask); // alpha, green in TransformColorInverse()
    263 const __m128i r = _mm_srli_epi32(_mm_and_si128(in, red_mask), 16); in TransformColorInverse()
    264 const __m128i g = _mm_srli_epi32(_mm_and_si128(in, green_mask), 8); in TransformColorInverse()
    [all …]
|
D | upsampling_sse2.c |
    43 const __m128i tmp1 = _mm_and_si128((ij), st); /* (ij) & (s^t) */ \
    46 const __m128i tmp4 = _mm_and_si128(tmp3, one); /* & 1 -> lsb_correction */ \
    77 const __m128i t3 = _mm_and_si128(t2, one); /* (a^d) | (b^c) | (s^t) & 1 */ \
|
/external/chromium_org/third_party/libvpx/source/libvpx/vp8/encoder/x86/ |
D | denoising_sse2.c |
    85 __m128i adj2 = _mm_and_si128(mask2, l32); in vp8_denoiser_filter_sse2()
    86 const __m128i adj1 = _mm_and_si128(mask1, l21); in vp8_denoiser_filter_sse2()
    87 const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff); in vp8_denoiser_filter_sse2()
    98 nadj = _mm_and_si128(diff_sign, adj); in vp8_denoiser_filter_sse2()
    157 nadj = _mm_and_si128(diff_sign, adj); in vp8_denoiser_filter_sse2()
    262 __m128i adj2 = _mm_and_si128(mask2, l32); in vp8_denoiser_filter_uv_sse2()
    263 const __m128i adj1 = _mm_and_si128(mask1, l21); in vp8_denoiser_filter_uv_sse2()
    264 const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff); in vp8_denoiser_filter_uv_sse2()
    276 nadj = _mm_and_si128(diff_sign, adj); in vp8_denoiser_filter_uv_sse2()
    348 nadj = _mm_and_si128(diff_sign, adj); in vp8_denoiser_filter_uv_sse2()
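The VP8 denoiser builds a staircase adjustment by ANDing level masks (mask0/mask1/mask2) with candidate step sizes, then uses diff_sign to apply it with the right sign per byte. A sketch of the sign-application step, assuming the usual saturating byte arithmetic; the helper name is hypothetical:

    #include <emmintrin.h>

    /* Apply adj to v with the per-byte sign from diff_sign (0xFF where the
     * source-minus-reference difference was negative). */
    static inline __m128i apply_signed_adjustment(__m128i v, __m128i adj, __m128i diff_sign) {
        const __m128i padj = _mm_andnot_si128(diff_sign, adj);  /* portion to add      */
        const __m128i nadj = _mm_and_si128(diff_sign, adj);     /* portion to subtract */
        v = _mm_adds_epu8(v, padj);
        return _mm_subs_epu8(v, nadj);
    }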
|
/external/libvpx/libvpx/vp8/encoder/x86/ |
D | denoising_sse2.c |
    70 __m128i adj2 = _mm_and_si128(mask2, l32); in vp8_denoiser_filter_sse2()
    71 const __m128i adj1 = _mm_and_si128(mask1, l21); in vp8_denoiser_filter_sse2()
    72 const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff); in vp8_denoiser_filter_sse2()
    83 nadj = _mm_and_si128(diff_sign, adj); in vp8_denoiser_filter_sse2()
|
/external/libhevc/common/x86/ |
D | ihevc_sao_ssse3_intr.c |
    195 band_table0_8x16b = _mm_and_si128(band_table0_8x16b, cmp_store); in ihevc_sao_band_offset_luma_ssse3()
    196 band_table1_8x16b = _mm_and_si128(band_table1_8x16b, cmp_store); in ihevc_sao_band_offset_luma_ssse3()
    197 band_table2_8x16b = _mm_and_si128(band_table2_8x16b, cmp_store); in ihevc_sao_band_offset_luma_ssse3()
    198 band_table3_8x16b = _mm_and_si128(band_table3_8x16b, cmp_store); in ihevc_sao_band_offset_luma_ssse3()
    204 band_table0_8x16b = _mm_and_si128(band_table0_8x16b, tmp_set_128i_2); in ihevc_sao_band_offset_luma_ssse3()
    214 band_table3_8x16b = _mm_and_si128(band_table3_8x16b, tmp_set_128i_2); in ihevc_sao_band_offset_luma_ssse3()
    220 band_table2_8x16b = _mm_and_si128(band_table2_8x16b, tmp_set_128i_2); in ihevc_sao_band_offset_luma_ssse3()
    226 band_table1_8x16b = _mm_and_si128(band_table1_8x16b, tmp_set_128i_2); in ihevc_sao_band_offset_luma_ssse3()
    238 band_table0_8x16b = _mm_and_si128(band_table0_8x16b, cmp_store); in ihevc_sao_band_offset_luma_ssse3()
    239 band_table1_8x16b = _mm_and_si128(band_table1_8x16b, cmp_store); in ihevc_sao_band_offset_luma_ssse3()
    [all …]
|
D | ihevc_deblk_ssse3_intr.c |
    302 const2tc_8x16b = _mm_and_si128(const2tc_8x16b, temp_str0_16x8b); in ihevc_deblk_luma_vert_ssse3()
    491 tmp_delta1_8x16b = _mm_and_si128(tmp_delta1_8x16b, tmp0_const_8x16b); in ihevc_deblk_luma_vert_ssse3()
    492 tmp_delta0_8x16b = _mm_and_si128(tmp_delta0_8x16b, tmp0_const_8x16b); in ihevc_deblk_luma_vert_ssse3()
    500 tmp_delta0_8x16b = _mm_and_si128(tmp_delta0_8x16b, mask_pq_8x16b); in ihevc_deblk_luma_vert_ssse3()
    501 tmp_delta1_8x16b = _mm_and_si128(tmp_delta1_8x16b, mask_pq_8x16b); in ihevc_deblk_luma_vert_ssse3()
    746 const2tc_8x16b = _mm_and_si128(const2tc_8x16b, temp_str0_16x8b); in ihevc_deblk_luma_horz_ssse3()
    929 src_q2_8x16b = _mm_and_si128(src_q2_8x16b, src_p2_8x16b); in ihevc_deblk_luma_horz_ssse3()
    941 tmp_pq_str1_8x16b = _mm_and_si128(tmp_pq_str0_8x16b, src_q2_8x16b); in ihevc_deblk_luma_horz_ssse3()
    945 tmp_pq_str0_8x16b = _mm_and_si128(tmp_pq_str0_8x16b, src_p2_8x16b); in ihevc_deblk_luma_horz_ssse3()
    951 tmp_delta1_8x16b = _mm_and_si128(tmp_delta1_8x16b, tmp_pq_str1_8x16b); in ihevc_deblk_luma_horz_ssse3()
    [all …]
|
D | ihevc_inter_pred_filters_ssse3_intr.c |
    454 res_temp8_8x16b = _mm_and_si128(res_temp7_8x16b, mask_low_32b); in ihevc_inter_pred_luma_horz_ssse3()
    455 res_temp7_8x16b = _mm_and_si128(res_temp5_8x16b, mask_high_96b); in ihevc_inter_pred_luma_horz_ssse3()
    492 res_temp18_8x16b = _mm_and_si128(res_temp17_8x16b, mask_low_32b); in ihevc_inter_pred_luma_horz_ssse3()
    493 res_temp17_8x16b = _mm_and_si128(res_temp15_8x16b, mask_high_96b); in ihevc_inter_pred_luma_horz_ssse3()
    612 res_temp8_8x16b = _mm_and_si128(res_temp7_8x16b, mask_low_32b); in ihevc_inter_pred_luma_horz_ssse3()
    613 res_temp7_8x16b = _mm_and_si128(res_temp5_8x16b, mask_high_96b); in ihevc_inter_pred_luma_horz_ssse3()
    650 res_temp18_8x16b = _mm_and_si128(res_temp17_8x16b, mask_low_32b); in ihevc_inter_pred_luma_horz_ssse3()
    651 res_temp17_8x16b = _mm_and_si128(res_temp15_8x16b, mask_high_96b); in ihevc_inter_pred_luma_horz_ssse3()
    701 res_temp8_8x16b = _mm_and_si128(res_temp7_8x16b, mask_low_32b); in ihevc_inter_pred_luma_horz_ssse3()
    702 res_temp7_8x16b = _mm_and_si128(res_temp5_8x16b, mask_high_96b); in ihevc_inter_pred_luma_horz_ssse3()
    [all …]
|
/external/eigen/Eigen/src/Core/arch/SSE/ |
D | MathFunctions.h |
    301 emm2 = _mm_and_si128(emm2, p4i_not1);
    304 emm0 = _mm_and_si128(emm2, p4i_4);
    312 emm2 = _mm_and_si128(emm2, p4i_2);
    393 emm2 = _mm_and_si128(emm2, p4i_not1);
    402 emm2 = _mm_and_si128(emm2, p4i_2);
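Eigen's cephes-derived psin()/pcos() use these ANDs for quadrant bookkeeping: p4i_not1 forces the quadrant index even, p4i_4 extracts the bit that becomes the result's sign, and p4i_2 selects between the sine and cosine polynomials. A sketch of that bookkeeping following the cephes convention, not Eigen's exact code:

    #include <emmintrin.h>

    /* Quadrant bookkeeping for a cephes-style vector sine: j = (int)(x * 4/pi),
     * forced even; bit 2 of j feeds the sign flip, bit 1 picks the polynomial. */
    static inline void sin_quadrant_masks(__m128 x, __m128i* sign_bit, __m128i* poly_select) {
        __m128i j = _mm_cvttps_epi32(_mm_mul_ps(x, _mm_set1_ps(1.27323954473516f)));  /* 4/pi */
        j = _mm_add_epi32(j, _mm_set1_epi32(1));
        j = _mm_and_si128(j, _mm_set1_epi32(~1));                /* j = (j + 1) & ~1 */
        *sign_bit = _mm_slli_epi32(_mm_and_si128(j, _mm_set1_epi32(4)), 29);  /* to float sign position */
        *poly_select = _mm_cmpeq_epi32(_mm_and_si128(j, _mm_set1_epi32(2)),
                                       _mm_setzero_si128());     /* which polynomial per lane */
    }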
|