/external/libvpx/libvpx/vpx_dsp/x86/

D | highbd_intrapred_intrin_ssse3.c
      59  *row = _mm_alignr_epi8(*ar, *row, 2);  in d45_store_8()
      70  const __m128i BCDEFGHH = _mm_alignr_epi8(HHHHHHHH, ABCDEFGH, 2);  in vpx_highbd_d45_predictor_8x8_ssse3()
      71  const __m128i CDEFGHHH = _mm_alignr_epi8(HHHHHHHH, ABCDEFGH, 4);  in vpx_highbd_d45_predictor_8x8_ssse3()
      89  *row_0 = _mm_alignr_epi8(*row_1, *row_0, 2);  in d45_store_16()
      90  *row_1 = _mm_alignr_epi8(*ar, *row_1, 2);  in d45_store_16()
     103  const __m128i B0 = _mm_alignr_epi8(A1, A0, 2);  in vpx_highbd_d45_predictor_16x16_ssse3()
     104  const __m128i B1 = _mm_alignr_epi8(AR, A1, 2);  in vpx_highbd_d45_predictor_16x16_ssse3()
     105  const __m128i C0 = _mm_alignr_epi8(A1, A0, 4);  in vpx_highbd_d45_predictor_16x16_ssse3()
     106  const __m128i C1 = _mm_alignr_epi8(AR, A1, 4);  in vpx_highbd_d45_predictor_16x16_ssse3()
     140  const __m128i B0 = _mm_alignr_epi8(A1, A0, 2);  in vpx_highbd_d45_predictor_32x32_ssse3()
     [all …]
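The d45 predictor hits above all follow one idiom: treat a register pair as a contiguous row of 16-bit pixels and slide the row left by one pixel (2 bytes) per step, with the above-right pixel entering from the second register. A minimal stand-alone sketch of that idiom, not the libvpx code itself (names are invented; compile with -mssse3):

  #include <stdint.h>
  #include <stdio.h>
  #include <tmmintrin.h>

  /* Hypothetical helper (not libvpx code): shift a row of eight 16-bit
   * pixels left by one element, pulling the vacated lane from `next`,
   * the same alignr-by-2 step the d45 predictor hits above use. */
  static __m128i shift_row_left_1px(__m128i row, __m128i next) {
    /* bytes 2..15 of row become bytes 0..13; bytes 0..1 of next fill the top */
    return _mm_alignr_epi8(next, row, 2);
  }

  int main(void) {
    const __m128i row  = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
    const __m128i next = _mm_set1_epi16(9);
    uint16_t out[8];
    _mm_storeu_si128((__m128i *)out, shift_row_left_1px(row, next));
    for (int i = 0; i < 8; ++i) printf("%d ", out[i]);  /* prints 2 3 4 5 6 7 8 9 */
    printf("\n");
    return 0;
  }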
/external/clang/test/CodeGen/

D | palignr.c
       4  #define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))  macro
       8  int4 align1(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 15); }  in align1()
      12  int4 align2(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 16); }  in align2()
      14  int4 align3(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 17); }  in align3()
      16  int4 align4(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 32); }  in align4()
D | ssse3-builtins.c
      31  return _mm_alignr_epi8(a, b, 2);  in test_mm_alignr_epi8()
      37  return _mm_alignr_epi8(a, b, 17);  in test2_mm_alignr_epi8()
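Both test files above exercise the shift-count edge cases of the intrinsic: _mm_alignr_epi8(a, b, n) concatenates a (high half) and b (low half) into a 32-byte value, shifts it right by n bytes, and keeps the low 16 bytes, so counts of 16..31 degenerate into a zero-filling right shift of a alone and counts of 32 or more produce all zeros. A scalar reference model of that behaviour, written for this note rather than taken from the tests (compile with -mssse3):

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>
  #include <tmmintrin.h>

  /* Scalar model of _mm_alignr_epi8(a, b, n). */
  static void alignr_ref(uint8_t dst[16], const uint8_t a[16],
                         const uint8_t b[16], int n) {
    uint8_t cat[32];
    memcpy(cat, b, 16);       /* bytes 0..15  = b (low half)  */
    memcpy(cat + 16, a, 16);  /* bytes 16..31 = a (high half) */
    for (int i = 0; i < 16; ++i) {
      const int s = i + n;
      dst[i] = (s < 32) ? cat[s] : 0;  /* n >= 32 gives all zeros */
    }
  }

  int main(void) {
    uint8_t a[16], b[16], ref[16], got[16];
    for (int i = 0; i < 16; ++i) { a[i] = (uint8_t)(0xA0 + i); b[i] = (uint8_t)i; }
    const __m128i va = _mm_loadu_si128((const __m128i *)a);
    const __m128i vb = _mm_loadu_si128((const __m128i *)b);

    alignr_ref(ref, a, b, 2);   /* the ordinary case from test_mm_alignr_epi8 */
    _mm_storeu_si128((__m128i *)got, _mm_alignr_epi8(va, vb, 2));
    printf("n=2  match: %d\n", memcmp(ref, got, 16) == 0);

    alignr_ref(ref, a, b, 17);  /* the shift-of-a-alone case from align3/test2 */
    _mm_storeu_si128((__m128i *)got, _mm_alignr_epi8(va, vb, 17));
    printf("n=17 match: %d\n", memcmp(ref, got, 16) == 0);
    return 0;
  }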
/external/python/cpython3/Modules/_blake2/impl/

D | blake2b-load-sse41.h
      61  b1 = _mm_alignr_epi8(m3, m7, 8); \
      84  b0 = _mm_alignr_epi8(m6, m5, 8); \
     109  b1 = _mm_alignr_epi8(m2, m0, 8); \
     172  b0 = _mm_alignr_epi8(m6, m0, 8); \
     221  b1 = _mm_alignr_epi8(m5, m6, 8); \
     252  b0 = _mm_alignr_epi8(m7, m5, 8); \
     277  b1 = _mm_alignr_epi8(m0, m5, 8); \
     285  b1 = _mm_alignr_epi8(m4, m1, 8); \
     293  b1 = _mm_alignr_epi8(m5, m0, 8); \
     332  b0 = _mm_alignr_epi8(m7, m5, 8); \
     [all …]
D | blake2b-round.h
      87  t0 = _mm_alignr_epi8(row2h, row2l, 8); \
      88  t1 = _mm_alignr_epi8(row2l, row2h, 8); \
      96  t0 = _mm_alignr_epi8(row4h, row4l, 8); \
      97  t1 = _mm_alignr_epi8(row4l, row4h, 8); \
     102  t0 = _mm_alignr_epi8(row2l, row2h, 8); \
     103  t1 = _mm_alignr_epi8(row2h, row2l, 8); \
     111  t0 = _mm_alignr_epi8(row4l, row4h, 8); \
     112  t1 = _mm_alignr_epi8(row4h, row4l, 8); \
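The blake2b-round.h hits are the diagonalize/undiagonalize steps of the SSE BLAKE2b compression: a 256-bit row of four 64-bit words lives in two registers (row2l holds the lower two words, row2h the upper two), and a pair of alignr-by-8 operations rotates the row by one word in either direction. A self-contained sketch of just that rotation, illustrative rather than the BLAKE2b code itself:

  #include <stdint.h>
  #include <stdio.h>
  #include <tmmintrin.h>

  /* Rotate the four 64-bit words w0..w3 of a 256-bit row left by one word.
   * lo holds {w0, w1} (w0 in the low lane), hi holds {w2, w3}. */
  static void rotate_row_left_64(__m128i *lo, __m128i *hi) {
    const __m128i t0 = _mm_alignr_epi8(*hi, *lo, 8);  /* {w1, w2} */
    const __m128i t1 = _mm_alignr_epi8(*lo, *hi, 8);  /* {w3, w0} */
    *lo = t0;
    *hi = t1;
  }

  int main(void) {
    uint64_t w[4] = {10, 11, 12, 13}, out[4];
    __m128i lo = _mm_loadu_si128((const __m128i *)&w[0]);
    __m128i hi = _mm_loadu_si128((const __m128i *)&w[2]);
    rotate_row_left_64(&lo, &hi);
    _mm_storeu_si128((__m128i *)&out[0], lo);
    _mm_storeu_si128((__m128i *)&out[2], hi);
    printf("%llu %llu %llu %llu\n",  /* prints 11 12 13 10 */
           (unsigned long long)out[0], (unsigned long long)out[1],
           (unsigned long long)out[2], (unsigned long long)out[3]);
    return 0;
  }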
/external/libhevc/common/x86/

D | ihevc_sao_ssse3_intr.c
     957  left_store_16x8b = _mm_alignr_epi8(left_store_16x8b, left_store_16x8b, 2);  in ihevc_sao_edge_offset_class0_ssse3()
     959  left1_16x8b = _mm_alignr_epi8(src_temp1_16x8b, left_store_16x8b, 15);  in ihevc_sao_edge_offset_class0_ssse3()
     960  left_store_16x8b = _mm_alignr_epi8(left_store_16x8b, src_temp1_16x8b, 15);  in ihevc_sao_edge_offset_class0_ssse3()
     962  left0_16x8b = _mm_alignr_epi8(src_temp0_16x8b, left_store_16x8b, 15);  in ihevc_sao_edge_offset_class0_ssse3()
     963  left_store_16x8b = _mm_alignr_epi8(left_store_16x8b, src_temp0_16x8b, 15);  in ihevc_sao_edge_offset_class0_ssse3()
    1078  left_store_16x8b = _mm_alignr_epi8(left_store_16x8b, left_store_16x8b, 4);  in ihevc_sao_edge_offset_class0_ssse3()
    1081  cmp_lt1_16x8b = _mm_alignr_epi8(cmp_gt1_16x8b, left_store_16x8b, 15);  in ihevc_sao_edge_offset_class0_ssse3()
    1082  left_store_16x8b = _mm_alignr_epi8(left_store_16x8b, edge0_16x8b, 15);  in ihevc_sao_edge_offset_class0_ssse3()
    1085  left1_16x8b = _mm_alignr_epi8(src_temp1_16x8b, left_store_16x8b, 15);  in ihevc_sao_edge_offset_class0_ssse3()
    1086  left_store_16x8b = _mm_alignr_epi8(left_store_16x8b, edge0_16x8b, 15);  in ihevc_sao_edge_offset_class0_ssse3()
    [all …]
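The SAO edge-offset hits use a count of 15 so that, for a 16-byte row, every lane receives the pixel immediately to its left, with the column to the left of the block carried in the top byte of left_store_16x8b. A minimal sketch of that trick (hypothetical helper names, not libhevc code; compile with -mssse3):

  #include <stdint.h>
  #include <stdio.h>
  #include <tmmintrin.h>

  /* Hypothetical helper (not libhevc code): given 16 pixels of the current
   * row and a carry register whose top byte holds the pixel just left of the
   * row, produce for each lane i the pixel at position i-1. */
  static __m128i left_neighbours(__m128i cur, __m128i left_carry) {
    /* byte 0 <- left_carry[15], bytes 1..15 <- cur[0..14] */
    return _mm_alignr_epi8(cur, left_carry, 15);
  }

  int main(void) {
    uint8_t row[16], out[16];
    for (int i = 0; i < 16; ++i) row[i] = (uint8_t)(i + 1);           /* 1..16 */
    const __m128i carry = _mm_slli_si128(_mm_cvtsi32_si128(99), 15);  /* 99 in byte 15 */
    const __m128i cur = _mm_loadu_si128((const __m128i *)row);
    _mm_storeu_si128((__m128i *)out, left_neighbours(cur, carry));
    for (int i = 0; i < 16; ++i) printf("%d ", out[i]);  /* prints 99 1 2 ... 15 */
    printf("\n");
    return 0;
  }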
/external/libaom/libaom/av1/common/x86/

D | intra_edge_sse4.c
      71  in0 = _mm_alignr_epi8(in1, in0, 8);  in av1_filter_intra_edge_sse4_1()
     108  in0 = _mm_alignr_epi8(in1, in0, 8);  in av1_filter_intra_edge_sse4_1()
     149  __m128i in1 = _mm_alignr_epi8(in8, in0, 2);  in av1_filter_intra_edge_high_sse4_1()
     150  __m128i in2 = _mm_alignr_epi8(in8, in0, 4);  in av1_filter_intra_edge_high_sse4_1()
     178  __m128i in1 = _mm_alignr_epi8(in8, in0, 2);  in av1_filter_intra_edge_high_sse4_1()
     179  __m128i in2 = _mm_alignr_epi8(in8, in0, 4);  in av1_filter_intra_edge_high_sse4_1()
     180  __m128i in3 = _mm_alignr_epi8(in8, in0, 6);  in av1_filter_intra_edge_high_sse4_1()
     181  __m128i in4 = _mm_alignr_epi8(in8, in0, 8);  in av1_filter_intra_edge_high_sse4_1()
     238  __m128i in8 = _mm_alignr_epi8(in16, in0, 8);  in av1_upsample_intra_edge_sse4_1()
     255  __m128i in1 = _mm_alignr_epi8(in16, in0, 1);  in av1_upsample_intra_edge_sse4_1()
     [all …]
D | highbd_convolve_2d_ssse3.c
      70  s[0] = _mm_alignr_epi8(row01, row00, 0);  in av1_highbd_convolve_2d_sr_ssse3()
      71  s[1] = _mm_alignr_epi8(row01, row00, 4);  in av1_highbd_convolve_2d_sr_ssse3()
      72  s[2] = _mm_alignr_epi8(row01, row00, 8);  in av1_highbd_convolve_2d_sr_ssse3()
      73  s[3] = _mm_alignr_epi8(row01, row00, 12);  in av1_highbd_convolve_2d_sr_ssse3()
      80  s[0] = _mm_alignr_epi8(row01, row00, 2);  in av1_highbd_convolve_2d_sr_ssse3()
      81  s[1] = _mm_alignr_epi8(row01, row00, 6);  in av1_highbd_convolve_2d_sr_ssse3()
      82  s[2] = _mm_alignr_epi8(row01, row00, 10);  in av1_highbd_convolve_2d_sr_ssse3()
      83  s[3] = _mm_alignr_epi8(row01, row00, 14);  in av1_highbd_convolve_2d_sr_ssse3()
D | highbd_wiener_convolve_ssse3.c
      78  _mm_madd_epi16(_mm_alignr_epi8(data2, data, 4), coeff_23);  in av1_highbd_wiener_convolve_add_src_ssse3()
      80  _mm_madd_epi16(_mm_alignr_epi8(data2, data, 8), coeff_45);  in av1_highbd_wiener_convolve_add_src_ssse3()
      82  _mm_madd_epi16(_mm_alignr_epi8(data2, data, 12), coeff_67);  in av1_highbd_wiener_convolve_add_src_ssse3()
      91  _mm_madd_epi16(_mm_alignr_epi8(data2, data, 2), coeff_01);  in av1_highbd_wiener_convolve_add_src_ssse3()
      93  _mm_madd_epi16(_mm_alignr_epi8(data2, data, 6), coeff_23);  in av1_highbd_wiener_convolve_add_src_ssse3()
      95  _mm_madd_epi16(_mm_alignr_epi8(data2, data, 10), coeff_45);  in av1_highbd_wiener_convolve_add_src_ssse3()
      97  _mm_madd_epi16(_mm_alignr_epi8(data2, data, 14), coeff_67);  in av1_highbd_wiener_convolve_add_src_ssse3()
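The convolve and Wiener hits in this group all share one horizontal-filter pattern: data holds eight 16-bit samples and data2 the next eight, so alignr with byte offsets 0, 2, 4, ..., 14 yields every shifted window, and _mm_madd_epi16 multiplies each window by an interleaved pair of filter taps. A cut-down 4-tap version of the pattern, with invented names rather than the libaom code (compile with -mssse3):

  #include <stdint.h>
  #include <stdio.h>
  #include <tmmintrin.h>

  /* Even-position outputs of a 4-tap horizontal filter on 16-bit samples.
   * data = s[0..7], data2 = s[8..15]; alignr by 4 bytes supplies the window
   * s[2..9], and madd_epi16 pairs each window with two interleaved taps. */
  static __m128i four_tap_even(__m128i data, __m128i data2, const int16_t c[4]) {
    const __m128i coeff_01 = _mm_setr_epi16(c[0], c[1], c[0], c[1], c[0], c[1], c[0], c[1]);
    const __m128i coeff_23 = _mm_setr_epi16(c[2], c[3], c[2], c[3], c[2], c[3], c[2], c[3]);
    const __m128i res_01 = _mm_madd_epi16(data, coeff_01);                            /* taps 0,1 */
    const __m128i res_23 = _mm_madd_epi16(_mm_alignr_epi8(data2, data, 4), coeff_23); /* taps 2,3 */
    return _mm_add_epi32(res_01, res_23);  /* y[0], y[2], y[4], y[6] as 32-bit sums */
  }

  int main(void) {
    int16_t s[16];
    for (int i = 0; i < 16; ++i) s[i] = (int16_t)i;
    const int16_t c[4] = {1, 2, 3, 4};
    int32_t y[4];
    _mm_storeu_si128((__m128i *)y,
                     four_tap_even(_mm_loadu_si128((const __m128i *)&s[0]),
                                   _mm_loadu_si128((const __m128i *)&s[8]), c));
    for (int i = 0; i < 4; ++i) printf("%d ", y[i]);  /* prints 20 40 60 80 */
    printf("\n");
    return 0;
  }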
D | jnt_convolve_ssse3.c
      89  const __m128i src_2 = _mm_alignr_epi8(src_hi, src_lo, 4);  in av1_dist_wtd_convolve_2d_ssse3()
      91  const __m128i src_4 = _mm_alignr_epi8(src_hi, src_lo, 8);  in av1_dist_wtd_convolve_2d_ssse3()
      93  const __m128i src_6 = _mm_alignr_epi8(src_hi, src_lo, 12);  in av1_dist_wtd_convolve_2d_ssse3()
     102  const __m128i src_1 = _mm_alignr_epi8(src_hi, src_lo, 2);  in av1_dist_wtd_convolve_2d_ssse3()
     104  const __m128i src_3 = _mm_alignr_epi8(src_hi, src_lo, 6);  in av1_dist_wtd_convolve_2d_ssse3()
     106  const __m128i src_5 = _mm_alignr_epi8(src_hi, src_lo, 10);  in av1_dist_wtd_convolve_2d_ssse3()
     108  const __m128i src_7 = _mm_alignr_epi8(src_hi, src_lo, 14);  in av1_dist_wtd_convolve_2d_ssse3()
D | highbd_jnt_convolve_sse4.c
     313  s[0] = _mm_alignr_epi8(row01, row00, 0);  in av1_highbd_dist_wtd_convolve_x_sse4_1()
     314  s[1] = _mm_alignr_epi8(row01, row00, 4);  in av1_highbd_dist_wtd_convolve_x_sse4_1()
     315  s[2] = _mm_alignr_epi8(row01, row00, 8);  in av1_highbd_dist_wtd_convolve_x_sse4_1()
     316  s[3] = _mm_alignr_epi8(row01, row00, 12);  in av1_highbd_dist_wtd_convolve_x_sse4_1()
     323  s[0] = _mm_alignr_epi8(row01, row00, 2);  in av1_highbd_dist_wtd_convolve_x_sse4_1()
     324  s[1] = _mm_alignr_epi8(row01, row00, 6);  in av1_highbd_dist_wtd_convolve_x_sse4_1()
     325  s[2] = _mm_alignr_epi8(row01, row00, 10);  in av1_highbd_dist_wtd_convolve_x_sse4_1()
     326  s[3] = _mm_alignr_epi8(row01, row00, 14);  in av1_highbd_dist_wtd_convolve_x_sse4_1()
D | highbd_convolve_2d_sse4.c
     242  _mm_madd_epi16(_mm_alignr_epi8(data2, data, 4), coeff_23);  in av1_highbd_dist_wtd_convolve_2d_sse4_1()
     244  _mm_madd_epi16(_mm_alignr_epi8(data2, data, 8), coeff_45);  in av1_highbd_dist_wtd_convolve_2d_sse4_1()
     246  _mm_madd_epi16(_mm_alignr_epi8(data2, data, 12), coeff_67);  in av1_highbd_dist_wtd_convolve_2d_sse4_1()
     255  _mm_madd_epi16(_mm_alignr_epi8(data2, data, 2), coeff_01);  in av1_highbd_dist_wtd_convolve_2d_sse4_1()
     257  _mm_madd_epi16(_mm_alignr_epi8(data2, data, 6), coeff_23);  in av1_highbd_dist_wtd_convolve_2d_sse4_1()
     259  _mm_madd_epi16(_mm_alignr_epi8(data2, data, 10), coeff_45);  in av1_highbd_dist_wtd_convolve_2d_sse4_1()
     261  _mm_madd_epi16(_mm_alignr_epi8(data2, data, 14), coeff_67);  in av1_highbd_dist_wtd_convolve_2d_sse4_1()
D | highbd_warp_plane_sse4.c
     126  _mm_madd_epi16(_mm_alignr_epi8(src2_1, src_1, 4), coeff[2]);  in highbd_filter_src_pixels()
     128  _mm_madd_epi16(_mm_alignr_epi8(src2_1, src_1, 8), coeff[4]);  in highbd_filter_src_pixels()
     130  _mm_madd_epi16(_mm_alignr_epi8(src2_1, src_1, 12), coeff[6]);  in highbd_filter_src_pixels()
     138  _mm_madd_epi16(_mm_alignr_epi8(src2_1, src_1, 2), coeff[1]);  in highbd_filter_src_pixels()
     140  _mm_madd_epi16(_mm_alignr_epi8(src2_1, src_1, 6), coeff[3]);  in highbd_filter_src_pixels()
     142  _mm_madd_epi16(_mm_alignr_epi8(src2_1, src_1, 10), coeff[5]);  in highbd_filter_src_pixels()
     144  _mm_madd_epi16(_mm_alignr_epi8(src2_1, src_1, 14), coeff[7]);  in highbd_filter_src_pixels()
/external/libgav1/libgav1/src/dsp/x86/

D | intra_edge_sse4.cc
      73  _mm_alignr_epi8(result_hi, _mm_slli_si128(result_lo, 10), 10);  in ComputeKernel1Store12()
     113  _mm_alignr_epi8(result_hi, _mm_slli_si128(result_lo, 10), 10);  in ComputeKernel2Store12()
     150  sum = _mm_alignr_epi8(sum_hi, _mm_slli_si128(sum, 8), 8);  in ComputeKernel3Store8()
     222  __m128i sum_lo = _mm_sub_epi16(_mm_alignr_epi8(src9_hi, src9_lo, 2), src_lo);  in IntraEdgeUpsampler_SSE4_1()
     223  sum_lo = _mm_add_epi16(sum_lo, _mm_alignr_epi8(src9_hi, src9_lo, 4));  in IntraEdgeUpsampler_SSE4_1()
     224  sum_lo = _mm_sub_epi16(sum_lo, _mm_alignr_epi8(src_hi, src_lo, 6));  in IntraEdgeUpsampler_SSE4_1()
     234  _mm_sub_epi16(_mm_alignr_epi8(src9_hi_extra, src9_hi, 2), src_hi);  in IntraEdgeUpsampler_SSE4_1()
     235  sum_hi = _mm_add_epi16(sum_hi, _mm_alignr_epi8(src9_hi_extra, src9_hi, 4));  in IntraEdgeUpsampler_SSE4_1()
     236  sum_hi = _mm_sub_epi16(sum_hi, _mm_alignr_epi8(src_hi_extra, src_hi, 6));  in IntraEdgeUpsampler_SSE4_1()
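The ComputeKernel*Store12 hits pair _mm_slli_si128 with alignr to paste the low bytes of one partial result in front of the low bytes of another, so twelve filtered pixels can be written with a single 16-byte store; the obmc_sse4.cc hits further down use the same pairing with a 12-byte shift. A small sketch of the byte-pasting step alone (invented names, not the libgav1 code):

  #include <stdint.h>
  #include <stdio.h>
  #include <tmmintrin.h>

  /* Concatenate the low 6 bytes of lo with the low 10 bytes of hi:
   * slli_si128 parks lo's bytes 0..5 at positions 10..15, and alignr by 10
   * pulls them back to the bottom with hi's bytes 0..9 placed above them. */
  static __m128i concat_6_10(__m128i lo, __m128i hi) {
    return _mm_alignr_epi8(hi, _mm_slli_si128(lo, 10), 10);
  }

  int main(void) {
    uint8_t a[16], b[16], out[16];
    for (int i = 0; i < 16; ++i) { a[i] = (uint8_t)i; b[i] = (uint8_t)(100 + i); }
    _mm_storeu_si128((__m128i *)out,
                     concat_6_10(_mm_loadu_si128((const __m128i *)a),
                                 _mm_loadu_si128((const __m128i *)b)));
    for (int i = 0; i < 16; ++i) printf("%d ", out[i]);  /* prints 0..5 then 100..109 */
    printf("\n");
    return 0;
  }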
D | loop_restoration_sse4.cc
      48  const auto s01 = _mm_alignr_epi8(s[1], s[0], 1);  in WienerHorizontalTap7Kernel()
      49  const auto s23 = _mm_alignr_epi8(s[1], s[0], 5);  in WienerHorizontalTap7Kernel()
      50  const auto s45 = _mm_alignr_epi8(s[1], s[0], 9);  in WienerHorizontalTap7Kernel()
      51  const auto s67 = _mm_alignr_epi8(s[1], s[0], 13);  in WienerHorizontalTap7Kernel()
      82  const auto s01 = _mm_alignr_epi8(s[1], s[0], 1);  in WienerHorizontalTap5Kernel()
      83  const auto s23 = _mm_alignr_epi8(s[1], s[0], 5);  in WienerHorizontalTap5Kernel()
      84  const auto s45 = _mm_alignr_epi8(s[1], s[0], 9);  in WienerHorizontalTap5Kernel()
     113  const auto s01 = _mm_alignr_epi8(s[1], s[0], 1);  in WienerHorizontalTap3Kernel()
     114  const auto s23 = _mm_alignr_epi8(s[1], s[0], 5);  in WienerHorizontalTap3Kernel()
     903  const auto middle = _mm_alignr_epi8(src[1], src[0], 4);  in Sum3Horizontal_32()
     [all …]
D | convolve_sse4.cc
     115  v_src[0] = _mm_alignr_epi8(src_long_dup_hi, src_long_dup_lo, 3);  // _21  in SumHorizontalTaps()
     116  v_src[1] = _mm_alignr_epi8(src_long_dup_hi, src_long_dup_lo, 7);  // _43  in SumHorizontalTaps()
     117  v_src[2] = _mm_alignr_epi8(src_long_dup_hi, src_long_dup_lo, 11);  // _65  in SumHorizontalTaps()
     120  v_src[0] = _mm_alignr_epi8(src_long_dup_hi, src_long_dup_lo, 1);  // _10  in SumHorizontalTaps()
     121  v_src[1] = _mm_alignr_epi8(src_long_dup_hi, src_long_dup_lo, 5);  // _32  in SumHorizontalTaps()
     122  v_src[2] = _mm_alignr_epi8(src_long_dup_hi, src_long_dup_lo, 9);  // _54  in SumHorizontalTaps()
     123  v_src[3] = _mm_alignr_epi8(src_long_dup_hi, src_long_dup_lo, 13);  // _76  in SumHorizontalTaps()
     126  v_src[0] = _mm_alignr_epi8(src_long_dup_hi, src_long_dup_lo, 7);  // _43  in SumHorizontalTaps()
     129  v_src[0] = _mm_alignr_epi8(src_long_dup_hi, src_long_dup_lo, 5);  // _32  in SumHorizontalTaps()
     130  v_src[1] = _mm_alignr_epi8(src_long_dup_hi, src_long_dup_lo, 9);  // _54  in SumHorizontalTaps()
     [all …]
D | obmc_sse4.cc
      91  _mm_alignr_epi8(Load4(pred), _mm_slli_si128(pred_val0, 12), 12);  in OverlapBlendFromLeft4xH_SSE4_1()
      92  const __m128i obmc_pred_val = _mm_alignr_epi8(  in OverlapBlendFromLeft4xH_SSE4_1()
     212  _mm_alignr_epi8(Load4(pred), _mm_slli_si128(pred_val0, 12), 12);  in OverlapBlendFromTop4xH_SSE4_1()
     213  const __m128i obmc_pred_val = _mm_alignr_epi8(  in OverlapBlendFromTop4xH_SSE4_1()
/external/libaom/libaom/aom_dsp/x86/

D | highbd_convolve_ssse3.c
     210  s[0] = _mm_alignr_epi8(row01, row00, 0);  in av1_highbd_convolve_x_sr_ssse3()
     211  s[1] = _mm_alignr_epi8(row01, row00, 4);  in av1_highbd_convolve_x_sr_ssse3()
     212  s[2] = _mm_alignr_epi8(row01, row00, 8);  in av1_highbd_convolve_x_sr_ssse3()
     213  s[3] = _mm_alignr_epi8(row01, row00, 12);  in av1_highbd_convolve_x_sr_ssse3()
     220  s[0] = _mm_alignr_epi8(row01, row00, 2);  in av1_highbd_convolve_x_sr_ssse3()
     221  s[1] = _mm_alignr_epi8(row01, row00, 6);  in av1_highbd_convolve_x_sr_ssse3()
     222  s[2] = _mm_alignr_epi8(row01, row00, 10);  in av1_highbd_convolve_x_sr_ssse3()
     223  s[3] = _mm_alignr_epi8(row01, row00, 14);  in av1_highbd_convolve_x_sr_ssse3()
D | masked_variance_intrin_ssse3.c
     169  __m128i z = _mm_alignr_epi8(y, x, 1);  in bilinear_filter()
     183  const __m128i z = _mm_alignr_epi8(y, x, 1);  in bilinear_filter()
     749  __m128i z = _mm_alignr_epi8(y, x, 2);  in highbd_bilinear_filter()
     763  const __m128i z = _mm_alignr_epi8(y, x, 2);  in highbd_bilinear_filter()
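In the bilinear-filter hits, x holds a run of pixels and y the pixels that follow it, so an offset of 1 byte (or 2 bytes in the 16-bit high-bit-depth path) gives each lane its right-hand neighbour, which is all a 2-tap horizontal interpolation needs. A rough 8-bit sketch of that idea; the weight scale and rounding below are a simplification for illustration, not libaom's actual filter coefficients:

  #include <stdint.h>
  #include <stdio.h>
  #include <tmmintrin.h>

  /* 2-tap horizontal interpolation of 8-bit pixels: x = p[0..15], y = p[16..31],
   * weight w in 0..8 toward the right neighbour.  alignr by 1 gives p[i+1] per
   * lane; maddubs then forms p[i]*(8-w) + p[i+1]*w for each pixel. */
  static __m128i bilinear_h(__m128i x, __m128i y, int w) {
    const __m128i z = _mm_alignr_epi8(y, x, 1);  /* p[i+1] in every lane */
    const __m128i wts = _mm_set1_epi16((short)((w << 8) | (8 - w)));
    const __m128i lo = _mm_maddubs_epi16(_mm_unpacklo_epi8(x, z), wts);
    const __m128i hi = _mm_maddubs_epi16(_mm_unpackhi_epi8(x, z), wts);
    const __m128i r  = _mm_set1_epi16(4);  /* rounding */
    return _mm_packus_epi16(_mm_srli_epi16(_mm_add_epi16(lo, r), 3),
                            _mm_srli_epi16(_mm_add_epi16(hi, r), 3));
  }

  int main(void) {
    uint8_t p[32], out[16];
    for (int i = 0; i < 32; ++i) p[i] = (uint8_t)(i * 8);
    _mm_storeu_si128((__m128i *)out,
                     bilinear_h(_mm_loadu_si128((const __m128i *)&p[0]),
                                _mm_loadu_si128((const __m128i *)&p[16]), 3));
    for (int i = 0; i < 16; ++i) printf("%d ", out[i]);  /* each p[i] pulled 3/8 toward p[i+1] */
    printf("\n");
    return 0;
  }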
/external/flac/libFLAC/

D | lpc_intrin_sse41.c
     647  dat[5] = _mm_alignr_epi8(dat[4], dat[5], 8);  // ? d[i-10] ? d[i-11]  in FLAC__lpc_restore_signal_wide_intrin_sse41()
     648  dat[4] = _mm_alignr_epi8(dat[3], dat[4], 8);  // ? d[i-8] ? d[i-9]  in FLAC__lpc_restore_signal_wide_intrin_sse41()
     649  dat[3] = _mm_alignr_epi8(dat[2], dat[3], 8);  // ? d[i-6] ? d[i-7]  in FLAC__lpc_restore_signal_wide_intrin_sse41()
     650  dat[2] = _mm_alignr_epi8(dat[1], dat[2], 8);  // ? d[i-4] ? d[i-5]  in FLAC__lpc_restore_signal_wide_intrin_sse41()
     651  dat[1] = _mm_alignr_epi8(dat[0], dat[1], 8);  // ? d[i-2] ? d[i-3]  in FLAC__lpc_restore_signal_wide_intrin_sse41()
     652  dat[0] = _mm_alignr_epi8(temp, dat[0], 8);  // ? d[i ] ? d[i-1]  in FLAC__lpc_restore_signal_wide_intrin_sse41()
     705  dat[4] = _mm_alignr_epi8(dat[3], dat[4], 8);  in FLAC__lpc_restore_signal_wide_intrin_sse41()
     706  dat[3] = _mm_alignr_epi8(dat[2], dat[3], 8);  in FLAC__lpc_restore_signal_wide_intrin_sse41()
     707  dat[2] = _mm_alignr_epi8(dat[1], dat[2], 8);  in FLAC__lpc_restore_signal_wide_intrin_sse41()
     708  dat[1] = _mm_alignr_epi8(dat[0], dat[1], 8);  in FLAC__lpc_restore_signal_wide_intrin_sse41()
     [all …]
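The FLAC hits maintain a delay line of past samples, two 64-bit slots per register with the newer sample in the high lane, and advance the whole chain by one slot per output sample with alignr-by-8, updating from the oldest register down to dat[0] so each register pulls from a neighbour that has not yet been overwritten. A two-register sketch of the same update; the plain 64-bit layout here is a simplification of FLAC's actual 32-bit-in-64-bit packing:

  #include <stdint.h>
  #include <stdio.h>
  #include <tmmintrin.h>

  /* Two-register delay line of four 64-bit slots, newest toward the high
   * lane: dat[0] = { d[i-1] hi, d[i-2] lo }, dat[1] = { d[i-3] hi, d[i-4] lo }. */
  static void push_history(__m128i dat[2], __m128i newest /* sample in low lane */) {
    dat[1] = _mm_alignr_epi8(dat[0], dat[1], 8);  /* becomes { d[i-2], d[i-3] } */
    dat[0] = _mm_alignr_epi8(newest, dat[0], 8);  /* becomes { d[i],   d[i-1] } */
  }

  int main(void) {
    __m128i dat[2];
    dat[0] = _mm_set_epi64x(1, 2);  /* d[i-1] = 1, d[i-2] = 2 */
    dat[1] = _mm_set_epi64x(3, 4);  /* d[i-3] = 3, d[i-4] = 4 */
    push_history(dat, _mm_set_epi64x(0, 9));  /* new sample d[i] = 9 */
    int64_t out[4];
    _mm_storeu_si128((__m128i *)&out[0], dat[0]);
    _mm_storeu_si128((__m128i *)&out[2], dat[1]);
    printf("%lld %lld %lld %lld\n",  /* prints 9 1 2 3: d[i], d[i-1], d[i-2], d[i-3] */
           (long long)out[1], (long long)out[0], (long long)out[3], (long long)out[2]);
    return 0;
  }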
D | fixed_intrin_ssse3.c
      98  last_error = _mm_alignr_epi8(err0, err1, 4);  // e0 e1 e2 e3  in FLAC__fixed_compute_best_predictor_intrin_ssse3()
     189  last_error = _mm_alignr_epi8(err0, err1, 4);  // e0 e1 e2 e3  in FLAC__fixed_compute_best_predictor_wide_intrin_ssse3()
/external/libopus/silk/fixed/x86/

D | prefilter_FIX_sse.c
     121  state_4567 = _mm_alignr_epi8( state_0123, state_4567, 4 );  in silk_warped_LPC_analysis_filter_FIX_sse4_1()
     123  … state_0123 = _mm_alignr_epi8( _mm_cvtsi32_si128( silk_LSHIFT( input[ n ], 14 ) ), state_0123, 4 );  in silk_warped_LPC_analysis_filter_FIX_sse4_1()
/external/clang/test/Sema/

D | 2010-05-31-palignr.c
      18  …v0 = _mm_alignr_epi8(v0, v0, i); // expected-error {{argument to '__builtin_ia32_palignr128' must …  in main()
/external/clang/lib/Headers/

D | tmmintrin.h
     160  #define _mm_alignr_epi8(a, b, n) __extension__ ({ \  macro
/external/libopus/silk/x86/

D | NSQ_sse4_1.c
     350  psLPC_Q14_hi_89ABCDEF = _mm_alignr_epi8( psLPC_Q14_hi_01234567, psLPC_Q14_hi_89ABCDEF, 2 );  in silk_noise_shape_quantizer_10_16_sse4_1()
     351  psLPC_Q14_lo_89ABCDEF = _mm_alignr_epi8( psLPC_Q14_lo_01234567, psLPC_Q14_lo_89ABCDEF, 2 );  in silk_noise_shape_quantizer_10_16_sse4_1()