
Searched refs:_mm_alignr_epi8 (Results 1 – 25 of 27) sorted by relevance
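
All of the hits below use the same SSSE3 byte-concatenate-and-shift intrinsic. As a quick reference, here is a minimal, self-contained sketch of its semantics; it is illustrative only and not taken from any of the files listed (build with -mssse3 on gcc/clang): _mm_alignr_epi8(a, b, n) treats a:b as a 32-byte value with a in the upper half and returns the 16 bytes starting n bytes into b.

/*
 * Illustrative sketch only (not from any file in this listing):
 * _mm_alignr_epi8(a, b, n) concatenates a (high lane) and b (low lane)
 * into a 32-byte value and returns the 16 bytes starting n bytes into b,
 * i.e. ((a:b) >> 8*n) truncated to 128 bits.  n must be a compile-time
 * constant.
 */
#include <stdio.h>
#include <tmmintrin.h>          /* SSSE3: _mm_alignr_epi8 */

int main(void) {
    const __m128i lo = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                                     8, 9, 10, 11, 12, 13, 14, 15);
    const __m128i hi = _mm_setr_epi8(16, 17, 18, 19, 20, 21, 22, 23,
                                     24, 25, 26, 27, 28, 29, 30, 31);
    unsigned char out[16];

    /* Bytes 2..17 of the concatenation hi:lo, i.e. 2, 3, ..., 17. */
    _mm_storeu_si128((__m128i *)out, _mm_alignr_epi8(hi, lo, 2));

    for (int i = 0; i < 16; i++)
        printf("%u ", out[i]);
    printf("\n");
    return 0;
}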

/external/libvpx/libvpx/vpx_dsp/x86/
highbd_intrapred_intrin_ssse3.c
59 *row = _mm_alignr_epi8(*ar, *row, 2); in d45_store_8()
70 const __m128i BCDEFGHH = _mm_alignr_epi8(HHHHHHHH, ABCDEFGH, 2); in vpx_highbd_d45_predictor_8x8_ssse3()
71 const __m128i CDEFGHHH = _mm_alignr_epi8(HHHHHHHH, ABCDEFGH, 4); in vpx_highbd_d45_predictor_8x8_ssse3()
89 *row_0 = _mm_alignr_epi8(*row_1, *row_0, 2); in d45_store_16()
90 *row_1 = _mm_alignr_epi8(*ar, *row_1, 2); in d45_store_16()
103 const __m128i B0 = _mm_alignr_epi8(A1, A0, 2); in vpx_highbd_d45_predictor_16x16_ssse3()
104 const __m128i B1 = _mm_alignr_epi8(AR, A1, 2); in vpx_highbd_d45_predictor_16x16_ssse3()
105 const __m128i C0 = _mm_alignr_epi8(A1, A0, 4); in vpx_highbd_d45_predictor_16x16_ssse3()
106 const __m128i C1 = _mm_alignr_epi8(AR, A1, 4); in vpx_highbd_d45_predictor_16x16_ssse3()
140 const __m128i B0 = _mm_alignr_epi8(A1, A0, 2); in vpx_highbd_d45_predictor_32x32_ssse3()
[all …]
/external/python/cpython3/Modules/_blake2/impl/
blake2b-load-sse41.h
63 b1 = _mm_alignr_epi8(m3, m7, 8); \
86 b0 = _mm_alignr_epi8(m6, m5, 8); \
111 b1 = _mm_alignr_epi8(m2, m0, 8); \
174 b0 = _mm_alignr_epi8(m6, m0, 8); \
223 b1 = _mm_alignr_epi8(m5, m6, 8); \
254 b0 = _mm_alignr_epi8(m7, m5, 8); \
279 b1 = _mm_alignr_epi8(m0, m5, 8); \
287 b1 = _mm_alignr_epi8(m4, m1, 8); \
295 b1 = _mm_alignr_epi8(m5, m0, 8); \
334 b0 = _mm_alignr_epi8(m7, m5, 8); \
[all …]
blake2b-round.h
86 t0 = _mm_alignr_epi8(row2h, row2l, 8); \
87 t1 = _mm_alignr_epi8(row2l, row2h, 8); \
95 t0 = _mm_alignr_epi8(row4h, row4l, 8); \
96 t1 = _mm_alignr_epi8(row4l, row4h, 8); \
101 t0 = _mm_alignr_epi8(row2l, row2h, 8); \
102 t1 = _mm_alignr_epi8(row2h, row2l, 8); \
110 t0 = _mm_alignr_epi8(row4l, row4h, 8); \
111 t1 = _mm_alignr_epi8(row4h, row4l, 8); \
/external/clang/test/CodeGen/
palignr.c
4 #define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n))) macro
8 int4 align1(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 15); } in align1()
12 int4 align2(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 16); } in align2()
14 int4 align3(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 17); } in align3()
16 int4 align4(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 32); } in align4()
ssse3-builtins.c
31 return _mm_alignr_epi8(a, b, 2); in test_mm_alignr_epi8()
37 return _mm_alignr_epi8(a, b, 17); in test2_mm_alignr_epi8()
/external/libhevc/common/x86/
ihevc_sao_ssse3_intr.c
957 left_store_16x8b = _mm_alignr_epi8(left_store_16x8b, left_store_16x8b, 2); in ihevc_sao_edge_offset_class0_ssse3()
959 left1_16x8b = _mm_alignr_epi8(src_temp1_16x8b, left_store_16x8b, 15); in ihevc_sao_edge_offset_class0_ssse3()
960 left_store_16x8b = _mm_alignr_epi8(left_store_16x8b, src_temp1_16x8b, 15); in ihevc_sao_edge_offset_class0_ssse3()
962 left0_16x8b = _mm_alignr_epi8(src_temp0_16x8b, left_store_16x8b, 15); in ihevc_sao_edge_offset_class0_ssse3()
963 left_store_16x8b = _mm_alignr_epi8(left_store_16x8b, src_temp0_16x8b, 15); in ihevc_sao_edge_offset_class0_ssse3()
1078 left_store_16x8b = _mm_alignr_epi8(left_store_16x8b, left_store_16x8b, 4); in ihevc_sao_edge_offset_class0_ssse3()
1081 cmp_lt1_16x8b = _mm_alignr_epi8(cmp_gt1_16x8b, left_store_16x8b, 15); in ihevc_sao_edge_offset_class0_ssse3()
1082 left_store_16x8b = _mm_alignr_epi8(left_store_16x8b, edge0_16x8b, 15); in ihevc_sao_edge_offset_class0_ssse3()
1085 left1_16x8b = _mm_alignr_epi8(src_temp1_16x8b, left_store_16x8b, 15); in ihevc_sao_edge_offset_class0_ssse3()
1086 left_store_16x8b = _mm_alignr_epi8(left_store_16x8b, edge0_16x8b, 15); in ihevc_sao_edge_offset_class0_ssse3()
[all …]
/external/libaom/libaom/av1/common/x86/
intra_edge_sse4.c
71 in0 = _mm_alignr_epi8(in1, in0, 8); in av1_filter_intra_edge_sse4_1()
108 in0 = _mm_alignr_epi8(in1, in0, 8); in av1_filter_intra_edge_sse4_1()
149 __m128i in1 = _mm_alignr_epi8(in8, in0, 2); in av1_filter_intra_edge_high_sse4_1()
150 __m128i in2 = _mm_alignr_epi8(in8, in0, 4); in av1_filter_intra_edge_high_sse4_1()
178 __m128i in1 = _mm_alignr_epi8(in8, in0, 2); in av1_filter_intra_edge_high_sse4_1()
179 __m128i in2 = _mm_alignr_epi8(in8, in0, 4); in av1_filter_intra_edge_high_sse4_1()
180 __m128i in3 = _mm_alignr_epi8(in8, in0, 6); in av1_filter_intra_edge_high_sse4_1()
181 __m128i in4 = _mm_alignr_epi8(in8, in0, 8); in av1_filter_intra_edge_high_sse4_1()
238 __m128i in8 = _mm_alignr_epi8(in16, in0, 8); in av1_upsample_intra_edge_sse4_1()
255 __m128i in1 = _mm_alignr_epi8(in16, in0, 1); in av1_upsample_intra_edge_sse4_1()
[all …]
highbd_convolve_2d_ssse3.c
70 s[0] = _mm_alignr_epi8(row01, row00, 0); in av1_highbd_convolve_2d_sr_ssse3()
71 s[1] = _mm_alignr_epi8(row01, row00, 4); in av1_highbd_convolve_2d_sr_ssse3()
72 s[2] = _mm_alignr_epi8(row01, row00, 8); in av1_highbd_convolve_2d_sr_ssse3()
73 s[3] = _mm_alignr_epi8(row01, row00, 12); in av1_highbd_convolve_2d_sr_ssse3()
80 s[0] = _mm_alignr_epi8(row01, row00, 2); in av1_highbd_convolve_2d_sr_ssse3()
81 s[1] = _mm_alignr_epi8(row01, row00, 6); in av1_highbd_convolve_2d_sr_ssse3()
82 s[2] = _mm_alignr_epi8(row01, row00, 10); in av1_highbd_convolve_2d_sr_ssse3()
83 s[3] = _mm_alignr_epi8(row01, row00, 14); in av1_highbd_convolve_2d_sr_ssse3()
highbd_wiener_convolve_ssse3.c
78 _mm_madd_epi16(_mm_alignr_epi8(data2, data, 4), coeff_23); in av1_highbd_wiener_convolve_add_src_ssse3()
80 _mm_madd_epi16(_mm_alignr_epi8(data2, data, 8), coeff_45); in av1_highbd_wiener_convolve_add_src_ssse3()
82 _mm_madd_epi16(_mm_alignr_epi8(data2, data, 12), coeff_67); in av1_highbd_wiener_convolve_add_src_ssse3()
91 _mm_madd_epi16(_mm_alignr_epi8(data2, data, 2), coeff_01); in av1_highbd_wiener_convolve_add_src_ssse3()
93 _mm_madd_epi16(_mm_alignr_epi8(data2, data, 6), coeff_23); in av1_highbd_wiener_convolve_add_src_ssse3()
95 _mm_madd_epi16(_mm_alignr_epi8(data2, data, 10), coeff_45); in av1_highbd_wiener_convolve_add_src_ssse3()
97 _mm_madd_epi16(_mm_alignr_epi8(data2, data, 14), coeff_67); in av1_highbd_wiener_convolve_add_src_ssse3()
jnt_convolve_ssse3.c
89 const __m128i src_2 = _mm_alignr_epi8(src_hi, src_lo, 4); in av1_dist_wtd_convolve_2d_ssse3()
91 const __m128i src_4 = _mm_alignr_epi8(src_hi, src_lo, 8); in av1_dist_wtd_convolve_2d_ssse3()
93 const __m128i src_6 = _mm_alignr_epi8(src_hi, src_lo, 12); in av1_dist_wtd_convolve_2d_ssse3()
102 const __m128i src_1 = _mm_alignr_epi8(src_hi, src_lo, 2); in av1_dist_wtd_convolve_2d_ssse3()
104 const __m128i src_3 = _mm_alignr_epi8(src_hi, src_lo, 6); in av1_dist_wtd_convolve_2d_ssse3()
106 const __m128i src_5 = _mm_alignr_epi8(src_hi, src_lo, 10); in av1_dist_wtd_convolve_2d_ssse3()
108 const __m128i src_7 = _mm_alignr_epi8(src_hi, src_lo, 14); in av1_dist_wtd_convolve_2d_ssse3()
highbd_jnt_convolve_sse4.c
313 s[0] = _mm_alignr_epi8(row01, row00, 0); in av1_highbd_dist_wtd_convolve_x_sse4_1()
314 s[1] = _mm_alignr_epi8(row01, row00, 4); in av1_highbd_dist_wtd_convolve_x_sse4_1()
315 s[2] = _mm_alignr_epi8(row01, row00, 8); in av1_highbd_dist_wtd_convolve_x_sse4_1()
316 s[3] = _mm_alignr_epi8(row01, row00, 12); in av1_highbd_dist_wtd_convolve_x_sse4_1()
323 s[0] = _mm_alignr_epi8(row01, row00, 2); in av1_highbd_dist_wtd_convolve_x_sse4_1()
324 s[1] = _mm_alignr_epi8(row01, row00, 6); in av1_highbd_dist_wtd_convolve_x_sse4_1()
325 s[2] = _mm_alignr_epi8(row01, row00, 10); in av1_highbd_dist_wtd_convolve_x_sse4_1()
326 s[3] = _mm_alignr_epi8(row01, row00, 14); in av1_highbd_dist_wtd_convolve_x_sse4_1()
highbd_convolve_2d_sse4.c
242 _mm_madd_epi16(_mm_alignr_epi8(data2, data, 4), coeff_23); in av1_highbd_dist_wtd_convolve_2d_sse4_1()
244 _mm_madd_epi16(_mm_alignr_epi8(data2, data, 8), coeff_45); in av1_highbd_dist_wtd_convolve_2d_sse4_1()
246 _mm_madd_epi16(_mm_alignr_epi8(data2, data, 12), coeff_67); in av1_highbd_dist_wtd_convolve_2d_sse4_1()
255 _mm_madd_epi16(_mm_alignr_epi8(data2, data, 2), coeff_01); in av1_highbd_dist_wtd_convolve_2d_sse4_1()
257 _mm_madd_epi16(_mm_alignr_epi8(data2, data, 6), coeff_23); in av1_highbd_dist_wtd_convolve_2d_sse4_1()
259 _mm_madd_epi16(_mm_alignr_epi8(data2, data, 10), coeff_45); in av1_highbd_dist_wtd_convolve_2d_sse4_1()
261 _mm_madd_epi16(_mm_alignr_epi8(data2, data, 14), coeff_67); in av1_highbd_dist_wtd_convolve_2d_sse4_1()
highbd_warp_plane_sse4.c
118 _mm_madd_epi16(_mm_alignr_epi8(src2_1, src_1, 4), coeff[2]); in highbd_filter_src_pixels()
120 _mm_madd_epi16(_mm_alignr_epi8(src2_1, src_1, 8), coeff[4]); in highbd_filter_src_pixels()
122 _mm_madd_epi16(_mm_alignr_epi8(src2_1, src_1, 12), coeff[6]); in highbd_filter_src_pixels()
130 _mm_madd_epi16(_mm_alignr_epi8(src2_1, src_1, 2), coeff[1]); in highbd_filter_src_pixels()
132 _mm_madd_epi16(_mm_alignr_epi8(src2_1, src_1, 6), coeff[3]); in highbd_filter_src_pixels()
134 _mm_madd_epi16(_mm_alignr_epi8(src2_1, src_1, 10), coeff[5]); in highbd_filter_src_pixels()
136 _mm_madd_epi16(_mm_alignr_epi8(src2_1, src_1, 14), coeff[7]); in highbd_filter_src_pixels()
/external/libaom/libaom/aom_dsp/x86/
highbd_convolve_ssse3.c
210 s[0] = _mm_alignr_epi8(row01, row00, 0); in av1_highbd_convolve_x_sr_ssse3()
211 s[1] = _mm_alignr_epi8(row01, row00, 4); in av1_highbd_convolve_x_sr_ssse3()
212 s[2] = _mm_alignr_epi8(row01, row00, 8); in av1_highbd_convolve_x_sr_ssse3()
213 s[3] = _mm_alignr_epi8(row01, row00, 12); in av1_highbd_convolve_x_sr_ssse3()
220 s[0] = _mm_alignr_epi8(row01, row00, 2); in av1_highbd_convolve_x_sr_ssse3()
221 s[1] = _mm_alignr_epi8(row01, row00, 6); in av1_highbd_convolve_x_sr_ssse3()
222 s[2] = _mm_alignr_epi8(row01, row00, 10); in av1_highbd_convolve_x_sr_ssse3()
223 s[3] = _mm_alignr_epi8(row01, row00, 14); in av1_highbd_convolve_x_sr_ssse3()
masked_variance_intrin_ssse3.c
169 __m128i z = _mm_alignr_epi8(y, x, 1); in bilinear_filter()
183 const __m128i z = _mm_alignr_epi8(y, x, 1); in bilinear_filter()
748 __m128i z = _mm_alignr_epi8(y, x, 2); in highbd_bilinear_filter()
762 const __m128i z = _mm_alignr_epi8(y, x, 2); in highbd_bilinear_filter()
/external/flac/libFLAC/
lpc_intrin_sse41.c
647 dat[5] = _mm_alignr_epi8(dat[4], dat[5], 8); // ? d[i-10] ? d[i-11] in FLAC__lpc_restore_signal_wide_intrin_sse41()
648 dat[4] = _mm_alignr_epi8(dat[3], dat[4], 8); // ? d[i-8] ? d[i-9] in FLAC__lpc_restore_signal_wide_intrin_sse41()
649 dat[3] = _mm_alignr_epi8(dat[2], dat[3], 8); // ? d[i-6] ? d[i-7] in FLAC__lpc_restore_signal_wide_intrin_sse41()
650 dat[2] = _mm_alignr_epi8(dat[1], dat[2], 8); // ? d[i-4] ? d[i-5] in FLAC__lpc_restore_signal_wide_intrin_sse41()
651 dat[1] = _mm_alignr_epi8(dat[0], dat[1], 8); // ? d[i-2] ? d[i-3] in FLAC__lpc_restore_signal_wide_intrin_sse41()
652 dat[0] = _mm_alignr_epi8(temp, dat[0], 8); // ? d[i ] ? d[i-1] in FLAC__lpc_restore_signal_wide_intrin_sse41()
705 dat[4] = _mm_alignr_epi8(dat[3], dat[4], 8); in FLAC__lpc_restore_signal_wide_intrin_sse41()
706 dat[3] = _mm_alignr_epi8(dat[2], dat[3], 8); in FLAC__lpc_restore_signal_wide_intrin_sse41()
707 dat[2] = _mm_alignr_epi8(dat[1], dat[2], 8); in FLAC__lpc_restore_signal_wide_intrin_sse41()
708 dat[1] = _mm_alignr_epi8(dat[0], dat[1], 8); in FLAC__lpc_restore_signal_wide_intrin_sse41()
[all …]
fixed_intrin_ssse3.c
98 last_error = _mm_alignr_epi8(err0, err1, 4); // e0 e1 e2 e3 in FLAC__fixed_compute_best_predictor_intrin_ssse3()
189 last_error = _mm_alignr_epi8(err0, err1, 4); // e0 e1 e2 e3 in FLAC__fixed_compute_best_predictor_wide_intrin_ssse3()
/external/libopus/silk/fixed/x86/
prefilter_FIX_sse.c
121 state_4567 = _mm_alignr_epi8( state_0123, state_4567, 4 ); in silk_warped_LPC_analysis_filter_FIX_sse4_1()
123 … state_0123 = _mm_alignr_epi8( _mm_cvtsi32_si128( silk_LSHIFT( input[ n ], 14 ) ), state_0123, 4 ); in silk_warped_LPC_analysis_filter_FIX_sse4_1()
/external/clang/test/Sema/
2010-05-31-palignr.c
18 …v0 = _mm_alignr_epi8(v0, v0, i); // expected-error {{argument to '__builtin_ia32_palignr128' must … in main()
/external/libvpx/libvpx/vp9/encoder/x86/
temporal_filter_sse4.c
93 __m128i shift_right = _mm_alignr_epi8(diff_sq_1_u16, diff_sq_0_u16, 2); in sum_16()
100 shift_left = _mm_alignr_epi8(diff_sq_1_u16, diff_sq_0_u16, 14); in sum_16()
/external/clang/lib/Headers/
tmmintrin.h
160 #define _mm_alignr_epi8(a, b, n) __extension__ ({ \ macro
/external/libopus/silk/x86/
NSQ_sse4_1.c
350 psLPC_Q14_hi_89ABCDEF = _mm_alignr_epi8( psLPC_Q14_hi_01234567, psLPC_Q14_hi_89ABCDEF, 2 ); in silk_noise_shape_quantizer_10_16_sse4_1()
351 psLPC_Q14_lo_89ABCDEF = _mm_alignr_epi8( psLPC_Q14_lo_01234567, psLPC_Q14_lo_89ABCDEF, 2 ); in silk_noise_shape_quantizer_10_16_sse4_1()
/external/eigen/Eigen/src/Core/arch/SSE/
PacketMath.h
686 …first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset…
696 first = _mm_alignr_epi8(second,first, Offset*4);
706 … first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
/external/libaom/libaom/aom_dsp/simd/
v128_intrinsics_x86.h
67 return c ? _mm_alignr_epi8(a, b, c) : b; in v128_align()
75 #define v128_align(a, b, c) ((c) ? _mm_alignr_epi8(a, b, (uint8_t)(c)) : (b))
/external/libavc/encoder/x86/
ih264e_intra_modes_eval_ssse3.c
560 val_16x8b = _mm_alignr_epi8(temp_16x8b, val_16x8b, 4); in ih264e_evaluate_intra_4x4_modes_ssse3()
566 val_16x8b = _mm_alignr_epi8(left_top_16x8b, val_16x8b, 4); in ih264e_evaluate_intra_4x4_modes_ssse3()
