Searched refs: vrhaddq_u8 (Results 1 – 12 of 12) sorted by relevance

/external/libvpx/libvpx/vpx_dsp/arm/
vpx_convolve_avg_neon.c
60 d01 = vrhaddq_u8(s01, d01); in vpx_convolve_avg_neon()
78 d0 = vrhaddq_u8(s0, d0); in vpx_convolve_avg_neon()
79 d1 = vrhaddq_u8(s1, d1); in vpx_convolve_avg_neon()
101 d0 = vrhaddq_u8(s0, d0); in vpx_convolve_avg_neon()
102 d1 = vrhaddq_u8(s1, d1); in vpx_convolve_avg_neon()
103 d2 = vrhaddq_u8(s2, d2); in vpx_convolve_avg_neon()
104 d3 = vrhaddq_u8(s3, d3); in vpx_convolve_avg_neon()
127 d0 = vrhaddq_u8(s0, d0); in vpx_convolve_avg_neon()
128 d1 = vrhaddq_u8(s1, d1); in vpx_convolve_avg_neon()
129 d2 = vrhaddq_u8(s2, d2); in vpx_convolve_avg_neon()
[all …]
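
Every hit in this file follows one pattern: the incoming pixels are blended into the destination with a rounding halving add, vrhaddq_u8, which computes (a + b + 1) >> 1 in each u8 lane. A minimal sketch of that pattern, assuming 16-pixel-wide rows; the function name and signature are illustrative, not libvpx's:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Average a 16xh source block into dst: dst = (src + dst + 1) >> 1. */
    static void avg_16xh(const uint8_t *src, int src_stride, uint8_t *dst,
                         int dst_stride, int h) {
      do {
        const uint8x16_t s = vld1q_u8(src); /* new pixels */
        const uint8x16_t d = vld1q_u8(dst); /* pixels already in dst */
        vst1q_u8(dst, vrhaddq_u8(s, d));    /* rounding average */
        src += src_stride;
        dst += dst_stride;
      } while (--h != 0);
    }
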
avg_pred_neon.c
25 const uint8x16_t avg = vrhaddq_u8(p, r); in vpx_comp_avg_pred_neon()
41 r = vrhaddq_u8(r, p); in vpx_comp_avg_pred_neon()
57 r = vrhaddq_u8(r, p); in vpx_comp_avg_pred_neon()
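
Compound prediction applies the same intrinsic to whole buffers: the output is the rounding average of a predictor and a reference block. A hedged sketch, assuming contiguous buffers whose length is a multiple of 16; the name comp_avg_16 is hypothetical:

    #include <arm_neon.h>
    #include <stdint.h>

    /* comp[i] = (pred[i] + ref[i] + 1) >> 1 for n bytes, n % 16 == 0. */
    static void comp_avg_16(const uint8_t *pred, const uint8_t *ref,
                            uint8_t *comp, int n) {
      for (int i = 0; i < n; i += 16) {
        const uint8x16_t p = vld1q_u8(pred + i);
        const uint8x16_t r = vld1q_u8(ref + i);
        vst1q_u8(comp + i, vrhaddq_u8(p, r));
      }
    }
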
sad_neon.c
35 const uint8x16_t avg = vrhaddq_u8(ref_u8, second_pred_u8); in vpx_sad4x4_avg_neon()
66 const uint8x16_t avg = vrhaddq_u8(ref_u8, second_pred_u8); in vpx_sad4x8_avg_neon()
160 const uint8x16_t avg = vrhaddq_u8(b_u8, c_u8); in sad16x_avg()
225 const uint8x16_t avg_lo = vrhaddq_u8(b_lo, c_lo); in sad32x_avg()
226 const uint8x16_t avg_hi = vrhaddq_u8(b_hi, c_hi); in sad32x_avg()
313 const uint8x16_t avg_0 = vrhaddq_u8(b_0, c_0); in sad64x_avg()
314 const uint8x16_t avg_1 = vrhaddq_u8(b_1, c_1); in sad64x_avg()
315 const uint8x16_t avg_2 = vrhaddq_u8(b_2, c_2); in sad64x_avg()
316 const uint8x16_t avg_3 = vrhaddq_u8(b_3, c_3); in sad64x_avg()
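
In these *_avg SAD functions the reference is first averaged with a second predictor, and the sum of absolute differences is then taken against that average. A sketch for a single 16-pixel row, assuming contiguous inputs (the real functions also handle strides and loop over rows):

    #include <arm_neon.h>
    #include <stdint.h>

    /* SAD between src and the rounding average of ref and second_pred. */
    static uint32_t sad16_avg_row(const uint8_t *src, const uint8_t *ref,
                                  const uint8_t *second_pred) {
      const uint8x16_t avg = vrhaddq_u8(vld1q_u8(ref), vld1q_u8(second_pred));
      const uint8x16_t diff = vabdq_u8(vld1q_u8(src), avg); /* |src - avg| */
      /* Pairwise widen-and-add down to a single scalar sum. */
      const uint16x8_t s16 = vpaddlq_u8(diff);
      const uint32x4_t s32 = vpaddlq_u16(s16);
      const uint64x2_t s64 = vpaddlq_u32(s32);
      return (uint32_t)(vgetq_lane_u64(s64, 0) + vgetq_lane_u64(s64, 1));
    }
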
intrapred_neon.c
327 uint8x16_t row = vrhaddq_u8(avg1, A1); in vpx_d45_predictor_16x16_neon()
360 uint8x16_t row_0 = vrhaddq_u8(avg_0, A1_0); in vpx_d45_predictor_32x32_neon()
361 uint8x16_t row_1 = vrhaddq_u8(avg_1, A1_1); in vpx_d45_predictor_32x32_neon()
424 const uint8x16_t row = vrhaddq_u8(avg, L6543210XA01234567); in vpx_d135_predictor_8x8_neon()
490 const uint8x16_t row_0 = vrhaddq_u8(avg_0, Ledcba9876543210X); in vpx_d135_predictor_16x16_neon()
491 const uint8x16_t row_1 = vrhaddq_u8(avg_1, A0123456789abcdef); in vpx_d135_predictor_16x16_neon()
542 const uint8x16_t row_0 = vrhaddq_u8(avg_0, LLedcba9876543210Uf); in vpx_d135_predictor_32x32_neon()
550 const uint8x16_t row_1 = vrhaddq_u8(avg_1, LUedcba9876543210X); in vpx_d135_predictor_32x32_neon()
558 const uint8x16_t row_2 = vrhaddq_u8(avg_2, AL0123456789abcdef); in vpx_d135_predictor_32x32_neon()
560 const uint8x16_t row_3 = vrhaddq_u8(avg_3, AR0123456789abcdef); in vpx_d135_predictor_32x32_neon()
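
The directional predictors pair vrhaddq_u8 with its truncating sibling vhaddq_u8 to build a three-tap smoothing filter. Assuming the first stage is vhaddq_u8 of the two outer taps, which the hits do not show but which is the usual libvpx idiom, the pair computes (a + 2b + c + 2) >> 2 exactly: the truncation in the first stage is absorbed by the rounding in the second. A sketch under that assumption:

    #include <arm_neon.h>

    /* Compute (a + 2*b + c + 2) >> 2 per lane via two halving adds. */
    static uint8x16_t avg3(uint8x16_t a, uint8x16_t b, uint8x16_t c) {
      const uint8x16_t avg1 = vhaddq_u8(a, c); /* (a + c) >> 1, truncating */
      return vrhaddq_u8(avg1, b);              /* ((a+c)>>1 + b + 1) >> 1  */
    }
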
vpx_convolve8_neon.c
388 vrhaddq_u8(vreinterpretq_u8_u32(d0123), vcombine_u8(d01, d23))); in vpx_convolve8_avg_horiz_neon()
473 vrhaddq_u8(vreinterpretq_u8_u32(d0415), vcombine_u8(t0, t1))); in vpx_convolve8_avg_horiz_neon()
475 vrhaddq_u8(vreinterpretq_u8_u32(d2637), vcombine_u8(t2, t3))); in vpx_convolve8_avg_horiz_neon()
570 d01 = vrhaddq_u8(d01, vcombine_u8(t0, t1)); in vpx_convolve8_avg_horiz_neon()
571 d23 = vrhaddq_u8(d23, vcombine_u8(t2, t3)); in vpx_convolve8_avg_horiz_neon()
572 d45 = vrhaddq_u8(d45, vcombine_u8(t4, t5)); in vpx_convolve8_avg_horiz_neon()
573 d67 = vrhaddq_u8(d67, vcombine_u8(t6, t7)); in vpx_convolve8_avg_horiz_neon()
844 vrhaddq_u8(vreinterpretq_u8_u32(d0123), vcombine_u8(d01, d23))); in vpx_convolve8_avg_vert_neon()
933 dd01 = vrhaddq_u8(dd01, d01); in vpx_convolve8_avg_vert_neon()
934 dd23 = vrhaddq_u8(dd23, d23); in vpx_convolve8_avg_vert_neon()
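
In these averaging paths, two 8-byte filter outputs are first joined with vcombine_u8 so that a single vrhaddq_u8 averages two rows against the destination at once. A sketch of that step for two 8-pixel rows; the helper name is hypothetical:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Average filter outputs t0/t1 into two 8-pixel destination rows. */
    static void avg_two_rows(uint8_t *dst0, uint8_t *dst1, uint8x8_t t0,
                             uint8x8_t t1) {
      uint8x16_t d01 = vcombine_u8(vld1_u8(dst0), vld1_u8(dst1));
      d01 = vrhaddq_u8(d01, vcombine_u8(t0, t1)); /* one op, both rows */
      vst1_u8(dst0, vget_low_u8(d01));
      vst1_u8(dst1, vget_high_u8(d01));
    }
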
deblock_neon.c
56 const uint8x16_t k1 = vrhaddq_u8(a2, a1); in average_k_outq()
57 const uint8x16_t k2 = vrhaddq_u8(b2, b1); in average_k_outq()
58 const uint8x16_t k3 = vrhaddq_u8(k1, k2); in average_k_outq()
59 return vrhaddq_u8(k3, v0); in average_k_outq()
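
Writing x ~ y for the rounding average (x + y + 1) >> 1, the four hits above compute ((a2 ~ a1) ~ (b2 ~ b1)) ~ v0, a blend that weights v0 by 1/2 and each neighbor by 1/8. Reassembled as a standalone function (only the surrounding declarations are assumed):

    #include <arm_neon.h>

    /* Nested rounding averages, as in deblock_neon.c's average_k_outq(). */
    static uint8x16_t average_k_outq(uint8x16_t a2, uint8x16_t a1,
                                     uint8x16_t b2, uint8x16_t b1,
                                     uint8x16_t v0) {
      const uint8x16_t k1 = vrhaddq_u8(a2, a1);
      const uint8x16_t k2 = vrhaddq_u8(b2, b1);
      const uint8x16_t k3 = vrhaddq_u8(k1, k2);
      return vrhaddq_u8(k3, v0);
    }
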
/external/libhevc/encoder/arm/
ihevce_subpel_neon.c
121 uint8x16_t dst = vrhaddq_u8(src_a, src_b); in hme_4x4_qpel_interp_avg_neon()
165 uint8x16_t dst = vrhaddq_u8(src_a, src_b); in hme_16xn_qpel_interp_avg_neon()
189 uint8x16_t dst_0 = vrhaddq_u8(src_a_0, src_b_0); in hme_32xn_qpel_interp_avg_neon()
193 uint8x16_t dst_1 = vrhaddq_u8(src_a_1, src_b_1); in hme_32xn_qpel_interp_avg_neon()
/external/libgav1/libgav1/src/dsp/arm/
convolve_neon.cc
2567 vst1q_u8(dst, vrhaddq_u8(left, right)); in HalfAddHorizontal()
2743 vst1q_u8(dst, vrhaddq_u8(row[0], below[0])); in IntraBlockCopyVertical()
2747 vst1q_u8(dst, vrhaddq_u8(row[1], below[1])); in IntraBlockCopyVertical()
2751 vst1q_u8(dst, vrhaddq_u8(row[2], below[2])); in IntraBlockCopyVertical()
2754 vst1q_u8(dst, vrhaddq_u8(row[3], below[3])); in IntraBlockCopyVertical()
2758 vst1q_u8(dst, vrhaddq_u8(row[4], below[4])); in IntraBlockCopyVertical()
2761 vst1q_u8(dst, vrhaddq_u8(row[5], below[5])); in IntraBlockCopyVertical()
2764 vst1q_u8(dst, vrhaddq_u8(row[6], below[6])); in IntraBlockCopyVertical()
2767 vst1q_u8(dst, vrhaddq_u8(row[7], below[7])); in IntraBlockCopyVertical()
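
IntraBlockCopyVertical averages each row with the row below it, i.e. a vertical half-sample interpolation. A sketch that reloads only one new row per iteration; the strides and the 16-wide column are assumptions:

    #include <arm_neon.h>
    #include <stdint.h>

    /* dst[y] = (src[y] + src[y+1] + 1) >> 1 for a 16-wide column, h rows. */
    static void halfpel_vert_16(const uint8_t *src, int src_stride,
                                uint8_t *dst, int dst_stride, int h) {
      uint8x16_t row = vld1q_u8(src);
      for (int y = 0; y < h; ++y) {
        src += src_stride;
        const uint8x16_t below = vld1q_u8(src);
        vst1q_u8(dst, vrhaddq_u8(row, below));
        dst += dst_stride;
        row = below; /* this row becomes next iteration's upper input */
      }
    }
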
/external/libaom/libaom/aom_dsp/simd/
v128_intrinsics_arm.h
336 vrhaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y))); in v128_avg_u8()
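
This hit reads naturally once you know the abstraction layer stores its generic v128 type as int64x2_t: the wrapper reinterprets to u8 lanes, averages, and reinterprets back. A sketch consistent with the hit; the typedef is an assumption about the surrounding header:

    #include <arm_neon.h>

    typedef int64x2_t v128; /* assumed storage type for the generic v128 */

    static v128 v128_avg_u8(v128 x, v128 y) {
      return vreinterpretq_s64_u8(
          vrhaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
    }
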
/external/clang/test/CodeGen/
aarch64-neon-intrinsics.c
3062 return vrhaddq_u8(v1, v2); in test_vrhaddq_u8()
arm_neon_intrinsics.c
15845 return vrhaddq_u8(a, b); in test_vrhaddq_u8()
/external/neon_2_sse/
NEON_2_SSE.h
500 _NEON2SSESTORAGE uint8x16_t vrhaddq_u8(uint8x16_t a, uint8x16_t b); // VRHADD.U8 q0,q0,q0
3186 _NEON2SSESTORAGE uint8x16_t vrhaddq_u8(uint8x16_t a, uint8x16_t b); // VRHADD.U8 q0,q0,q0
3187 #define vrhaddq_u8 _mm_avg_epu8 //SSE2, results rounded macro
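
The one-line macro is sound because SSE2's _mm_avg_epu8 and NEON's vrhaddq_u8 round identically: each u8 lane becomes (a + b + 1) >> 1. A scalar reference that both intrinsics must match lane-by-lane:

    #include <stdint.h>

    /* Rounding halving add: the lane op of vrhaddq_u8 and _mm_avg_epu8. */
    static uint8_t rhadd_u8(uint8_t a, uint8_t b) {
      return (uint8_t)(((unsigned)a + (unsigned)b + 1) >> 1);
    }
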