
Searched refs: q1u8 (results 1 – 9 of 9), sorted by relevance

/external/libvpx/libvpx/vpx_dsp/arm/
vpx_convolve_avg_neon.c
30 uint8x16_t q0u8, q1u8, q2u8, q3u8, q8u8, q9u8, q10u8, q11u8; in vpx_convolve_avg_neon() local
38 q1u8 = vld1q_u8(src + 16); in vpx_convolve_avg_neon()
49 q1u8 = vrhaddq_u8(q1u8, q9u8); in vpx_convolve_avg_neon()
54 vst1q_u8(dst + 16, q1u8); in vpx_convolve_avg_neon()
62 q1u8 = vld1q_u8(src + 16); in vpx_convolve_avg_neon()
75 q1u8 = vrhaddq_u8(q1u8, q9u8); in vpx_convolve_avg_neon()
80 vst1q_u8(dst + 16, q1u8); in vpx_convolve_avg_neon()
90 q1u8 = vld1q_u8(src); in vpx_convolve_avg_neon()
98 q1u8 = vrhaddq_u8(q1u8, q3u8); in vpx_convolve_avg_neon()
102 vst1q_u8(dst, q1u8); in vpx_convolve_avg_neon()
[all …]
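
These hits trace the averaging path of vpx_convolve_avg_neon: each 16-byte block is loaded from both src and dst and merged with vrhaddq_u8, NEON's rounding halving add, which computes (a + b + 1) >> 1 per lane. A minimal sketch of that inner loop for a width-16 block; avg_block16 and the loop structure are illustrative, and the real routine also has width-4/8/32/64 paths:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Rounding average of a width-16 block of src into dst -- the
       vrhaddq_u8 calls above compute (s + d + 1) >> 1 per lane.
       Sketch only, not the exact libvpx code. */
    static void avg_block16(const uint8_t *src, int src_stride,
                            uint8_t *dst, int dst_stride, int h) {
      int y;
      for (y = 0; y < h; ++y) {
        const uint8x16_t s = vld1q_u8(src);  /* 16 source pixels   */
        const uint8x16_t d = vld1q_u8(dst);  /* 16 existing pixels */
        vst1q_u8(dst, vrhaddq_u8(s, d));     /* rounded average    */
        src += src_stride;
        dst += dst_stride;
      }
    }
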
vpx_convolve_copy_neon.c
28 uint8x16_t q0u8, q1u8, q2u8, q3u8; in vpx_convolve_copy_neon() local
35 q1u8 = vld1q_u8(src + 16); in vpx_convolve_copy_neon()
41 vst1q_u8(dst + 16, q1u8); in vpx_convolve_copy_neon()
49 q1u8 = vld1q_u8(src + 16); in vpx_convolve_copy_neon()
56 vst1q_u8(dst + 16, q1u8); in vpx_convolve_copy_neon()
66 q1u8 = vld1q_u8(src); in vpx_convolve_copy_neon()
71 vst1q_u8(dst, q1u8); in vpx_convolve_copy_neon()
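
vpx_convolve_copy_neon drops the averaging and is a pure block copy; the src + 16 / dst + 16 pairs above are the second of two q-register transfers that together move a 32-pixel row per iteration. A hedged sketch of that pattern (copy_block32 is a hypothetical name):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Copy one 32-pixel row per iteration, two 16-byte q-register
       transfers at a time. Sketch; the real function switches on
       width. */
    static void copy_block32(const uint8_t *src, int src_stride,
                             uint8_t *dst, int dst_stride, int h) {
      int y;
      for (y = 0; y < h; ++y) {
        vst1q_u8(dst, vld1q_u8(src));
        vst1q_u8(dst + 16, vld1q_u8(src + 16));
        src += src_stride;
        dst += dst_stride;
      }
    }
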
intrapred_neon.c
447 uint8x16_t q1u8 = vdupq_n_u8(0); in vpx_v_predictor_32x32_neon() local
451 q1u8 = vld1q_u8(above + 16); in vpx_v_predictor_32x32_neon()
454 vst1q_u8(dst + 16, q1u8); in vpx_v_predictor_32x32_neon()
517 uint8x16_t q1u8 = vdupq_n_u8(0); in vpx_h_predictor_16x16_neon() local
520 q1u8 = vld1q_u8(left); in vpx_h_predictor_16x16_neon()
521 d2u8 = vget_low_u8(q1u8); in vpx_h_predictor_16x16_neon()
522 for (j = 0; j < 2; j++, d2u8 = vget_high_u8(q1u8)) { in vpx_h_predictor_16x16_neon()
555 uint8x16_t q1u8 = vdupq_n_u8(0); in vpx_h_predictor_32x32_neon() local
559 q1u8 = vld1q_u8(left); in vpx_h_predictor_32x32_neon()
560 d2u8 = vget_low_u8(q1u8); in vpx_h_predictor_32x32_neon()
[all …]
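
In intrapred_neon.c, the vertical predictor loads the 32 pixels above the block once and stores them into every row, while the horizontal predictors broadcast each left-edge pixel across a row (hence the vdupq_n_u8 initializers and vget_low_u8/vget_high_u8 splits). A sketch of the vertical 32x32 case, assuming above holds 32 valid pixels:

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* 32x32 vertical intra prediction: load the row above once,
       write it to all 32 rows of the block. */
    static void v_pred_32x32(uint8_t *dst, ptrdiff_t stride,
                             const uint8_t *above) {
      const uint8x16_t q0u8 = vld1q_u8(above);
      const uint8x16_t q1u8 = vld1q_u8(above + 16);
      int i;
      for (i = 0; i < 32; ++i) {
        vst1q_u8(dst, q0u8);
        vst1q_u8(dst + 16, q1u8);
        dst += stride;
      }
    }
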
vpx_convolve8_avg_neon.c
62 uint8x16_t q1u8, q3u8, q12u8, q13u8, q14u8, q15u8; in vpx_convolve8_avg_horiz_neon() local
205 q1u8 = vcombine_u8(d0x2u8.val[0], d0x2u8.val[1]); in vpx_convolve8_avg_horiz_neon()
208 q1u8 = vrhaddq_u8(q1u8, q3u8); in vpx_convolve8_avg_horiz_neon()
210 d2u32 = vreinterpret_u32_u8(vget_low_u8(q1u8)); in vpx_convolve8_avg_horiz_neon()
211 d3u32 = vreinterpret_u32_u8(vget_high_u8(q1u8)); in vpx_convolve8_avg_horiz_neon()
251 uint8x16_t q1u8, q3u8; in vpx_convolve8_avg_vert_neon() local
348 q1u8 = vcombine_u8(d2u8, d3u8); in vpx_convolve8_avg_vert_neon()
351 q1u8 = vrhaddq_u8(q1u8, q3u8); in vpx_convolve8_avg_vert_neon()
353 d2u32 = vreinterpret_u32_u8(vget_low_u8(q1u8)); in vpx_convolve8_avg_vert_neon()
354 d3u32 = vreinterpret_u32_u8(vget_high_u8(q1u8)); in vpx_convolve8_avg_vert_neon()
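
The vpx_convolve8_avg hits are the tail of the 8-tap filter: two 8-byte result vectors are combined into one q register, averaged against dst with vrhaddq_u8, then bit-cast to u32 lanes so the result can be stored four pixels at a time. A sketch of that store idiom; avg_store_4x4 and its argument packing (four 4-pixel dst rows in q3u8) are assumptions, not the libvpx signature:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Average 16 filtered pixels (two d registers) against four
       4-pixel rows of dst packed into q3u8, then store 4 bytes per
       row via the u32 bit-cast seen in the hits. */
    static void avg_store_4x4(uint8_t *dst, int stride,
                              uint8x8_t d2u8, uint8x8_t d3u8,
                              uint8x16_t q3u8) {
      uint8x16_t q1u8 = vcombine_u8(d2u8, d3u8);
      uint32x2_t d2u32, d3u32;
      q1u8 = vrhaddq_u8(q1u8, q3u8);
      d2u32 = vreinterpret_u32_u8(vget_low_u8(q1u8));
      d3u32 = vreinterpret_u32_u8(vget_high_u8(q1u8));
      vst1_lane_u32((uint32_t *)dst, d2u32, 0); dst += stride;
      vst1_lane_u32((uint32_t *)dst, d2u32, 1); dst += stride;
      vst1_lane_u32((uint32_t *)dst, d3u32, 0); dst += stride;
      vst1_lane_u32((uint32_t *)dst, d3u32, 1);
    }
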
loopfilter_16_neon.c
33 uint8x16_t q1u8, q2u8, q11u8, q12u8, q13u8, q14u8, q15u8; in loop_filter_neon_16() local
90 q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q14u8); in loop_filter_neon_16()
93 q1s8 = vreinterpretq_s8_u8(q1u8); in loop_filter_neon_16()
103 q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q15u8); in loop_filter_neon_16()
104 q1s8 = vreinterpretq_s8_u8(q1u8); in loop_filter_neon_16()
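
Both vandq_u8 lines gate the signed filter adjustment with a per-lane mask (0x00 or 0xFF): the value is bit-cast to u8, ANDed, and bit-cast back, at zero cost since vreinterpretq compiles to nothing. A helper capturing that round trip (mask_filter_s8 is an illustrative name):

    #include <arm_neon.h>

    /* Zero the filter adjustment wherever the mask lane is 0x00,
       keep it where the lane is 0xFF. The vreinterpretq casts are
       free bit-casts, so this costs a single AND. */
    static inline int8x16_t mask_filter_s8(int8x16_t filt,
                                           uint8x16_t mask) {
      return vreinterpretq_s8_u8(
          vandq_u8(vreinterpretq_u8_s8(filt), mask));
    }
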
variance_neon.c
166 uint8x16_t q0u8, q1u8, q2u8, q3u8; in vpx_variance16x8_neon() local
178 q1u8 = vld1q_u8(src_ptr); in vpx_variance16x8_neon()
190 q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8)); in vpx_variance16x8_neon()
191 q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8)); in vpx_variance16x8_neon()
309 uint8x16_t q0u8, q1u8, q2u8, q3u8; in vpx_mse16x16_neon() local
322 q1u8 = vld1q_u8(src_ptr); in vpx_mse16x16_neon()
331 q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8)); in vpx_mse16x16_neon()
332 q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8)); in vpx_mse16x16_neon()
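
variance_neon.c widens the src/ref difference with vsubl_u8 before squaring. vsubl_u8 wraps modulo 2^16, so bit-casting its result to s16 recovers the correct signed difference (|src - ref| <= 255); the surrounding code, not shown in these hits, presumably does exactly that before the multiply-accumulate. A sketch accumulating squared differences for one 16-pixel row (sse_row16 is a hypothetical helper):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Accumulate squared differences for one 16-pixel row into four
       s32 lanes. vsubl_u8 wraps mod 2^16; bit-casting to s16 yields
       the signed difference since |src - ref| <= 255. */
    static int32x4_t sse_row16(int32x4_t acc, const uint8_t *src,
                               const uint8_t *ref) {
      const uint8x16_t s = vld1q_u8(src);
      const uint8x16_t r = vld1q_u8(ref);
      const int16x8_t d_lo =
          vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(s), vget_low_u8(r)));
      const int16x8_t d_hi =
          vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(s), vget_high_u8(r)));
      acc = vmlal_s16(acc, vget_low_s16(d_lo), vget_low_s16(d_lo));
      acc = vmlal_s16(acc, vget_high_s16(d_lo), vget_high_s16(d_lo));
      acc = vmlal_s16(acc, vget_low_s16(d_hi), vget_low_s16(d_hi));
      acc = vmlal_s16(acc, vget_high_s16(d_hi), vget_high_s16(d_hi));
      return acc;
    }
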
/external/libvpx/libvpx/vp8/common/arm/neon/
bilinearpredict_neon.c
33 uint8x16_t q1u8, q2u8; in vp8_bilinear_predict4x4_neon() local
64 q1u8 = vcombine_u8(d2u8, d3u8); in vp8_bilinear_predict4x4_neon()
70 q4u64 = vshrq_n_u64(vreinterpretq_u64_u8(q1u8), 8); in vp8_bilinear_predict4x4_neon()
74 d0u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q1u8)), in vp8_bilinear_predict4x4_neon()
75 vreinterpret_u32_u8(vget_high_u8(q1u8))); in vp8_bilinear_predict4x4_neon()
141 uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8; in vp8_bilinear_predict8x4_neon() local
152 q1u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; in vp8_bilinear_predict8x4_neon()
161 q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8); in vp8_bilinear_predict8x4_neon()
167 d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1); in vp8_bilinear_predict8x4_neon()
228 uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8; in vp8_bilinear_predict8x8_neon() local
[all …]
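
The bilinear hits show the classic two-tap horizontal pass: vmull_u8 weights each pixel by the first tap, vext_u8 produces the one-pixel-shifted neighbour for the second tap, and the 16-bit accumulator is rounded back to 8 bits. A sketch for 8 output pixels, assuming VP8's convention that the two taps sum to 128 (hence the rounding shift by 7); bilinear_h8 is an illustrative name:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Two-tap horizontal filter for 8 pixels:
       out[i] = (a * src[i] + b * src[i + 1] + 64) >> 7.
       vext_u8 supplies the shifted neighbour, vqrshrn_n_u16 the
       rounded narrowing; assumes a + b == 128. */
    static uint8x8_t bilinear_h8(uint8x16_t row, uint8_t a, uint8_t b) {
      const uint8x8_t s0 = vget_low_u8(row);
      const uint8x8_t s1 = vext_u8(vget_low_u8(row), vget_high_u8(row), 1);
      uint16x8_t acc = vmull_u8(s0, vdup_n_u8(a));  /* a * src[i]     */
      acc = vmlal_u8(acc, s1, vdup_n_u8(b));        /* + b * src[i+1] */
      return vqrshrn_n_u16(acc, 7);                 /* round, narrow  */
    }
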
mbloopfilter_neon.c
32 uint8x16_t q0u8, q1u8, q11u8, q12u8, q13u8, q14u8, q15u8; in vp8_mbloop_filter_neon() local
43 q1u8 = vabdq_u8(q9, q8); in vp8_mbloop_filter_neon()
48 q1u8 = vmaxq_u8(q1u8, q0u8); in vp8_mbloop_filter_neon()
56 q15u8 = vmaxq_u8(q15u8, q1u8); in vp8_mbloop_filter_neon()
60 q1u8 = vabdq_u8(q5, q8); in vp8_mbloop_filter_neon()
73 q1u8 = vshrq_n_u8(q1u8, 1); in vp8_mbloop_filter_neon()
74 q12u8 = vqaddq_u8(q12u8, q1u8); in vp8_mbloop_filter_neon()
145 q1u8 = vdupq_n_u8(0x80); in vp8_mbloop_filter_neon()
146 *q9r = veorq_u8(vreinterpretq_u8_s8(q11s8), q1u8); in vp8_mbloop_filter_neon()
147 *q8r = veorq_u8(vreinterpretq_u8_s8(q13s8), q1u8); in vp8_mbloop_filter_neon()
[all …]
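
Two idioms stand out in vp8_mbloop_filter_neon: vabdq_u8/vmaxq_u8 fold pairwise absolute differences into one largest-edge-difference value for the filter-mask threshold test, and veorq_u8 with 0x80 toggles pixels between the unsigned [0, 255] and signed [-128, 127] domains (XOR with 0x80 flips the sign bit, equivalent to subtracting 128). Sketches of both; the names are illustrative:

    #include <arm_neon.h>

    /* Largest per-lane absolute difference between neighbouring
       rows, fed into the loop-filter threshold test. */
    static uint8x16_t max_abs_diff(uint8x16_t p3, uint8x16_t p2,
                                   uint8x16_t p1, uint8x16_t p0) {
      uint8x16_t m = vabdq_u8(p3, p2);
      m = vmaxq_u8(m, vabdq_u8(p2, p1));
      m = vmaxq_u8(m, vabdq_u8(p1, p0));
      return m;
    }

    /* x ^ 0x80 flips the sign bit, mapping [0, 255] onto
       [-128, 127] so the filter can use signed saturating math. */
    static int8x16_t to_signed(uint8x16_t px) {
      return vreinterpretq_s8_u8(veorq_u8(px, vdupq_n_u8(0x80)));
    }
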
vp8_loopfilter_neon.c
31 uint8x16_t q0u8, q1u8, q2u8, q11u8, q12u8, q13u8, q14u8, q15u8; in vp8_loop_filter_neon() local
90 q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q14u8); in vp8_loop_filter_neon()
93 q1s8 = vreinterpretq_s8_u8(q1u8); in vp8_loop_filter_neon()
102 q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q15u8); in vp8_loop_filter_neon()
103 q1s8 = vreinterpretq_s8_u8(q1u8); in vp8_loop_filter_neon()
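
vp8_loop_filter_neon repeats the masking round trip seen in loopfilter_16_neon.c, gating the signed filter value first by q14u8 and later by q15u8. A combined sketch; reading the first mask as the overall filter mask and the second as the high-edge-variance mask is an assumption based on the usual VP8 filter structure, not something these hits show:

    #include <arm_neon.h>

    /* Gate a signed filter value by two unsigned masks in turn;
       mirrors the vandq_u8 pairs in the hits. Which mask plays
       which role is an assumption, see the note above. */
    static int8x16_t gate_filter(int8x16_t filt, uint8x16_t mask,
                                 uint8x16_t hev) {
      filt = vreinterpretq_s8_u8(vandq_u8(vreinterpretq_u8_s8(filt), mask));
      filt = vreinterpretq_s8_u8(vandq_u8(vreinterpretq_u8_s8(filt), hev));
      return filt;
    }
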