
Searched refs: load_unaligned_u8q (Results 1 – 16 of 16) sorted by relevance

/external/libvpx/libvpx/vpx_dsp/arm/
sad_neon.c:22 const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride); in vpx_sad4x4_neon()
23 const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride); in vpx_sad4x4_neon()
32 const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride); in vpx_sad4x4_avg_neon()
33 const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride); in vpx_sad4x4_avg_neon()
46 const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride); in vpx_sad4x8_neon()
47 const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride); in vpx_sad4x8_neon()
63 const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride); in vpx_sad4x8_avg_neon()
64 const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride); in vpx_sad4x8_avg_neon()
avg_pred_neon.c:55 r = load_unaligned_u8q(ref, ref_stride); in vpx_comp_avg_pred_neon()
variance_neon.c:42 const uint8x16_t a_u8 = load_unaligned_u8q(src_ptr, src_stride); in variance_neon_w4x4()
43 const uint8x16_t b_u8 = load_unaligned_u8q(ref_ptr, ref_stride); in variance_neon_w4x4()
mem_neon.h:124 static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf, int stride) { in load_unaligned_u8q() function (sketched below)
avg_neon.c:23 const uint8x16_t b = load_unaligned_u8q(a, a_stride); in vpx_avg_4x4_neon()
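
mem_neon.h:124 above is where libvpx defines load_unaligned_u8q (ihevc_cmn_utils_neon.h:49 below carries the libhevc copy). Only the signature survives in the listing; the following is a minimal sketch of such a loader, assuming the usual libvpx approach of gathering four 4-byte rows into one uint8x16_t via memcpy and vsetq_lane_u32. INLINE is the tree's force-inline macro; plain static inline stands in for it here.

#include <arm_neon.h>
#include <stdint.h>
#include <string.h>

/* Sketch: load four 4-byte rows, `stride` bytes apart, into one 128-bit
 * register. memcpy expresses each unaligned 32-bit load portably. */
static inline uint8x16_t load_unaligned_u8q(const uint8_t *buf, int stride) {
  uint32_t a;
  uint32x4_t a_u32 = vdupq_n_u32(0);
  if (stride == 4) return vld1q_u8(buf); /* rows are already contiguous */
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 0);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 1);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 2);
  memcpy(&a, buf, 4);
  a_u32 = vsetq_lane_u32(a, a_u32, 3);
  return vreinterpretq_u8_u32(a_u32);
}
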
/external/libhevc/encoder/arm/
ihevce_sad_compute_neon.c:59 const uint8x16_t src_u8 = load_unaligned_u8q(pu1_src, src_strd); in ihevce_4x4_sad_computer_neon()
60 const uint8x16_t ref_u8 = load_unaligned_u8q(pu1_pred, pred_strd); in ihevce_4x4_sad_computer_neon()
ihevce_ssd_calculator_neon.c:65 src = load_unaligned_u8q(pu1_src, src_strd); in ihevce_4x4_ssd_computer_neon()
66 pred = load_unaligned_u8q(pu1_pred, pred_strd); in ihevce_4x4_ssd_computer_neon()
ihevce_subpel_neon.c:119 uint8x16_t src_a = load_unaligned_u8q(pu1_src_a, src_a_strd); in hme_4x4_qpel_interp_avg_neon()
120 uint8x16_t src_b = load_unaligned_u8q(pu1_src_b, src_b_strd); in hme_4x4_qpel_interp_avg_neon()
ihevce_ssd_and_sad_calculator_neon.c:67 const uint8x16_t src_u8 = load_unaligned_u8q(pu1_src, src_strd); in ihevce_ssd_and_sad_calculator_neon()
68 const uint8x16_t ref_u8 = load_unaligned_u8q(pu1_recon, recon_strd); in ihevce_ssd_and_sad_calculator_neon()
ihevce_coarse_layer_sad_neon.c:230 const uint8x16_t ref = load_unaligned_u8q(pu1_ref, i4_ref_stride); in hme_store_4x4_sads_high_speed_neon()
397 uint8x16_t ref = load_unaligned_u8q(pu1_ref, i4_ref_stride); in hme_store_4x4_sads_high_quality_neon()
411 ref = load_unaligned_u8q(pu1_ref + 2, i4_ref_stride); in hme_store_4x4_sads_high_quality_neon()
ihevce_itrans_recon_neon.c:65 src_u8 = load_unaligned_u8q(pu1_pred, pred_strd); in ihevce_itrans_recon_dc_4x4_luma_neon()
ihevce_had_compute_neon.c:393 src_u8 = load_unaligned_u8q(pu1_src, src_strd); in ihevce_HAD_4x4_8bit_plane_neon()
394 pred_u8 = load_unaligned_u8q(pu1_pred, pred_strd); in ihevce_HAD_4x4_8bit_plane_neon()
ihevce_hme_utils_neon.c:698 src0_16x8b = load_unaligned_u8q(pu1_src, src_stride); in hme_get_wt_inp_ctb_neon()
ihevce_common_utils_neon.c:247 src0_u8 = load_unaligned_u8q(pu1_pred0 + ((i * pred0_strd) + j), pred0_strd); in ihevce_wt_avg_2d_4xn_neon()
248 src1_u8 = load_unaligned_u8q(pu1_pred1 + ((i * pred1_strd) + j), pred1_strd); in ihevce_wt_avg_2d_4xn_neon()
/external/libhevc/common/arm/
ihevc_cmn_utils_neon.h:49 static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf, int stride) in load_unaligned_u8q() function
ihevc_resi_trans_neon.c:91 inp_buf = load_unaligned_u8q(pu1_src, src_strd); in ihevc_resi_trans_4x4_neon()
92 pred_buf = load_unaligned_u8q(pu1_pred, pred_strd); in ihevc_resi_trans_4x4_neon()
232 const uint8x16_t src_u8 = load_unaligned_u8q(pu1_src, src_strd); in ihevc_resi_trans_4x4_ttype1_neon()
233 const uint8x16_t pred_u8 = load_unaligned_u8q(pu1_pred, pred_strd); in ihevc_resi_trans_4x4_ttype1_neon()
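
Almost every hit above follows the same shape: load a 4x4 source block and a 4x4 reference/prediction block with load_unaligned_u8q, then run a widening NEON reduction (SAD, SSD, variance, averaging). Below is a hedged sketch of the 4x4 SAD pattern visible in sad_neon.c, reusing the loader sketched after the libvpx results; sad4x4_sketch is an illustrative name, not the tree's symbol.

#include <arm_neon.h>
#include <stdint.h>

/* Sketch of the vpx_sad4x4_neon() shape: sixteen absolute differences
 * accumulated with widening ops, then reduced to a scalar. */
static uint32_t sad4x4_sketch(const uint8_t *src_ptr, int src_stride,
                              const uint8_t *ref_ptr, int ref_stride) {
  const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);
  const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);
  uint16x8_t abs = vabdl_u8(vget_low_u8(src_u8), vget_low_u8(ref_u8));
  abs = vabal_u8(abs, vget_high_u8(src_u8), vget_high_u8(ref_u8));
  /* Horizontal reduce: 8 x u16 -> 4 x u32 -> 2 x u64 -> scalar. */
  const uint32x4_t sum32 = vpaddlq_u16(abs);
  const uint64x2_t sum64 = vpaddlq_u32(sum32);
  return (uint32_t)(vgetq_lane_u64(sum64, 0) + vgetq_lane_u64(sum64, 1));
}

Packing four rows into one register is the point of the unaligned gather: these 4-wide kernels get one 128-bit operation per step instead of four 32-bit ones.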