Searched refs:load_unaligned_u8q (Results 1 – 16 of 16) sorted by relevance
/external/libvpx/libvpx/vpx_dsp/arm/
D | sad_neon.c |
    22   const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);   in vpx_sad4x4_neon()
    23   const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);   in vpx_sad4x4_neon()
    32   const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);   in vpx_sad4x4_avg_neon()
    33   const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);   in vpx_sad4x4_avg_neon()
    46   const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);   in vpx_sad4x8_neon()
    47   const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);   in vpx_sad4x8_neon()
    63   const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);   in vpx_sad4x8_avg_neon()
    64   const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);   in vpx_sad4x8_avg_neon()
D | avg_pred_neon.c |
    55   r = load_unaligned_u8q(ref, ref_stride);   in vpx_comp_avg_pred_neon()
D | variance_neon.c |
    42   const uint8x16_t a_u8 = load_unaligned_u8q(src_ptr, src_stride);   in variance_neon_w4x4()
    43   const uint8x16_t b_u8 = load_unaligned_u8q(ref_ptr, ref_stride);   in variance_neon_w4x4()
D | mem_neon.h |
    124  static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf, int stride) {   in load_unaligned_u8q() function (definition; a sketch of this helper follows the listing)
D | avg_neon.c |
    23   const uint8x16_t b = load_unaligned_u8q(a, a_stride);   in vpx_avg_4x4_neon()
/external/libhevc/encoder/arm/
D | ihevce_sad_compute_neon.c |
    59   const uint8x16_t src_u8 = load_unaligned_u8q(pu1_src, src_strd);   in ihevce_4x4_sad_computer_neon()
    60   const uint8x16_t ref_u8 = load_unaligned_u8q(pu1_pred, pred_strd);   in ihevce_4x4_sad_computer_neon()
D | ihevce_ssd_calculator_neon.c |
    65   src = load_unaligned_u8q(pu1_src, src_strd);   in ihevce_4x4_ssd_computer_neon()
    66   pred = load_unaligned_u8q(pu1_pred, pred_strd);   in ihevce_4x4_ssd_computer_neon()
D | ihevce_subpel_neon.c |
    119  uint8x16_t src_a = load_unaligned_u8q(pu1_src_a, src_a_strd);   in hme_4x4_qpel_interp_avg_neon()
    120  uint8x16_t src_b = load_unaligned_u8q(pu1_src_b, src_b_strd);   in hme_4x4_qpel_interp_avg_neon()
D | ihevce_ssd_and_sad_calculator_neon.c |
    67   const uint8x16_t src_u8 = load_unaligned_u8q(pu1_src, src_strd);   in ihevce_ssd_and_sad_calculator_neon()
    68   const uint8x16_t ref_u8 = load_unaligned_u8q(pu1_recon, recon_strd);   in ihevce_ssd_and_sad_calculator_neon()
D | ihevce_coarse_layer_sad_neon.c |
    230  const uint8x16_t ref = load_unaligned_u8q(pu1_ref, i4_ref_stride);   in hme_store_4x4_sads_high_speed_neon()
    397  uint8x16_t ref = load_unaligned_u8q(pu1_ref, i4_ref_stride);   in hme_store_4x4_sads_high_quality_neon()
    411  ref = load_unaligned_u8q(pu1_ref + 2, i4_ref_stride);   in hme_store_4x4_sads_high_quality_neon()
D | ihevce_itrans_recon_neon.c |
    65   src_u8 = load_unaligned_u8q(pu1_pred, pred_strd);   in ihevce_itrans_recon_dc_4x4_luma_neon()
D | ihevce_had_compute_neon.c |
    393  src_u8 = load_unaligned_u8q(pu1_src, src_strd);   in ihevce_HAD_4x4_8bit_plane_neon()
    394  pred_u8 = load_unaligned_u8q(pu1_pred, pred_strd);   in ihevce_HAD_4x4_8bit_plane_neon()
D | ihevce_hme_utils_neon.c |
    698  src0_16x8b = load_unaligned_u8q(pu1_src, src_stride);   in hme_get_wt_inp_ctb_neon()
D | ihevce_common_utils_neon.c |
    247  src0_u8 = load_unaligned_u8q(pu1_pred0 + ((i * pred0_strd) + j), pred0_strd);   in ihevce_wt_avg_2d_4xn_neon()
    248  src1_u8 = load_unaligned_u8q(pu1_pred1 + ((i * pred1_strd) + j), pred1_strd);   in ihevce_wt_avg_2d_4xn_neon()
/external/libhevc/common/arm/
D | ihevc_cmn_utils_neon.h |
    49   static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf, int stride)   in load_unaligned_u8q() function (definition)
D | ihevc_resi_trans_neon.c |
    91   inp_buf = load_unaligned_u8q(pu1_src, src_strd);   in ihevc_resi_trans_4x4_neon()
    92   pred_buf = load_unaligned_u8q(pu1_pred, pred_strd);   in ihevc_resi_trans_4x4_neon()
    232  const uint8x16_t src_u8 = load_unaligned_u8q(pu1_src, src_strd);   in ihevc_resi_trans_4x4_ttype1_neon()
    233  const uint8x16_t pred_u8 = load_unaligned_u8q(pu1_pred, pred_strd);   in ihevc_resi_trans_4x4_ttype1_neon()
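Every hit above loads a 4x4 block of 8-bit pixels from a strided, possibly unaligned buffer into a single 128-bit register, so the SAD/SSD/variance/Hadamard kernels can process the whole block at once. The sketch below illustrates the common pattern behind a helper with the signature shown in mem_neon.h (line 124) and ihevc_cmn_utils_neon.h (line 49); the `_sketch` names are illustrative, and the bodies actually checked into libvpx and libhevc may differ in detail.

#include <arm_neon.h>
#include <string.h>

/* Sketch only: a load_unaligned_u8q-style helper. It gathers four 4-byte rows
 * from a strided, possibly unaligned buffer into one uint8x16_t. The real
 * bodies in mem_neon.h and ihevc_cmn_utils_neon.h may differ in detail. */
static inline uint8x16_t load_unaligned_u8q_sketch(const uint8_t *buf, int stride) {
  uint32_t a;
  uint32x4_t a_u32 = vdupq_n_u32(0);

  /* memcpy keeps each 4-byte row read alignment-safe; each word then fills
   * the next 32-bit lane of the accumulator. */
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 0);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 1);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 2);
  memcpy(&a, buf, 4);
  a_u32 = vsetq_lane_u32(a, a_u32, 3);
  return vreinterpretq_u8_u32(a_u32);
}

/* Hypothetical 4x4 SAD kernel in the style of the call sites above (e.g.
 * vpx_sad4x4_neon, ihevce_4x4_sad_computer_neon): load both blocks, take the
 * per-byte absolute difference, and horizontally sum the 16 lanes. */
static inline uint32_t sad_4x4_sketch(const uint8_t *src, int src_stride,
                                      const uint8_t *ref, int ref_stride) {
  const uint8x16_t s = load_unaligned_u8q_sketch(src, src_stride);
  const uint8x16_t r = load_unaligned_u8q_sketch(ref, ref_stride);
  const uint8x16_t abs_diff = vabdq_u8(s, r);      /* per-byte |s - r|      */
  const uint16x8_t sum16 = vpaddlq_u8(abs_diff);   /* pairwise widen to u16 */
  return vaddvq_u16(sum16); /* AArch64 horizontal add; AArch32 would need vpaddl chains */
}

The memcpy/vsetq_lane_u32 gather is branch-free and valid for any stride; an actual implementation might add a fast path (for example a single vld1q_u8 when stride == 4), but that is an assumption rather than something this listing shows.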