Searched refs: load_unaligned_u8q (Results 1 – 6 of 6, sorted by relevance)
/external/libvpx/libvpx/vpx_dsp/arm/
sad4d_neon.c
   23  const uint8x16_t src_u8 = load_unaligned_u8q(src, src_stride);  in vpx_sad4x4x4d_neon()
   25  const uint8x16_t ref_u8 = load_unaligned_u8q(ref[i], ref_stride);  in vpx_sad4x4x4d_neon()
   36  const uint8x16_t src_0 = load_unaligned_u8q(src, src_stride);  in vpx_sad4x8x4d_neon()
   37  const uint8x16_t src_1 = load_unaligned_u8q(src + 4 * src_stride, src_stride);  in vpx_sad4x8x4d_neon()
   39  const uint8x16_t ref_0 = load_unaligned_u8q(ref[i], ref_stride);  in vpx_sad4x8x4d_neon()
   41  load_unaligned_u8q(ref[i] + 4 * ref_stride, ref_stride);  in vpx_sad4x8x4d_neon()
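These are the 4-wide multi-reference (x4d) SAD kernels, which compare one source block against four candidate reference blocks at once. A minimal sketch of that pattern, assuming the strided loader from mem_neon.h (sketched under that entry below); the function name and reduction sequence here are illustrative, not the exact libvpx body:

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative sketch; load_unaligned_u8q is the helper from mem_neon.h
 * (sketched under that entry below). */
static void sad4x4x4d_sketch(const uint8_t *src, int src_stride,
                             const uint8_t *const ref[4], int ref_stride,
                             uint32_t res[4]) {
  const uint8x16_t src_u8 = load_unaligned_u8q(src, src_stride);
  int i;
  for (i = 0; i < 4; ++i) {
    /* |src - ref|, then widening pairwise adds down to one scalar. */
    const uint8x16_t ref_u8 = load_unaligned_u8q(ref[i], ref_stride);
    const uint16x8_t sum16 = vpaddlq_u8(vabdq_u8(src_u8, ref_u8));
    const uint32x4_t sum32 = vpaddlq_u16(sum16);
    const uint64x2_t sum64 = vpaddlq_u32(sum32);
    res[i] = (uint32_t)(vgetq_lane_u64(sum64, 0) + vgetq_lane_u64(sum64, 1));
  }
}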
sad_neon.c
   21  const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);  in vpx_sad4x4_neon()
   22  const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);  in vpx_sad4x4_neon()
   31  const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);  in vpx_sad4x4_avg_neon()
   32  const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);  in vpx_sad4x4_avg_neon()
   45  const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);  in vpx_sad4x8_neon()
   46  const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);  in vpx_sad4x8_neon()
   62  const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);  in vpx_sad4x8_avg_neon()
   63  const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);  in vpx_sad4x8_avg_neon()
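sad_neon.c holds the single-reference 4-wide SAD kernels and their _avg_ variants, where the reference is first rounding-averaged with a contiguous second predictor. A sketch of the avg flavor under the same assumptions (illustrative name; the plain variant simply skips the vrhaddq_u8 step):

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative sketch of the vpx_sad4x4_avg_neon pattern; second_pred is
 * a contiguous 4x4 block, so its 16 bytes load with a single vld1q_u8. */
static uint32_t sad4x4_avg_sketch(const uint8_t *src_ptr, int src_stride,
                                  const uint8_t *ref_ptr, int ref_stride,
                                  const uint8_t *second_pred) {
  const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);
  const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);
  const uint8x16_t avg = vrhaddq_u8(ref_u8, vld1q_u8(second_pred));
  const uint16x8_t sum16 = vpaddlq_u8(vabdq_u8(src_u8, avg));
  const uint32x4_t sum32 = vpaddlq_u16(sum16);
  const uint64x2_t sum64 = vpaddlq_u32(sum32);
  return (uint32_t)(vgetq_lane_u64(sum64, 0) + vgetq_lane_u64(sum64, 1));
}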
avg_pred_neon.c
   39  r = load_unaligned_u8q(ref, ref_stride);  in vpx_comp_avg_pred_neon()
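vpx_comp_avg_pred_neon averages a contiguous prediction block with a strided reference. A hypothetical sketch of the width-4 path the hit above belongs to (name and loop shape are illustrative assumptions):

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative sketch of the width-4 path: four rows per iteration,
 * prediction contiguous, reference gathered with the strided loader. */
static void comp_avg_pred_4xh_sketch(uint8_t *comp_pred, const uint8_t *pred,
                                     int height, const uint8_t *ref,
                                     int ref_stride) {
  int h;
  for (h = 0; h < height; h += 4) {
    const uint8x16_t p = vld1q_u8(pred);
    const uint8x16_t r = load_unaligned_u8q(ref, ref_stride);
    vst1q_u8(comp_pred, vrhaddq_u8(p, r)); /* rounded (p + r + 1) >> 1 */
    comp_pred += 16;
    pred += 16;
    ref += 4 * ref_stride;
  }
}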
variance_neon.c
   41  const uint8x16_t a_u8 = load_unaligned_u8q(a, a_stride);  in variance_neon_w4x4()
   42  const uint8x16_t b_u8 = load_unaligned_u8q(b, b_stride);  in variance_neon_w4x4()
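variance_neon_w4x4 needs both the sum and the sum of squared differences over a 4x4 tile. A sketch of that inner step under the same assumptions (illustrative name); the unsigned subtract-then-reinterpret trick yields correct signed differences because they fit in [-255, 255]:

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative sketch of the 4x4 variance inner step: signed 16-bit
 * differences, then a running sum and sum of squares. */
static void variance_4x4_sketch(const uint8_t *a, int a_stride,
                                const uint8_t *b, int b_stride,
                                uint32_t *sse, int *sum) {
  const uint8x16_t a_u8 = load_unaligned_u8q(a, a_stride);
  const uint8x16_t b_u8 = load_unaligned_u8q(b, b_stride);
  /* Unsigned subtract wraps, but reinterpreting as s16 recovers the
   * true difference since it lies in [-255, 255]. */
  const int16x8_t d_lo =
      vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(a_u8), vget_low_u8(b_u8)));
  const int16x8_t d_hi =
      vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(a_u8), vget_high_u8(b_u8)));
  int32x4_t sse_s32 = vmull_s16(vget_low_s16(d_lo), vget_low_s16(d_lo));
  sse_s32 = vmlal_s16(sse_s32, vget_high_s16(d_lo), vget_high_s16(d_lo));
  sse_s32 = vmlal_s16(sse_s32, vget_low_s16(d_hi), vget_low_s16(d_hi));
  sse_s32 = vmlal_s16(sse_s32, vget_high_s16(d_hi), vget_high_s16(d_hi));
  {
    const int32x4_t sum_s32 = vpaddlq_s16(vaddq_s16(d_lo, d_hi));
    const int64x2_t sum_s64 = vpaddlq_s32(sum_s32);
    const int64x2_t sse_s64 = vpaddlq_s32(sse_s32);
    *sum = (int)(vgetq_lane_s64(sum_s64, 0) + vgetq_lane_s64(sum_s64, 1));
    *sse = (uint32_t)(vgetq_lane_s64(sse_s64, 0) + vgetq_lane_s64(sse_s64, 1));
  }
}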
mem_neon.h
  109  static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf, int stride) {  (function definition)
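This is the definition site. A minimal sketch of what such a loader looks like, inferred from the signature and the 4-wide call sites above (the actual libvpx body may differ in details): gather four 4-byte rows at an arbitrary stride into one 16-byte q-register, using memcpy per row since the rows need not be aligned.

#include <arm_neon.h>
#include <stdint.h>
#include <string.h>

/* Sketch: load four 4-byte rows from a strided buffer into one
 * uint8x16_t. memcpy handles rows that are not 4-byte aligned. */
static inline uint8x16_t load_unaligned_u8q(const uint8_t *buf, int stride) {
  uint32_t a;
  uint32x4_t a_u32 = vdupq_n_u32(0);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 0);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 1);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 2);
  memcpy(&a, buf, 4);
  a_u32 = vsetq_lane_u32(a, a_u32, 3);
  return vreinterpretq_u8_u32(a_u32);
}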
avg_neon.c
   23  const uint8x16_t b = load_unaligned_u8q(a, a_stride);  in vpx_avg_4x4_neon()
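vpx_avg_4x4_neon reduces a 4x4 tile to its rounded mean. A sketch under the same assumptions (illustrative name): sum all 16 bytes with widening pairwise adds, then round-divide by 16.

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative sketch: mean of a 4x4 tile, rounded to nearest. */
static unsigned int avg_4x4_sketch(const uint8_t *a, int a_stride) {
  const uint8x16_t b = load_unaligned_u8q(a, a_stride);
  const uint16x8_t sum16 = vpaddlq_u8(b);
  const uint32x4_t sum32 = vpaddlq_u16(sum16);
  const uint64x2_t sum64 = vpaddlq_u32(sum32);
  const uint64_t total = vgetq_lane_u64(sum64, 0) + vgetq_lane_u64(sum64, 1);
  return (unsigned int)((total + 8) >> 4); /* 16 pixels: add 8, shift by 4 */
}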