Searched refs:vrev64q_s16 (Results 1 – 11 of 11) sorted by relevance
134 diff_16[2][0] = vrev64q_s16(vreinterpretq_s16_u16( in ihevc_resi_trans_32x32_neon()
138 diff_16[3][0] = vrev64q_s16(vreinterpretq_s16_u16( in ihevc_resi_trans_32x32_neon()
159 diff_16[2][1] = vrev64q_s16(vreinterpretq_s16_u16( in ihevc_resi_trans_32x32_neon()
163 diff_16[3][1] = vrev64q_s16(vreinterpretq_s16_u16( in ihevc_resi_trans_32x32_neon()
237 int16x8_t e0_1 = vrev64q_s16(vaddq_s16( in ihevc_resi_trans_32x32_neon()
242 int16x8_t e1_1 = vrev64q_s16(vaddq_s16( in ihevc_resi_trans_32x32_neon()
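The ihevc_resi_trans_32x32_neon hits use vrev64q_s16 while folding residual rows into symmetric and antisymmetric sums for the forward-transform butterfly. A minimal sketch of that pattern, assuming a 16-sample row and hypothetical helper names (illustrative only, not the libhevc code):

    #include <arm_neon.h>

    /* Reverse all eight 16-bit lanes: vrev64q_s16 reverses within each
     * 64-bit half, vextq_s16(v, v, 4) then swaps the two halves.       */
    static inline int16x8_t reverse8_s16(int16x8_t v)
    {
        int16x8_t r = vrev64q_s16(v);
        return vextq_s16(r, r, 4);
    }

    /* Butterfly on one 16-sample row: e[k] = r[k] + r[15-k],
     * o[k] = r[k] - r[15-k] for k = 0..7 (hypothetical helper).        */
    static inline void butterfly16(const int16_t *row, int16x8_t *e, int16x8_t *o)
    {
        int16x8_t lo = vld1q_s16(row);                    /* r[0..7]            */
        int16x8_t hi = reverse8_s16(vld1q_s16(row + 8));  /* r[15..8], reversed */
        *e = vaddq_s16(lo, hi);
        *o = vsubq_s16(lo, hi);
    }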
143 return vrev64q_s16(a); in test_vrev64q_s16()
141 ; return vrev64q_s16(a);
2196 const int16x8_t c = vrev64q_s16(a); in FlipColumns()
2197 const int16x8_t d = vrev64q_s16(b); in FlipColumns()
2206 const int16x8_t b = vrev64q_s16(a); in FlipColumns()
2213 vst1q_s16(&source[i], vrev64q_s16(a)); in FlipColumns()
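The FlipColumns hits show the usual column-mirroring idiom: when two 4-wide rows of 16-bit samples are packed in one q-register, vrev64q_s16 alone mirrors each row, while an 8-wide row additionally needs the two 64-bit halves swapped (as in the reverse8_s16 sketch above). A minimal sketch of the packed 4-wide case, with illustrative names rather than the actual libgav1 code:

    #include <arm_neon.h>

    /* Mirror two 4-wide rows packed into one register: rev64 alone
     * reverses each 4-lane group, so no half swap is needed.        */
    static inline void flip_two_rows4_s16(int16_t *rows)
    {
        vst1q_s16(rows, vrev64q_s16(vld1q_s16(rows)));
    }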
467 …t_s16x8 = vrev64q_s16( t_s16x8 ); /* 4 5 6 7 0 1 2… in silk_short_prediction_create_arch_coef_neon_local()
473 …t_s16x8 = vrev64q_s16( t_s16x8 ); /* C D E F 8 9 A B … in silk_short_prediction_create_arch_coef_neon_local()
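The silk hits annotate each lane with the coefficient index it holds, listed from the highest lane to the lowest, which makes the intrinsic's semantics visible: vrev64q_s16 reverses the four 16-bit lanes inside each 64-bit half of the register and leaves the halves in place. A small self-contained check (an assumed test harness, not Opus code):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void)
    {
        /* Lanes 0..7 hold 0..7; in the high-to-low comment convention of
         * the Opus hits this load would be written "7 6 5 4 3 2 1 0".     */
        const int16_t in[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
        int16_t out[8];

        vst1q_s16(out, vrev64q_s16(vld1q_s16(in)));

        /* Prints 3 2 1 0 7 6 5 4: each 64-bit half reversed, halves kept,
         * i.e. "4 5 6 7 0 1 2 3" in the comment convention above.         */
        for (int i = 0; i < 8; ++i)
            printf("%d ", out[i]);
        printf("\n");
        return 0;
    }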
170 ; return vrev64q_s16(a);
861 return vrev64q_s16(a); in test_vrev64q_s16()
13388 return vrev64q_s16(a); in test_vrev64q_s16()
921 return vrev64q_s16(a); in test_vrev64q_s16()
15689 return vrev64q_s16(a); in test_vrev64q_s16()
1992 _NEON2SSESTORAGE int16x8_t vrev64q_s16(int16x8_t vec); // VREV64.16 q0,q0
14405 res = vrev64q_s16(_pM128i(vec)); in vrev64_s16()
14449 _NEON2SSESTORAGE int16x8_t vrev64q_s16(int16x8_t vec); // VREV64.16 q0,q0
14450 _NEON2SSE_INLINE int16x8_t vrev64q_s16(int16x8_t vec) // VREV64.16 q0,q0 in vrev64q_s16() function
14467 #define vrev64q_u16 vrev64q_s16
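The NEON_2_SSE hits declare and define an x86 fallback for the intrinsic. Reversing 16-bit lanes within each 64-bit half maps naturally onto SSE2's shufflelo/shufflehi pair; a hedged sketch of such an emulation follows (the actual NEON_2_SSE implementation may differ in its details):

    #include <emmintrin.h>

    /* SSE2 counterpart of vrev64q_s16: reverse the four 16-bit elements
     * inside each 64-bit half of the 128-bit vector.                    */
    static inline __m128i rev64q_s16_sse2(__m128i vec)
    {
        vec = _mm_shufflelo_epi16(vec, _MM_SHUFFLE(0, 1, 2, 3)); /* low half  */
        vec = _mm_shufflehi_epi16(vec, _MM_SHUFFLE(0, 1, 2, 3)); /* high half */
        return vec;
    }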