Searched refs:vreinterpret_s64_s8 (Results 1 – 11 of 11) sorted by relevance
/external/libaom/libaom/aom_dsp/simd/ |
D | v64_intrinsics_arm.h |
      105  return c ? vreinterpret_s64_s8(  in v64_align()
      222  return vreinterpret_s64_s8(  in v64_sadd_s8()
      267  return vreinterpret_s64_s8(  in v64_ssub_s8()
      281  return vreinterpret_s64_s8(vabs_s8(vreinterpret_s8_s64(x)));  in v64_abs_s8()
      350  return vreinterpret_s64_s8(  in v64_max_s8()
      355  return vreinterpret_s64_s8(  in v64_min_s8()
      461  return vreinterpret_s64_s8(vqmovn_s16(vreinterpretq_s16_s32(  in v64_pack_s16_s8()
      567  return vreinterpret_s64_s8(vshl_s8(vreinterpret_s8_s64(a), vdup_n_s8(-c)));  in v64_shr_s8()
      619  return c ? vreinterpret_s64_s8(vshr_n_s8(vreinterpret_s8_s64(a), c)) : a;  in v64_shr_n_s8()
|
D | v128_intrinsics_arm.h |
      607  vreinterpret_s64_s8(vqmovn_s16(vreinterpretq_s16_s64(a))),  in v128_pack_s16_s8()
      608  vreinterpret_s64_s8(vqmovn_s16(vreinterpretq_s16_s64(b))));  in v128_pack_s16_s8()
|
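The libaom matches above all follow one pattern: the v64 abstraction stores a 64-bit vector in a single int64x1_t, so every byte-wise helper reinterprets it as int8x8_t, does the work, and reinterprets back. A minimal sketch of that pattern, assuming (as in libaom's ARM backend) that v64 is an alias for int64x1_t; the function names carry a _sketch suffix to mark them as illustrative:

    #include <arm_neon.h>

    typedef int64x1_t v64;  /* assumption: mirrors libaom's typedef */

    /* Byte-wise absolute value on a 64-bit vector: view the one 64-bit
     * lane as eight signed bytes, take |x| per byte, view it as s64 again
     * (same shape as the match at line 281). */
    static inline v64 v64_abs_s8_sketch(v64 x) {
      return vreinterpret_s64_s8(vabs_s8(vreinterpret_s8_s64(x)));
    }

    /* Byte-wise arithmetic right shift by a runtime count c. NEON's
     * variable shift is a left shift, so the count is negated
     * (same shape as the match at line 567). */
    static inline v64 v64_shr_s8_sketch(v64 a, int c) {
      return vreinterpret_s64_s8(
          vshl_s8(vreinterpret_s8_s64(a), vdup_n_s8((int8_t)-c)));
    }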
/external/libgav1/libgav1/src/dsp/arm/ |
D | motion_field_projection_neon.cc |
      131  *skip_64 = vget_lane_s64(vreinterpret_s64_s8(skip), 0);  in GetPosition()
      215  const int64_t early_skip = vget_lane_s64(vreinterpret_s64_s8(skip_r), 0);  in MotionFieldProjectionKernel_NEON()
      274  vget_lane_s64(vreinterpret_s64_s8(skip_r), 0);  in MotionFieldProjectionKernel_NEON()
|
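The libgav1 matches use the reinterpret to collapse a per-byte skip mask into one scalar, so all eight flags can be tested with a single integer comparison. A sketch of that idiom under the assumption that each mask byte is either 0x00 or 0xff; the surrounding kernel logic and the real construction of `skip` are omitted:

    #include <arm_neon.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Returns true when every byte of the mask is 0xff, i.e. every element
     * can be skipped. Viewing the eight bytes as one int64 makes this a
     * single compare against -1. */
    static inline bool all_bytes_set(int8x8_t skip) {
      const int64_t skip_64 = vget_lane_s64(vreinterpret_s64_s8(skip), 0);
      return skip_64 == -1;
    }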
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | depthwiseconv_uint8_3x3_filter.h |
      6828  vreinterpret_s64_s8(padding_mask), vdup_n_s64(8 * copy_remaining)));
      6891  vshl_s64(vreinterpret_s64_s8(half_work_reg),
      6919  vreinterpret_s64_s8(padding_mask), vdup_n_s64(8 * copy_remaining)));
      6975  vshl_s64(vreinterpret_s64_s8(half_work_reg),
      7057  vreinterpret_s64_s8(padding_mask), vdup_n_s64(8 * copy_remaining)));
      7065  vshl_n_s64(vreinterpret_s64_s8(half_work_reg), 8));
      7073  vshl_n_s64(vreinterpret_s64_s8(half_work_reg), 8));
      7241  vshl_s64(vreinterpret_s64_s8(half_work_reg),
      7302  vshl_s64(vreinterpret_s64_s8(half_work_reg),
      7330  vshl_n_s64(vreinterpret_s64_s8(half_work_reg), 8));
|
D | depthwiseconv_uint8_transitional.h |
      1991  vreinterpret_s64_s8(padding_mask), vdup_n_s64(8 * copy_remaining)));
      2050  vshl_s64(vreinterpret_s64_s8(half_work_reg),
      2072  vreinterpret_s64_s8(padding_mask), vdup_n_s64(8 * copy_remaining)));
      2125  vshl_s64(vreinterpret_s64_s8(half_work_reg),
      2195  vreinterpret_s64_s8(padding_mask), vdup_n_s64(8 * copy_remaining)));
      2203  vshl_n_s64(vreinterpret_s64_s8(half_work_reg), 8));
      2211  vshl_n_s64(vreinterpret_s64_s8(half_work_reg), 8));
      2375  vshl_s64(vreinterpret_s64_s8(half_work_reg),
      2432  vshl_s64(vreinterpret_s64_s8(half_work_reg),
      2457  vshl_n_s64(vreinterpret_s64_s8(half_work_reg), 8));
|
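Both TensorFlow Lite files use the same trick: a byte mask held in an int8x8_t is reinterpreted as int64x1_t so it can be shifted by whole bytes (8 bits per element) with one 64-bit shift. A sketch of that byte-granular shift; the variable names mirror the snippets above, but the surrounding copy/padding selection logic in the kernels is omitted:

    #include <arm_neon.h>

    /* Shift an 8-byte padding mask by copy_remaining byte positions.
     * vshl_s64 with a positive count shifts left, which on a little-endian
     * target moves mask bytes toward the higher-numbered lanes. */
    static inline int8x8_t shift_padding_mask(int8x8_t padding_mask,
                                               int copy_remaining) {
      const int64x1_t shifted = vshl_s64(vreinterpret_s64_s8(padding_mask),
                                         vdup_n_s64(8 * copy_remaining));
      return vreinterpret_s8_s64(shifted);
    }

The vshl_n_s64(..., 8) matches are the constant-count variant of the same idea: shift the reinterpreted register by exactly one byte.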
/external/pffft/ |
D | sse2neon.h | 210 #define vreinterpret_m64_s8(x) vreinterpret_s64_s8(x)
|
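In sse2neon, __m64 (the MMX vector type) is represented as int64x1_t, so converting eight signed bytes to an __m64 is a pure reinterpret with no data movement; that is all the macro at line 210 does. A sketch with a stand-in typedef so it compiles outside sse2neon.h:

    #include <arm_neon.h>

    typedef int64x1_t m64_sketch;  /* stand-in for sse2neon's __m64 typedef */

    /* Equivalent of what vreinterpret_m64_s8(x) expands to. */
    static inline m64_sketch bytes_to_m64(int8x8_t x) {
      return vreinterpret_s64_s8(x);
    }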
/external/llvm-project/clang/test/CodeGen/ |
D | aarch64-neon-intrinsics.c | 15374 return vreinterpret_s64_s8(a); in test_vreinterpret_s64_s8()
|
D | arm_neon_intrinsics.c | 11602 return vreinterpret_s64_s8(a); in test_vreinterpret_s64_s8()
|
/external/clang/test/CodeGen/ |
D | arm_neon_intrinsics.c | 13900 return vreinterpret_s64_s8(a); in test_vreinterpret_s64_s8()
|
D | aarch64-neon-intrinsics.c | 19554 return vreinterpret_s64_s8(a); in test_vreinterpret_s64_s8()
|
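The four clang CodeGen matches are the same shape of test: a wrapper whose body is just the intrinsic call, so FileCheck can verify it lowers to a plain bitcast. Reconstructed from the snippets above; the CHECK directives in the real tests are omitted:

    #include <arm_neon.h>

    int64x1_t test_vreinterpret_s64_s8(int8x8_t a) {
      return vreinterpret_s64_s8(a);
    }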
/external/neon_2_sse/ |
D | NEON_2_SSE.h |
      16206  _NEON2SSESTORAGE int64x1_t vreinterpret_s64_s8 (int8x8_t t);  macro
      16207  #define vreinterpret_s64_s8
|
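NEON_2_SSE.h declares the intrinsic and then provides it as a macro so NEON code can be compiled against SSE; the macro's expansion is not shown in the search result, so it is not reproduced here. A sketch of portable usage under that assumption, picking the header by target:

    #if defined(__ARM_NEON) || defined(__ARM_NEON__)
    #include <arm_neon.h>
    #else
    #include "NEON_2_SSE.h"  /* x86 shim providing the same names */
    #endif

    /* Same call on either target: reinterpret eight signed bytes as one
     * 64-bit lane. */
    static inline int64x1_t as_s64(int8x8_t t) {
      return vreinterpret_s64_s8(t);
    }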