/external/libhevc/common/arm/
  ihevc_cmn_utils_neon.h
    154: const int16x8x2_t b0 = vtrnq_s16(*a0, *a1);  in transpose_s16_4x4q()
    155: const int16x8x2_t b1 = vtrnq_s16(*a2, *a3);  in transpose_s16_4x4q()
    205: b0 = vtrnq_s16(*a0, *a1);  in transpose_s16_8x8()
    206: b1 = vtrnq_s16(*a2, *a3);  in transpose_s16_8x8()
    207: b2 = vtrnq_s16(*a4, *a5);  in transpose_s16_8x8()
    208: b3 = vtrnq_s16(*a6, *a7);  in transpose_s16_8x8()
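Almost every hit in this listing is the 16-bit stage of the same 8x8 int16 transpose idiom: `vtrnq_s16` on adjacent row pairs, then `vtrnq_s32` on the reinterpreted results, then a 64-bit recombine. A minimal self-contained sketch of that pattern, with illustrative names (not copied from any of the files listed here):

```c
#include <arm_neon.h>

/* Sketch of the transpose idiom behind most of the hits in this listing.
 * rows[i] holds row i of an 8x8 block of int16 values.                   */
static inline void transpose_8x8_s16_sketch(int16x8_t rows[8]) {
  /* Stage 1: 16-bit transpose of adjacent row pairs
   * (these are the vtrnq_s16 calls the search matches). */
  const int16x8x2_t b0 = vtrnq_s16(rows[0], rows[1]);
  const int16x8x2_t b1 = vtrnq_s16(rows[2], rows[3]);
  const int16x8x2_t b2 = vtrnq_s16(rows[4], rows[5]);
  const int16x8x2_t b3 = vtrnq_s16(rows[6], rows[7]);

  /* Stage 2: 32-bit transpose across the row pairs. */
  const int32x4x2_t c0 = vtrnq_s32(vreinterpretq_s32_s16(b0.val[0]),
                                   vreinterpretq_s32_s16(b1.val[0]));
  const int32x4x2_t c1 = vtrnq_s32(vreinterpretq_s32_s16(b0.val[1]),
                                   vreinterpretq_s32_s16(b1.val[1]));
  const int32x4x2_t c2 = vtrnq_s32(vreinterpretq_s32_s16(b2.val[0]),
                                   vreinterpretq_s32_s16(b3.val[0]));
  const int32x4x2_t c3 = vtrnq_s32(vreinterpretq_s32_s16(b2.val[1]),
                                   vreinterpretq_s32_s16(b3.val[1]));

  /* Stage 3: 64-bit swap by recombining low/high halves. */
  rows[0] = vreinterpretq_s16_s32(
      vcombine_s32(vget_low_s32(c0.val[0]), vget_low_s32(c2.val[0])));
  rows[1] = vreinterpretq_s16_s32(
      vcombine_s32(vget_low_s32(c1.val[0]), vget_low_s32(c3.val[0])));
  rows[2] = vreinterpretq_s16_s32(
      vcombine_s32(vget_low_s32(c0.val[1]), vget_low_s32(c2.val[1])));
  rows[3] = vreinterpretq_s16_s32(
      vcombine_s32(vget_low_s32(c1.val[1]), vget_low_s32(c3.val[1])));
  rows[4] = vreinterpretq_s16_s32(
      vcombine_s32(vget_high_s32(c0.val[0]), vget_high_s32(c2.val[0])));
  rows[5] = vreinterpretq_s16_s32(
      vcombine_s32(vget_high_s32(c1.val[0]), vget_high_s32(c3.val[0])));
  rows[6] = vreinterpretq_s16_s32(
      vcombine_s32(vget_high_s32(c0.val[1]), vget_high_s32(c2.val[1])));
  rows[7] = vreinterpretq_s16_s32(
      vcombine_s32(vget_high_s32(c1.val[1]), vget_high_s32(c3.val[1])));
}
```

The entries that call `vtrnq_s16` on `vreinterpretq_s16_s32(...)` values (libvpx, aom_dsp, vp8) are the same idea with the 32-bit and 16-bit stages done in the opposite order.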
/external/libaom/libaom/av1/common/arm/
  transpose_neon.h
    409: const int16x8x2_t b0 = vtrnq_s16(*a0, *a1);  in transpose_s16_8x8()
    410: const int16x8x2_t b1 = vtrnq_s16(*a2, *a3);  in transpose_s16_8x8()
    411: const int16x8x2_t b2 = vtrnq_s16(*a4, *a5);  in transpose_s16_8x8()
    412: const int16x8x2_t b3 = vtrnq_s16(*a6, *a7);  in transpose_s16_8x8()
    483: const int16x8x2_t b0 = vtrnq_s16(*a0, *(a0 + 1));  in transpose_s16_8x8q()
    484: const int16x8x2_t b1 = vtrnq_s16(*(a0 + 2), *(a0 + 3));  in transpose_s16_8x8q()
    485: const int16x8x2_t b2 = vtrnq_s16(*(a0 + 4), *(a0 + 5));  in transpose_s16_8x8q()
    486: const int16x8x2_t b3 = vtrnq_s16(*(a0 + 6), *(a0 + 7));  in transpose_s16_8x8q()
  warp_plane_neon.c
    322: b0 = vtrnq_s16(src[0], src[1]);  in vertical_filter_neon()
    323: b1 = vtrnq_s16(src[2], src[3]);  in vertical_filter_neon()
    324: b2 = vtrnq_s16(src[4], src[5]);  in vertical_filter_neon()
    325: b3 = vtrnq_s16(src[6], src[7]);  in vertical_filter_neon()
/external/libjpeg-turbo/simd/arm/
  jfdctfst-neon.c
    133: int16x8x2_t cols_01 = vtrnq_s16(col0, col1);  in jsimd_fdct_ifast_neon()
    134: int16x8x2_t cols_23 = vtrnq_s16(col2, col3);  in jsimd_fdct_ifast_neon()
    135: int16x8x2_t cols_45 = vtrnq_s16(col4, col5);  in jsimd_fdct_ifast_neon()
    136: int16x8x2_t cols_67 = vtrnq_s16(col6, col7);  in jsimd_fdct_ifast_neon()
  jfdctint-neon.c
    232: int16x8x2_t cols_01 = vtrnq_s16(col0, col1);  in jsimd_fdct_islow_neon()
    233: int16x8x2_t cols_23 = vtrnq_s16(col2, col3);  in jsimd_fdct_islow_neon()
    234: int16x8x2_t cols_45 = vtrnq_s16(col4, col5);  in jsimd_fdct_islow_neon()
    235: int16x8x2_t cols_67 = vtrnq_s16(col6, col7);  in jsimd_fdct_islow_neon()
  jidctfst-neon.c
    332: int16x8x2_t rows_01 = vtrnq_s16(row0, row1);  in jsimd_idct_ifast_neon()
    333: int16x8x2_t rows_23 = vtrnq_s16(row2, row3);  in jsimd_idct_ifast_neon()
    334: int16x8x2_t rows_45 = vtrnq_s16(row4, row5);  in jsimd_idct_ifast_neon()
    335: int16x8x2_t rows_67 = vtrnq_s16(row6, row7);  in jsimd_idct_ifast_neon()
  jidctred-neon.c
    130: int16x8x2_t cols_0246_1357 = vtrnq_s16(row0, row1);  in jsimd_idct_2x2_neon()
    421: int16x8x2_t row_01 = vtrnq_s16(row0, row1);  in jsimd_idct_4x4_neon()
    422: int16x8x2_t row_23 = vtrnq_s16(row2, row3);  in jsimd_idct_4x4_neon()
/external/libvpx/libvpx/vp8/common/arm/neon/
  idct_blk_neon.c
    159: q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),  in idct_dequant_full_2x_neon()
    161: q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),  in idct_dequant_full_2x_neon()
    194: q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),  in idct_dequant_full_2x_neon()
    196: q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),  in idct_dequant_full_2x_neon()
/external/libvpx/libvpx/vpx_dsp/arm/
  fwd_txfm_neon.c
    150: vtrnq_s16(vreinterpretq_s16_s32(r02_s32.val[0]),  in vpx_fdct8x8_neon()
    153: vtrnq_s16(vreinterpretq_s16_s32(r02_s32.val[1]),  in vpx_fdct8x8_neon()
    156: vtrnq_s16(vreinterpretq_s16_s32(r46_s32.val[0]),  in vpx_fdct8x8_neon()
    159: vtrnq_s16(vreinterpretq_s16_s32(r46_s32.val[1]),  in vpx_fdct8x8_neon()
  fdct16x16_neon.c
    196: const int16x8x2_t c0 = vtrnq_s16(a[0], a[1]);  in transpose_8x8()
    197: const int16x8x2_t c1 = vtrnq_s16(a[2], a[3]);  in transpose_8x8()
    198: const int16x8x2_t c2 = vtrnq_s16(a[4], a[5]);  in transpose_8x8()
    199: const int16x8x2_t c3 = vtrnq_s16(a[6], a[7]);  in transpose_8x8()
  transpose_neon.h
    154: vtrnq_s16(vreinterpretq_s16_s32(c0), vreinterpretq_s16_s32(c1));  in transpose_s16_4x4q()
    594: const int16x8x2_t b0 = vtrnq_s16(*a0, *a1);  in transpose_s16_8x8()
    595: const int16x8x2_t b1 = vtrnq_s16(*a2, *a3);  in transpose_s16_8x8()
    596: const int16x8x2_t b2 = vtrnq_s16(*a4, *a5);  in transpose_s16_8x8()
    597: const int16x8x2_t b3 = vtrnq_s16(*a6, *a7);  in transpose_s16_8x8()
  fdct32x32_neon.c
    1287: const int16x8x2_t c0 = vtrnq_s16(a[0], a[1]);  in transpose_8x8()
    1288: const int16x8x2_t c1 = vtrnq_s16(a[2], a[3]);  in transpose_8x8()
    1289: const int16x8x2_t c2 = vtrnq_s16(a[4], a[5]);  in transpose_8x8()
    1290: const int16x8x2_t c3 = vtrnq_s16(a[6], a[7]);  in transpose_8x8()
/external/libvpx/libvpx/vp8/encoder/arm/neon/
  shortfdct_neon.c
    143: v2tmp0 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]),  // q0  in vp8_short_fdct8x4_neon()
    145: v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]),  // q2  in vp8_short_fdct8x4_neon()
    194: v2tmp0 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]),  // q0  in vp8_short_fdct8x4_neon()
    196: v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]),  // q2  in vp8_short_fdct8x4_neon()
/external/libgav1/libgav1/src/dsp/arm/
  common_neon.h
    586: const int16x8x2_t c0 = vtrnq_s16(vreinterpretq_s16_s8(b0.val[0]),  in Transpose8x8()
    588: const int16x8x2_t c1 = vtrnq_s16(vreinterpretq_s16_s8(b0.val[1]),  in Transpose8x8()
    680: const int16x8x2_t b0 = vtrnq_s16(a[0], a[1]);  in Transpose8x8()
    681: const int16x8x2_t b1 = vtrnq_s16(a[2], a[3]);  in Transpose8x8()
    682: const int16x8x2_t b2 = vtrnq_s16(a[4], a[5]);  in Transpose8x8()
    683: const int16x8x2_t b3 = vtrnq_s16(a[6], a[7]);  in Transpose8x8()
  inverse_transform_neon.cc
    111: const int16x8x2_t b0 = vtrnq_s16(in[0], in[1]);  in Transpose8x8()
    112: const int16x8x2_t b1 = vtrnq_s16(in[2], in[3]);  in Transpose8x8()
    113: const int16x8x2_t b2 = vtrnq_s16(in[4], in[5]);  in Transpose8x8()
    114: const int16x8x2_t b3 = vtrnq_s16(in[6], in[7]);  in Transpose8x8()
    238: const int16x8x2_t b0 = vtrnq_s16(in[0], in[1]);  in Transpose8x4To4x8()
    239: const int16x8x2_t b1 = vtrnq_s16(in[2], in[3]);  in Transpose8x4To4x8()
/external/libaom/libaom/aom_dsp/arm/
  fwd_txfm_neon.c
    239: vtrnq_s16(vreinterpretq_s16_s32(r02_s32.val[0]),  in aom_fdct8x8_neon()
    242: vtrnq_s16(vreinterpretq_s16_s32(r02_s32.val[1]),  in aom_fdct8x8_neon()
    245: vtrnq_s16(vreinterpretq_s16_s32(r46_s32.val[0]),  in aom_fdct8x8_neon()
    248: vtrnq_s16(vreinterpretq_s16_s32(r46_s32.val[1]),  in aom_fdct8x8_neon()
/external/gemmlowp/internal/
  output_neon.h
    430: const int16x8x2_t t0 = vtrnq_s16(src.buf.reg[0], src.buf.reg[1]);
    431: const int16x8x2_t t1 = vtrnq_s16(src.buf.reg[2], src.buf.reg[3]);
    871: a[0] = vtrnq_s16(src.buf.reg[0], src.buf.reg[1]);
    872: a[1] = vtrnq_s16(src.buf.reg[2], src.buf.reg[3]);
    873: a[2] = vtrnq_s16(src.buf.reg[4], src.buf.reg[5]);
    874: a[3] = vtrnq_s16(src.buf.reg[6], src.buf.reg[7]);
/external/webp/src/dsp/
  enc_neon.c
    569: const int16x8x2_t q2_tmp0 = vtrnq_s16(q4_in.val[0], q4_in.val[1]);  in DistoTranspose4x4S16_NEON()
    570: const int16x8x2_t q2_tmp1 = vtrnq_s16(q4_in.val[2], q4_in.val[3]);  in DistoTranspose4x4S16_NEON()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
  depthwiseconv_3x3_filter_common.h
    97: r16x8 = vtrnq_s16(vreinterpretq_s16_s8(*a), vreinterpretq_s16_s8(*b));  in vtrn1_s8x2_in_place()
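The TensorFlow Lite hit is the odd one out: the data are int8, but reinterpreting the registers as int16x8 lets a single VTRN.16 swap adjacent byte pairs between two vectors. A minimal sketch of that trick (hypothetical helper name, not the TFLite function itself):

```c
#include <arm_neon.h>

/* Illustrative helper for the reinterpret trick seen in
 * depthwiseconv_3x3_filter_common.h: each pair of int8 lanes is treated as
 * one 16-bit lane, so VTRN.16 exchanges 2-byte blocks between registers. */
static inline void swap_s8_pairs(int8x16_t *a, int8x16_t *b) {
  const int16x8x2_t t =
      vtrnq_s16(vreinterpretq_s16_s8(*a), vreinterpretq_s16_s8(*b));
  /* t.val[0]: bytes a[0..1] b[0..1] a[4..5] b[4..5] ...  (even 16-bit lanes)
   * t.val[1]: bytes a[2..3] b[2..3] a[6..7] b[6..7] ...  (odd 16-bit lanes) */
  *a = vreinterpretq_s8_s16(t.val[0]);
  *b = vreinterpretq_s8_s16(t.val[1]);
}
```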
/external/clang/test/CodeGen/
  aarch64-neon-perm.c
    2097: return vtrnq_s16(a, b);  in test_vtrnq_s16()
  arm_neon_intrinsics.c
    22961: return vtrnq_s16(a, b);  in test_vtrnq_s16()
/external/llvm-project/clang/test/CodeGen/
  aarch64-neon-perm.c
    1888: return vtrnq_s16(a, b);  in test_vtrnq_s16()
  arm_neon_intrinsics.c
    20380: return vtrnq_s16(a, b);  in test_vtrnq_s16()
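The clang and llvm-project hits in the two CodeGen directories above are compiler tests: each is a thin wrapper that returns the intrinsic result so the generated shuffle IR can be checked. Roughly this shape (a sketch, not the exact checked-in source):

```c
#include <arm_neon.h>

int16x8x2_t test_vtrnq_s16(int16x8_t a, int16x8_t b) {
  return vtrnq_s16(a, b);
}
```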
/external/neon_2_sse/
  NEON_2_SSE.h
    2228: _NEON2SSESTORAGE int16x8x2_t vtrnq_s16(int16x8_t a, int16x8_t b);  // VTRN.16 q0,q0
    15729: _NEON2SSESTORAGE int16x8x2_t vtrnq_s16(int16x8_t a, int16x8_t b);  // VTRN.16 q0,q0
    15730: _NEON2SSE_INLINE int16x8x2_t vtrnq_s16(int16x8_t a, int16x8_t b)  // VTRN.16 q0,q0  in vtrnq_s16() function
    15759: #define vtrnq_u16 vtrnq_s16
    15782: #define vtrnq_p16 vtrnq_s16
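The NEON_2_SSE entries spell out the signature: vtrnq_s16 returns an int16x8x2_t whose val[0] interleaves the even lanes of both inputs and whose val[1] interleaves the odd lanes, matching the VTRN.16 behaviour the header reproduces on x86. A small standalone check of that lane layout (illustrative only):

```c
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  const int16_t a_in[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  const int16_t b_in[8] = {10, 11, 12, 13, 14, 15, 16, 17};
  const int16x8x2_t t = vtrnq_s16(vld1q_s16(a_in), vld1q_s16(b_in));
  /* t.val[0] interleaves the even lanes: 0 10 2 12 4 14 6 16
   * t.val[1] interleaves the odd lanes:  1 11 3 13 5 15 7 17 */
  int16_t out[8];
  vst1q_s16(out, t.val[0]);
  for (int i = 0; i < 8; ++i) printf("%d ", out[i]);
  printf("\n");
  return 0;
}
```

Built natively this runs on ARM/AArch64; built against NEON_2_SSE.h the same source maps onto the SSE-based emulation shown in the listing above.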