/external/libaom/libaom/aom_dsp/arm/ |
D | variance_neon.c |
    186  d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));   in aom_variance16x8_neon()
    187  d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));  in aom_variance16x8_neon()
    192  d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));   in aom_variance16x8_neon()
    193  d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));  in aom_variance16x8_neon()
    198  d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));   in aom_variance16x8_neon()
    199  d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));  in aom_variance16x8_neon()
    204  d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));   in aom_variance16x8_neon()
    205  d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));  in aom_variance16x8_neon()
    260  d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));   in aom_variance8x16_neon()
    261  d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));  in aom_variance8x16_neon()
    [all …]
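A note on the pattern: in these variance kernels the q11u16..q14u16 registers hold 8-bit differences computed with vsubl_u8, which wrap modulo 2^16; bitcasting each half to int16 recovers the signed difference in [-255, 255] so sum and sum-of-squares can be accumulated with signed widening arithmetic. A minimal sketch of the idea (helper name hypothetical, not the libaom code):

    #include <arm_neon.h>

    /* Fold one row of u16-encoded signed differences into sum / sum-of-squares
     * accumulators, as the variance kernels above do. */
    static void accumulate_row(uint16x8_t q_diff, int32x4_t *sum, int32x4_t *sse) {
      const int16x4_t d_lo = vreinterpret_s16_u16(vget_low_u16(q_diff));
      const int16x4_t d_hi = vreinterpret_s16_u16(vget_high_u16(q_diff));
      *sum = vaddw_s16(*sum, d_lo);        /* running sum of differences */
      *sum = vaddw_s16(*sum, d_hi);
      *sse = vmlal_s16(*sse, d_lo, d_lo);  /* running sum of squares */
      *sse = vmlal_s16(*sse, d_hi, d_hi);
    }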
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | highbd_vpx_convolve8_neon.c |
    167  s0 = vreinterpret_s16_u16(vget_low_u16(t0));   in vpx_highbd_convolve8_horiz_neon()
    168  s1 = vreinterpret_s16_u16(vget_low_u16(t1));   in vpx_highbd_convolve8_horiz_neon()
    169  s2 = vreinterpret_s16_u16(vget_low_u16(t2));   in vpx_highbd_convolve8_horiz_neon()
    170  s3 = vreinterpret_s16_u16(vget_low_u16(t3));   in vpx_highbd_convolve8_horiz_neon()
    171  s4 = vreinterpret_s16_u16(vget_high_u16(t0));  in vpx_highbd_convolve8_horiz_neon()
    172  s5 = vreinterpret_s16_u16(vget_high_u16(t1));  in vpx_highbd_convolve8_horiz_neon()
    173  s6 = vreinterpret_s16_u16(vget_high_u16(t2));  in vpx_highbd_convolve8_horiz_neon()
    376  s0 = vreinterpret_s16_u16(vget_low_u16(t0));   in vpx_highbd_convolve8_avg_horiz_neon()
    377  s1 = vreinterpret_s16_u16(vget_low_u16(t1));   in vpx_highbd_convolve8_avg_horiz_neon()
    378  s2 = vreinterpret_s16_u16(vget_low_u16(t2));   in vpx_highbd_convolve8_avg_horiz_neon()
    [all …]
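Here the reinterpret is applied to high-bit-depth pixels: 10/12-bit samples stored in uint16 lanes never exceed 15 bits, so the bitcast to int16 is lossless and lets the 8-tap filter run in signed arithmetic. A hedged sketch of the s0..s6 setup (simplified, not the libvpx code):

    #include <arm_neon.h>

    /* Split eight high-bit-depth pixels (<= 12 bits each) into two signed
     * 16-bit halves for use with signed filter taps. */
    static void load_highbd8(const uint16_t *src, int16x4_t *lo, int16x4_t *hi) {
      const uint16x8_t t = vld1q_u16(src);
      *lo = vreinterpret_s16_u16(vget_low_u16(t));
      *hi = vreinterpret_s16_u16(vget_high_u16(t));
    }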
|
D | variance_neon.c |
    298  d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));   in vpx_mse16x16_neon()
    299  d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));  in vpx_mse16x16_neon()
    303  d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));   in vpx_mse16x16_neon()
    304  d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));  in vpx_mse16x16_neon()
    308  d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));   in vpx_mse16x16_neon()
    309  d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));  in vpx_mse16x16_neon()
    313  d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));   in vpx_mse16x16_neon()
    314  d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));  in vpx_mse16x16_neon()
|
D | vpx_convolve8_neon.c |
    621  s0 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in vpx_convolve8_vert_neon()
    623  s1 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in vpx_convolve8_vert_neon()
    625  s2 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in vpx_convolve8_vert_neon()
    627  s3 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in vpx_convolve8_vert_neon()
    629  s4 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in vpx_convolve8_vert_neon()
    631  s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in vpx_convolve8_vert_neon()
    633  s6 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in vpx_convolve8_vert_neon()
    637  s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in vpx_convolve8_vert_neon()
    639  s8 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in vpx_convolve8_vert_neon()
    641  s9 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in vpx_convolve8_vert_neon()
    [all …]
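The repeated chain here is the standard NEON idiom for loading four 8-bit pixels as signed 16-bit lanes: load eight bytes, zero-extend with vmovl_u8, keep the low half, and bitcast to s16 (safe because every value is at most 255). Pulled out as a helper (hypothetical name):

    #include <arm_neon.h>

    /* Load 4 pixels as int16 lanes, ready for vmlal_s16 against signed
     * filter coefficients. */
    static inline int16x4_t load_4_u8_as_s16(const uint8_t *src) {
      return vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
    }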
|
D | subtract_neon.c |
    73  vst1_s16(diff + 0 * diff_stride, vreinterpret_s16_u16(vget_low_u16(d)));   in vpx_subtract_block_neon()
    74  vst1_s16(diff + 1 * diff_stride, vreinterpret_s16_u16(vget_high_u16(d)));  in vpx_subtract_block_neon()
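vpx_subtract_block_neon relies on modular arithmetic: vsubl_u8 produces u16 lanes that wrap modulo 2^16, and the bitcast to s16 recovers the true signed residual in [-255, 255] before the store. A sketch of the 4-wide case shown above (assuming src8/pred8 each pack two 4-pixel rows):

    #include <arm_neon.h>

    /* Subtract predictor from source and store two rows of 4 residuals. */
    static void subtract_4x2(int16_t *diff, int diff_stride,
                             uint8x8_t src8, uint8x8_t pred8) {
      const uint16x8_t d = vsubl_u8(src8, pred8);  /* wraps mod 2^16 */
      vst1_s16(diff + 0 * diff_stride, vreinterpret_s16_u16(vget_low_u16(d)));
      vst1_s16(diff + 1 * diff_stride, vreinterpret_s16_u16(vget_high_u16(d)));
    }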
|
/external/webp/src/dsp/ |
D | yuv_neon.c |
    91  const int16x4_t r_lo = vreinterpret_s16_u16(vget_low_u16(r));   \
    92  const int16x4_t r_hi = vreinterpret_s16_u16(vget_high_u16(r));  \
    93  const int16x4_t g_lo = vreinterpret_s16_u16(vget_low_u16(g));   \
    94  const int16x4_t g_hi = vreinterpret_s16_u16(vget_high_u16(g));  \
    95  const int16x4_t b_lo = vreinterpret_s16_u16(vget_low_u16(b));   \
    96  const int16x4_t b_hi = vreinterpret_s16_u16(vget_high_u16(b))
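The macro body splits zero-extended R/G/B channels (u16 lanes, each at most 255) into s16 halves so the RGB-to-luma dot product can use signed multiply-accumulates. A sketch of one half with illustrative BT.601-style weights (the constants are placeholders, not necessarily WebP's):

    #include <arm_neon.h>

    /* Weighted luma sum for four pixels; weights scaled by 2^15. */
    static int32x4_t rgb_to_y4(uint16x4_t r, uint16x4_t g, uint16x4_t b) {
      int32x4_t y = vmull_s16(vreinterpret_s16_u16(r), vdup_n_s16(9798));
      y = vmlal_s16(y, vreinterpret_s16_u16(g), vdup_n_s16(19235));
      y = vmlal_s16(y, vreinterpret_s16_u16(b), vdup_n_s16(3736));
      return y;  /* caller rounds and shifts right by 15 */
    }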
|
D | enc_neon.c |
    344  vreinterpret_s16_u16(vceq_s16(vget_low_s16(a3a2), vdup_n_s16(0)));  in FTransform_NEON()
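This match reinterprets a comparison result rather than pixel data: vceq_s16 yields all-ones or all-zero u16 lanes, and the bitcast turns that mask into an s16 value that can be combined with signed coefficients. The SILK and sse2neon entries below use the same trick. Minimal form:

    #include <arm_neon.h>

    /* -1 in lanes where v == 0, 0 elsewhere. */
    static int16x4_t zero_mask_s16(int16x4_t v) {
      return vreinterpret_s16_u16(vceq_s16(v, vdup_n_s16(0)));
    }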
|
/external/libhevc/common/arm/ |
D | ihevc_quant_iquant_ssd_neon_intr.c |
    209  pq0 = vand_s16(q0, vreinterpret_s16_u16(psgn0));  in ihevc_quant_iquant_ssd_flat_scale_mat_neon()
    210  pq1 = vand_s16(q1, vreinterpret_s16_u16(psgn1));  in ihevc_quant_iquant_ssd_flat_scale_mat_neon()
    211  pq2 = vand_s16(q2, vreinterpret_s16_u16(psgn2));  in ihevc_quant_iquant_ssd_flat_scale_mat_neon()
    212  pq3 = vand_s16(q3, vreinterpret_s16_u16(psgn3));  in ihevc_quant_iquant_ssd_flat_scale_mat_neon()
    214  nq0 = vand_s16(q0, vreinterpret_s16_u16(nsgn0));  in ihevc_quant_iquant_ssd_flat_scale_mat_neon()
    215  nq1 = vand_s16(q1, vreinterpret_s16_u16(nsgn1));  in ihevc_quant_iquant_ssd_flat_scale_mat_neon()
    216  nq2 = vand_s16(q2, vreinterpret_s16_u16(nsgn2));  in ihevc_quant_iquant_ssd_flat_scale_mat_neon()
    217  nq3 = vand_s16(q3, vreinterpret_s16_u16(nsgn3));  in ihevc_quant_iquant_ssd_flat_scale_mat_neon()
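libhevc applies the mask idiom to sign-aware quantization: psgn0..3 and nsgn0..3 are comparison masks, so ANDing the quantized value with the reinterpreted mask keeps only lanes of the matching sign. A sketch, assuming the masks come from vcgt/vclt against zero:

    #include <arm_neon.h>

    /* Split coefficients into positive-only and negative-only lanes. */
    static void split_by_sign(int16x4_t q, int16x4_t *pos, int16x4_t *neg) {
      const uint16x4_t psgn = vcgt_s16(q, vdup_n_s16(0));  /* all-ones if q > 0 */
      const uint16x4_t nsgn = vclt_s16(q, vdup_n_s16(0));  /* all-ones if q < 0 */
      *pos = vand_s16(q, vreinterpret_s16_u16(psgn));
      *neg = vand_s16(q, vreinterpret_s16_u16(nsgn));
    }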
|
/external/libaom/libaom/av1/common/arm/ |
D | cfl_neon.c |
    331  case 4: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 4)); break;  in CFL_GET_SUBSAMPLE_FUNCTION()
    332  case 5: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 5)); break;  in CFL_GET_SUBSAMPLE_FUNCTION()
    333  case 6: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 6)); break;  in CFL_GET_SUBSAMPLE_FUNCTION()
    334  case 7: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 7)); break;  in CFL_GET_SUBSAMPLE_FUNCTION()
    335  case 8: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 8)); break;  in CFL_GET_SUBSAMPLE_FUNCTION()
    336  case 9: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 9)); break;  in CFL_GET_SUBSAMPLE_FUNCTION()
    338  avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 10));                in CFL_GET_SUBSAMPLE_FUNCTION()
    345  vst1_s16(dst, vsub_s16(vreinterpret_s16_u16(vld1_u16(src)), avg_16x4));      in CFL_GET_SUBSAMPLE_FUNCTION()
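The switch exists because vqrshrn_n_u32 takes its shift as a compile-time immediate: each case divides the 32-bit pixel sum by the block size (2^4 through 2^10) with rounding, narrows to u16, and bitcasts to s16 so line 345 can subtract the average from each luma sample. Sketch for a fixed 16-pixel case:

    #include <arm_neon.h>

    /* Rounded average of 16 samples, returned as s16 for vsub_s16. */
    static int16x4_t cfl_average16(uint32x4_t sum_32x4) {
      return vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 4));  /* /16, rounded */
    }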
|
D | convolve_neon.c |
    630  s0 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in av1_convolve_y_sr_neon()
    632  s1 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in av1_convolve_y_sr_neon()
    634  s2 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in av1_convolve_y_sr_neon()
    636  s3 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in av1_convolve_y_sr_neon()
    638  s4 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in av1_convolve_y_sr_neon()
    640  s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in av1_convolve_y_sr_neon()
    642  s6 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in av1_convolve_y_sr_neon()
    646  s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in av1_convolve_y_sr_neon()
    649  s8 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in av1_convolve_y_sr_neon()
    651  s9 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in av1_convolve_y_sr_neon()
    [all …]
|
D | jnt_convolve_neon.c |
    56   tmp0 = vsub_s16(vreinterpret_s16_u16(tmp_u0), sub_const_vec);  in compute_avg_4x1()
    177  tmp0 = vsub_s16(vreinterpret_s16_u16(tmp_u0), sub_const_vec);  in compute_avg_4x4()
    178  tmp1 = vsub_s16(vreinterpret_s16_u16(tmp_u1), sub_const_vec);  in compute_avg_4x4()
    179  tmp2 = vsub_s16(vreinterpret_s16_u16(tmp_u2), sub_const_vec);  in compute_avg_4x4()
    180  tmp3 = vsub_s16(vreinterpret_s16_u16(tmp_u3), sub_const_vec);  in compute_avg_4x4()
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | depthwiseconv_uint8.h |
    205  vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
    250  vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
    280  vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
    311  vreinterpret_s16_u16(vget_low_u16(vmovl_u8(filter_u8)));
    351  vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
    377  vreinterpret_s16_u16(vget_low_u16(vmovl_u8(filter_u8)));
    447  vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
    466  vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
    491  vreinterpret_s16_u16(vget_low_u16(vmovl_u8(filter_u8)));
    551  vreinterpret_s16_u16(vget_low_u16(vmovl_u8(filter_u8)));
    [all …]
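TFLite's quantized depthwise convolution widens uint8 activations and filter weights to int16 the same way; since values are at most 255 there is headroom to fold in the quantization zero points before the multiply-accumulate. A hedged sketch of that step (not the TFLite code verbatim):

    #include <arm_neon.h>

    /* Widen 8 quantized bytes to int16 and apply an offset (typically the
     * negated zero point). */
    static int16x8_t widen_and_offset(const uint8_t *p, int16_t offset) {
      const int16x8_t v = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(p)));
      return vaddq_s16(v, vdupq_n_s16(offset));
    }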
|
/external/XNNPACK/src/qu8-avgpool/ |
D | 9x-minmax-neon-c8.c |
    138  int32x4_t vacc_lo = vaddw_s16(vbias, vreinterpret_s16_u16(vget_low_u16(vsum)));   in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
    139  int32x4_t vacc_hi = vaddw_s16(vbias, vreinterpret_s16_u16(vget_high_u16(vsum)));  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
    211  int32x4_t vacc_lo = vaddw_s16(vbias, vreinterpret_s16_u16(vget_low_u16(vsum)));   in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
    212  int32x4_t vacc_hi = vaddw_s16(vbias, vreinterpret_s16_u16(vget_high_u16(vsum)));  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
|
D | 9p8x-minmax-neon-c8.c |
    114  const int32x4_t vacc_lo = vaddw_s16(vbias, vreinterpret_s16_u16(vget_low_u16(vsum)));   in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
    115  const int32x4_t vacc_hi = vaddw_s16(vbias, vreinterpret_s16_u16(vget_high_u16(vsum)));  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
    187  vacc_lo = vaddw_s16(vacc_lo, vreinterpret_s16_u16(vget_low_u16(vsum)));                 in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
    188  vacc_hi = vaddw_s16(vacc_hi, vreinterpret_s16_u16(vget_high_u16(vsum)));                in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
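In both avgpool kernels vsum is a u16 sum of at most nine uint8 inputs (at most 9 * 255 = 2295, well inside int16 range), so each half can be bitcast and widened onto the int32 bias accumulator with vaddw_s16. The core of it:

    #include <arm_neon.h>

    /* Fold a u16 pooling sum into two int32 accumulators seeded with the bias. */
    static void accumulate_pool(uint16x8_t vsum, int32x4_t *lo, int32x4_t *hi) {
      *lo = vaddw_s16(*lo, vreinterpret_s16_u16(vget_low_u16(vsum)));
      *hi = vaddw_s16(*hi, vreinterpret_s16_u16(vget_high_u16(vsum)));
    }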
|
/external/libgav1/libgav1/src/dsp/arm/ |
D | mask_blend_neon.cc |
    43  const int16x4_t mask_val0 = vreinterpret_s16_u16(vpaddl_u8(vld1_u8(mask)));  in GetMask4x2()
    44  const int16x4_t mask_val1 = vreinterpret_s16_u16(                            in GetMask4x2()
    49  vreinterpret_s16_u16(vpaddl_u8(vld1_u8(mask + mask_stride)));                in GetMask4x2()
    51  vreinterpret_s16_u16(vpaddl_u8(vld1_u8(mask + mask_stride * 3)));            in GetMask4x2()
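GetMask4x2 downsamples a blend mask horizontally: vpaddl_u8 sums adjacent byte pairs into u16 lanes (each at most 510), and the bitcast makes the result signed for the blend arithmetic. One row of it:

    #include <arm_neon.h>

    /* 2:1 horizontal downsample of 8 mask bytes into 4 signed lanes. */
    static int16x4_t downsample_mask_row(const uint8_t *mask) {
      return vreinterpret_s16_u16(vpaddl_u8(vld1_u8(mask)));
    }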
|
D | intrapred_cfl_neon.cc |
    93   vst1_s16(luma[y], vreinterpret_s16_u16(sum_row));                         in CflSubsampler420_NEON()
    197  vst1_s16(luma[y], vreinterpret_s16_u16(vget_low_u16(row_shifted)));       in CflSubsampler444_NEON()
    198  vst1_s16(luma[y + 1], vreinterpret_s16_u16(vget_high_u16(row_shifted)));  in CflSubsampler444_NEON()
    206  vst1_s16(luma[y], vreinterpret_s16_u16(vget_low_u16(row_shifted)));       in CflSubsampler444_NEON()
    207  vst1_s16(luma[y + 1], vreinterpret_s16_u16(vget_high_u16(row_shifted)));  in CflSubsampler444_NEON()
    490  vst1_s16(luma_ptr, vreinterpret_s16_u16(vget_low_u16(result_shifted)));   in StoreLumaResults4_420()
    492  vreinterpret_s16_u16(vget_high_u16(result_shifted)));                     in StoreLumaResults4_420()
    823  vst1_s16(luma_ptr, vreinterpret_s16_u16(final_fill));                     in CflSubsampler420_4xH_NEON()
|
D | inverse_transform_10bit_neon.cc |
    1557  b.val[0] = vaddw_s16(a.val[0], vreinterpret_s16_u16(frame_data.val[0]));  in IdentityColumnStoreToFrame()
    1558  b.val[1] = vaddw_s16(a.val[1], vreinterpret_s16_u16(frame_data.val[1]));  in IdentityColumnStoreToFrame()
    1596  b.val[0] = vaddw_s16(a.val[0], vreinterpret_s16_u16(frame_data.val[0]));  in IdentityColumnStoreToFrame()
    1597  b.val[1] = vaddw_s16(a.val[1], vreinterpret_s16_u16(frame_data.val[1]));  in IdentityColumnStoreToFrame()
    1625  const int32x4_t b = vaddw_s16(a, vreinterpret_s16_u16(frame_data));       in Identity4RowColumnStoreToFrame()
    1653  b.val[0] = vaddw_s16(a.val[0], vreinterpret_s16_u16(frame_data.val[0]));  in Identity4RowColumnStoreToFrame()
    1654  b.val[1] = vaddw_s16(a.val[1], vreinterpret_s16_u16(frame_data.val[1]));  in Identity4RowColumnStoreToFrame()
|
/external/libvpx/libvpx/vp8/encoder/arm/neon/ |
D | shortfdct_neon.c |
    102  d1s16 = vsub_s16(d1s16, vreinterpret_s16_u16(d4u16));   in vp8_short_fdct4x4_neon()
    248  d1s16 = vsub_s16(d1s16, vreinterpret_s16_u16(d28u16));  in vp8_short_fdct8x4_neon()
    249  d5s16 = vsub_s16(d5s16, vreinterpret_s16_u16(d29u16));  in vp8_short_fdct8x4_neon()
|
D | vp8_shortwalsh4x4_neon.c |
    69  d0s16 = vsub_s16(d0s16, vreinterpret_s16_u16(d16u16));  in vp8_short_walsh4x4_neon()
|
/external/libxaac/decoder/armv7/ |
D | ixheaacd_fft32x32_ld.s |
    706  VSHR.U16 d10, d10, #1  @a_data1.val[0]= vreinterpret_s16_u16(vshr_n_u16(vreinterpret_…
    708  VSHR.U16 d12, d12, #1  @a_data1.val[2]= vreinterpret_s16_u16(vshr_n_u16(vreinterpret_…
    716  VSHR.U16 d14, d14, #1  @a_data2.val[0]=vreinterpret_s16_u16(vshr_n_u16(vreinterpret_u…
    717  VSHR.U16 d16, d16, #1  @a_data2.val[2]=vreinterpret_s16_u16(vshr_n_u16(vreinterpret_u…
    721  VSHR.U16 d18, d18, #1  @a_data3.val[0]= vreinterpret_s16_u16(vshr_n_u16(vreinterpret_…
    722  VSHR.U16 d20, d20, #1  @a_data3.val[2]= vreinterpret_s16_u16(vshr_n_u16(vreinterpret_…
|
/external/libhevc/encoder/arm/ |
D | ihevce_common_utils_neon.c |
    255  reg0[0] = vmovl_s16(vreinterpret_s16_u16(vget_low_u16(a0)));   in ihevce_wt_avg_2d_4xn_neon()
    256  reg0[1] = vmovl_s16(vreinterpret_s16_u16(vget_high_u16(a0)));  in ihevce_wt_avg_2d_4xn_neon()
    257  reg0[2] = vmovl_s16(vreinterpret_s16_u16(vget_low_u16(a1)));   in ihevce_wt_avg_2d_4xn_neon()
    258  reg0[3] = vmovl_s16(vreinterpret_s16_u16(vget_high_u16(a1)));  in ihevce_wt_avg_2d_4xn_neon()
    260  reg1[0] = vmovl_s16(vreinterpret_s16_u16(vget_low_u16(a2)));   in ihevce_wt_avg_2d_4xn_neon()
    261  reg1[1] = vmovl_s16(vreinterpret_s16_u16(vget_high_u16(a2)));  in ihevce_wt_avg_2d_4xn_neon()
    262  reg1[2] = vmovl_s16(vreinterpret_s16_u16(vget_low_u16(a3)));   in ihevce_wt_avg_2d_4xn_neon()
    263  reg1[3] = vmovl_s16(vreinterpret_s16_u16(vget_high_u16(a3)));  in ihevce_wt_avg_2d_4xn_neon()
|
/external/libgav1/libgav1/src/utils/ |
D | entropy_decoder.cc |
    182  const int16x4_t diff = vreinterpret_s16_u16(vsub_u16(a, cdf_vec));  in UpdateCdf5()
    938  vreinterpret_s16_u16(vsub_u16(cdf_max_probability, cdf_vec));       in ReadSymbol3Or4()
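The entropy decoder needs a signed delta between unsigned 16-bit probabilities: vsub_u16 wraps modulo 2^16 and the bitcast recovers the signed difference for the CDF adaptation step. A simplified sketch of such an update (the real libgav1 code also handles rounding and per-symbol masking):

    #include <arm_neon.h>

    /* Move cdf toward target by (target - cdf) >> rate, elementwise. */
    static uint16x4_t update_cdf4(uint16x4_t cdf, uint16x4_t target, int rate) {
      const int16x4_t diff = vreinterpret_s16_u16(vsub_u16(target, cdf));
      const int16x4_t step = vshl_s16(diff, vdup_n_s16((int16_t)-rate));  /* arith >> rate */
      return vadd_u16(cdf, vreinterpret_u16_s16(step));
    }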
|
/external/libopus/silk/arm/ |
D | NSQ_del_dec_neon_intr.c |
    717  q1_Q0_s16x4 = vreinterpret_s16_u16( vclt_s16( q1_Q10_s16x4, vdup_n_s16( 0 ) ) );  in silk_noise_shape_quantizer_del_dec_neon()
|
/external/pffft/ |
D | sse2neon.h |
    2636  int16x4_t zeroMask = vreinterpret_s16_u16(vceqz_s16(b));                in _mm_sign_pi16()
    2638  int16x4_t zeroMask = vreinterpret_s16_u16(vceq_s16(b, vdup_n_s16(0)));  in _mm_sign_pi16()
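sse2neon keeps two variants because vceqz_s16 is an AArch64-only intrinsic; 32-bit ARM builds fall back to vceq_s16 against a zero vector. Both produce the all-ones/all-zero mask that the _mm_sign_pi16 emulation then uses in signed form:

    #include <arm_neon.h>

    static int16x4_t zero_mask(int16x4_t b) {
    #if defined(__aarch64__)
      return vreinterpret_s16_u16(vceqz_s16(b));               /* A64 only */
    #else
      return vreinterpret_s16_u16(vceq_s16(b, vdup_n_s16(0))); /* A32 fallback */
    #endif
    }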
|
/external/llvm-project/clang/test/CodeGen/ |
D | aarch64-neon-intrinsics.c |
    15222  return vreinterpret_s16_u16(a);  in test_vreinterpret_s16_u16()
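The clang CodeGen test pins down why every use above is free: vreinterpret_s16_u16 is a pure bitcast between same-width vector types and compiles to no instructions. The tested function in isolation:

    #include <arm_neon.h>

    /* Same 64 bits, new lane type; lowers to a no-op. */
    int16x4_t test_vreinterpret_s16_u16(uint16x4_t a) {
      return vreinterpret_s16_u16(a);
    }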
|