
Searched refs:vld1_u16 (Results 1 – 25 of 26) sorted by relevance


/external/libvpx/libvpx/vpx_dsp/arm/
highbd_vpx_convolve8_neon.c
404 d01 = vcombine_u16(vld1_u16(dst + 0 * dst_stride), in vpx_highbd_convolve8_avg_horiz_neon()
405 vld1_u16(dst + 2 * dst_stride)); in vpx_highbd_convolve8_avg_horiz_neon()
406 d23 = vcombine_u16(vld1_u16(dst + 1 * dst_stride), in vpx_highbd_convolve8_avg_horiz_neon()
407 vld1_u16(dst + 3 * dst_stride)); in vpx_highbd_convolve8_avg_horiz_neon()
466 d0 = vcombine_u16(vld1_u16(dst + 0 * dst_stride), in vpx_highbd_convolve8_avg_horiz_neon()
467 vld1_u16(dst + 4 * dst_stride)); in vpx_highbd_convolve8_avg_horiz_neon()
468 d1 = vcombine_u16(vld1_u16(dst + 1 * dst_stride), in vpx_highbd_convolve8_avg_horiz_neon()
469 vld1_u16(dst + 5 * dst_stride)); in vpx_highbd_convolve8_avg_horiz_neon()
470 d2 = vcombine_u16(vld1_u16(dst + 2 * dst_stride), in vpx_highbd_convolve8_avg_horiz_neon()
471 vld1_u16(dst + 6 * dst_stride)); in vpx_highbd_convolve8_avg_horiz_neon()
[all …]
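
The matches above from vpx_highbd_convolve8_avg_horiz_neon() pair two 4-pixel rows into one 128-bit register so the averaging step can run on eight 16-bit lanes at once. A minimal sketch of that load-and-combine step, with hypothetical pointer and stride names rather than libvpx's actual arguments:

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Pack two rows of four uint16_t pixels into one uint16x8_t, as in the
     * vcombine_u16(vld1_u16(...), vld1_u16(...)) pairs listed above.
     * 'dst' and 'dst_stride' are illustrative names. */
    static inline uint16x8_t load_two_rows_u16x4(const uint16_t *dst,
                                                 ptrdiff_t dst_stride) {
      return vcombine_u16(vld1_u16(dst), vld1_u16(dst + dst_stride));
    }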
highbd_vpx_convolve_avg_neon.c
32 s0 = vld1_u16(src); in vpx_highbd_convolve_avg_neon()
33 d0 = vld1_u16(dst); in vpx_highbd_convolve_avg_neon()
35 s1 = vld1_u16(src); in vpx_highbd_convolve_avg_neon()
36 d1 = vld1_u16(dst + dst_stride); in vpx_highbd_convolve_avg_neon()
highbd_vpx_convolve_copy_neon.c
30 vst1_u16(dst, vld1_u16(src)); in vpx_highbd_convolve_copy_neon()
33 vst1_u16(dst, vld1_u16(src)); in vpx_highbd_convolve_copy_neon()
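
The avg and copy kernels above both stream 4-pixel rows through vld1_u16; the avg variant then folds the new pixels into the existing destination with a rounding average. A sketch of that inner loop under assumed names (the real functions also handle wider blocks, and the use of vrhadd_u16 here is my reading of the averaging step, not a verbatim copy):

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Rounded average of source and destination rows, four uint16_t pixels
     * at a time, in the spirit of vpx_highbd_convolve_avg_neon(). */
    static void highbd_avg_4w_sketch(const uint16_t *src, ptrdiff_t src_stride,
                                     uint16_t *dst, ptrdiff_t dst_stride,
                                     int h) {
      do {
        const uint16x4_t s = vld1_u16(src);
        const uint16x4_t d = vld1_u16(dst);
        vst1_u16(dst, vrhadd_u16(s, d)); /* (s + d + 1) >> 1 per lane */
        src += src_stride;
        dst += dst_stride;
      } while (--h != 0);
    }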
highbd_idct4x4_add_neon.c
23 const uint16x4_t a0 = vld1_u16(*dest); in highbd_idct4x4_1_add_kernel2()
24 const uint16x4_t a1 = vld1_u16(*dest + stride); in highbd_idct4x4_1_add_kernel2()
highbd_intrapred_neon.c
21 const uint16x4_t ref_u16 = vld1_u16(ref); in dc_sum_4()
38 const uint16x4_t a = vld1_u16(above); in vpx_highbd_dc_predictor_4x4_neon()
39 const uint16x4_t l = vld1_u16(left); in vpx_highbd_dc_predictor_4x4_neon()
460 const uint16x4_t L0123 = vld1_u16(left); in vpx_highbd_d135_predictor_4x4_neon()
702 const uint16x4_t row = vld1_u16(above); in vpx_highbd_v_predictor_4x4_neon()
760 const uint16x4_t left_u16 = vld1_u16(left); in vpx_highbd_h_predictor_4x4_neon()
highbd_idct_neon.h
24 const uint16x4_t a0 = vld1_u16(*dest); in highbd_idct4x4_1_add_kernel1()
25 const uint16x4_t a1 = vld1_u16(*dest + stride); in highbd_idct4x4_1_add_kernel1()
/external/libaom/libaom/av1/common/arm/
reconinter_neon.c
71 tmp0 = vcombine_u16(vld1_u16(src0_1 + (0 * src0_stride)), in av1_build_compound_diffwtd_mask_d16_neon()
72 vld1_u16(src0_1 + (1 * src0_stride))); in av1_build_compound_diffwtd_mask_d16_neon()
73 tmp1 = vcombine_u16(vld1_u16(src1_1 + (0 * src1_stride)), in av1_build_compound_diffwtd_mask_d16_neon()
74 vld1_u16(src1_1 + (1 * src1_stride))); in av1_build_compound_diffwtd_mask_d16_neon()
cfl_neon.c
150 const uint16x4_t top = vld1_u16(input); in cfl_luma_subsampling_420_hbd_neon()
151 const uint16x4_t bot = vld1_u16(input + input_stride); in cfl_luma_subsampling_420_hbd_neon()
196 const uint16x4_t top = vld1_u16(input); in cfl_luma_subsampling_422_hbd_neon()
230 const uint16x4_t top = vld1_u16(input); in cfl_luma_subsampling_444_hbd_neon()
277 vadd_u16(vld1_u16(sum_buf), vld1_u16(sum_buf + CFL_BUF_LINE)); in CFL_GET_SUBSAMPLE_FUNCTION()
278 const uint16x4_t a1 = vadd_u16(vld1_u16(sum_buf + 2 * CFL_BUF_LINE), in CFL_GET_SUBSAMPLE_FUNCTION()
279 vld1_u16(sum_buf + 3 * CFL_BUF_LINE)); in CFL_GET_SUBSAMPLE_FUNCTION()
345 vst1_s16(dst, vsub_s16(vreinterpret_s16_u16(vld1_u16(src)), avg_16x4)); in CFL_GET_SUBSAMPLE_FUNCTION()
mem_neon.h
83 *s0 = vld1_u16(s); in load_u16_4x4()
85 *s1 = vld1_u16(s); in load_u16_4x4()
87 *s2 = vld1_u16(s); in load_u16_4x4()
89 *s3 = vld1_u16(s); in load_u16_4x4()
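
mem_neon.h wraps the same idiom in a small helper that fills four registers from four consecutive rows. A sketch of such a loader, assuming a signature like the one below (the exact helper lives in libaom's mem_neon.h; only every other source line is shown above, the pointer advances by the row pitch in between):

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Load a 4x4 block of uint16_t into four D registers, one row per
     * vld1_u16. */
    static inline void load_u16_4x4_sketch(const uint16_t *s, ptrdiff_t pitch,
                                           uint16x4_t *s0, uint16x4_t *s1,
                                           uint16x4_t *s2, uint16x4_t *s3) {
      *s0 = vld1_u16(s);
      s += pitch;
      *s1 = vld1_u16(s);
      s += pitch;
      *s2 = vld1_u16(s);
      s += pitch;
      *s3 = vld1_u16(s);
    }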
warp_plane_neon.c
640 uint16x4_t tmp16_lo = vld1_u16(p); in av1_warp_affine_neon()
671 uint16x4_t tmp16_hi = vld1_u16(p4); in av1_warp_affine_neon()
jnt_convolve_neon.c
686 res4 = vld1_u16(d); in dist_wtd_convolve_2d_vert_neon()
1097 res4 = vld1_u16(d); in av1_dist_wtd_convolve_x_neon()
1530 res4 = vld1_u16(d); in av1_dist_wtd_convolve_y_neon()
/external/libgav1/libgav1/src/dsp/arm/
cdef_neon.cc
442 output[0] = vcombine_u16(vld1_u16(src + y_0 * stride + x_0), in LoadDirection4()
443 vld1_u16(src + y_0 * stride + stride + x_0)); in LoadDirection4()
444 output[1] = vcombine_u16(vld1_u16(src - y_0 * stride - x_0), in LoadDirection4()
445 vld1_u16(src - y_0 * stride + stride - x_0)); in LoadDirection4()
446 output[2] = vcombine_u16(vld1_u16(src + y_1 * stride + x_1), in LoadDirection4()
447 vld1_u16(src + y_1 * stride + stride + x_1)); in LoadDirection4()
448 output[3] = vcombine_u16(vld1_u16(src - y_1 * stride - x_1), in LoadDirection4()
449 vld1_u16(src - y_1 * stride + stride - x_1)); in LoadDirection4()
515 pixel = vcombine_u16(vld1_u16(src), vld1_u16(src + src_stride)); in CdefFilter_NEON()
intrapred_neon.cc
777 const uint16x4_t val_0 = vld1_u16(ref_0_u16); in DcSum_NEON()
780 const uint16x4_t val_1 = vld1_u16(ref_1_u16); in DcSum_NEON()
798 const uint16x4_t val_1 = vld1_u16(ref_1_u16); in DcSum_NEON()
818 const uint16x4_t val_1 = vld1_u16(ref_1_u16); in DcSum_NEON()
/external/exoplayer/tree/extensions/vp9/src/main/jni/
vpx_jni.cc
122 uint16x4_t values = vshl_n_u16(vld1_u16(src), 6); in convert_16_to_8_neon()
135 values = vshl_n_u16(vld1_u16(src), 6); in convert_16_to_8_neon()
150 values = vshl_n_u16(vld1_u16(src), 6); in convert_16_to_8_neon()
154 values = vshl_n_u16(vld1_u16(src), 6); in convert_16_to_8_neon()
201 uint16x4_t uVal1 = vqadd_u16(vshl_n_u16(vld1_u16(srcU), 6), in convert_16_to_8_neon()
205 uint16x4_t vVal1 = vqadd_u16(vshl_n_u16(vld1_u16(srcV), 6), in convert_16_to_8_neon()
209 uint16x4_t uVal2 = vqadd_u16(vshl_n_u16(vld1_u16(srcU), 6), in convert_16_to_8_neon()
213 uint16x4_t vVal2 = vqadd_u16(vshl_n_u16(vld1_u16(srcV), 6), in convert_16_to_8_neon()
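
In convert_16_to_8_neon() the 10-bit samples are first shifted left by 6 so their significant bits sit at the top of each 16-bit lane; keeping the upper byte then yields an 8-bit pixel. A hedged sketch of that luma path (function and parameter names are invented, and the loop assumes the width is a multiple of 8):

    #include <arm_neon.h>
    #include <stdint.h>

    static void convert_10bit_to_8bit_sketch(const uint16_t *src, uint8_t *dst,
                                             int width) {
      for (int x = 0; x < width; x += 8) {
        const uint16x4_t lo = vshl_n_u16(vld1_u16(src + x), 6);     /* bits to top */
        const uint16x4_t hi = vshl_n_u16(vld1_u16(src + x + 4), 6);
        /* Narrowing right shift keeps the upper 8 bits of every lane. */
        vst1_u8(dst + x, vshrn_n_u16(vcombine_u16(lo, hi), 8));
      }
    }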
/external/libhevc/encoder/arm/
ihevce_coarse_layer_sad_neon.c
545 uint16x4_t curr = vld1_u16((U16 *)pi2_sads_4x4_current + sad_pos); in hme_combine_4x4_sads_and_compute_cost_high_speed_neon()
546 uint16x4_t south = vld1_u16((U16 *)pi2_sads_4x4_south + sad_pos); in hme_combine_4x4_sads_and_compute_cost_high_speed_neon()
547 uint16x4_t east = vld1_u16((U16 *)pi2_sads_4x4_east + sad_pos); in hme_combine_4x4_sads_and_compute_cost_high_speed_neon()
552 vld1_u16((U16 *)&gi2_mvx_range[mvx + MAX_MVX_SUPPORTED_IN_COARSE_LAYER][0]); in hme_combine_4x4_sads_and_compute_cost_high_speed_neon()
706 uint16x4_t curr = vld1_u16((U16 *)pi2_sads_4x4_current + sad_pos); in hme_combine_4x4_sads_and_compute_cost_high_quality_neon()
707 uint16x4_t south = vld1_u16((U16 *)pi2_sads_4x4_south + sad_pos); in hme_combine_4x4_sads_and_compute_cost_high_quality_neon()
708 uint16x4_t east = vld1_u16((U16 *)pi2_sads_4x4_east + sad_pos); in hme_combine_4x4_sads_and_compute_cost_high_quality_neon()
712 uint16x4_t mv_wt = vld1_u16( in hme_combine_4x4_sads_and_compute_cost_high_quality_neon()
/external/libaom/libaom/aom_dsp/arm/
sse_neon.c
264 d0 = vld1_u16(a); // load 4 data in aom_highbd_sse_neon()
266 d1 = vld1_u16(a); in aom_highbd_sse_neon()
269 d2 = vld1_u16(b); in aom_highbd_sse_neon()
271 d3 = vld1_u16(b); in aom_highbd_sse_neon()
intrapred_neon.c
562 sum_q = vaddl_u16(vld1_u16(above), vld1_u16(left)); in highbd_dc_predictor()
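
highbd_dc_predictor() sums the 'above' and 'left' edges in one widening add before averaging; a minimal sketch for the 4x4 case with the reduction and rounding spelled out (the output loop and all names are assumptions for illustration):

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    static void highbd_dc_4x4_sketch(uint16_t *dst, ptrdiff_t stride,
                                     const uint16_t *above,
                                     const uint16_t *left) {
      /* Widening add of the 4 above + 4 left samples into 32-bit lanes. */
      const uint32x4_t sum_q = vaddl_u16(vld1_u16(above), vld1_u16(left));
      const uint32x2_t sum_d =
          vadd_u32(vget_low_u32(sum_q), vget_high_u32(sum_q));
      const uint32_t sum = vget_lane_u32(vpadd_u32(sum_d, sum_d), 0);
      /* Eight contributing samples: round, divide by 8, broadcast. */
      const uint16x4_t dc = vdup_n_u16((uint16_t)((sum + 4) >> 3));
      for (int r = 0; r < 4; ++r, dst += stride) vst1_u16(dst, dc);
    }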
/external/exoplayer/tree/extensions/av1/src/main/jni/
gav1_jni.cc
446 uint16x4_t values = vshl_n_u16(vld1_u16(source_16), 6); in Convert10BitFrameTo8BitDataBufferNeon()
459 values = vshl_n_u16(vld1_u16(source_16), 6); in Convert10BitFrameTo8BitDataBufferNeon()
476 values = vshl_n_u16(vld1_u16(source_16), 6); in Convert10BitFrameTo8BitDataBufferNeon()
482 values = vshl_n_u16(vld1_u16(source_16), 6); in Convert10BitFrameTo8BitDataBufferNeon()
/external/libgav1/libgav1/src/utils/
entropy_decoder.cc
170 uint16x4_t cdf_vec = vld1_u16(cdf); in UpdateCdf5()
782 uint16x4_t cdf_vec = vld1_u16(cdf); in ReadSymbol4()
854 uint16x4_t cdf_vec = vld1_u16(cdf); in ReadSymbol4()
/external/FP16/bench/
from-alt-array.cc
160 (float16x4_t) vld1_u16(&input[i]))); in hardware_vcvt_f32_f16()
from-ieee-array.cc
228 (float16x4_t) vld1_u16(&input[i]))); in hardware_vcvt_f32_f16()
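
Both FP16 benchmarks feed vld1_u16 results straight into the hardware half-to-float converter: the 16-bit values are IEEE half-precision bit patterns, so the register is reinterpreted and widened in place. A sketch of that loop, requiring NEON FP16 conversion support (e.g. -mfpu=neon-fp16 or any AArch64 target); names are illustrative and n is assumed to be a multiple of 4:

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    static void halfbits_to_float_sketch(const uint16_t *input, float *output,
                                         size_t n) {
      for (size_t i = 0; i < n; i += 4) {
        /* Same reinterpreting cast the benchmark uses: bits, not values. */
        const float16x4_t h = (float16x4_t)vld1_u16(&input[i]);
        vst1q_f32(&output[i], vcvt_f32_f16(h));
      }
    }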
/external/skia/include/private/
SkNx_neon.h
329 AI static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); } in Load()
/external/skqp/include/private/
SkNx_neon.h
329 AI static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); } in Load()
/external/neon_2_sse/
NEON_2_SSE.h
1235 _NEON2SSESTORAGE uint16x4_t vld1_u16(__transfersize(4) uint16_t const * ptr); // VLD1.16 {d0}, [r0]
9284 _NEON2SSESTORAGE uint16x4_t vld1_u16(__transfersize(4) uint16_t const * ptr); // VLD1.16 {d0}, [r0]
9285 #define vld1_u16 vld1_u8 macro
9298 #define vld1_s16 vld1_u16
9322 #define vld1_p16 vld1_u16
/external/clang/test/CodeGen/
aarch64-neon-intrinsics.c
10016 return vld1_u16(a); in test_vld1_u16()
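
The codegen test reduces the intrinsic to its contract: vld1_u16 reads exactly four consecutive uint16_t values (64 bits) from the pointer into a uint16x4_t. A trivially small equivalent, with an illustrative array form:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Load four consecutive 16-bit values into a D register. */
    uint16x4_t load_four_u16(const uint16_t a[4]) { return vld1_u16(a); }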
