
Searched refs:vcombine_u16 (Results 1 – 25 of 58) sorted by relevance
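
Background for the results below: vcombine_u16 is the Arm NEON intrinsic that concatenates two 64-bit uint16x4_t vectors into one 128-bit uint16x8_t, with the first operand becoming the low half. A minimal standalone sketch of its semantics (illustrative, not taken from any result below):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        const uint16_t lo_vals[4] = { 0, 1, 2, 3 };
        const uint16_t hi_vals[4] = { 4, 5, 6, 7 };
        const uint16x4_t lo = vld1_u16(lo_vals);    /* becomes lanes 0..3 */
        const uint16x4_t hi = vld1_u16(hi_vals);    /* becomes lanes 4..7 */
        const uint16x8_t v  = vcombine_u16(lo, hi); /* {0,1,2,3,4,5,6,7} */

        uint16_t out[8];
        vst1q_u16(out, v);
        for (int i = 0; i < 8; ++i) printf("%u ", (unsigned)out[i]);
        printf("\n");
        return 0;
    }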


/external/libjpeg-turbo/simd/arm/aarch64/
jccolext-neon.c:134 uint16x8_t y_l = vcombine_u16(vrshrn_n_u32(y_ll, 16), in jsimd_rgb_ycc_convert_neon()
jccolext-neon.c:136 uint16x8_t y_h = vcombine_u16(vrshrn_n_u32(y_hl, 16), in jsimd_rgb_ycc_convert_neon()
jccolext-neon.c:139 uint16x8_t cb_l = vcombine_u16(vshrn_n_u32(cb_ll, 16), in jsimd_rgb_ycc_convert_neon()
jccolext-neon.c:141 uint16x8_t cb_h = vcombine_u16(vshrn_n_u32(cb_hl, 16), in jsimd_rgb_ycc_convert_neon()
jccolext-neon.c:144 uint16x8_t cr_l = vcombine_u16(vshrn_n_u32(cr_ll, 16), in jsimd_rgb_ycc_convert_neon()
jccolext-neon.c:146 uint16x8_t cr_h = vcombine_u16(vshrn_n_u32(cr_hl, 16), in jsimd_rgb_ycc_convert_neon()
jccolext-neon.c:233 uint16x8_t y_l = vcombine_u16(vrshrn_n_u32(y_ll, 16), in jsimd_rgb_ycc_convert_neon()
jccolext-neon.c:235 uint16x8_t y_h = vcombine_u16(vrshrn_n_u32(y_hl, 16), in jsimd_rgb_ycc_convert_neon()
jccolext-neon.c:238 uint16x8_t cb_l = vcombine_u16(vshrn_n_u32(cb_ll, 16), in jsimd_rgb_ycc_convert_neon()
jccolext-neon.c:240 uint16x8_t cb_h = vcombine_u16(vshrn_n_u32(cb_hl, 16), in jsimd_rgb_ycc_convert_neon()
[all …]
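
The hits above all follow the same packing idiom: two uint32x4_t fixed-point accumulators (16 fractional bits here) are narrowed to 16 bits and glued into one uint16x8_t. A minimal sketch of that idiom, with illustrative names rather than the actual libjpeg-turbo variables:

    #include <arm_neon.h>

    /* Round, shift right by 16, and narrow each 32-bit accumulator to
     * 16 bits, then pack the two 64-bit halves into one q register. */
    static inline uint16x8_t pack_q16_fixed_point(uint32x4_t acc_lo,
                                                  uint32x4_t acc_hi) {
        return vcombine_u16(vrshrn_n_u32(acc_lo, 16),
                            vrshrn_n_u32(acc_hi, 16));
    }

Note in the hits that the Y channel uses the rounding narrow (vrshrn_n_u32) while Cb/Cr use the truncating vshrn_n_u32.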
/external/libaom/libaom/av1/common/arm/
transpose_neon.h:243 *o0 = vcombine_u16(vreinterpret_u16_u32(c0.val[0]), in transpose_u16_4x8()
transpose_neon.h:245 *o1 = vcombine_u16(vreinterpret_u16_u32(c1.val[0]), in transpose_u16_4x8()
transpose_neon.h:247 *o2 = vcombine_u16(vreinterpret_u16_u32(c0.val[1]), in transpose_u16_4x8()
transpose_neon.h:249 *o3 = vcombine_u16(vreinterpret_u16_u32(c1.val[1]), in transpose_u16_4x8()
transpose_neon.h:365 *a0 = vcombine_u16(vget_low_u16(vreinterpretq_u16_u32(c0.val[0])), in transpose_u16_8x8()
transpose_neon.h:367 *a4 = vcombine_u16(vget_high_u16(vreinterpretq_u16_u32(c0.val[0])), in transpose_u16_8x8()
transpose_neon.h:370 *a2 = vcombine_u16(vget_low_u16(vreinterpretq_u16_u32(c0.val[1])), in transpose_u16_8x8()
transpose_neon.h:372 *a6 = vcombine_u16(vget_high_u16(vreinterpretq_u16_u32(c0.val[1])), in transpose_u16_8x8()
transpose_neon.h:375 *a1 = vcombine_u16(vget_low_u16(vreinterpretq_u16_u32(c1.val[0])), in transpose_u16_8x8()
transpose_neon.h:377 *a5 = vcombine_u16(vget_high_u16(vreinterpretq_u16_u32(c1.val[0])), in transpose_u16_8x8()
[all …]
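
The transpose hits use vcombine_u16 to glue transposed 64-bit halves back into 128-bit rows after vtrn steps. A hedged sketch of the idiom for a 4x4 block of uint16 (illustrative; not the exact libaom routine):

    #include <arm_neon.h>

    /* Transpose a 4x4 block of uint16 held in four d registers, then
     * pack the transposed rows pairwise into two q registers, as the
     * transpose_u16_4x8 hits above do with their vtrn results. */
    static inline void transpose_u16_4x4_packed(uint16x4_t a0, uint16x4_t a1,
                                                uint16x4_t a2, uint16x4_t a3,
                                                uint16x8_t *o01,
                                                uint16x8_t *o23) {
        /* Interleave 16-bit lanes within each row pair. */
        const uint16x4x2_t b01 = vtrn_u16(a0, a1);
        const uint16x4x2_t b23 = vtrn_u16(a2, a3);
        /* Interleave 32-bit lanes across the pair results. */
        const uint32x2x2_t c0 = vtrn_u32(vreinterpret_u32_u16(b01.val[0]),
                                         vreinterpret_u32_u16(b23.val[0]));
        const uint32x2x2_t c1 = vtrn_u32(vreinterpret_u32_u16(b01.val[1]),
                                         vreinterpret_u32_u16(b23.val[1]));
        /* Transposed rows are c0.val[0], c1.val[0], c0.val[1], c1.val[1]. */
        *o01 = vcombine_u16(vreinterpret_u16_u32(c0.val[0]),
                            vreinterpret_u16_u32(c1.val[0]));
        *o23 = vcombine_u16(vreinterpret_u16_u32(c0.val[1]),
                            vreinterpret_u16_u32(c1.val[1]));
    }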
reconinter_neon.c:71 tmp0 = vcombine_u16(vld1_u16(src0_1 + (0 * src0_stride)), in av1_build_compound_diffwtd_mask_d16_neon()
reconinter_neon.c:73 tmp1 = vcombine_u16(vld1_u16(src1_1 + (0 * src1_stride)), in av1_build_compound_diffwtd_mask_d16_neon()
blend_a64_vmask_neon.c:87 const uint8x8_t m = vmovn_u16(vcombine_u16(m1, m2)); in aom_blend_a64_vmask_neon()
blend_a64_vmask_neon.c:91 vmovn_u16(vcombine_u16(max_minus_m1, max_minus_m2)); in aom_blend_a64_vmask_neon()
/external/libhevc/common/arm/
ihevc_weighted_pred_neon_intr.c:162 sto_res_tmp3 = vcombine_u16(sto_res_tmp2, sto_res_tmp2); in ihevc_weighted_pred_uni_neonintr()
ihevc_weighted_pred_neon_intr.c:168 sto_res_tmp3 = vcombine_u16(sto_res_tmp2, sto_res_tmp2); in ihevc_weighted_pred_uni_neonintr()
ihevc_weighted_pred_neon_intr.c:309 sto_res_tmp3 = vcombine_u16(sto_res_tmp2, sto_res_tmp2); in ihevc_weighted_pred_chroma_uni_neonintr()
ihevc_weighted_pred_neon_intr.c:315 sto_res_tmp3 = vcombine_u16(sto_res_tmp2, sto_res_tmp2); in ihevc_weighted_pred_chroma_uni_neonintr()
ihevc_weighted_pred_neon_intr.c:473 sto_res_tmp3 = vcombine_u16(sto_res_tmp2, sto_res_tmp2); in ihevc_weighted_pred_bi_neonintr()
ihevc_weighted_pred_neon_intr.c:479 sto_res_tmp3 = vcombine_u16(sto_res_tmp2, sto_res_tmp2); in ihevc_weighted_pred_bi_neonintr()
ihevc_weighted_pred_neon_intr.c:659 sto_res_tmp3 = vcombine_u16(sto_res_tmp2, sto_res_tmp2); in ihevc_weighted_pred_chroma_bi_neonintr()
ihevc_weighted_pred_neon_intr.c:665 sto_res_tmp3 = vcombine_u16(sto_res_tmp2, sto_res_tmp2); in ihevc_weighted_pred_chroma_bi_neonintr()
ihevc_weighted_pred_neon_intr.c:808 sto_res_tmp3 = vcombine_u16(sto_res_tmp2, sto_res_tmp2); in ihevc_weighted_pred_bi_default_neonintr()
ihevc_weighted_pred_neon_intr.c:814 sto_res_tmp3 = vcombine_u16(sto_res_tmp2, sto_res_tmp2); in ihevc_weighted_pred_bi_default_neonintr()
[all …]
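
The sto_res_tmp3 hits all duplicate one 64-bit result into both halves of a q register, so later code can operate on a full uint16x8_t even when only four results exist. A minimal sketch (illustrative names):

    #include <arm_neon.h>

    /* Broadcast a single 64-bit result into both halves of a q
     * register; downstream code can then use 128-bit operations and
     * store whichever half it needs. */
    static inline uint16x8_t dup_half_u16(uint16x4_t half) {
        return vcombine_u16(half, half);
    }

The hand-written AArch32 assembly below expresses the same combine as a plain vmov between adjacent d registers, since d4/d5 already alias the two halves of q2.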
ihevc_weighted_pred_uni.s:182 vmov.s32 d5,d4 @vcombine_u16(sto_res_tmp2, sto_res_tmp2)
ihevc_weighted_pred_uni.s:193 vmov.s32 d7,d6 @vcombine_u16(sto_res_tmp2, sto_res_tmp2) ii iteration
ihevc_weighted_pred_uni.s:199 vmov.s32 d11,d10 @vcombine_u16(sto_res_tmp2, sto_res_tmp2) iii iteration
ihevc_weighted_pred_uni.s:207 vmov.s32 d13,d12 @vcombine_u16(sto_res_tmp2, sto_res_tmp2) iv iteration
ihevc_weighted_pred_bi.s:228 vmov.s32 d5,d4 @vcombine_u16(sto_res_tmp2, sto_res_tmp2)
ihevc_weighted_pred_bi.s:240 vmov.s32 d11,d10 @vcombine_u16(sto_res_tmp2, sto_res_tmp2) ii iteration
ihevc_weighted_pred_bi.s:253 vmov.s32 d15,d14 @vcombine_u16(sto_res_tmp2, sto_res_tmp2) iii iteration
ihevc_weighted_pred_bi.s:256 vmov.s32 d19,d18 @vcombine_u16(sto_res_tmp2, sto_res_tmp2)
/external/libjpeg-turbo/simd/arm/aarch32/
jccolext-neon.c:126 uint16x8_t y_u16 = vcombine_u16(vrshrn_n_u32(y_low, 16), in jsimd_rgb_ycc_convert_neon()
jccolext-neon.c:129 uint16x8_t cb_u16 = vcombine_u16(vshrn_n_u32(cb_low, 16), in jsimd_rgb_ycc_convert_neon()
jccolext-neon.c:132 uint16x8_t cr_u16 = vcombine_u16(vshrn_n_u32(cr_low, 16), in jsimd_rgb_ycc_convert_neon()
/external/libaom/libaom/aom_dsp/arm/
blend_a64_mask_neon.c:324 mask0 = vreinterpretq_s16_u16(vcombine_u16( in aom_lowbd_blend_a64_d16_mask_neon()
blend_a64_mask_neon.c:326 mask1 = vreinterpretq_s16_u16(vcombine_u16( in aom_lowbd_blend_a64_d16_mask_neon()
blend_a64_mask_neon.c:328 mask2 = vreinterpretq_s16_u16(vcombine_u16( in aom_lowbd_blend_a64_d16_mask_neon()
blend_a64_mask_neon.c:330 mask3 = vreinterpretq_s16_u16(vcombine_u16( in aom_lowbd_blend_a64_d16_mask_neon()
blend_a64_mask_neon.c:359 vreinterpretq_s16_u16(vcombine_u16(vpaddl_u8(mask0_l), vec_zero)); in aom_lowbd_blend_a64_d16_mask_neon()
blend_a64_mask_neon.c:361 vreinterpretq_s16_u16(vcombine_u16(vpaddl_u8(mask1_l), vec_zero)); in aom_lowbd_blend_a64_d16_mask_neon()
blend_a64_mask_neon.c:363 vreinterpretq_s16_u16(vcombine_u16(vpaddl_u8(mask2_l), vec_zero)); in aom_lowbd_blend_a64_d16_mask_neon()
blend_a64_mask_neon.c:365 vreinterpretq_s16_u16(vcombine_u16(vpaddl_u8(mask3_l), vec_zero)); in aom_lowbd_blend_a64_d16_mask_neon()
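
The lines 359-365 hits show a further variant: the real data occupies only the low half, so vcombine_u16 pads it with a zero vector before the reinterpret to signed. A sketch with illustrative names:

    #include <arm_neon.h>

    /* Pairwise-add eight mask bytes into four uint16 lanes, pad with a
     * zero high half, and reinterpret as signed 16-bit for blending. */
    static inline int16x8_t widen_mask_lo(uint8x8_t mask) {
        const uint16x4_t zero = vdup_n_u16(0);
        return vreinterpretq_s16_u16(vcombine_u16(vpaddl_u8(mask), zero));
    }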
intrapred_neon.c:33 sum_top = vcombine_u16(p1, p1); in dc_4x4()
intrapred_neon.c:40 sum_left = vcombine_u16(p1, p1); in dc_4x4()
intrapred_neon.c:102 sum_top = vcombine_u16(p2, p2); in dc_8x8()
intrapred_neon.c:110 sum_left = vcombine_u16(p2, p2); in dc_8x8()
intrapred_neon.c:174 sum_top = vcombine_u16(p3, p3); in dc_16x16()
intrapred_neon.c:183 sum_left = vcombine_u16(p3, p3); in dc_16x16()
intrapred_neon.c:253 sum_top = vcombine_u16(p5, p5); in dc_32x32()
intrapred_neon.c:265 sum_left = vcombine_u16(p5, p5); in dc_32x32()
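
The DC-predictor hits reduce a row of pixels to a single sum, then broadcast the 64-bit half into both lanes of a q register. A hedged sketch of the 8-pixel case (illustrative; not the exact libaom helper):

    #include <arm_neon.h>

    /* Horizontally sum eight 8-bit pixels down to one uint16 lane,
     * then duplicate that half into both halves of a q register for
     * the final add/shift of the DC predictor. */
    static inline uint16x8_t dc_sum_8(const uint8_t *row) {
        const uint8x8_t  a  = vld1_u8(row);
        const uint16x4_t p0 = vpaddl_u8(a);      /* 8 bytes -> 4 pair sums */
        const uint16x4_t p1 = vpadd_u16(p0, p0); /* 4 -> 2 */
        const uint16x4_t p2 = vpadd_u16(p1, p1); /* 2 -> 1; lane 0 = total */
        return vcombine_u16(p2, p2);
    }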
/external/libvpx/libvpx/vpx_dsp/arm/
highbd_vpx_convolve8_neon.c:133 d = vcombine_u16(vqrshrun_n_s32(sum0, 7), vqrshrun_n_s32(sum1, 7)); in highbd_convolve8_8()
highbd_vpx_convolve8_neon.c:189 d01 = vcombine_u16(vqrshrun_n_s32(d0, 7), vqrshrun_n_s32(d1, 7)); in vpx_highbd_convolve8_horiz_neon()
highbd_vpx_convolve8_neon.c:190 d23 = vcombine_u16(vqrshrun_n_s32(d2, 7), vqrshrun_n_s32(d3, 7)); in vpx_highbd_convolve8_horiz_neon()
highbd_vpx_convolve8_neon.c:398 t01 = vcombine_u16(vqrshrun_n_s32(d0, 7), vqrshrun_n_s32(d1, 7)); in vpx_highbd_convolve8_avg_horiz_neon()
highbd_vpx_convolve8_neon.c:399 t23 = vcombine_u16(vqrshrun_n_s32(d2, 7), vqrshrun_n_s32(d3, 7)); in vpx_highbd_convolve8_avg_horiz_neon()
highbd_vpx_convolve8_neon.c:404 d01 = vcombine_u16(vld1_u16(dst + 0 * dst_stride), in vpx_highbd_convolve8_avg_horiz_neon()
highbd_vpx_convolve8_neon.c:406 d23 = vcombine_u16(vld1_u16(dst + 1 * dst_stride), in vpx_highbd_convolve8_avg_horiz_neon()
highbd_vpx_convolve8_neon.c:466 d0 = vcombine_u16(vld1_u16(dst + 0 * dst_stride), in vpx_highbd_convolve8_avg_horiz_neon()
highbd_vpx_convolve8_neon.c:468 d1 = vcombine_u16(vld1_u16(dst + 1 * dst_stride), in vpx_highbd_convolve8_avg_horiz_neon()
highbd_vpx_convolve8_neon.c:470 d2 = vcombine_u16(vld1_u16(dst + 2 * dst_stride), in vpx_highbd_convolve8_avg_horiz_neon()
[all …]
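
The convolution hits pack two int32x4_t filter accumulators into one uint16x8_t of high-bitdepth pixels using a saturating, rounding, narrowing shift by the filter precision (7 bits here). A sketch with illustrative names:

    #include <arm_neon.h>

    /* vqrshrun_n_s32: saturate to unsigned, round, shift right by 7,
     * and narrow 32 -> 16 bits; vcombine_u16 packs both halves. */
    static inline uint16x8_t pack_convolve_out(int32x4_t sum0,
                                               int32x4_t sum1) {
        return vcombine_u16(vqrshrun_n_s32(sum0, 7),
                            vqrshrun_n_s32(sum1, 7));
    }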
sad4d_neon.c:56 r = vpaddlq_u16(vcombine_u16(a[0], a[1])); in sad4x_4d()
sad4d_neon.c:83 const uint32x4_t r = vpaddlq_u16(vcombine_u16(b0, b1)); in sad_512_pel_final_neon()
sad4d_neon.c:94 const uint32x4_t b0 = vpaddlq_u16(vcombine_u16(a0, a1)); in sad_1024_pel_final_neon()
sad4d_neon.c:95 const uint32x4_t b1 = vpaddlq_u16(vcombine_u16(a2, a3)); in sad_1024_pel_final_neon()
highbd_vpx_convolve_avg_neon.c:38 s01 = vcombine_u16(s0, s1); in vpx_highbd_convolve_avg_neon()
highbd_vpx_convolve_avg_neon.c:39 d01 = vcombine_u16(d0, d1); in vpx_highbd_convolve_avg_neon()
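
The averaging hits batch two 4-pixel rows of 16-bit pixels into a single q register per source, so one vrhaddq_u16 can average eight lanes at once. A hedged sketch (illustrative names and signature):

    #include <arm_neon.h>
    #include <stddef.h>

    /* Average two short rows of high-bitdepth pixels into dst using a
     * single rounded-halving add: (a + b + 1) >> 1 per lane. */
    static inline void avg_two_rows_u16(const uint16_t *src, uint16_t *dst,
                                        ptrdiff_t stride) {
        const uint16x8_t s01 = vcombine_u16(vld1_u16(src),
                                            vld1_u16(src + stride));
        const uint16x8_t d01 = vcombine_u16(vld1_u16(dst),
                                            vld1_u16(dst + stride));
        const uint16x8_t avg = vrhaddq_u16(s01, d01);
        vst1_u16(dst,          vget_low_u16(avg));
        vst1_u16(dst + stride, vget_high_u16(avg));
    }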
highbd_intrapred_neon.c:462 const uint16x8_t L____3210 = vcombine_u16(L0123, L3210); in vpx_highbd_d135_predictor_4x4_neon()
highbd_intrapred_neon.c:463 const uint16x8_t L3210XA012 = vcombine_u16(L3210, vget_low_u16(XA0123___)); in vpx_highbd_d135_predictor_4x4_neon()
highbd_intrapred_neon.c:492 const uint16x8_t L76543210 = vcombine_u16(L7654, L3210); in vpx_highbd_d135_predictor_8x8_neon()
highbd_intrapred_neon.c:542 const uint16x8_t L76543210 = vcombine_u16(L7654, L3210); in vpx_highbd_d135_predictor_16x16_neon()
highbd_intrapred_neon.c:543 const uint16x8_t Lfedcba98 = vcombine_u16(Lfedc, Lba98); in vpx_highbd_d135_predictor_16x16_neon()
highbd_intrapred_neon.c:618 const uint16x8_t LL76543210 = vcombine_u16(LL7654, LL3210); in vpx_highbd_d135_predictor_32x32_neon()
highbd_intrapred_neon.c:619 const uint16x8_t LLfedcba98 = vcombine_u16(LLfedc, LLba98); in vpx_highbd_d135_predictor_32x32_neon()
highbd_intrapred_neon.c:631 const uint16x8_t LU76543210 = vcombine_u16(LU7654, LU3210); in vpx_highbd_d135_predictor_32x32_neon()
highbd_intrapred_neon.c:632 const uint16x8_t LUfedcba98 = vcombine_u16(LUfedc, LUba98); in vpx_highbd_d135_predictor_32x32_neon()
highbd_idct4x4_add_neon.c:25 const int16x8_t a = vreinterpretq_s16_u16(vcombine_u16(a1, a0)); in highbd_idct4x4_1_add_kernel2()
/external/webp/src/dsp/
rescaler_neon.c:84 const uint8x8_t D = vqmovn_u16(vcombine_u16(C0, C1)); in RescalerExportRowExpand_NEON()
rescaler_neon.c:104 const uint8x8_t F = vqmovn_u16(vcombine_u16(E0, E1)); in RescalerExportRowExpand_NEON()
rescaler_neon.c:144 const uint8x8_t E = vqmovn_u16(vcombine_u16(D0, D1)); in RescalerExportRowShrink_NEON()
rescaler_neon.c:161 const uint8x8_t C = vqmovn_u16(vcombine_u16(B0, B1)); in RescalerExportRowShrink_NEON()
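
The WebP rescaler hits (and the vmovn/vqmovn hits elsewhere in this list) combine two uint16x4_t halves purely so a single narrowing instruction can produce eight bytes at once. A sketch:

    #include <arm_neon.h>

    /* Join two 64-bit halves, then saturate-narrow all eight 16-bit
     * lanes to bytes in one vqmovn_u16. */
    static inline uint8x8_t narrow_to_u8(uint16x4_t lo, uint16x4_t hi) {
        return vqmovn_u16(vcombine_u16(lo, hi));
    }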
/external/libjpeg-turbo/simd/arm/
jcgryext-neon.c:91 uint16x8_t y_l = vcombine_u16(vrshrn_n_u32(y_ll, 16), in jsimd_rgb_gray_convert_neon()
jcgryext-neon.c:93 uint16x8_t y_h = vcombine_u16(vrshrn_n_u32(y_hl, 16), in jsimd_rgb_gray_convert_neon()
/external/libgav1/libgav1/src/dsp/arm/
cdef_neon.cc:441 output[0] = vcombine_u16(vld1_u16(src + y_0 * stride + x_0), in LoadDirection4()
cdef_neon.cc:443 output[1] = vcombine_u16(vld1_u16(src - y_0 * stride - x_0), in LoadDirection4()
cdef_neon.cc:445 output[2] = vcombine_u16(vld1_u16(src + y_1 * stride + x_1), in LoadDirection4()
cdef_neon.cc:447 output[3] = vcombine_u16(vld1_u16(src - y_1 * stride - x_1), in LoadDirection4()
cdef_neon.cc:514 pixel = vcombine_u16(vld1_u16(src), vld1_u16(src + src_stride)); in CdefFilter_NEON()
intrapred_smooth_neon.cc:108 StoreLo4(dst, vmovn_u16(vcombine_u16(dest_0, dest_0))); in Smooth4Or8xN_NEON()
intrapred_smooth_neon.cc:113 vst1_u8(dst, vmovn_u16(vcombine_u16(dest_0, dest_1))); in Smooth4Or8xN_NEON()
intrapred_smooth_neon.cc:133 const uint8x8_t dest_0_u8 = vmovn_u16(vcombine_u16(dest_0, dest_1)); in CalculateWeightsAndPred()
intrapred_smooth_neon.cc:145 const uint8x8_t dest_1_u8 = vmovn_u16(vcombine_u16(dest_2, dest_3)); in CalculateWeightsAndPred()
/external/libhevc/encoder/arm/
ihevce_coarse_layer_sad_neon.c:204 abs_01 = vcombine_u16(tmp_a0, tmp_a1); in hme_store_4x4_sads_high_speed_neon()
ihevce_coarse_layer_sad_neon.c:361 abs_a_01 = vcombine_u16(tmp_a.val[0], tmp_a.val[1]); in hme_store_4x4_sads_high_quality_neon()
ihevce_coarse_layer_sad_neon.c:365 abs_b_01 = vcombine_u16(tmp_b0, tmp_b1); in hme_store_4x4_sads_high_quality_neon()
ihevce_coarse_layer_sad_neon.c:529 total_cost = vcombine_u16(vmovn_u32(total_cost_0), vmovn_u32(total_cost_1)); in hme_combine_4x4_sads_and_compute_cost_high_speed_neon()
ihevce_coarse_layer_sad_neon.c:690 total_cost = vcombine_u16(vmovn_u32(total_cost_0), vmovn_u32(total_cost_1)); in hme_combine_4x4_sads_and_compute_cost_high_quality_neon()
ihevce_me_neon.c:199 abs_01 = vcombine_u16(tmp_a0, tmp_a1); in ihevce_sad4_4x4_neon()
ihevce_me_neon.c:232 abs_0 = vcombine_u16(tmp_a0, tmp_a1); in ihevce_sad4_8x8_neon()
ihevce_me_neon.c:235 abs_1 = vcombine_u16(tmp_a0, tmp_a1); in ihevce_sad4_8x8_neon()
ihevce_me_neon.c:238 abs_0 = vcombine_u16(tmp_a0, tmp_a1); in ihevce_sad4_8x8_neon()
/external/libgav1/libgav1/src/utils/
entropy_decoder.cc:204 const uint16x8_t index = vcombine_u16(vcreate_u16(0x0003000200010000), in UpdateCdf7To9()
entropy_decoder.cc:243 const uint16x8_t index = vcombine_u16(vcreate_u16(0x0005000400030002), in UpdateCdf11()
entropy_decoder.cc:278 uint16x8_t index = vcombine_u16(vcreate_u16(0x0003000200010000), in UpdateCdf13()
entropy_decoder.cc:288 index = vcombine_u16(vcreate_u16(0x0007000600050004), in UpdateCdf13()
entropy_decoder.cc:310 uint16x8_t index = vcombine_u16(vcreate_u16(0x0003000200010000), in UpdateCdf16()
entropy_decoder.cc:321 index = vcombine_u16(vcreate_u16(0x000b000a00090008), in UpdateCdf16()
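
The UpdateCdf hits build small constant vectors without a memory load: vcreate_u16 reinterprets a 64-bit literal as four little-endian uint16 lanes, and vcombine_u16 joins two of them. A sketch:

    #include <arm_neon.h>

    /* Materialize the lane-index vector {0,1,2,3,4,5,6,7}. With lanes
     * numbered from the low bits, 0x0003000200010000 is {0,1,2,3}. */
    static inline uint16x8_t lane_index_u16(void) {
        return vcombine_u16(vcreate_u16(0x0003000200010000ULL),
                            vcreate_u16(0x0007000600050004ULL));
    }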
/external/llvm-project/clang/test/CodeGen/
aarch64-neon-vcombine.c:46 return vcombine_u16(low, high); in test_vcombine_u16()
/external/clang/test/CodeGen/
aarch64-neon-vcombine.c:46 return vcombine_u16(low, high); in test_vcombine_u16()
/external/skia/include/private/
SkNx_neon.h:394 return { vcombine_u16(vshrn_n_u32(lo,16), vshrn_n_u32(hi,16)) }; in mulHi()
SkNx_neon.h:631 return vqmovn_u16(vcombine_u16(_16, _16));
SkNx_neon.h:666 return vqmovn_u16(vcombine_u16(a16, b16));
SkNx_neon.h:678 return vmovn_u16(vcombine_u16(src.fVec, src.fVec));
SkNx_neon.h:687 return vqmovn_u16(vcombine_u16(_16, _16));
SkNx_neon.h:692 return vqmovn_u16(vcombine_u16(_16, _16));
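
Skia's mulHi hit shows the high-half-of-product idiom: widen-multiply each half, then keep the top 16 bits of every 32-bit product with a narrowing shift. A sketch of the same computation as a free function (illustrative; not Skia's actual member):

    #include <arm_neon.h>

    /* Per-lane (a * b) >> 16 for eight unsigned 16-bit lanes: widen
     * each half with vmull_u16, then shift-narrow back to 16 bits. */
    static inline uint16x8_t mul_hi_u16(uint16x8_t a, uint16x8_t b) {
        const uint32x4_t lo = vmull_u16(vget_low_u16(a),  vget_low_u16(b));
        const uint32x4_t hi = vmull_u16(vget_high_u16(a), vget_high_u16(b));
        return vcombine_u16(vshrn_n_u32(lo, 16), vshrn_n_u32(hi, 16));
    }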
