/external/libvpx/libvpx/vpx_dsp/arm/ |
D | highbd_idct8x8_add_neon.c |
    278  c[0] = vcombine_s16(vrshrn_n_s32(a[0], 5), vrshrn_n_s32(a[4], 5));  in vpx_highbd_idct8x8_12_add_neon()
    279  c[1] = vcombine_s16(vrshrn_n_s32(a[1], 5), vrshrn_n_s32(a[5], 5));  in vpx_highbd_idct8x8_12_add_neon()
    280  c[2] = vcombine_s16(vrshrn_n_s32(a[2], 5), vrshrn_n_s32(a[6], 5));  in vpx_highbd_idct8x8_12_add_neon()
    281  c[3] = vcombine_s16(vrshrn_n_s32(a[3], 5), vrshrn_n_s32(a[7], 5));  in vpx_highbd_idct8x8_12_add_neon()
    282  c[4] = vcombine_s16(vrshrn_n_s32(a[8], 5), vrshrn_n_s32(a[12], 5));  in vpx_highbd_idct8x8_12_add_neon()
    283  c[5] = vcombine_s16(vrshrn_n_s32(a[9], 5), vrshrn_n_s32(a[13], 5));  in vpx_highbd_idct8x8_12_add_neon()
    284  c[6] = vcombine_s16(vrshrn_n_s32(a[10], 5), vrshrn_n_s32(a[14], 5));  in vpx_highbd_idct8x8_12_add_neon()
    285  c[7] = vcombine_s16(vrshrn_n_s32(a[11], 5), vrshrn_n_s32(a[15], 5));  in vpx_highbd_idct8x8_12_add_neon()
    318  b[0] = vcombine_s16(vmovn_s32(a[0]), vmovn_s32(a[1]));  in vpx_highbd_idct8x8_64_add_neon()
    319  b[1] = vcombine_s16(vmovn_s32(a[2]), vmovn_s32(a[3]));  in vpx_highbd_idct8x8_64_add_neon()
    [all …]
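Every match above is one idiom: the high-bitdepth IDCT carries 32-bit intermediates, and its final stage rounds, shifts, and narrows two int32x4_t halves into a single int16x8_t. A minimal sketch of that step, with illustrative names (not libvpx's):

    #include <arm_neon.h>

    /* vrshrn_n_s32 computes (x + 16) >> 5 per lane and narrows to 16 bits;
     * vcombine_s16 packs the two narrowed halves into one q-register. */
    static int16x8_t round_shift_narrow_combine(int32x4_t lo, int32x4_t hi) {
      return vcombine_s16(vrshrn_n_s32(lo, 5), vrshrn_n_s32(hi, 5));
    }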
|
D | highbd_idct4x4_add_neon.c |
    64  a[0] = vcombine_s16(vmovn_s32(c[0]), vmovn_s32(c[1]));  in vpx_highbd_idct4x4_16_add_neon()
    65  a[1] = vcombine_s16(vmovn_s32(c[2]), vmovn_s32(c[3]));  in vpx_highbd_idct4x4_16_add_neon()
    69  a[1] = vcombine_s16(vget_high_s16(a[1]), vget_low_s16(a[1]));  in vpx_highbd_idct4x4_16_add_neon()
    83  a[0] = vcombine_s16(vqrshrn_n_s32(c[0], 4), vqrshrn_n_s32(c[1], 4));  in vpx_highbd_idct4x4_16_add_neon()
    84  a[1] = vcombine_s16(vqrshrn_n_s32(c[3], 4), vqrshrn_n_s32(c[2], 4));  in vpx_highbd_idct4x4_16_add_neon()
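The 4x4 path adds two details visible in the matches: the final >> 4 uses the saturating vqrshrn_n_s32 rather than a plain vmovn_s32, and line 84 packs c[3] ahead of c[2] to restore row order after the transposed pass. A sketch under those assumptions (array names mirror the listing; the helper itself is illustrative):

    #include <arm_neon.h>

    /* Saturating rounding narrow of four int32x4_t rows into two int16x8_t
     * vectors; the second combine swaps its operands, as in the listing. */
    static void pack_4x4_rows(const int32x4_t c[4], int16x8_t a[2]) {
      a[0] = vcombine_s16(vqrshrn_n_s32(c[0], 4), vqrshrn_n_s32(c[1], 4));
      a[1] = vcombine_s16(vqrshrn_n_s32(c[3], 4), vqrshrn_n_s32(c[2], 4));
    }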
|
D | fwd_txfm_neon.c |
    73  out_0 = vcombine_s16(a, c);  // 00 01 02 03 40 41 42 43  in vpx_fdct8x8_neon()
    74  out_2 = vcombine_s16(e, g);  // 20 21 22 23 60 61 62 63  in vpx_fdct8x8_neon()
    75  out_4 = vcombine_s16(b, d);  // 04 05 06 07 44 45 46 47  in vpx_fdct8x8_neon()
    76  out_6 = vcombine_s16(f, h);  // 24 25 26 27 64 65 66 67  in vpx_fdct8x8_neon()
    90  const int16x8_t ab = vcombine_s16(a, b);  in vpx_fdct8x8_neon()
    91  const int16x8_t cd = vcombine_s16(c, d);  in vpx_fdct8x8_neon()
   124  out_1 = vcombine_s16(a, c);  // 10 11 12 13 50 51 52 53  in vpx_fdct8x8_neon()
   125  out_3 = vcombine_s16(e, g);  // 30 31 32 33 70 71 72 73  in vpx_fdct8x8_neon()
   126  out_5 = vcombine_s16(b, d);  // 14 15 16 17 54 55 56 57  in vpx_fdct8x8_neon()
   127  out_7 = vcombine_s16(f, h);  // 34 35 36 37 74 75 76 77  in vpx_fdct8x8_neon()
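The lane comments (00 01 02 03 40 41 42 43, and so on) document what the combine does here: two 4-lane halves from different rows of the 8x8 tile are packed into one 8-lane register, so rows 0/4, 2/6, 1/5, and 3/7 travel together through the next pass. A hedged sketch of just the packing (row arguments assumed):

    #include <arm_neon.h>

    /* Pack the 4-lane half of row 0 and the 4-lane half of row 4 into one
     * int16x8_t: lanes 0-3 hold row 0, lanes 4-7 hold row 4. */
    static int16x8_t pack_row_pair(int16x4_t row0, int16x4_t row4) {
      return vcombine_s16(row0, row4);
    }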
|
D | fdct_neon.c |
    37  const int16x8_t input_01 = vcombine_s16(input_0, input_1);  in vpx_fdct4x4_neon()
    38  const int16x8_t input_32 = vcombine_s16(input_3, input_2);  in vpx_fdct4x4_neon()
    84  int16x8_t out_01 = vcombine_s16(input_0, input_1);  in vpx_fdct4x4_neon()
    85  int16x8_t out_23 = vcombine_s16(input_2, input_3);  in vpx_fdct4x4_neon()
|
D | highbd_idct_neon.h |
   420  o[0] = vcombine_s16(vrshrn_n_s32(out[0].val[0], 6),  in highbd_idct16x16_add_store()
   422  o[1] = vcombine_s16(vrshrn_n_s32(out[1].val[0], 6),  in highbd_idct16x16_add_store()
   424  o[2] = vcombine_s16(vrshrn_n_s32(out[2].val[0], 6),  in highbd_idct16x16_add_store()
   426  o[3] = vcombine_s16(vrshrn_n_s32(out[3].val[0], 6),  in highbd_idct16x16_add_store()
   428  o[4] = vcombine_s16(vrshrn_n_s32(out[4].val[0], 6),  in highbd_idct16x16_add_store()
   430  o[5] = vcombine_s16(vrshrn_n_s32(out[5].val[0], 6),  in highbd_idct16x16_add_store()
   432  o[6] = vcombine_s16(vrshrn_n_s32(out[6].val[0], 6),  in highbd_idct16x16_add_store()
   434  o[7] = vcombine_s16(vrshrn_n_s32(out[7].val[0], 6),  in highbd_idct16x16_add_store()
   436  o[8] = vcombine_s16(vrshrn_n_s32(out[8].val[0], 6),  in highbd_idct16x16_add_store()
   438  o[9] = vcombine_s16(vrshrn_n_s32(out[9].val[0], 6),  in highbd_idct16x16_add_store()
   [all …]
|
D | mem_neon.h |
    47  res.val[0] = vcombine_s16(s0, s2);  in load_tran_low_to_s16x2q()
    48  res.val[1] = vcombine_s16(s1, s3);  in load_tran_low_to_s16x2q()
    61  return vcombine_s16(s0, s1);  in load_tran_low_to_s16q()
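These helpers exist because libvpx's tran_low_t is 32 bits wide in high-bitdepth builds while the NEON kernels operate on int16x8_t. A sketch of load_tran_low_to_s16q under that assumption:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Load eight 32-bit transform coefficients and narrow to one int16x8_t.
     * Plain vmovn_s32 truncates, so the values must already fit in 16 bits. */
    static int16x8_t load_tran_low_to_s16q_sketch(const int32_t *buf) {
      const int16x4_t s0 = vmovn_s32(vld1q_s32(buf));
      const int16x4_t s1 = vmovn_s32(vld1q_s32(buf + 4));
      return vcombine_s16(s0, s1);
    }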
|
/external/libaom/libaom/aom_dsp/arm/ |
D | fwd_txfm_neon.c |
    34  const int16x8_t input_01 = vcombine_s16(*input_0, *input_1);  in aom_fdct4x4_helper()
    35  const int16x8_t input_32 = vcombine_s16(*input_3, *input_2);  in aom_fdct4x4_helper()
    88  int16x8_t out_01 = vcombine_s16(input_0, input_1);  in aom_fdct4x4_neon()
    89  int16x8_t out_23 = vcombine_s16(input_2, input_3);  in aom_fdct4x4_neon()
   105  int16x8_t out_01 = vcombine_s16(input_0, input_1);  in aom_fdct4x4_lp_neon()
   106  int16x8_t out_23 = vcombine_s16(input_2, input_3);  in aom_fdct4x4_lp_neon()
   164  out_0 = vcombine_s16(a, c);  // 00 01 02 03 40 41 42 43  in aom_fdct8x8_neon()
   165  out_2 = vcombine_s16(e, g);  // 20 21 22 23 60 61 62 63  in aom_fdct8x8_neon()
   166  out_4 = vcombine_s16(b, d);  // 04 05 06 07 44 45 46 47  in aom_fdct8x8_neon()
   167  out_6 = vcombine_s16(f, h);  // 24 25 26 27 64 65 66 67  in aom_fdct8x8_neon()
   [all …]
|
D | blend_a64_mask_neon.c |
    35  *res = vcombine_s16(vshrn_n_s32(im_res_low, AOM_BLEND_A64_ROUND_BITS),  in blend8x1()
   103  mask0_1 = vcombine_s16(mask0, mask1);  in blend_4x4()
   104  mask2_3 = vcombine_s16(mask2, mask3);  in blend_4x4()
   262  vcombine_s16(vqrshrn_n_s32(m0_32, 2), vqrshrn_n_s32(m4_32, 2));  in aom_lowbd_blend_a64_d16_mask_neon()
   264  vcombine_s16(vqrshrn_n_s32(m1_32, 2), vqrshrn_n_s32(m5_32, 2));  in aom_lowbd_blend_a64_d16_mask_neon()
   266  vcombine_s16(vqrshrn_n_s32(m2_32, 2), vqrshrn_n_s32(m6_32, 2));  in aom_lowbd_blend_a64_d16_mask_neon()
   268  vcombine_s16(vqrshrn_n_s32(m3_32, 2), vqrshrn_n_s32(m7_32, 2));  in aom_lowbd_blend_a64_d16_mask_neon()
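The blend8x1 match shows the blend shape: mask-weighted products accumulated in 32 bits, then shifted down by AOM_BLEND_A64_ROUND_BITS and narrowed. A sketch of that shape; the helper name is illustrative and ROUND_BITS stands in for AOM_BLEND_A64_ROUND_BITS (6 in libaom, where the maximum alpha is 64):

    #include <arm_neon.h>

    #define ROUND_BITS 6 /* assumption: AOM_BLEND_A64_ROUND_BITS */

    /* res = (mask * src0 + (64 - mask) * src1) >> ROUND_BITS, computed as
     * widening multiply-accumulates followed by a shift-narrow-combine. */
    static int16x8_t blend8_sketch(int16x8_t src0, int16x8_t src1,
                                   int16x8_t mask, int16x8_t max_minus_mask) {
      const int32x4_t lo =
          vmlal_s16(vmull_s16(vget_low_s16(mask), vget_low_s16(src0)),
                    vget_low_s16(max_minus_mask), vget_low_s16(src1));
      const int32x4_t hi =
          vmlal_s16(vmull_s16(vget_high_s16(mask), vget_high_s16(src0)),
                    vget_high_s16(max_minus_mask), vget_high_s16(src1));
      return vcombine_s16(vshrn_n_s32(lo, ROUND_BITS),
                          vshrn_n_s32(hi, ROUND_BITS));
    }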
|
/external/libvpx/libvpx/vp9/common/arm/neon/ |
D | vp9_highbd_iht8x8_add_neon.c |
   185  c[0] = vcombine_s16(vmovn_s32(a[0]), vmovn_s32(a[1]));  in vp9_highbd_iht8x8_64_add_neon()
   186  c[1] = vcombine_s16(vmovn_s32(a[2]), vmovn_s32(a[3]));  in vp9_highbd_iht8x8_64_add_neon()
   187  c[2] = vcombine_s16(vmovn_s32(a[4]), vmovn_s32(a[5]));  in vp9_highbd_iht8x8_64_add_neon()
   188  c[3] = vcombine_s16(vmovn_s32(a[6]), vmovn_s32(a[7]));  in vp9_highbd_iht8x8_64_add_neon()
   189  c[4] = vcombine_s16(vmovn_s32(a[8]), vmovn_s32(a[9]));  in vp9_highbd_iht8x8_64_add_neon()
   190  c[5] = vcombine_s16(vmovn_s32(a[10]), vmovn_s32(a[11]));  in vp9_highbd_iht8x8_64_add_neon()
   191  c[6] = vcombine_s16(vmovn_s32(a[12]), vmovn_s32(a[13]));  in vp9_highbd_iht8x8_64_add_neon()
   192  c[7] = vcombine_s16(vmovn_s32(a[14]), vmovn_s32(a[15]));  in vp9_highbd_iht8x8_64_add_neon()
   335  c[0] = vcombine_s16(vrshrn_n_s32(a[0], 5), vrshrn_n_s32(a[4], 5));  in vp9_highbd_iht8x8_64_add_neon()
   336  c[1] = vcombine_s16(vrshrn_n_s32(a[8], 5), vrshrn_n_s32(a[12], 5));  in vp9_highbd_iht8x8_64_add_neon()
   [all …]
|
D | vp9_highbd_iht4x4_add_neon.c |
    89  a[0] = vcombine_s16(vmovn_s32(c[0]), vmovn_s32(c[1]));  in vp9_highbd_iht4x4_16_add_neon()
    90  a[1] = vcombine_s16(vmovn_s32(c[2]), vmovn_s32(c[3]));  in vp9_highbd_iht4x4_16_add_neon()
    96  a[1] = vcombine_s16(vget_high_s16(a[1]), vget_low_s16(a[1]));  in vp9_highbd_iht4x4_16_add_neon()
    99  a[1] = vcombine_s16(vget_high_s16(a[1]), vget_low_s16(a[1]));  in vp9_highbd_iht4x4_16_add_neon()
   104  a[1] = vcombine_s16(vget_high_s16(a[1]), vget_low_s16(a[1]));  in vp9_highbd_iht4x4_16_add_neon()
   113  a[1] = vcombine_s16(vget_high_s16(a[1]), vget_low_s16(a[1]));  in vp9_highbd_iht4x4_16_add_neon()
   175  a[0] = vcombine_s16(vqrshrn_n_s32(c[0], 4), vqrshrn_n_s32(c[1], 4));  in vp9_highbd_iht4x4_16_add_neon()
   176  a[1] = vcombine_s16(vqrshrn_n_s32(c[2], 4), vqrshrn_n_s32(c[3], 4));  in vp9_highbd_iht4x4_16_add_neon()
|
D | vp9_iht4x4_add_neon.c |
    38  a[1] = vcombine_s16(vget_high_s16(a[1]), vget_low_s16(a[1]));  in vp9_iht4x4_16_add_neon()
    41  a[1] = vcombine_s16(vget_high_s16(a[1]), vget_low_s16(a[1]));  in vp9_iht4x4_16_add_neon()
    46  a[1] = vcombine_s16(vget_high_s16(a[1]), vget_low_s16(a[1]));  in vp9_iht4x4_16_add_neon()
    55  a[1] = vcombine_s16(vget_high_s16(a[1]), vget_low_s16(a[1]));  in vp9_iht4x4_16_add_neon()
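Every match in this file (and the identical lines in the high-bitdepth 4x4 files above) is the same 64-bit half swap: lanes 4-7 move in front of lanes 0-3, which the 4x4 hybrid transform uses to reorder rows between passes. As a one-liner:

    #include <arm_neon.h>

    /* Swap the two 64-bit halves of a q-register. */
    static int16x8_t swap_halves(int16x8_t a) {
      return vcombine_s16(vget_high_s16(a), vget_low_s16(a));
    }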
|
/external/libvpx/libvpx/vp8/common/arm/neon/ |
D | shortidct4x4llm_neon.c |
    40  q1s16 = vcombine_s16(d2, d4);  // Swap d3 d4 here  in vp8_short_idct4x4llm_neon()
    41  q2s16 = vcombine_s16(d3, d5);  in vp8_short_idct4x4llm_neon()
    69  q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp3.val[0]);  in vp8_short_idct4x4llm_neon()
    70  q2s16 = vcombine_s16(v2tmp2.val[1], v2tmp3.val[1]);  in vp8_short_idct4x4llm_neon()
   102  q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp2.val[1]);  in vp8_short_idct4x4llm_neon()
   103  q2s16 = vcombine_s16(v2tmp3.val[0], v2tmp3.val[1]);  in vp8_short_idct4x4llm_neon()
|
D | iwalsh_neon.c |
    31  q2s16 = vcombine_s16(d4s16, d5s16);  in vp8_short_inv_walsh4x4_neon()
    32  q3s16 = vcombine_s16(d6s16, d7s16);  in vp8_short_inv_walsh4x4_neon()
    51  q2s16 = vcombine_s16(d4s16, d5s16);  in vp8_short_inv_walsh4x4_neon()
    52  q3s16 = vcombine_s16(d6s16, d7s16);  in vp8_short_inv_walsh4x4_neon()
|
D | dequant_idct_neon.c |
    64  q2 = vcombine_s16(vget_high_s16(q1), vget_high_s16(q2));  in vp8_dequant_idct_add_neon()
    89  q2 = vcombine_s16(d2tmp2.val[1], d2tmp3.val[1]);  in vp8_dequant_idct_add_neon()
   121  q1 = vcombine_s16(d2tmp2.val[0], d2tmp2.val[1]);  in vp8_dequant_idct_add_neon()
   122  q2 = vcombine_s16(d2tmp3.val[0], d2tmp3.val[1]);  in vp8_dequant_idct_add_neon()
|
/external/libaom/libaom/av1/common/arm/ |
D | transpose_neon.h |
   308  *o0 = vcombine_s16(vreinterpret_s16_s32(c0.val[0]),  in transpose_s16_4x8()
   310  *o1 = vcombine_s16(vreinterpret_s16_s32(c1.val[0]),  in transpose_s16_4x8()
   312  *o2 = vcombine_s16(vreinterpret_s16_s32(c0.val[1]),  in transpose_s16_4x8()
   314  *o3 = vcombine_s16(vreinterpret_s16_s32(c1.val[1]),  in transpose_s16_4x8()
   433  *a0 = vcombine_s16(vget_low_s16(vreinterpretq_s16_s32(c0.val[0])),  in transpose_s16_8x8()
   435  *a4 = vcombine_s16(vget_high_s16(vreinterpretq_s16_s32(c0.val[0])),  in transpose_s16_8x8()
   438  *a2 = vcombine_s16(vget_low_s16(vreinterpretq_s16_s32(c0.val[1])),  in transpose_s16_8x8()
   440  *a6 = vcombine_s16(vget_high_s16(vreinterpretq_s16_s32(c0.val[1])),  in transpose_s16_8x8()
   443  *a1 = vcombine_s16(vget_low_s16(vreinterpretq_s16_s32(c1.val[0])),  in transpose_s16_8x8()
   445  *a5 = vcombine_s16(vget_high_s16(vreinterpretq_s16_s32(c1.val[0])),  in transpose_s16_8x8()
   [all …]
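These transposes interleave 16-bit and 32-bit element pairs with vtrn and then use vcombine_s16 to reassemble full rows. The core trick is easiest to see on a 4x4 tile; a sketch follows (the 4x8/8x8 variants above apply the same steps and then combine low/high halves into q-registers):

    #include <arm_neon.h>

    /* In-place 4x4 transpose: vtrn_s16 swaps 16-bit pairs across row pairs,
     * then vtrn_s32 (via reinterpret) swaps 32-bit pairs, yielding columns. */
    static void transpose_4x4_sketch(int16x4_t r[4]) {
      const int16x4x2_t p01 = vtrn_s16(r[0], r[1]);
      const int16x4x2_t p23 = vtrn_s16(r[2], r[3]);
      const int32x2x2_t q02 = vtrn_s32(vreinterpret_s32_s16(p01.val[0]),
                                       vreinterpret_s32_s16(p23.val[0]));
      const int32x2x2_t q13 = vtrn_s32(vreinterpret_s32_s16(p01.val[1]),
                                       vreinterpret_s32_s16(p23.val[1]));
      r[0] = vreinterpret_s16_s32(q02.val[0]);
      r[1] = vreinterpret_s16_s32(q13.val[0]);
      r[2] = vreinterpret_s16_s32(q02.val[1]);
      r[3] = vreinterpret_s16_s32(q13.val[1]);
    }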
|
/external/libvpx/libvpx/vp8/encoder/arm/neon/ |
D | shortfdct_neon.c |
   105  q0s16 = vcombine_s16(d0s16, d1s16);  in vp8_short_fdct4x4_neon()
   106  q1s16 = vcombine_s16(d2s16, d3s16);  in vp8_short_fdct4x4_neon()
   183  q1s16 = vcombine_s16(d2s16, d3s16);  in vp8_short_fdct8x4_neon()
   184  q3s16 = vcombine_s16(d6s16, d7s16);  in vp8_short_fdct8x4_neon()
   251  q0s16 = vcombine_s16(d0s16, d1s16);  in vp8_short_fdct8x4_neon()
   252  q1s16 = vcombine_s16(d2s16, d3s16);  in vp8_short_fdct8x4_neon()
   253  q2s16 = vcombine_s16(d4s16, d5s16);  in vp8_short_fdct8x4_neon()
   254  q3s16 = vcombine_s16(d6s16, d7s16);  in vp8_short_fdct8x4_neon()
|
/external/libhevc/common/arm/ |
D | ihevc_quant_iquant_ssd_neon_intr.c |
   454  psgn0 = vcgeq_s16(vcombine_s16(s0, s1), zero);  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
   455  psgn1 = vcgeq_s16(vcombine_s16(s2, s3), zero);  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
   457  nsgn0 = vcltq_s16(vcombine_s16(s0, s1), zero);  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
   458  nsgn1 = vcltq_s16(vcombine_s16(s2, s3), zero);  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
   480  q_00 = vcombine_s16(vqmovn_s32(q_tmp0), vqmovn_s32(q_tmp1));  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
   481  q_01 = vcombine_s16(vqmovn_s32(q_tmp2), vqmovn_s32(q_tmp3));  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
   500  q_10 = vcombine_s16(vqmovn_s32(q_tmp0), vqmovn_s32(q_tmp1));  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
   501  q_11 = vcombine_s16(vqmovn_s32(q_tmp2), vqmovn_s32(q_tmp3));  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
   531  q_20 = vcombine_s16(vqmovn_s32(q_tmp0), vqmovn_s32(q_tmp1));  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
   532  q_21 = vcombine_s16(vqmovn_s32(q_tmp2), vqmovn_s32(q_tmp3));  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
   [all …]
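Two idioms recur in this quantizer: per-lane sign masks built by comparing the packed inputs against zero, and 32-bit quantized products saturated back to 16 bits. A sketch of both; names mirror the listing but the helpers themselves are illustrative:

    #include <arm_neon.h>

    /* 0xFFFF in every lane whose packed value is >= 0 (vcltq_s16 gives the
     * complementary negative mask seen on lines 457-458). */
    static uint16x8_t sign_mask_ge0(int16x4_t s0, int16x4_t s1) {
      return vcgeq_s16(vcombine_s16(s0, s1), vdupq_n_s16(0));
    }

    /* Saturate two int32x4_t quantized products into one int16x8_t. */
    static int16x8_t pack_quant(int32x4_t q_tmp0, int32x4_t q_tmp1) {
      return vcombine_s16(vqmovn_s32(q_tmp0), vqmovn_s32(q_tmp1));
    }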
|
D | ihevc_resi_trans_neon.c |
   116  diff_1 = vcombine_s16(diff_1_low, diff_1_high);  in ihevc_resi_trans_4x4_neon()
   117  diff_2 = vcombine_s16(diff_2_high, diff_2_low);  in ihevc_resi_trans_4x4_neon()
   897  b[0] = vcombine_s16(  in partial_round_shift()
   900  b[1] = vcombine_s16(  in partial_round_shift()
   903  b[2] = vcombine_s16(  in partial_round_shift()
   906  b[3] = vcombine_s16(  in partial_round_shift()
   909  b[4] = vcombine_s16(  in partial_round_shift()
   912  b[5] = vcombine_s16(  in partial_round_shift()
   915  b[6] = vcombine_s16(  in partial_round_shift()
   918  b[7] = vcombine_s16(  in partial_round_shift()
   [all …]
|
/external/libaom/libaom/av1/encoder/arm/neon/ |
D | quantize_neon.c |
    64  vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));  in av1_quantize_fp_neon()
    90  vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));  in av1_quantize_fp_neon()
   129  vst1q_s16(dqcoeff, vcombine_s16(vmovn_s32(dqcoeff_0), vmovn_s32(dqcoeff_1)));  in calculate_dqcoeff_lp_and_store()
   162  vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));  in av1_quantize_lp_neon()
   187  vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));  in av1_quantize_lp_neon()
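The repeated vshrn_n_s32(..., 16) lines are a multiply-high: a 16x16 widening multiply leaves the quantized value in bits 16-31 of each 32-bit product, and a shift-narrow by 16 extracts exactly those bits. A sketch of the whole step (argument names assumed); the same pattern appears in vp9_quantize_neon.c below:

    #include <arm_neon.h>

    /* Per-lane (coeff * quant) >> 16 across eight 16-bit lanes. */
    static int16x8_t mulhi_s16(int16x8_t coeff, int16x8_t quant) {
      const int32x4_t lo = vmull_s16(vget_low_s16(coeff), vget_low_s16(quant));
      const int32x4_t hi = vmull_s16(vget_high_s16(coeff), vget_high_s16(quant));
      return vcombine_s16(vshrn_n_s32(lo, 16), vshrn_n_s32(hi, 16));
    }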
|
/external/gemmlowp/internal/ |
D | output_neon.h |
    39  uint8x8_t res_8 = vqmovun_s16(vcombine_s16(res_16, res_16));
    58  vcombine_s16(vqmovn_s32(input.reg[0]), vqmovn_s32(input.reg[1]));
    77  vcombine_s16(vqmovn_s32(input.reg[0]), vqmovn_s32(input.reg[1]));
    79  vcombine_s16(vqmovn_s32(input.reg[2]), vqmovn_s32(input.reg[3]));
   100  res_16[i] = vcombine_s16(vqmovn_s32(input.reg[2 * i]),
   123  int8x8_t res_8 = vqmovn_s16(vcombine_s16(res_16, res_16));
   142  vcombine_s16(vqmovn_s32(input.reg[0]), vqmovn_s32(input.reg[1]));
   161  vcombine_s16(vqmovn_s32(input.reg[0]), vqmovn_s32(input.reg[1]));
   163  vcombine_s16(vqmovn_s32(input.reg[2]), vqmovn_s32(input.reg[3]));
   184  res_16[i] = vcombine_s16(vqmovn_s32(input.reg[2 * i]),
   [all …]
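gemmlowp's output stage narrows its 32-bit accumulators in two saturating steps: vqmovn_s32 down to 16 bits, then vqmovun_s16 (unsigned output) or vqmovn_s16 (signed output) down to 8. A sketch of the unsigned path, assuming four accumulator registers as in the listing:

    #include <arm_neon.h>

    /* Saturate four int32x4_t accumulators (16 values) to one uint8x16_t. */
    static uint8x16_t narrow_to_u8(int32x4_t a0, int32x4_t a1,
                                   int32x4_t a2, int32x4_t a3) {
      const int16x8_t lo = vcombine_s16(vqmovn_s32(a0), vqmovn_s32(a1));
      const int16x8_t hi = vcombine_s16(vqmovn_s32(a2), vqmovn_s32(a3));
      return vcombine_u8(vqmovun_s16(lo), vqmovun_s16(hi));
    }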
|
/external/libvpx/libvpx/vp9/encoder/arm/neon/ |
D | vp9_quantize_neon.c |
    41  vst1q_s16(dqcoeff, vcombine_s16(vmovn_s32(dqcoeff_0), vmovn_s32(dqcoeff_1)));  in calculate_dqcoeff_and_store()
    81  vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));  in vp9_quantize_fp_neon()
   106  vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));  in vp9_quantize_fp_neon()
   192  store_s16q_to_tran_low(dqcoeff_ptr, vcombine_s16(vshrn_n_s32(dqcoeff_0, 1),  in vp9_quantize_fp_32x32_neon()
   242  vcombine_s16(vshrn_n_s32(dqcoeff_0, 1), vshrn_n_s32(dqcoeff_1, 1)));  in vp9_quantize_fp_32x32_neon()
|
/external/webrtc/webrtc/modules/audio_processing/ns/ |
D | nsx_core_neon.c |
   234  tmp16x8_0 = vcombine_s16(tmp16x4_1, tmp16x4_0);  // Keep for several lines.  in WebRtcNsx_NoiseEstimationNeon()
   259  tmp16x8_0 = vcombine_s16(tmp16x4_1, tmp16x4_0);  // keep  in WebRtcNsx_NoiseEstimationNeon()
   392  vst1q_s16(preal, vcombine_s16(result_r_0, result_r_1));  in WebRtcNsx_PrepareSpectrumNeon()
   393  vst1q_s16(pimag, vcombine_s16(result_i_0, result_i_1));  in WebRtcNsx_PrepareSpectrumNeon()
   494  synthesis_buffer_0 = vqaddq_s16(vcombine_s16(tmp16b_0_low, tmp16b_0_high),  in WebRtcNsx_SynthesisUpdateNeon()
   496  synthesis_buffer_1 = vqaddq_s16(vcombine_s16(tmp16b_1_low, tmp16b_1_high),  in WebRtcNsx_SynthesisUpdateNeon()
   592  vst1q_s16(p_start_out, vcombine_s16(result_low, result_high));  in WebRtcNsx_AnalysisUpdateNeon()
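In the synthesis-update matches, the recombined halves are added into the synthesis buffer with vqaddq_s16, so accumulating windowed speech samples saturates instead of wrapping. A minimal sketch (names illustrative):

    #include <arm_neon.h>

    /* Combine two filtered 16-bit halves and saturating-add them into the
     * running synthesis buffer. */
    static int16x8_t accumulate_sketch(int16x4_t lo, int16x4_t hi,
                                       int16x8_t synthesis) {
      return vqaddq_s16(vcombine_s16(lo, hi), synthesis);
    }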
|
/external/libhevc/encoder/arm/ |
D | ihevce_hme_utils_neon.c |
   204  src4_8x16b = vcombine_s16(  in ihevce_get_wt_inp_4x8_neon()
   207  src5_8x16b = vcombine_s16(  in ihevce_get_wt_inp_4x8_neon()
   210  src6_8x16b = vcombine_s16(  in ihevce_get_wt_inp_4x8_neon()
   213  src7_8x16b = vcombine_s16(  in ihevce_get_wt_inp_4x8_neon()
   627  src4_8x16b = vcombine_s16(  in hme_get_wt_inp_ctb_neon()
   630  src5_8x16b = vcombine_s16(  in hme_get_wt_inp_ctb_neon()
   633  src6_8x16b = vcombine_s16(  in hme_get_wt_inp_ctb_neon()
   636  src7_8x16b = vcombine_s16(  in hme_get_wt_inp_ctb_neon()
|
/external/libgav1/libgav1/src/dsp/arm/ |
D | mask_blend_neon.cc |
    52  final_val = vaddq_s16(vcombine_s16(mask_val0, mask_val1),  in GetMask4x2()
    53  vcombine_s16(next_mask_val0, next_mask_val1));  in GetMask4x2()
    56  vpaddlq_u8(vreinterpretq_u8_s16(vcombine_s16(mask_val0, mask_val1))));  in GetMask4x2()
   105  vqrshrun_n_s16(vcombine_s16(vshrn_n_s32(weighted_combo_lo, 6),  in WriteMaskBlendLine4x2()
   235  result = vqrshrun_n_s16(vcombine_s16(vshrn_n_s32(weighted_combo_lo, 6),  in MaskBlend_NEON()
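The write-out matches shift two 32-bit weighted sums down by 6, combine them, and round/saturate to uint8 with vqrshrun_n_s16. The outer shift amount is cut off in the truncated snippets; 4 is assumed below purely for illustration:

    #include <arm_neon.h>

    /* Two-stage narrowing of weighted blend sums down to uint8. */
    static uint8x8_t blend_writeout(int32x4_t weighted_lo,
                                    int32x4_t weighted_hi) {
      const int16x8_t s = vcombine_s16(vshrn_n_s32(weighted_lo, 6),
                                       vshrn_n_s32(weighted_hi, 6));
      return vqrshrun_n_s16(s, 4); /* final shift value is an assumption */
    }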
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | depthwiseconv_uint8_transitional.h |
   3414  vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
   3416  vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
   3490  vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
   3492  vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
   3558  vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
   3560  vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
   3613  vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
   3615  vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
   3713  vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
   3715  vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
   [all …]
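All of these matches are the requantization tail of the depthwise convolution: pairs of 32-bit accumulators saturated to 16 bits and combined. The truncated snippets omit the surrounding steps, so the sketch below reconstructs a plausible shape; the output-offset handling is an assumption, not a quote of the file:

    #include <arm_neon.h>

    /* Saturate four int32x4_t accumulators to 16 bits, add the uint8
     * output zero-point, and narrow to uint8x16_t. */
    static uint8x16_t requantize_sketch(int32x4_t acc0, int32x4_t acc1,
                                        int32x4_t acc2, int32x4_t acc3,
                                        int16x8_t output_offset) {
      int16x8_t lo = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
      int16x8_t hi = vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
      lo = vaddq_s16(lo, output_offset);
      hi = vaddq_s16(hi, output_offset);
      return vcombine_u8(vqmovun_s16(lo), vqmovun_s16(hi));
    }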
|