/external/libvpx/libvpx/vp8/common/arm/neon/ |
D | vp8_loopfilter_neon.c |
    217  q3 = vcombine_u8(d6, d7);    in vp8_loop_filter_horizontal_edge_uv_neon()
    218  q4 = vcombine_u8(d8, d9);    in vp8_loop_filter_horizontal_edge_uv_neon()
    219  q5 = vcombine_u8(d10, d11);  in vp8_loop_filter_horizontal_edge_uv_neon()
    220  q6 = vcombine_u8(d12, d13);  in vp8_loop_filter_horizontal_edge_uv_neon()
    221  q7 = vcombine_u8(d14, d15);  in vp8_loop_filter_horizontal_edge_uv_neon()
    222  q8 = vcombine_u8(d16, d17);  in vp8_loop_filter_horizontal_edge_uv_neon()
    223  q9 = vcombine_u8(d18, d19);  in vp8_loop_filter_horizontal_edge_uv_neon()
    224  q10 = vcombine_u8(d20, d21); in vp8_loop_filter_horizontal_edge_uv_neon()
    368  q3 = vcombine_u8(d6, d7);    in vp8_loop_filter_vertical_edge_y_neon()
    369  q4 = vcombine_u8(d8, d9);    in vp8_loop_filter_vertical_edge_y_neon()
    [all …]
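Every match above is the same idiom: 8-byte halves loaded into 64-bit d-registers are concatenated into 128-bit q-registers (q3..q10 hold the eight rows p3..q3 around the edge; in the UV variant each register carries U in its low half and V in its high half) so the filter arithmetic runs at full width. A minimal sketch of that combine step, with illustrative names rather than libvpx's actual helpers:

    #include <arm_neon.h>

    /* Illustrative: concatenate eight loaded 8-byte rows into four 16-byte
     * vectors, as the q3..q10 assignments above do.  vcombine_u8(lo, hi)
     * places lo in lanes 0-7 of the result and hi in lanes 8-15. */
    static void combine_rows(const uint8x8_t d[8], uint8x16_t q[4]) {
      q[0] = vcombine_u8(d[0], d[1]);
      q[1] = vcombine_u8(d[2], d[3]);
      q[2] = vcombine_u8(d[4], d[5]);
      q[3] = vcombine_u8(d[6], d[7]);
    }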
|
D | mbloopfilter_neon.c |
    250  q3 = vcombine_u8(d6, d7);    in vp8_mbloop_filter_horizontal_edge_uv_neon()
    251  q4 = vcombine_u8(d8, d9);    in vp8_mbloop_filter_horizontal_edge_uv_neon()
    252  q5 = vcombine_u8(d10, d11);  in vp8_mbloop_filter_horizontal_edge_uv_neon()
    253  q6 = vcombine_u8(d12, d13);  in vp8_mbloop_filter_horizontal_edge_uv_neon()
    254  q7 = vcombine_u8(d14, d15);  in vp8_mbloop_filter_horizontal_edge_uv_neon()
    255  q8 = vcombine_u8(d16, d17);  in vp8_mbloop_filter_horizontal_edge_uv_neon()
    256  q9 = vcombine_u8(d18, d19);  in vp8_mbloop_filter_horizontal_edge_uv_neon()
    257  q10 = vcombine_u8(d20, d21); in vp8_mbloop_filter_horizontal_edge_uv_neon()
    339  q3 = vcombine_u8(d6, d7);    in vp8_mbloop_filter_vertical_edge_y_neon()
    340  q4 = vcombine_u8(d8, d9);    in vp8_mbloop_filter_vertical_edge_y_neon()
    [all …]
|
D | bilinearpredict_neon.c |
    68   a01 = vcombine_u8(a0, a1);  in vp8_bilinear_predict4x4_neon()
    69   a23 = vcombine_u8(a2, a3);  in vp8_bilinear_predict4x4_neon()
    99   store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(e0, e1));  in vp8_bilinear_predict4x4_neon()
    117  store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(f0, f1));  in vp8_bilinear_predict4x4_neon()
    441  q1u8 = vcombine_u8(d2u8, d3u8);  in vp8_bilinear_predict16x16_neon()
    442  q2u8 = vcombine_u8(d4u8, d5u8);  in vp8_bilinear_predict16x16_neon()
    443  q3u8 = vcombine_u8(d6u8, d7u8);  in vp8_bilinear_predict16x16_neon()
    444  q4u8 = vcombine_u8(d8u8, d9u8);  in vp8_bilinear_predict16x16_neon()
    520  q7u8 = vcombine_u8(d14u8, d15u8);  in vp8_bilinear_predict16x16_neon()
    521  q8u8 = vcombine_u8(d16u8, d17u8);  in vp8_bilinear_predict16x16_neon()
    [all …]
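Here the combine packs two rows into one q-register so a single widening multiply-accumulate pass filters both. A self-contained sketch of that 2-tap bilinear step, assuming VP8's filter convention (f0 + f1 = 128, rounded shift by 7); all names are illustrative:

    #include <arm_neon.h>

    /* Illustrative 2-tap bilinear filter over two rows at once:
     * out = (p0 * f0 + p1 * f1 + 64) >> 7, computed for 16 lanes. */
    static uint8x16_t bilinear_two_rows(uint8x8_t r0_l, uint8x8_t r0_r,
                                        uint8x8_t r1_l, uint8x8_t r1_r,
                                        uint8_t f0, uint8_t f1) {
      const uint8x16_t left = vcombine_u8(r0_l, r1_l);   /* left taps */
      const uint8x16_t right = vcombine_u8(r0_r, r1_r);  /* right taps */
      uint16x8_t lo = vmull_u8(vget_low_u8(left), vdup_n_u8(f0));
      uint16x8_t hi = vmull_u8(vget_high_u8(left), vdup_n_u8(f0));
      lo = vmlal_u8(lo, vget_low_u8(right), vdup_n_u8(f1));
      hi = vmlal_u8(hi, vget_high_u8(right), vdup_n_u8(f1));
      /* vqrshrn_n_u16(x, 7) computes saturate((x + 64) >> 7). */
      return vcombine_u8(vqrshrn_n_u16(lo, 7), vqrshrn_n_u16(hi, 7));
    }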
|
D | loopfiltersimpleverticaledge_neon.c |
    196  q3u8 = vcombine_u8(d0u8x4.val[0], d1u8x4.val[0]); // d6 d10   in vp8_loop_filter_simple_vertical_edge_neon()
    197  q4u8 = vcombine_u8(d0u8x4.val[2], d1u8x4.val[2]); // d8 d12   in vp8_loop_filter_simple_vertical_edge_neon()
    198  q5u8 = vcombine_u8(d0u8x4.val[1], d1u8x4.val[1]); // d7 d11   in vp8_loop_filter_simple_vertical_edge_neon()
    199  q6u8 = vcombine_u8(d0u8x4.val[3], d1u8x4.val[3]); // d9 d13   in vp8_loop_filter_simple_vertical_edge_neon()
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | avg_neon.c |
    191  const uint8x16_t a01 = vcombine_u8(vld1_u8(a), vld1_u8(a + a_stride));  in vpx_minmax_8x8_neon()
    193  vcombine_u8(vld1_u8(a + 2 * a_stride), vld1_u8(a + 3 * a_stride));  in vpx_minmax_8x8_neon()
    195  vcombine_u8(vld1_u8(a + 4 * a_stride), vld1_u8(a + 5 * a_stride));  in vpx_minmax_8x8_neon()
    197  vcombine_u8(vld1_u8(a + 6 * a_stride), vld1_u8(a + 7 * a_stride));  in vpx_minmax_8x8_neon()
    199  const uint8x16_t b01 = vcombine_u8(vld1_u8(b), vld1_u8(b + b_stride));  in vpx_minmax_8x8_neon()
    201  vcombine_u8(vld1_u8(b + 2 * b_stride), vld1_u8(b + 3 * b_stride));  in vpx_minmax_8x8_neon()
    203  vcombine_u8(vld1_u8(b + 4 * b_stride), vld1_u8(b + 5 * b_stride));  in vpx_minmax_8x8_neon()
    205  vcombine_u8(vld1_u8(b + 6 * b_stride), vld1_u8(b + 7 * b_stride));  in vpx_minmax_8x8_neon()
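Combining adjacent rows lets the whole 8x8 comparison run as four 16-lane steps. A simplified sketch of the pattern (the real vpx_minmax_8x8_neon goes on to reduce the two vectors to scalar min and max):

    #include <arm_neon.h>

    /* Illustrative: per-lane min/max of |a - b| over an 8x8 block,
     * processing two rows per iteration via vcombine_u8. */
    static void minmax_8x8(const uint8_t *a, int a_stride,
                           const uint8_t *b, int b_stride,
                           uint8x16_t *min, uint8x16_t *max) {
      uint8x16_t mn = vdupq_n_u8(255);
      uint8x16_t mx = vdupq_n_u8(0);
      for (int i = 0; i < 8; i += 2) {
        const uint8x16_t va = vcombine_u8(vld1_u8(a + i * a_stride),
                                          vld1_u8(a + (i + 1) * a_stride));
        const uint8x16_t vb = vcombine_u8(vld1_u8(b + i * b_stride),
                                          vld1_u8(b + (i + 1) * b_stride));
        const uint8x16_t diff = vabdq_u8(va, vb);  /* |a - b| per pixel */
        mn = vminq_u8(mn, diff);
        mx = vmaxq_u8(mx, diff);
      }
      *min = mn;
      *max = mx;
    }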
|
D | transpose_neon.h |
    51   b0.val[0] = vcombine_u8(vreinterpret_u8_u32(vget_low_u32(a0)),  in vpx_vtrnq_u64_to_u8()
    53   b0.val[1] = vcombine_u8(vreinterpret_u8_u32(vget_high_u32(a0)),  in vpx_vtrnq_u64_to_u8()
    536  vtrnq_u8(vcombine_u8(*a0, *a4), vcombine_u8(*a1, *a5));  in transpose_u8_8x8()
    538  vtrnq_u8(vcombine_u8(*a2, *a6), vcombine_u8(*a3, *a7));  in transpose_u8_8x8()
    926  const uint8x16_t a0 = vcombine_u8(i0, i8);   in transpose_u8_8x16()
    927  const uint8x16_t a1 = vcombine_u8(i1, i9);   in transpose_u8_8x16()
    928  const uint8x16_t a2 = vcombine_u8(i2, i10);  in transpose_u8_8x16()
    929  const uint8x16_t a3 = vcombine_u8(i3, i11);  in transpose_u8_8x16()
    930  const uint8x16_t a4 = vcombine_u8(i4, i12);  in transpose_u8_8x16()
    931  const uint8x16_t a5 = vcombine_u8(i5, i13);  in transpose_u8_8x16()
    [all …]
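The transpose_u8_8x8 matches (the same code recurs in libgav1's common_neon.h and libaom's transpose_neon.h below) pair row i with row i+4 in one q-register, so the byte-level trn stage covers all eight rows in two vtrnq_u8 instructions. A sketch of just that first stage:

    #include <arm_neon.h>

    /* Illustrative first stage of the 8x8 byte transpose: rows i and i+4
     * share a q-register, so two vtrnq_u8 calls swap all byte pairs.  The
     * full transpose continues with 16- and 32-bit trn steps. */
    static void transpose_stage1(const uint8x8_t r[8],
                                 uint8x16x2_t *b0, uint8x16x2_t *b1) {
      *b0 = vtrnq_u8(vcombine_u8(r[0], r[4]), vcombine_u8(r[1], r[5]));
      *b1 = vtrnq_u8(vcombine_u8(r[2], r[6]), vcombine_u8(r[3], r[7]));
    }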
|
D | vpx_convolve8_neon.c |
    388  vrhaddq_u8(vreinterpretq_u8_u32(d0123), vcombine_u8(d01, d23)));  in vpx_convolve8_avg_horiz_neon()
    473  vrhaddq_u8(vreinterpretq_u8_u32(d0415), vcombine_u8(t0, t1)));  in vpx_convolve8_avg_horiz_neon()
    475  vrhaddq_u8(vreinterpretq_u8_u32(d2637), vcombine_u8(t2, t3)));  in vpx_convolve8_avg_horiz_neon()
    562  d01 = vcombine_u8(vld1_u8(d + 0 * dst_stride),  in vpx_convolve8_avg_horiz_neon()
    564  d23 = vcombine_u8(vld1_u8(d + 2 * dst_stride),  in vpx_convolve8_avg_horiz_neon()
    566  d45 = vcombine_u8(vld1_u8(d + 4 * dst_stride),  in vpx_convolve8_avg_horiz_neon()
    568  d67 = vcombine_u8(vld1_u8(d + 6 * dst_stride),  in vpx_convolve8_avg_horiz_neon()
    570  d01 = vrhaddq_u8(d01, vcombine_u8(t0, t1));  in vpx_convolve8_avg_horiz_neon()
    571  d23 = vrhaddq_u8(d23, vcombine_u8(t2, t3));  in vpx_convolve8_avg_horiz_neon()
    572  d45 = vrhaddq_u8(d45, vcombine_u8(t4, t5));  in vpx_convolve8_avg_horiz_neon()
    [all …]
|
D | vpx_convolve_avg_neon.c |
    58  s01 = vcombine_u8(s0, s1);  in vpx_convolve_avg_neon()
    59  d01 = vcombine_u8(d0, d1);  in vpx_convolve_avg_neon()
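This is the smallest form of the averaging idiom also visible in vpx_convolve8_avg_horiz_neon above (and in AverageBlendLargeRow further down): combine two 8-pixel rows, average against the destination with one rounding halving add, and split the result back out. A sketch with illustrative names:

    #include <arm_neon.h>

    /* Illustrative: average two 8-pixel source rows into the destination.
     * vrhaddq_u8 computes (s + d + 1) >> 1 per lane. */
    static void avg_two_rows(const uint8_t *src, int src_stride,
                             uint8_t *dst, int dst_stride) {
      const uint8x16_t s01 =
          vcombine_u8(vld1_u8(src), vld1_u8(src + src_stride));
      const uint8x16_t d01 =
          vcombine_u8(vld1_u8(dst), vld1_u8(dst + dst_stride));
      const uint8x16_t avg = vrhaddq_u8(s01, d01);
      vst1_u8(dst, vget_low_u8(avg));
      vst1_u8(dst + dst_stride, vget_high_u8(avg));
    }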
|
D | intrapred_neon.c |
    95    const uint8x16_t above_and_left = vcombine_u8(above_u8, left_u8);  in vpx_dc_predictor_8x8_neon()
    420   const uint8x16_t L76543210XA0123456 = vcombine_u8(L76543210, XA0123456);  in vpx_d135_predictor_8x8_neon()
    421   const uint8x16_t L6543210XA01234567 = vcombine_u8(L6543210X, A01234567);  in vpx_d135_predictor_8x8_neon()
    422   const uint8x16_t L543210XA01234567_ = vcombine_u8(L543210XA0, A1234567_);  in vpx_d135_predictor_8x8_neon()
    483   const uint8x16_t Lfedcba9876543210 = vcombine_u8(Lfedcba98, L76543210);  in vpx_d135_predictor_16x16_neon()
    499   const uint8x16_t r_7 = vcombine_u8(vget_high_u8(row_0), vget_low_u8(row_1));  in vpx_d135_predictor_16x16_neon()
    535   const uint8x16_t LLfedcba9876543210 = vcombine_u8(LLfedcba98, LL76543210);  in vpx_d135_predictor_32x32_neon()
    536   const uint8x16_t LUfedcba9876543210 = vcombine_u8(LUfedcba98, LU76543210);  in vpx_d135_predictor_32x32_neon()
    1003  vst1q_u8(*dst, vcombine_u8(d0, d1));  in tm_32_kernel()
    1005  vst1q_u8(*dst, vcombine_u8(d2, d3));  in tm_32_kernel()
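In the DC predictor, combining the 8 above pixels with the 8 left pixels lets one widening pairwise-add chain produce their sum in a few instructions. A sketch of that reduction (written AArch32-compatible; on AArch64 a single vaddlvq_u8 would replace the vpaddl chain):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Illustrative 8x8 DC value: sum the 16 border pixels, round, divide
     * by 16, and splat the result for use as the predicted row. */
    static uint8x8_t dc_8x8(const uint8_t *above, const uint8_t *left) {
      const uint8x16_t border = vcombine_u8(vld1_u8(above), vld1_u8(left));
      const uint16x8_t s16 = vpaddlq_u8(border);  /* widening pairwise adds */
      const uint32x4_t s32 = vpaddlq_u16(s16);
      const uint64x2_t s64 = vpaddlq_u32(s32);
      const uint32_t sum =
          (uint32_t)(vgetq_lane_u64(s64, 0) + vgetq_lane_u64(s64, 1));
      return vdup_n_u8((uint8_t)((sum + 8) >> 4));
    }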
|
D | avg_pred_neon.c | 39 r = vcombine_u8(r_0, r_1); in vpx_comp_avg_pred_neon()
|
/external/libhevc/encoder/arm/ |
D | ihevce_itrans_recon_neon.c |
    120  a1 = vcombine_u8(a0, a0);  in ihevce_itrans_recon_dc_8x8_luma_neon()
    125  a4 = vcombine_u8(a2, a3);  in ihevce_itrans_recon_dc_8x8_luma_neon()
    148  a0 = vcombine_u8(a1, a1);  in ihevce_itrans_recon_dc_8x8_chroma_neon()
    153  a4 = vcombine_u8(a2, a3);  in ihevce_itrans_recon_dc_8x8_chroma_neon()
    157  a5 = vcombine_u8(a2, a3);  in ihevce_itrans_recon_dc_8x8_chroma_neon()
    178  a1 = vcombine_u8(a0, a0);  in ihevce_itrans_recon_dc_16x16_luma_neon()
    203  a1 = vcombine_u8(a0, a0);  in ihevce_itrans_recon_dc_16x16_chroma_neon()
|
/external/libvpx/libvpx/vp8/encoder/arm/neon/ |
D | denoising_neon.c |
    288  const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi);  in vp8_denoiser_filter_uv_neon()
    293  vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi);  in vp8_denoiser_filter_uv_neon()
    388  const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi);  in vp8_denoiser_filter_uv_neon()
    393  vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi);  in vp8_denoiser_filter_uv_neon()
    409  vcombine_u8(v_running_avg_lo, v_running_avg_hi);  in vp8_denoiser_filter_uv_neon()
|
/external/skqp/src/opts/ |
D | Sk4px_NEON.h |
    22  return Sk16b(vcombine_u8(vaddhn_u16(this->fLo.fVec, o.fLo.fVec),  in addNarrowHi()
    28  return Sk16b(vcombine_u8(vraddhn_u16(this->fLo.fVec, vrshrq_n_u16(this->fLo.fVec, 8)),  in div255()
|
/external/skia/src/opts/ |
D | Sk4px_NEON.h |
    22  return Sk16b(vcombine_u8(vaddhn_u16(this->fLo.fVec, o.fLo.fVec),  in addNarrowHi()
    28  return Sk16b(vcombine_u8(vraddhn_u16(this->fLo.fVec, vrshrq_n_u16(this->fLo.fVec, 8)),  in div255()
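The div255() match (identical in skqp and skia) is the classic exact divide-by-255: for a 16-bit product x, round(x / 255) equals (x + ((x + 128) >> 8) + 128) >> 8, which NEON expresses as one rounding shift plus one rounding add-narrow-high. A standalone sketch:

    #include <arm_neon.h>

    /* Exact rounded x/255 for 16-bit lanes, as in div255() above:
     *   vrshrq_n_u16(x, 8) = (x + 128) >> 8
     *   vraddhn_u16(a, b)  = (a + b + 128) >> 8, narrowed to 8 bits
     * so the pair computes (x + ((x + 128) >> 8) + 128) >> 8. */
    static uint8x16_t div255(uint16x8_t lo, uint16x8_t hi) {
      return vcombine_u8(vraddhn_u16(lo, vrshrq_n_u16(lo, 8)),
                         vraddhn_u16(hi, vrshrq_n_u16(hi, 8)));
    }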
|
/external/libgav1/libgav1/src/dsp/arm/ |
D | loop_filter_neon.cc |
    970   const uint8x16_t index_qp7toqp0 = vcombine_u8(index_qp3toqp0, index_qp7toqp4);  in Vertical14_NEON()
    990   const uint8x16_t input_0 = vcombine_u8(x0_qp3qp0, x0_qp7qp4);  in Vertical14_NEON()
    991   const uint8x16_t input_1 = vcombine_u8(x1_qp3qp0, x1_qp7qp4);  in Vertical14_NEON()
    992   const uint8x16_t input_2 = vcombine_u8(x2_qp3qp0, x2_qp7qp4);  in Vertical14_NEON()
    993   const uint8x16_t input_3 = vcombine_u8(x3_qp3qp0, x3_qp7qp4);  in Vertical14_NEON()
    1102  const uint8x16_t p0q0_p4q4 = vcombine_u8(p0q0_output, p4q4_output);  in Vertical14_NEON()
    1103  const uint8x16_t p2q2_p6q6 = vcombine_u8(p2q2_output, p6q6);  in Vertical14_NEON()
    1104  const uint8x16_t p1q1_p5q5 = vcombine_u8(p1q1_output, p5q5_output);  in Vertical14_NEON()
    1105  const uint8x16_t p3q3_p7q7 = vcombine_u8(p3q3_output, p7q7);  in Vertical14_NEON()
    1119  const uint8x16_t index_p7toq7 = vcombine_u8(index_p7top0, index_q7toq0);  in Vertical14_NEON()
    [all …]
|
D | intra_edge_neon.cc |
    79   vcombine_u8(vrshrn_n_u16(sum_lo, 4), vrshrn_n_u16(sum_hi, 4));  in IntraEdgeFilter_NEON()
    104  vcombine_u8(vrshrn_n_u16(sum_lo, 4), vrshrn_n_u16(sum_hi, 4));  in IntraEdgeFilter_NEON()
    164  vcombine_u8(vrshrn_n_u16(sum_lo, 4), vrshrn_n_u16(sum_hi, 4));  in IntraEdgeFilter_NEON()
    195  vcombine_u8(vrshrn_n_u16(sum_lo, 4), vrshrn_n_u16(sum_hi, 4));  in IntraEdgeFilter_NEON()
|
D | common_neon.h |
    566  vtrnq_u8(vcombine_u8(a[0], a[4]), vcombine_u8(a[1], a[5]));  in Transpose8x8()
    568  vtrnq_u8(vcombine_u8(a[2], a[6]), vcombine_u8(a[3], a[7]));  in Transpose8x8()
|
D | average_blend_neon.cc | 59 vst1q_u8(dest + x, vcombine_u8(res_out0, res_out1)); in AverageBlendLargeRow()
|
/external/webp/src/dsp/ |
D | lossless_enc_neon.c |
    41  return vcombine_u8(vtbl1q_u8(argb, vget_low_u8(shuffle)),  in DoGreenShuffle_NEON()
    50  return vcombine_u8(vtbl1_u8(vget_low_u8(argb), shuffle),  in DoGreenShuffle_NEON()
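The second match is the 32-bit NEON path: with no 16-byte table lookup available there, the shuffle is applied to each 8-byte half and the halves recombined. A sketch of that path under two assumptions worth flagging: that out-of-range vtbl indices (255 here) zero the output byte, and that the shuffle constant broadcasts each pixel's green byte into its red and blue positions so a later subtraction implements the subtract-green transform:

    #include <arm_neon.h>

    /* Illustrative half-at-a-time shuffle.  With the pattern below, each
     * 4-byte pixel becomes {g, 0, g, 0}: green lands on the red and blue
     * byte positions, and index 255 (out of range for vtbl1) yields 0. */
    static uint8x16_t shuffle_green(uint8x16_t argb) {
      static const uint8_t kPat[8] = { 1, 255, 1, 255, 5, 255, 5, 255 };
      const uint8x8_t shuffle = vld1_u8(kPat);
      return vcombine_u8(vtbl1_u8(vget_low_u8(argb), shuffle),
                         vtbl1_u8(vget_high_u8(argb), shuffle));
    }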
|
D | cost_neon.c |
    29  const uint8x16_t eob = vcombine_u8(vqmovn_u16(eob_0), vqmovn_u16(eob_1));  in SetResidualCoeffs_NEON()
    75  const uint8x16_t F = vcombine_u8(vqmovn_u16(E0), vqmovn_u16(E1));  in GetResidualCost_NEON()
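Both matches narrow a pair of 16-bit lane masks into one 16-lane byte mask; vqmovn_u16 saturates 0xffff to 0xff, so the all-ones/all-zeros structure of a compare result survives the narrowing. A sketch of the idea with an assumed coefficient layout:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Illustrative: build a 16-lane byte mask that is 0xff wherever the
     * corresponding 16-bit coefficient is zero. */
    static uint8x16_t zero_mask(const int16_t coeffs[16]) {
      const int16x8_t c0 = vld1q_s16(coeffs + 0);
      const int16x8_t c1 = vld1q_s16(coeffs + 8);
      const uint16x8_t m0 = vceqq_s16(c0, vdupq_n_s16(0));  /* 0xffff if 0 */
      const uint16x8_t m1 = vceqq_s16(c1, vdupq_n_s16(0));
      return vcombine_u8(vqmovn_u16(m0), vqmovn_u16(m1));
    }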
|
D | dec_neon.c |
    55   *p1 = vcombine_u8(row0.val[0], row8.val[0]);  in Load4x16_NEON()
    56   *p0 = vcombine_u8(row0.val[1], row8.val[1]);  in Load4x16_NEON()
    57   *q0 = vcombine_u8(row0.val[2], row8.val[2]);  in Load4x16_NEON()
    58   *q1 = vcombine_u8(row0.val[3], row8.val[3]);  in Load4x16_NEON()
    149  *p3 = vcombine_u8(vld1_u8(u - 4 * stride), vld1_u8(v - 4 * stride));  in Load8x8x2_NEON()
    150  *p2 = vcombine_u8(vld1_u8(u - 3 * stride), vld1_u8(v - 3 * stride));  in Load8x8x2_NEON()
    151  *p1 = vcombine_u8(vld1_u8(u - 2 * stride), vld1_u8(v - 2 * stride));  in Load8x8x2_NEON()
    152  *p0 = vcombine_u8(vld1_u8(u - 1 * stride), vld1_u8(v - 1 * stride));  in Load8x8x2_NEON()
    153  *q0 = vcombine_u8(vld1_u8(u + 0 * stride), vld1_u8(v + 0 * stride));  in Load8x8x2_NEON()
    154  *q1 = vcombine_u8(vld1_u8(u + 1 * stride), vld1_u8(v + 1 * stride));  in Load8x8x2_NEON()
    [all …]
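Load8x8x2 makes the chroma trick explicit: each vector keeps 8 U pixels in its low half and the matching 8 V pixels in its high half, so a single 16-lane filter pass covers both planes. A reduced sketch of that load (only three of the rows, with illustrative names):

    #include <arm_neon.h>

    /* Illustrative: load the rows around a horizontal chroma edge with
     * U in lanes 0-7 and V in lanes 8-15 of each vector. */
    static void load_uv_rows(const uint8_t *u, const uint8_t *v, int stride,
                             uint8x16_t *p1, uint8x16_t *p0, uint8x16_t *q0) {
      *p1 = vcombine_u8(vld1_u8(u - 2 * stride), vld1_u8(v - 2 * stride));
      *p0 = vcombine_u8(vld1_u8(u - 1 * stride), vld1_u8(v - 1 * stride));
      *q0 = vcombine_u8(vld1_u8(u + 0 * stride), vld1_u8(v + 0 * stride));
    }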
|
D | upsampling_neon.c |
    124  vst1q_u8(out, vcombine_u8(rgba4444.val[0], rgba4444.val[1])); \
    132  vst1q_u8(out, vcombine_u8(rgb565.val[0], rgb565.val[1])); \
|
/external/libaom/libaom/aom_dsp/arm/ |
D | sse_neon.c |
    69  q2 = vcombine_u8(tmp.val[0], tmp2.val[0]); // make a 16 data vector  in aom_sse_neon()
    81  q3 = vcombine_u8(tmp.val[0], tmp2.val[0]);  in aom_sse_neon()
    90  q2 = vcombine_u8(d0, d1); // make a 16 data vector  in aom_sse_neon()
    94  q3 = vcombine_u8(d0, d1);  in aom_sse_neon()
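Here the combine widens an 8-pixel stripe to 16 lanes so the sum of squared errors needs only two widening multiplies per pair of rows. A sketch of one accumulation step (illustrative names; the real aom_sse_neon handles several block widths):

    #include <arm_neon.h>

    /* Illustrative: accumulate squared pixel differences for two 8-pixel
     * rows into a running 32-bit accumulator. */
    static uint32x4_t sse_two_rows(const uint8_t *a, int a_stride,
                                   const uint8_t *b, int b_stride,
                                   uint32x4_t acc) {
      const uint8x16_t va = vcombine_u8(vld1_u8(a), vld1_u8(a + a_stride));
      const uint8x16_t vb = vcombine_u8(vld1_u8(b), vld1_u8(b + b_stride));
      const uint8x16_t d = vabdq_u8(va, vb);  /* |a - b|, fits in 16 bits squared */
      const uint16x8_t sq_lo = vmull_u8(vget_low_u8(d), vget_low_u8(d));
      const uint16x8_t sq_hi = vmull_u8(vget_high_u8(d), vget_high_u8(d));
      acc = vpadalq_u16(acc, sq_lo);  /* widening pairwise accumulate */
      return vpadalq_u16(acc, sq_hi);
    }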
|
/external/libaom/libaom/av1/common/arm/ |
D | transpose_neon.h |
    35  vtrnq_u8(vcombine_u8(*a0, *a4), vcombine_u8(*a1, *a5));  in transpose_u8_8x8()
    37  vtrnq_u8(vcombine_u8(*a2, *a6), vcombine_u8(*a3, *a7));  in transpose_u8_8x8()
|
/external/clang/test/CodeGen/ |
D | aarch64-neon-vcombine.c | 39 return vcombine_u8(low, high); in test_vcombine_u8()
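The CodeGen test pins down the intrinsic's one job: lanes 0-7 of the result come from low and lanes 8-15 from high. A runnable check of exactly that semantic:

    #include <arm_neon.h>
    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      const uint8_t lo[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
      const uint8_t hi[8] = { 8, 9, 10, 11, 12, 13, 14, 15 };
      const uint8x16_t v = vcombine_u8(vld1_u8(lo), vld1_u8(hi));
      uint8_t out[16];
      vst1q_u8(out, v);
      for (int i = 0; i < 16; ++i) assert(out[i] == (uint8_t)i);
      return 0;
    }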
|