/external/libvpx/libvpx/vp8/common/arm/neon/ |
D | sixtappredict_neon.c |
    111  b0 = vext_u8(a0, a1, 4);  in yonly4x4()
    112  b2 = vext_u8(a2, a3, 4);  in yonly4x4()
    113  b4 = vext_u8(a4, a5, 4);  in yonly4x4()
    114  b6 = vext_u8(a6, a7, 4);  in yonly4x4()
    119  b1 = vext_u8(b0, b2, 4);  in yonly4x4()
    120  b3 = vext_u8(b2, b4, 4);  in yonly4x4()
    121  b5 = vext_u8(b4, b6, 4);  in yonly4x4()
    122  b7 = vext_u8(b6, b8, 4);  in yonly4x4()
    216  s0_f5 = vext_u8(vget_low_u8(s0), vget_high_u8(s0), 5);  in vp8_sixtap_predict4x4_neon()
    217  s1_f5 = vext_u8(vget_low_u8(s1), vget_high_u8(s1), 5);  in vp8_sixtap_predict4x4_neon()
    [all …]
|
D | bilinearpredict_neon.c |
     46  e0 = vext_u8(a0, a1, 4);  in vp8_bilinear_predict4x4_neon()
     47  e1 = vext_u8(a2, a3, 4);  in vp8_bilinear_predict4x4_neon()
    108  const uint8x8_t a0 = vext_u8(e0, e1, 4);  in vp8_bilinear_predict4x4_neon()
    109  const uint8x8_t a1 = vext_u8(e1, e2, 4);  in vp8_bilinear_predict4x4_neon()
    161  d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1);  in vp8_bilinear_predict8x4_neon()
    162  d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1);  in vp8_bilinear_predict8x4_neon()
    163  d7u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);  in vp8_bilinear_predict8x4_neon()
    164  d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);  in vp8_bilinear_predict8x4_neon()
    165  d11u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);  in vp8_bilinear_predict8x4_neon()
    265  d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1);  in vp8_bilinear_predict8x8_neon()
    [all …]
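Note: the 4x4 predictors above pack two 4-pixel rows into one 8-lane d-register and use vext_u8 to re-pair adjacent rows without reloading; the vget_low_u8/vget_high_u8 variants instead pull an N-byte-shifted window out of a 16-byte q-register. A minimal sketch of the row re-pairing idiom (illustrative names, not libvpx's code):

    #include <arm_neon.h>
    /* a01 holds row0 in lanes 0-3 and row1 in lanes 4-7; a23 holds rows 2-3.
     * vext_u8(a01, a23, 4) yields row1 in lanes 0-3 and row2 in lanes 4-7. */
    static inline uint8x8_t pair_rows_1_2(uint8x8_t a01, uint8x8_t a23) {
      return vext_u8(a01, a23, 4);
    }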
|
/external/libpng/arm/ |
D | filter_neon_intrinsics.c |
     92  vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3);  in png_read_filter_row_sub3_neon()
     94  vtmp2 = vext_u8(vrp.val[0], vrp.val[1], 6);  in png_read_filter_row_sub3_neon()
     97  vtmp1 = vext_u8(vrp.val[1], vrp.val[1], 1);  in png_read_filter_row_sub3_neon()
    183  vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3);  in png_read_filter_row_avg3_neon()
    187  vtmp2 = vext_u8(vpp.val[0], vpp.val[1], 3);  in png_read_filter_row_avg3_neon()
    188  vtmp3 = vext_u8(vrp.val[0], vrp.val[1], 6);  in png_read_filter_row_avg3_neon()
    192  vtmp2 = vext_u8(vpp.val[0], vpp.val[1], 6);  in png_read_filter_row_avg3_neon()
    193  vtmp1 = vext_u8(vrp.val[1], vrp.val[1], 1);  in png_read_filter_row_avg3_neon()
    202  vtmp2 = vext_u8(vpp.val[1], vpp.val[1], 1);  in png_read_filter_row_avg3_neon()
    322  vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3);  in png_read_filter_row_paeth3_neon()
    [all …]
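Note: for 3-byte RGB pixels, the PNG row filters step one pixel at a time, so the extract offsets are 3 and 6 bytes rather than powers of two. A hedged sketch of the windowing only (assumed buffer layout; not libpng's actual loads or filter math):

    #include <arm_neon.h>
    /* Given 16 row bytes in two d-registers, select the 8-byte windows
     * starting at the 2nd and 3rd RGB pixel. */
    void rgb_pixel_windows(const uint8_t *row, uint8_t out[16]) {
      const uint8x8_t lo = vld1_u8(row);      /* bytes 0-7  */
      const uint8x8_t hi = vld1_u8(row + 8);  /* bytes 8-15 */
      vst1_u8(out,     vext_u8(lo, hi, 3));   /* pixel 1 onward */
      vst1_u8(out + 8, vext_u8(lo, hi, 6));   /* pixel 2 onward */
    }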
|
/external/pdfium/third_party/libpng16/arm/ |
D | filter_neon_intrinsics.c |
     92  vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3);  in png_read_filter_row_sub3_neon()
     94  vtmp2 = vext_u8(vrp.val[0], vrp.val[1], 6);  in png_read_filter_row_sub3_neon()
     97  vtmp1 = vext_u8(vrp.val[1], vrp.val[1], 1);  in png_read_filter_row_sub3_neon()
    183  vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3);  in png_read_filter_row_avg3_neon()
    187  vtmp2 = vext_u8(vpp.val[0], vpp.val[1], 3);  in png_read_filter_row_avg3_neon()
    188  vtmp3 = vext_u8(vrp.val[0], vrp.val[1], 6);  in png_read_filter_row_avg3_neon()
    192  vtmp2 = vext_u8(vpp.val[0], vpp.val[1], 6);  in png_read_filter_row_avg3_neon()
    193  vtmp1 = vext_u8(vrp.val[1], vrp.val[1], 1);  in png_read_filter_row_avg3_neon()
    202  vtmp2 = vext_u8(vpp.val[1], vpp.val[1], 1);  in png_read_filter_row_avg3_neon()
    322  vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3);  in png_read_filter_row_paeth3_neon()
    [all …]
|
/external/libhevc/common/arm/ |
D | ihevc_intra_pred_filters_neon_intr.c |
    788  … pu1_ref_two_nt_1_row_dup = vext_u8(pu1_ref_two_nt_1_row_dup, pu1_ref_two_nt_1_row_dup1, 4);  in ihevc_intra_pred_luma_planar_neonintr()
    794  const_nt_1_row_dup = vext_u8(const_nt_1_row_dup, const_nt_1_row_dup1, 4);  in ihevc_intra_pred_luma_planar_neonintr()
    800  const_row_1_dup = vext_u8(const_row_1_dup, const_row_1_dup1, 4);  in ihevc_intra_pred_luma_planar_neonintr()
    819  const_nt_1_col_t = vext_u8(const_nt_1_col_t, const_nt_1_col_t1, 4);  in ihevc_intra_pred_luma_planar_neonintr()
    826  const_col_1_t = vext_u8(const_col_1_t1, const_col_1_t, 4);  in ihevc_intra_pred_luma_planar_neonintr()
    831  pu1_ref_two_nt_1_t = vext_u8(pu1_ref_two_nt_1_t1, pu1_ref_two_nt_1_t, 4);  in ihevc_intra_pred_luma_planar_neonintr()
   1012  sto_res_tmp1 = vext_u8(sto_res_tmp, dc_val_t, 7);  in ihevc_intra_pred_luma_dc_neonintr()
   1015  sto_res_tmp2 = vext_u8(sto_res_tmp2, dc_val_t, 7);  in ihevc_intra_pred_luma_dc_neonintr()
   1020  sto_res_tmp3 = vext_u8(sto_res_tmp3, dc_val_t, 7);  in ihevc_intra_pred_luma_dc_neonintr()
   1025  sto_res_tmp4 = vext_u8(sto_res_tmp4, dc_val_t, 7);  in ihevc_intra_pred_luma_dc_neonintr()
    [all …]
|
/external/libgav1/libgav1/src/dsp/arm/ |
D | cdef_neon.cc |
     93  *partial_lo = vaddl_u8(v_src[0], vext_u8(v_zero, v_src[1], 7));  in AddPartial_D0_D4()
     96  *partial_lo = vaddw_u8(*partial_lo, vext_u8(v_zero, v_src[2], 6));  in AddPartial_D0_D4()
    100  vaddl_u8(vext_u8(v_src[1], v_zero, 7), vext_u8(v_src[2], v_zero, 6));  in AddPartial_D0_D4()
    103  *partial_lo = vaddw_u8(*partial_lo, vext_u8(v_zero, v_src[3], 5));  in AddPartial_D0_D4()
    105  *partial_hi = vaddw_u8(*partial_hi, vext_u8(v_src[3], v_zero, 5));  in AddPartial_D0_D4()
    108  *partial_lo = vaddw_u8(*partial_lo, vext_u8(v_zero, v_src[4], 4));  in AddPartial_D0_D4()
    110  *partial_hi = vaddw_u8(*partial_hi, vext_u8(v_src[4], v_zero, 4));  in AddPartial_D0_D4()
    113  *partial_lo = vaddw_u8(*partial_lo, vext_u8(v_zero, v_src[5], 3));  in AddPartial_D0_D4()
    115  *partial_hi = vaddw_u8(*partial_hi, vext_u8(v_src[5], v_zero, 3));  in AddPartial_D0_D4()
    118  *partial_lo = vaddw_u8(*partial_lo, vext_u8(v_zero, v_src[6], 2));  in AddPartial_D0_D4()
    [all …]
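Note: pairing vext_u8 with a zero vector gives byte shifts with zero fill, which the CDEF code uses to align rows along a diagonal before the widening adds. A minimal sketch of the two directions:

    #include <arm_neon.h>
    /* {0, v0, ..., v6}: shift lanes up by one, zero-filling lane 0. */
    static inline uint8x8_t shift_lanes_up1(uint8x8_t v) {
      return vext_u8(vdup_n_u8(0), v, 7);
    }
    /* {v7, 0, ..., 0}: keep only the top lane, moved down to lane 0. */
    static inline uint8x8_t keep_top_lane(uint8x8_t v) {
      return vext_u8(v, vdup_n_u8(0), 7);
    }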
|
D | convolve_neon.cc |
    274  sum = vmlal_u8(sum, vext_u8(input.val[0], input.val[1], 2), v_tap[4]);  in FilterHorizontalWidth2()
    280  sum = vmlsl_u8(sum, vext_u8(input.val[0], input.val[1], 6), v_tap[5]);  in FilterHorizontalWidth2()
    286  sum = vmlal_u8(sum, vext_u8(input.val[0], input.val[1], 6), v_tap[5]);  in FilterHorizontalWidth2()
   1838  srcs[1] = vext_u8(srcs[0], srcs[2], 4);  in FilterVertical4xH()
   1868  srcs[1] = vext_u8(srcs[0], srcs[2], 4);  in FilterVertical4xH()
   1876  srcs[3] = vext_u8(srcs[2], srcs[4], 4);  in FilterVertical4xH()
   1908  srcs[1] = vext_u8(srcs[0], srcs[2], 4);  in FilterVertical4xH()
   1913  srcs[3] = vext_u8(srcs[2], srcs[4], 4);  in FilterVertical4xH()
   1921  srcs[5] = vext_u8(srcs[4], srcs[6], 4);  in FilterVertical4xH()
   1955  srcs[1] = vext_u8(srcs[0], srcs[2], 4);  in FilterVertical4xH()
    [all …]
|
D | loop_restoration_neon.cc |
     40  return vext_u8(src.val[0], src.val[1], bytes);  in VshrU128()
     45  return vext_u8(src[0], src[1], bytes);  in VshrU128()
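Note: VshrU128 treats a pair of d-registers as one 128-bit value and shifts it right by a byte count; vext_u8 is exactly that operation on the low half. A sketch of the idea in macro form, since the byte count must be a compile-time constant (libgav1 uses a template parameter for the same reason):

    #include <arm_neon.h>
    /* Low 8 bytes of ((hi:lo) >> (8 * BYTES)), BYTES in [0, 7]. */
    #define VSHR_LOW_U128(lo, hi, BYTES) vext_u8((lo), (hi), (BYTES))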
|
/external/libaom/libaom/av1/common/arm/ |
D | wiener_convolve_neon.c |
    214  t1 = vext_u8(temp_0, t7, 1); // a1 a2 a3 a4 a5 a6 a7 a8  in av1_wiener_convolve_add_src_neon()
    215  t2 = vext_u8(temp_0, t7, 2); // a2 a3 a4 a5 a6 a7 a8 a9  in av1_wiener_convolve_add_src_neon()
    216  t3 = vext_u8(temp_0, t7, 3); // a3 a4 a5 a6 a7 a8 a9 a10  in av1_wiener_convolve_add_src_neon()
    217  t4 = vext_u8(temp_0, t7, 4); // a4 a5 a6 a7 a8 a9 a10 a11  in av1_wiener_convolve_add_src_neon()
    218  t5 = vext_u8(temp_0, t7, 5); // a5 a6 a7 a8 a9 a10 a11 a12  in av1_wiener_convolve_add_src_neon()
    219  t6 = vext_u8(temp_0, t7, 6); // a6 a7 a8 a9 a10 a11 a12 a13  in av1_wiener_convolve_add_src_neon()
    220  t7 = vext_u8(temp_0, t7, 7); // a7 a8 a9 a10 a11 a12 a13 a14  in av1_wiener_convolve_add_src_neon()
    363  t1 = vext_u8(temp_0, t7, 1); // a1 a2 a3 a4 a5 a6 a7 a8  in av1_wiener_convolve_add_src_neon()
    364  t2 = vext_u8(temp_0, t7, 2); // a2 a3 a4 a5 a6 a7 a8 a9  in av1_wiener_convolve_add_src_neon()
    365  t3 = vext_u8(temp_0, t7, 3); // a3 a4 a5 a6 a7 a8 a9 a10  in av1_wiener_convolve_add_src_neon()
    [all …]
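Note: the a1..a14 comments above show the classic horizontal-filter idiom: build each shifted source window once with vext_u8, then accumulate one widening multiply per tap. A reduced 3-tap sketch (hypothetical helper; the av1 function uses more taps and extra scaling):

    #include <arm_neon.h>
    /* s0 = a0..a7, s8 = a8..a15; returns tap[0]*a[i] + tap[1]*a[i+1]
     * + tap[2]*a[i+2] for i = 0..7, widened to 16 bits. */
    uint16x8_t fir3_row(uint8x8_t s0, uint8x8_t s8, const uint8_t tap[3]) {
      const uint8x8_t w1 = vext_u8(s0, s8, 1);  /* a1..a8 */
      const uint8x8_t w2 = vext_u8(s0, s8, 2);  /* a2..a9 */
      uint16x8_t sum = vmull_u8(s0, vdup_n_u8(tap[0]));
      sum = vmlal_u8(sum, w1, vdup_n_u8(tap[1]));
      sum = vmlal_u8(sum, w2, vdup_n_u8(tap[2]));
      return sum;
    }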
|
/external/libhevc/encoder/arm/ |
D | ihevce_scale_by_2_neon.c |
     84  c = vext_u8(vget_low_u8(src.val[1]), vget_high_u8(src.val[1]), 1);  in ihevce_horz_scale_neon_w16()
     85  l0 = vext_u8(vget_low_u8(src.val[0]), vget_high_u8(src.val[0]), 1);  in ihevce_horz_scale_neon_w16()
     86  r0 = vext_u8(vget_low_u8(src.val[0]), vget_high_u8(src.val[0]), 2);  in ihevce_horz_scale_neon_w16()
     87  r3 = vext_u8(vget_low_u8(src.val[0]), vget_high_u8(src.val[0]), 3);  in ihevce_horz_scale_neon_w16()
|
/external/XNNPACK/src/u8-clamp/ |
D | neon-x64.c |
     56  vout = vext_u8(vout, vout, 4);  in xnn_u8_clamp_ukernel__neon_x64()
     60  vout = vext_u8(vout, vout, 2);  in xnn_u8_clamp_ukernel__neon_x64()
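Note: the clamp kernel's tail handling stores ever-narrower prefixes of vout, rotating the remaining lanes down with vext_u8 so each store reads from lane 0. A sketch of the idiom for n < 8 remaining bytes (hypothetical helper; XNNPACK writes this inline):

    #include <arm_neon.h>
    #include <stddef.h>
    void store_tail(uint8_t *out, uint8x8_t vout, size_t n) {  /* n < 8 */
      if (n & 4) {
        vst1_lane_u32((uint32_t *)out, vreinterpret_u32_u8(vout), 0);
        out += 4;
        vout = vext_u8(vout, vout, 4);  /* lanes 4-7 -> lanes 0-3 */
      }
      if (n & 2) {
        vst1_lane_u16((uint16_t *)out, vreinterpret_u16_u8(vout), 0);
        out += 2;
        vout = vext_u8(vout, vout, 2);  /* lanes 2-3 -> lanes 0-1 */
      }
      if (n & 1) {
        vst1_lane_u8(out, vout, 0);
      }
    }

The same idiom recurs in the XNNPACK maxpool, gavgpool, vadd, avgpool, and dwconv kernels below.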
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | intrapred_neon.c |
    287  *row = vext_u8(*row, above_right, 1);  in d45_store_8()
    296  const uint8x8_t A1 = vext_u8(A0, above_right, 1);  in vpx_d45_predictor_8x8_neon()
    297  const uint8x8_t A2 = vext_u8(A0, above_right, 2);  in vpx_d45_predictor_8x8_neon()
    391  const uint8x8_t L3210XA012 = vext_u8(L3210, XA0123, 4);  in vpx_d135_predictor_4x4_neon()
    392  const uint8x8_t L210XA0123 = vext_u8(L3210, XA0123, 5);  in vpx_d135_predictor_4x4_neon()
    418  const uint8x8_t L6543210X = vext_u8(L76543210, XA0123456, 1);  in vpx_d135_predictor_8x8_neon()
    419  const uint8x8_t L543210XA0 = vext_u8(L76543210, XA0123456, 2);  in vpx_d135_predictor_8x8_neon()
    427  const uint8x8_t r0 = vext_u8(row_0, row_1, 7);  in vpx_d135_predictor_8x8_neon()
    428  const uint8x8_t r1 = vext_u8(row_0, row_1, 6);  in vpx_d135_predictor_8x8_neon()
    429  const uint8x8_t r2 = vext_u8(row_0, row_1, 5);  in vpx_d135_predictor_8x8_neon()
    [all …]
|
/external/XNNPACK/src/u8-maxpool/ |
D | 9p8x-minmax-neon-c16.c |
    131  vout_lo = vext_u8(vout_lo, vout_lo, 4);  in xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16()
    135  vout_lo = vext_u8(vout_lo, vout_lo, 2);  in xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16()
    235  vout_lo = vext_u8(vout_lo, vout_lo, 4);  in xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16()
    239  vout_lo = vext_u8(vout_lo, vout_lo, 2);  in xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16()
|
/external/webp/src/dsp/ |
D | filters_neon.c |
     42  #define ROTATE_LEFT_N(A, N) vext_u8((A), (A), (N))
     44  #define ROTATE_RIGHT_N(A, N) vext_u8((A), (A), (8 - (N)) % 8)
    264  out = vext_u8(out, ROTATE_LEFT_N(pred, (L)), 1); \
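Note: with both operands equal, vext_u8 is a lane rotation: vext_u8(A, A, N) rotates left by N lanes (lane N moves to lane 0), so a right rotation by N is a left rotation by (8 - N) % 8, as the second macro encodes. An illustrative instantiation:

    #include <arm_neon.h>
    uint8x8_t rotl3(uint8x8_t a) { return vext_u8(a, a, 3); }
    uint8x8_t rotr3(uint8x8_t a) { return vext_u8(a, a, 5); }  /* (8-3)%8 */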
|
/external/llvm-project/clang/test/Sema/ |
D | aarch64-neon-ranges.c |
      8  vext_u8(small, small, 7);  in test_vext_8bit()
     15  … vext_u8(small, small, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}}  in test_vext_8bit()
|
/external/clang/test/Sema/ |
D | aarch64-neon-ranges.c |
      8  vext_u8(small, small, 7);  in test_vext_8bit()
     15  vext_u8(small, small, 8); // expected-error {{argument should be a value from 0 to 7}}  in test_vext_8bit()
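Note: both Sema tests pin down the same constraint: the final operand of vext_u8 must be an integer constant in [0, 7], i.e. a valid lane index of the 64-bit uint8x8_t vector; only the diagnostic wording differs between the two clang trees. For example:

    #include <arm_neon.h>
    uint8x8_t ok(uint8x8_t a)  { return vext_u8(a, a, 7); }    /* in range */
    /* uint8x8_t bad(uint8x8_t a) { return vext_u8(a, a, 8); }    error   */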
|
/external/clang/test/CodeGen/ |
D | aarch64-neon-extract.c |
     93  return vext_u8(a, b, 2);  in test_vext_u8()
|
/external/llvm-project/clang/test/CodeGen/ |
D | aarch64-neon-extract.c |
     93  return vext_u8(a, b, 2);  in test_vext_u8()
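Note: the CodeGen tests check that vext_u8(a, b, 2) lowers to a byte-wise shuffle; the result is the 8 bytes starting at index 2 of the concatenation a:b. An illustrative value check (not part of the tests):

    #include <arm_neon.h>
    /* With a = {0..7} and b = {8..15}, returns {2,3,4,5,6,7,8,9}. */
    uint8x8_t vext2_demo(void) {
      const uint8_t av[8] = {0, 1, 2, 3, 4, 5, 6, 7};
      const uint8_t bv[8] = {8, 9, 10, 11, 12, 13, 14, 15};
      return vext_u8(vld1_u8(av), vld1_u8(bv), 2);
    }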
|
/external/XNNPACK/src/qu8-gavgpool/ |
D | 7x-minmax-neon-c8.c |
    204  vout = vext_u8(vout, vout, 4);  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
    208  vout = vext_u8(vout, vout, 2);  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
|
D | 7p7x-minmax-neon-c8.c |
    282  vout = vext_u8(vout, vout, 4);  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
    286  vout = vext_u8(vout, vout, 2);  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
|
/external/XNNPACK/src/qu8-vadd/ |
D | minmax-neon.c |
    236  vy = vext_u8(vy, vy, 4);  in xnn_qu8_vadd_minmax_ukernel__neon()
    240  vy = vext_u8(vy, vy, 2);  in xnn_qu8_vadd_minmax_ukernel__neon()
|
/external/XNNPACK/src/qu8-avgpool/ |
D | 9x-minmax-neon-c8.c |
    262  vout = vext_u8(vout, vout, 4);  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
    266  vout = vext_u8(vout, vout, 2);  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
|
D | 9p8x-minmax-neon-c8.c |
    407  vout = vext_u8(vout, vout, 4);  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
    411  vout = vext_u8(vout, vout, 2);  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
|
/external/XNNPACK/src/qu8-dwconv/ |
D | up8x9-minmax-neon.c |
    258  vout = vext_u8(vout, vout, 4);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
    262  vout = vext_u8(vout, vout, 2);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | legacy_optimized_ops.h |
   4040  uint8x8_t max4 = vmax_u8(max8, vext_u8(max8, max8, 4));  in Softmax()
   4041  uint8x8_t max2 = vmax_u8(max4, vext_u8(max4, max4, 2));  in Softmax()
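Note: the Softmax snippet is the standard log2-step horizontal reduction: fold the vector onto itself with vext_u8 and vmax_u8 until lane 0 holds the maximum. A self-contained sketch completing the fold (the final step is not shown in the matches above):

    #include <arm_neon.h>
    uint8_t hmax_u8(uint8x8_t max8) {
      const uint8x8_t max4 = vmax_u8(max8, vext_u8(max8, max8, 4));
      const uint8x8_t max2 = vmax_u8(max4, vext_u8(max4, max4, 2));
      const uint8x8_t max1 = vmax_u8(max2, vext_u8(max2, max2, 1));
      return vget_lane_u8(max1, 0);
    }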
|