/external/libvpx/libvpx/vpx_dsp/arm/
D | intrapred_neon.c
    315  *row = vextq_u8(*row, above_right, 1);  in d45_store_16()
    324  const uint8x16_t A1 = vextq_u8(A0, above_right, 1);  in vpx_d45_predictor_16x16_neon()
    325  const uint8x16_t A2 = vextq_u8(A0, above_right, 2);  in vpx_d45_predictor_16x16_neon()
    371  row_0 = vextq_u8(row_0, row_1, 1);  in vpx_d45_predictor_32x32_neon()
    372  row_1 = vextq_u8(row_1, above_right, 1);  in vpx_d45_predictor_32x32_neon()
    485  vextq_u8(Lfedcba9876543210, XA0123456789abcde, 1);  in vpx_d135_predictor_16x16_neon()
    487  vextq_u8(Lfedcba9876543210, XA0123456789abcde, 2);  in vpx_d135_predictor_16x16_neon()
    492  const uint8x16_t r_0 = vextq_u8(row_0, row_1, 15);  in vpx_d135_predictor_16x16_neon()
    493  const uint8x16_t r_1 = vextq_u8(row_0, row_1, 14);  in vpx_d135_predictor_16x16_neon()
    494  const uint8x16_t r_2 = vextq_u8(row_0, row_1, 13);  in vpx_d135_predictor_16x16_neon()
    [all …]
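vextq_u8(a, b, n) yields the 16 bytes starting at byte n of the 32-byte concatenation a:b. The diagonal predictors use it to build the one- and two-byte-shifted copies of the above row that feed the (A + 2*B + C + 2) >> 2 smoothing filter. A minimal sketch of the pattern, not the libvpx code itself (above and above_right are illustrative stand-ins):

    #include <arm_neon.h>

    // Three-tap diagonal smoothing: out[i] = (A[i] + 2*A[i+1] + A[i+2] + 2) >> 2,
    // with the shifted rows built by vextq_u8 instead of two extra loads.
    static uint8x16_t d45_avg3(const uint8_t *above, uint8x16_t above_right) {
      const uint8x16_t A0 = vld1q_u8(above);               // A[0..15]
      const uint8x16_t A1 = vextq_u8(A0, above_right, 1);  // A[1..16]
      const uint8x16_t A2 = vextq_u8(A0, above_right, 2);  // A[2..17]
      // (A0 + 2*A1 + A2 + 2) >> 2 computed without widening to 16 bits.
      return vrhaddq_u8(vhaddq_u8(A0, A2), A1);
    }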
/external/libaom/libaom/av1/common/arm/
D | warp_plane_neon.c
    242  tmp_2 = vextq_u8(tmp_0, tmp_0, 1);  in horizontal_filter_neon()
    243  tmp_3 = vextq_u8(tmp_1, tmp_1, 1);  in horizontal_filter_neon()
    250  src_3_low = vget_low_u8(vextq_u8(src_1, src_1, 4));  in horizontal_filter_neon()
    251  src_4_low = vget_low_u8(vextq_u8(src_2, src_2, 4));  in horizontal_filter_neon()
    252  src_5_low = vget_low_u8(vextq_u8(src_1, src_1, 2));  in horizontal_filter_neon()
    253  src_6_low = vget_low_u8(vextq_u8(src_1, src_1, 6));  in horizontal_filter_neon()
    594  src_2 = vextq_u8(src_1, src_1, 1);  in av1_warp_affine_neon()
    595  src_3 = vextq_u8(src_2, src_2, 1);  in av1_warp_affine_neon()
    596  src_4 = vextq_u8(src_3, src_3, 1);  in av1_warp_affine_neon()
    612  src_2 = vextq_u8(src_1, src_1, 1);  in av1_warp_affine_neon()
    [all …]
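When both source operands are the same register, vextq_u8(v, v, n) is a byte rotate, so one 16-byte load can feed several overlapping tap windows. A hedged sketch of the pattern (the function and names are mine, not libaom's):

    #include <arm_neon.h>

    // Peel four shifted 8-byte windows out of a single 16-byte load;
    // vextq_u8(s, s, n) rotates s left by n bytes.
    static void shifted_windows(const uint8_t *src, uint8x8_t win[4]) {
      const uint8x16_t s = vld1q_u8(src);
      win[0] = vget_low_u8(s);                  // src[0..7]
      win[1] = vget_low_u8(vextq_u8(s, s, 1));  // src[1..8]
      win[2] = vget_low_u8(vextq_u8(s, s, 2));  // src[2..9]
      win[3] = vget_low_u8(vextq_u8(s, s, 4));  // src[4..11]
    }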
/external/libgav1/libgav1/src/dsp/arm/
D | cdef_neon.cc
    159  *partial_lo = vpadalq_u8(*partial_lo, vextq_u8(v_zero_16, v_d1_temp[1], 14));  in AddPartial_D1_D3()
    162  *partial_lo = vpadalq_u8(*partial_lo, vextq_u8(v_zero_16, v_d1_temp[2], 12));  in AddPartial_D1_D3()
    164  *partial_lo = vpadalq_u8(*partial_lo, vextq_u8(v_zero_16, v_d1_temp[3], 10));  in AddPartial_D1_D3()
    166  *partial_lo = vpadalq_u8(*partial_lo, vextq_u8(v_zero_16, v_d1_temp[4], 8));  in AddPartial_D1_D3()
    169  *partial_lo = vpadalq_u8(*partial_lo, vextq_u8(v_zero_16, v_d1_temp[5], 6));  in AddPartial_D1_D3()
    171  *partial_hi = vpadalq_u8(*partial_hi, vextq_u8(v_d1_temp[5], v_zero_16, 6));  in AddPartial_D1_D3()
    174  *partial_lo = vpadalq_u8(*partial_lo, vextq_u8(v_zero_16, v_d1_temp[6], 4));  in AddPartial_D1_D3()
    176  *partial_hi = vpadalq_u8(*partial_hi, vextq_u8(v_d1_temp[6], v_zero_16, 4));  in AddPartial_D1_D3()
    179  *partial_lo = vpadalq_u8(*partial_lo, vextq_u8(v_zero_16, v_d1_temp[7], 2));  in AddPartial_D1_D3()
    181  *partial_hi = vpadalq_u8(*partial_hi, vextq_u8(v_d1_temp[7], v_zero_16, 2));  in AddPartial_D1_D3()
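Here each row is shifted against a zero vector before vpadalq_u8 pairwise-accumulates it, so row k lands k pixel-pairs further along the diagonal partial sums, and the bytes pushed off the end spill into partial_hi via the mirrored shift. The vextq_u8 index must be an immediate, which is why the real code is unrolled per row; a sketch of the k = 3 step (my naming):

    #include <arm_neon.h>

    // Row 3 of the diagonal accumulation: prefix six zero bytes (three
    // u16 lanes) so the pairwise add lands each pixel on its diagonal.
    static void add_diagonal_row3(uint8x16_t row, uint16x8_t *partial_lo,
                                  uint16x8_t *partial_hi) {
      const uint8x16_t zero = vdupq_n_u8(0);
      // lo gets { 0 x 6, row[0..9] }; hi gets the spilled { row[10..15], 0 x 10 }.
      *partial_lo = vpadalq_u8(*partial_lo, vextq_u8(zero, row, 10));
      *partial_hi = vpadalq_u8(*partial_hi, vextq_u8(row, zero, 10));
    }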
D | convolve_neon.cc
    113  v_src[0] = vget_low_u8(vextq_u8(src_long, src_long, 1));  in SumHorizontalTaps()
    114  v_src[1] = vget_low_u8(vextq_u8(src_long, src_long, 2));  in SumHorizontalTaps()
    115  v_src[2] = vget_low_u8(vextq_u8(src_long, src_long, 3));  in SumHorizontalTaps()
    116  v_src[3] = vget_low_u8(vextq_u8(src_long, src_long, 4));  in SumHorizontalTaps()
    117  v_src[4] = vget_low_u8(vextq_u8(src_long, src_long, 5));  in SumHorizontalTaps()
    118  v_src[5] = vget_low_u8(vextq_u8(src_long, src_long, 6));  in SumHorizontalTaps()
    122  v_src[1] = vget_low_u8(vextq_u8(src_long, src_long, 1));  in SumHorizontalTaps()
    123  v_src[2] = vget_low_u8(vextq_u8(src_long, src_long, 2));  in SumHorizontalTaps()
    124  v_src[3] = vget_low_u8(vextq_u8(src_long, src_long, 3));  in SumHorizontalTaps()
    125  v_src[4] = vget_low_u8(vextq_u8(src_long, src_long, 4));  in SumHorizontalTaps()
    [all …]
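Every horizontal filter tap reads the same 16 source bytes at a different offset, so a single load plus a chain of vextq_u8 shifts replaces per-tap loads. A simplified 4-tap sketch assuming non-negative taps (the real 8-tap kernels split positive and negative taps across vmlal_u8/vmlsl_u8); taps[t] here would hold vdup_n_u8(filter[t]):

    #include <arm_neon.h>

    // out[i] = sum over t of taps[t] * src[i + t], for 8 output pixels.
    static uint16x8_t horizontal_taps4(const uint8_t *src,
                                       const uint8x8_t taps[4]) {
      const uint8x16_t s = vld1q_u8(src);
      uint16x8_t sum = vmull_u8(vget_low_u8(s), taps[0]);
      sum = vmlal_u8(sum, vget_low_u8(vextq_u8(s, s, 1)), taps[1]);
      sum = vmlal_u8(sum, vget_low_u8(vextq_u8(s, s, 2)), taps[2]);
      sum = vmlal_u8(sum, vget_low_u8(vextq_u8(s, s, 3)), taps[3]);
      return sum;
    }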
D | loop_restoration_neon.cc
    127  s[1] = vextq_u8(s[0], s[7], 1);  in WienerHorizontalTap7()
    128  s[2] = vextq_u8(s[0], s[7], 2);  in WienerHorizontalTap7()
    129  s[3] = vextq_u8(s[0], s[7], 3);  in WienerHorizontalTap7()
    130  s[4] = vextq_u8(s[0], s[7], 4);  in WienerHorizontalTap7()
    131  s[5] = vextq_u8(s[0], s[7], 5);  in WienerHorizontalTap7()
    132  s[6] = vextq_u8(s[0], s[7], 6);  in WienerHorizontalTap7()
    159  s[1] = vextq_u8(s[0], s[5], 1);  in WienerHorizontalTap5()
    160  s[2] = vextq_u8(s[0], s[5], 2);  in WienerHorizontalTap5()
    161  s[3] = vextq_u8(s[0], s[5], 3);  in WienerHorizontalTap5()
    162  s[4] = vextq_u8(s[0], s[5], 4);  in WienerHorizontalTap5()
    [all …]
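The Wiener filter applies the same trick at 7 taps: two loads bracket the row and vextq_u8 carves out the interior windows, so no unaligned reload is needed per tap. An illustrative sketch of the window setup only (names are mine; I read the listing's s[0] and s[7] as the two bracketing loads):

    #include <arm_neon.h>

    // Seven overlapping 16-byte windows from two loads.
    static void wiener7_windows(const uint8_t *src, uint8x16_t s[7]) {
      const uint8x16_t left  = vld1q_u8(src);       // src[0..15]
      const uint8x16_t right = vld1q_u8(src + 16);  // src[16..31]
      s[0] = left;
      s[1] = vextq_u8(left, right, 1);  // src[1..16]
      s[2] = vextq_u8(left, right, 2);  // src[2..17]
      s[3] = vextq_u8(left, right, 3);  // src[3..18]
      s[4] = vextq_u8(left, right, 4);  // src[4..19]
      s[5] = vextq_u8(left, right, 5);  // src[5..20]
      s[6] = vextq_u8(left, right, 6);  // src[6..21]
    }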
/external/libhevc/common/arm/
D | ihevc_sao_edge_offset_class0_chroma.s
    170  …VEXT.8 Q7,Q7,Q6,#14  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, …
    185  …VEXT.8 Q14,Q14,Q15,#14  @II pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_ro…
    198  …VEXT.8 Q7,Q6,Q7,#2  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, …
    209  …VEXT.8 Q14,Q15,Q14,#2  @II pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tm…
    333  …VEXT.8 Q7,Q7,Q6,#14  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, …
    348  …VEXT.8 Q14,Q14,Q15,#14  @II pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_ro…
    359  …VEXT.8 Q7,Q6,Q7,#2  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, …
    374  …VEXT.8 Q14,Q15,Q14,#2  @II pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tm…

D | ihevc_sao_edge_offset_class0.s
    163  …VEXT.8 Q7,Q7,Q6,#15  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, …
    179  …VEXT.8 Q14,Q14,Q13,#15  @II Iteration pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, …
    187  …VEXT.8 Q7,Q6,Q7,#1  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, …
    199  …VEXT.8 Q14,Q13,Q14,#1  @II pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tm…
    305  …VEXT.8 Q7,Q7,Q6,#15  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, …
    313  …VEXT.8 Q7,Q6,Q7,#1  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, …
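SAO edge-offset class 0 compares each pixel with its left and right neighbours; the VEXT pairs above build those neighbour rows from the current row plus a temp register carrying the border pixel. Luma shifts by 15/1 bytes, interleaved chroma by 14/2 (one pixel is two bytes). A C-intrinsic sketch of the luma shifts, following the vextq_u8 calls quoted in the asm comments:

    #include <arm_neon.h>

    // left[i] = row[i-1], right[i] = row[i+1]; prev/next carry the
    // border pixels in their outermost bytes.
    static void sao_class0_neighbours(uint8x16_t row, uint8x16_t prev,
                                      uint8x16_t next, uint8x16_t *left,
                                      uint8x16_t *right) {
      *left  = vextq_u8(prev, row, 15);  // VEXT.8 Qd, Qprev, Qrow, #15
      *right = vextq_u8(row, next, 1);   // VEXT.8 Qd, Qrow, Qnext, #1
    }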
D | ihevc_sao_edge_offset_class2.s
    286  …VEXT.8 Q9,Q8,Q9,#1  @I pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_…
    358  …VEXT.8 Q11,Q8,Q14,#1  @II pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row…
    366  …VEXT.8 Q9,Q15,Q9,#1  @III pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_ro…
    467  …VEXT.8 Q9,Q8,Q9,#1  @pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tm…
    596  …VEXT.8 Q9,Q8,Q9,#1  @pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tm…
    726  …VEXT.8 Q9,Q8,Q9,#1  @pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tm…

D | ihevc_sao_edge_offset_class3.s
    301  …VEXT.8 Q9,Q9,Q8,#15  @I pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_…
    379  …VEXT.8 Q9,Q9,Q8,#15  @II pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next…
    413  …VEXT.8 Q9,Q9,Q15,#15  @III pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_nex…
    504  …VEXT.8 Q9,Q9,Q8,#15  @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_ro…
    636  …VEXT.8 Q9,Q9,Q8,#15  @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_ro…
    778  …VEXT.8 Q9,Q9,Q8,#15  @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_ro…

D | ihevc_sao_edge_offset_class2_chroma.s
    382  …VEXT.8 Q9,Q8,Q9,#2  @I pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_…
    470  …VEXT.8 Q14,Q8,Q14,#2  @II pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row…
    482  …VEXT.8 Q9,Q15,Q9,#2  @III pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_ro…
    616  …VEXT.8 Q9,Q8,Q9,#2  @pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tm…
    758  …VEXT.8 Q9,Q8,Q9,#2  @pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tm…
    908  …VEXT.8 Q9,Q8,Q9,#2  @pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tm…

D | ihevc_sao_edge_offset_class3_chroma.s
    375  …VEXT.8 Q9,Q9,Q8,#14  @I pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_…
    470  …VEXT.8 Q14,Q14,Q8,#14  @II pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next…
    509  …VEXT.8 Q9,Q9,Q15,#14  @III pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_nex…
    625  …VEXT.8 Q9,Q9,Q8,#14  @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_ro…
    775  …VEXT.8 Q9,Q9,Q8,#14  @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_ro…
    952  …VEXT.8 Q9,Q9,Q8,#14  @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_ro…
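Classes 2 and 3 (the 135 and 45 degree diagonals) apply the same shifts to the next row instead: shifting it left one pixel gives the down-right neighbour, shifting it right one pixel the down-left one, with chroma again doubling the byte counts. A hedged intrinsic sketch, with next_tmp standing in for the register that supplies the border pixel:

    #include <arm_neon.h>

    // down_right[i] = next_row[i+1] (class 2);
    // down_left[i]  = next_row[i-1] (class 3).
    static void sao_diag_neighbours(uint8x16_t next_row, uint8x16_t next_tmp,
                                    uint8x16_t *down_right,
                                    uint8x16_t *down_left) {
      *down_right = vextq_u8(next_row, next_tmp, 1);
      *down_left  = vextq_u8(next_tmp, next_row, 15);
    }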
/external/XNNPACK/src/q8-igemm/
D | 8x8-neon.c
    623  … vout6x01234567_7x01234567 = vextq_u8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 4);  in xnn_q8_igemm_ukernel_8x8__neon()
    624  … vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);  in xnn_q8_igemm_ukernel_8x8__neon()
    625  … vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);  in xnn_q8_igemm_ukernel_8x8__neon()
    626  … vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);  in xnn_q8_igemm_ukernel_8x8__neon()
    637  … vout6x01234567_7x01234567 = vextq_u8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 2);  in xnn_q8_igemm_ukernel_8x8__neon()
    638  … vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);  in xnn_q8_igemm_ukernel_8x8__neon()
    639  … vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);  in xnn_q8_igemm_ukernel_8x8__neon()
    640  … vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);  in xnn_q8_igemm_ukernel_8x8__neon()

D | 4x8-neon.c
    390  … vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);  in xnn_q8_igemm_ukernel_4x8__neon()
    391  … vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);  in xnn_q8_igemm_ukernel_4x8__neon()
    398  … vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);  in xnn_q8_igemm_ukernel_4x8__neon()
    399  … vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);  in xnn_q8_igemm_ukernel_4x8__neon()
/external/XNNPACK/src/q8-gemm/
D | 8x8-neon.c
    585  … vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);  in xnn_q8_gemm_ukernel_8x8__neon()
    586  … vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);  in xnn_q8_gemm_ukernel_8x8__neon()
    587  … vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);  in xnn_q8_gemm_ukernel_8x8__neon()
    588  … vout6x01234567_7x01234567 = vextq_u8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 4);  in xnn_q8_gemm_ukernel_8x8__neon()
    599  … vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);  in xnn_q8_gemm_ukernel_8x8__neon()
    600  … vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);  in xnn_q8_gemm_ukernel_8x8__neon()
    601  … vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);  in xnn_q8_gemm_ukernel_8x8__neon()
    602  … vout6x01234567_7x01234567 = vextq_u8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 2);  in xnn_q8_gemm_ukernel_8x8__neon()

D | 4x8-neon.c
    357  … vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);  in xnn_q8_gemm_ukernel_4x8__neon()
    358  … vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);  in xnn_q8_gemm_ukernel_4x8__neon()
    365  … vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);  in xnn_q8_gemm_ukernel_4x8__neon()
    366  … vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);  in xnn_q8_gemm_ukernel_4x8__neon()
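The GEMM and IGEMM kernels share this tail pattern: with fewer than 8 output columns left, they store 4, then 2, then 1 lanes, rotating the consumed bytes out with vextq_u8(v, v, n) so every store reads lane 0. A single-row sketch (the real kernels keep two rows interleaved per q register, and the pointer casts mirror their lane stores):

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    // Store the low n (< 8) bytes of vout to c in 4/2/1-byte pieces.
    static void store_tail(uint8_t *c, size_t n, uint8x16_t vout) {
      if (n & 4) {
        vst1q_lane_u32((uint32_t *)(void *)c, vreinterpretq_u32_u8(vout), 0);
        c += 4;
        vout = vextq_u8(vout, vout, 4);  // rotate the stored bytes out
      }
      if (n & 2) {
        vst1q_lane_u16((uint16_t *)(void *)c, vreinterpretq_u16_u8(vout), 0);
        c += 2;
        vout = vextq_u8(vout, vout, 2);
      }
      if (n & 1) {
        vst1q_lane_u8(c, vout, 0);
      }
    }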
/external/clang/test/Sema/
D | aarch64-neon-ranges.c
    11  vextq_u8(big, big, 15);  in test_vext_8bit()
    18  vextq_u8(big, big, 16); // expected-error {{argument should be a value from 0 to 15}}  in test_vext_8bit()
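The point of the test: the last argument is an immediate, checked during Sema against the range 0..15 for 16-byte vectors. So, for instance:

    #include <arm_neon.h>

    uint8x16_t ext_examples(uint8x16_t a, uint8x16_t b, int i) {
      // return vextq_u8(a, b, i);   // error: index must be a constant
      // return vextq_u8(a, b, 16);  // error: argument should be a value from 0 to 15
      return vextq_u8(a, b, 15);     // ok: the maximum legal immediate
    }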
/external/webp/src/dsp/
D | filters_neon.c
    38  #define SHIFT_RIGHT_N_Q(A, N) vextq_u8((A), zero, (N))
    39  #define SHIFT_LEFT_N_Q(A, N) vextq_u8(zero, (A), (16 - (N)) % 16)
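Both macros are constant byte shifts against a zero vector (zero is defined elsewhere in the file). Written out for N = 3:

    #include <arm_neon.h>

    static uint8x16_t shift_right_3(uint8x16_t A) {
      const uint8x16_t zero = vdupq_n_u8(0);
      return vextq_u8(A, zero, 3);              // { A[3..15], 0, 0, 0 }
    }

    static uint8x16_t shift_left_3(uint8x16_t A) {
      const uint8x16_t zero = vdupq_n_u8(0);
      return vextq_u8(zero, A, (16 - 3) % 16);  // { 0, 0, 0, A[0..12] }
    }

Note that the % 16 only keeps the immediate inside the legal 0..15 range: SHIFT_LEFT_N_Q(A, 0) expands to vextq_u8(zero, A, 0), which is all zeros rather than A, so the left shift is presumably only instantiated with N >= 1.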
D | lossless_neon.c
    152  #define ROTATE32_LEFT(L) vextq_u8((L), (L), 12)  // D|C|B|A -> C|B|A|D
    228  const uint8x16_t shift0 = vextq_u8(zero, src, 12);  in PredictorAdd1_NEON()
    232  const uint8x16_t shift1 = vextq_u8(zero, sum0, 8);  in PredictorAdd1_NEON()
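PredictorAdd1 adds each pixel's left neighbour, which over a vector of four 4-byte pixels is a prefix sum; the two vextq_u8-against-zero shifts implement it in two add steps. A self-contained sketch of that core (omitting the broadcast of the pixel to the left of the vector, which the real function also adds):

    #include <arm_neon.h>

    // { p0, p1, p2, p3 } -> { p0, p0+p1, p0+p1+p2, p0+p1+p2+p3 },
    // per channel, with wrapping u8 adds.
    static uint8x16_t prefix_sum_4px(uint8x16_t src) {
      const uint8x16_t zero = vdupq_n_u8(0);
      const uint8x16_t shift0 = vextq_u8(zero, src, 12);  // up 1 pixel
      const uint8x16_t sum0 = vaddq_u8(src, shift0);
      const uint8x16_t shift1 = vextq_u8(zero, sum0, 8);  // up 2 pixels
      return vaddq_u8(sum0, shift1);
    }

ROTATE32_LEFT is the same intrinsic with the vector as both operands: vextq_u8(L, L, 12) rotates by three pixels, matching the D|C|B|A -> C|B|A|D comment.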
/external/clang/test/CodeGen/
D | aarch64-neon-extract.c
    133  return vextq_u8(a, b, 2);  in test_vextq_u8()

D | arm_neon_intrinsics.c
    3307  return vextq_u8(a, b, 15);  in test_vextq_u8()
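Both tests pin down the lowering of vextq_u8 to a plain byte shuffle (a shufflevector in IR, EXT on AArch64). The semantics reduce to a scalar reference, made explicit in this self-check (assuming an ARM target; this is my sketch, not the clang test):

    #include <arm_neon.h>
    #include <assert.h>
    #include <stdint.h>

    // vextq_u8(a, b, n)[i] == (i + n < 16) ? a[i + n] : b[i + n - 16]
    int main(void) {
      uint8_t a[16], b[16], r[16];
      for (int i = 0; i < 16; ++i) { a[i] = (uint8_t)i; b[i] = (uint8_t)(16 + i); }
      vst1q_u8(r, vextq_u8(vld1q_u8(a), vld1q_u8(b), 2));
      for (int i = 0; i < 16; ++i) {
        assert(r[i] == (i + 2 < 16 ? a[i + 2] : b[i + 2 - 16]));
      }
      return 0;
    }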
/external/neon_2_sse/
D | NEON_2_SSE.h
    1971   _NEON2SSESTORAGE uint8x16_t vextq_u8(uint8x16_t a, uint8x16_t b, __constrange(0,15) int c); // VEXT…
    14358  _NEON2SSESTORAGE uint8x16_t vextq_u8(uint8x16_t a, uint8x16_t b, __constrange(0,15) int c); // VEXT…
    14359  #define vextq_u8(a,b,c) _MM_ALIGNR_EPI8 (b,a,c)  macro
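The x86 shim works because SSSE3's PALIGNR uses the same byte order: _mm_alignr_epi8(b, a, c) shifts the concatenation b:a (b in the upper half) right by c bytes, so element i is a[i + c] while i + c < 16 and b[i + c - 16] beyond, which is exactly vextq_u8(a, b, c). A minimal sketch (I take the upper-case _MM_ALIGNR_EPI8 to be the header's wrapper around the intrinsic):

    #include <tmmintrin.h>  // SSSE3

    // vextq_u8(a, b, 5) on x86: bytes a[5..15] followed by b[0..4].
    static __m128i vextq_u8_sse(__m128i a, __m128i b) {
      return _mm_alignr_epi8(b, a, 5);
    }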