/external/libaom/libaom/aom_dsp/arm/ |
D | sad4d_neon.c | 87 uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0); in aom_sad64x64x4d_neon()
    88 uint16x8_t vec_sum_ref0_hi = vdupq_n_u16(0); in aom_sad64x64x4d_neon()
    89 uint16x8_t vec_sum_ref1_lo = vdupq_n_u16(0); in aom_sad64x64x4d_neon()
    90 uint16x8_t vec_sum_ref1_hi = vdupq_n_u16(0); in aom_sad64x64x4d_neon()
    91 uint16x8_t vec_sum_ref2_lo = vdupq_n_u16(0); in aom_sad64x64x4d_neon()
    92 uint16x8_t vec_sum_ref2_hi = vdupq_n_u16(0); in aom_sad64x64x4d_neon()
    93 uint16x8_t vec_sum_ref3_lo = vdupq_n_u16(0); in aom_sad64x64x4d_neon()
    94 uint16x8_t vec_sum_ref3_hi = vdupq_n_u16(0); in aom_sad64x64x4d_neon()
    133 uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0); in aom_sad32x32x4d_neon()
    134 uint16x8_t vec_sum_ref0_hi = vdupq_n_u16(0); in aom_sad32x32x4d_neon()
    [all …]
|
D | sad_neon.c | 134 uint16x8_t vec_accum_lo = vdupq_n_u16(0); in aom_sad64x64_neon()
    135 uint16x8_t vec_accum_hi = vdupq_n_u16(0); in aom_sad64x64_neon()
    170 uint16x8_t vec_accum_lo = vdupq_n_u16(0); in aom_sad32x32_neon()
    171 uint16x8_t vec_accum_hi = vdupq_n_u16(0); in aom_sad32x32_neon()
    195 uint16x8_t vec_accum_lo = vdupq_n_u16(0); in aom_sad16x16_neon()
    196 uint16x8_t vec_accum_hi = vdupq_n_u16(0); in aom_sad16x16_neon()
    214 uint16x8_t vec_accum = vdupq_n_u16(0); in aom_sad8x8_neon()
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | sad4d_neon.c | 36 uint16x8_t abs[2] = { vdupq_n_u16(0), vdupq_n_u16(0) }; in sad4x_4d()
    147 uint16x8_t sum[4] = { vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0), in sad8x_4d()
    148 vdupq_n_u16(0) }; in sad8x_4d()
    196 uint16x8_t sum[4] = { vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0), in sad16x_4d()
    197 vdupq_n_u16(0) }; in sad16x_4d()
    238 sum[0] = sum[1] = sum[2] = sum[3] = vdupq_n_u16(0); in sad32x_4d()
    295 uint16x8_t sum[4] = { vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0), in vpx_sad64x32x4d_neon()
    296 vdupq_n_u16(0) }; in vpx_sad64x32x4d_neon()
    341 uint16x8_t sum[8] = { vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0), in vpx_sad64x64x4d_neon()
    342 vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0), in vpx_sad64x64x4d_neon()
    [all …]
|
D | sad_neon.c | 44 uint16x8_t abs = vdupq_n_u16(0); in vpx_sad4x8_neon()
    61 uint16x8_t abs = vdupq_n_u16(0); in vpx_sad4x8_avg_neon()
    81 uint16x8_t abs = vdupq_n_u16(0); in sad8x()
    98 uint16x8_t abs = vdupq_n_u16(0); in sad8x_avg()
    136 uint16x8_t abs = vdupq_n_u16(0); in sad16x()
    154 uint16x8_t abs = vdupq_n_u16(0); in sad16x_avg()
    194 uint16x8_t abs = vdupq_n_u16(0); in sad32x()
    216 uint16x8_t abs = vdupq_n_u16(0); in sad32x_avg()
    262 uint16x8_t abs_0 = vdupq_n_u16(0); in sad64x()
    263 uint16x8_t abs_1 = vdupq_n_u16(0); in sad64x()
    [all …]
|
D | avg_neon.c | 81 uint16x8_t vec_sum_lo = vdupq_n_u16(0); in vpx_int_pro_row_neon()
    82 uint16x8_t vec_sum_hi = vdupq_n_u16(0); in vpx_int_pro_row_neon()
    133 uint16x8_t vec_sum = vdupq_n_u16(0); in vpx_int_pro_col_neon()
|
/external/libhevc/encoder/arm/ |
D | ihevce_sad_compute_neon.c | 75 uint16x8_t abs = vdupq_n_u16(0); in ihevce_8xn_sad_computer_neon()
    102 uint16x8_t abs_0 = vdupq_n_u16(0); in ihevce_16xn_sad_computer_neon()
    103 uint16x8_t abs_1 = vdupq_n_u16(0); in ihevce_16xn_sad_computer_neon()
    132 uint16x8_t abs_0 = vdupq_n_u16(0); in ihevce_32xn_sad_computer_neon()
    133 uint16x8_t abs_1 = vdupq_n_u16(0); in ihevce_32xn_sad_computer_neon()
    166 uint16x8_t abs_0 = vdupq_n_u16(0); in ihevce_64xn_sad_computer_neon()
    167 uint16x8_t abs_1 = vdupq_n_u16(0); in ihevce_64xn_sad_computer_neon()
|
D | ihevce_coarse_layer_sad_neon.c | 190 uint16x8_t abs_01 = vdupq_n_u16(0); in hme_store_4x4_sads_high_speed_neon()
    191 uint16x8_t abs_23 = vdupq_n_u16(0); in hme_store_4x4_sads_high_speed_neon()
    211 uint16x8_t abs_01 = vdupq_n_u16(0); in hme_store_4x4_sads_high_speed_neon()
    341 uint16x8_t abs_a_01 = vdupq_n_u16(0); in hme_store_4x4_sads_high_quality_neon()
    342 uint16x8_t abs_a_23 = vdupq_n_u16(0); in hme_store_4x4_sads_high_quality_neon()
    343 uint16x8_t abs_b_01 = vdupq_n_u16(0); in hme_store_4x4_sads_high_quality_neon()
    344 uint16x8_t abs_b_23 = vdupq_n_u16(0); in hme_store_4x4_sads_high_quality_neon()
    372 uint16x8_t abs_a_01 = vdupq_n_u16(0); in hme_store_4x4_sads_high_quality_neon()
    373 uint16x8_t abs_b_01 = vdupq_n_u16(0); in hme_store_4x4_sads_high_quality_neon()
    476 const uint16x8_t v_ref_idx = vdupq_n_u16(i1_ref_idx); in hme_combine_4x4_sads_and_compute_cost_high_speed_neon()
    [all …]
|
D | ihevce_me_neon.c | 160 uint16x8_t abs = vdupq_n_u16(0); in ihevce_sad4_2x2_neon()
    181 uint16x8_t abs_01 = vdupq_n_u16(0); in ihevce_sad4_4x4_neon()
    182 uint16x8_t abs_23 = vdupq_n_u16(0); in ihevce_sad4_4x4_neon()
    207 uint16x8_t abs_0 = vdupq_n_u16(0); in ihevce_sad4_8x8_neon()
    208 uint16x8_t abs_1 = vdupq_n_u16(0); in ihevce_sad4_8x8_neon()
    209 uint16x8_t abs_2 = vdupq_n_u16(0); in ihevce_sad4_8x8_neon()
    210 uint16x8_t abs_3 = vdupq_n_u16(0); in ihevce_sad4_8x8_neon()
|
D | ihevce_ssd_and_sad_calculator_neon.c | 93 uint16x8_t abs_sum = vdupq_n_u16(0); in ihevce_ssd_and_sad_calculator_neon()
    125 uint16x8_t abs_sum_l = vdupq_n_u16(0); in ihevce_ssd_and_sad_calculator_neon()
    126 uint16x8_t abs_sum_h = vdupq_n_u16(0); in ihevce_ssd_and_sad_calculator_neon()
    172 uint16x8_t abs_sum = vdupq_n_u16(0); in ihevce_ssd_and_sad_calculator_neon()
|
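The libaom, libvpx, and libhevc hits above all share one idiom: a uint16x8_t running total is cleared with vdupq_n_u16(0), each row's absolute differences are folded into it with a widening NEON accumulate, and the lanes are reduced at the end. The fragment below is a minimal, self-contained sketch of that idiom only; it is not code from any of the listed files, and the function name, arguments, and the choice of vabal_u8 for the accumulation step are illustrative assumptions.

/* Sketch of the SAD-accumulator pattern seen in the hits above (hypothetical
 * names, not taken from libaom/libvpx/libhevc). */
#include <arm_neon.h>
#include <stdint.h>

static uint32_t sad8xh_neon_sketch(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   int height) {
  /* Zero the eight 16-bit accumulator lanes, as in the listings above. */
  uint16x8_t sum = vdupq_n_u16(0);

  for (int i = 0; i < height; ++i) {
    const uint8x8_t s = vld1_u8(src);
    const uint8x8_t r = vld1_u8(ref);
    /* sum += |s - r|, widened from 8 to 16 bits per lane.  Each lane grows by
     * at most 255 per row, so 16-bit lanes are safe for block heights <= 64. */
    sum = vabal_u8(sum, s, r);
    src += src_stride;
    ref += ref_stride;
  }

  /* Horizontal reduction of the eight 16-bit lanes to a single 32-bit SAD. */
  const uint32x4_t sum32 = vpaddlq_u16(sum);
  const uint64x2_t sum64 = vpaddlq_u32(sum32);
  return (uint32_t)(vgetq_lane_u64(sum64, 0) + vgetq_lane_u64(sum64, 1));
}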
/external/tensorflow/tensorflow/tools/android/test/jni/object_tracking/ |
D | image_neon.cc | 49 uint16x8_t accum1 = vdupq_n_u16(0); in Downsample2x32ColumnsNeon()
    50 uint16x8_t accum2 = vdupq_n_u16(0); in Downsample2x32ColumnsNeon()
    112 uint16x8_t accum1 = vdupq_n_u16(0); in Downsample4x32ColumnsNeon()
    113 uint16x8_t accum2 = vdupq_n_u16(0); in Downsample4x32ColumnsNeon()
|
/external/libjpeg-turbo/simd/arm/aarch64/ |
D | jchuff-neon.c | 192 vdupq_n_u16(0))); in jsimd_huff_encode_one_block_neon()
    194 vdupq_n_u16(0))); in jsimd_huff_encode_one_block_neon()
    196 vdupq_n_u16(0))); in jsimd_huff_encode_one_block_neon()
    198 vdupq_n_u16(0))); in jsimd_huff_encode_one_block_neon()
    200 vdupq_n_u16(0))); in jsimd_huff_encode_one_block_neon()
    202 vdupq_n_u16(0))); in jsimd_huff_encode_one_block_neon()
    204 vdupq_n_u16(0))); in jsimd_huff_encode_one_block_neon()
    206 vdupq_n_u16(0))); in jsimd_huff_encode_one_block_neon()
|
/external/skqp/src/core/ |
D | SkBlitRow_D32.cpp | 167 vsrc_wide = vmulq_u16(vsrc_wide, vdupq_n_u16(src_scale)); in blit_row_s32_blend()
    189 vsrc_wide = vmulq_u16(vsrc_wide, vdupq_n_u16(src_scale)); in blit_row_s32_blend()
    244 vsrc_scale = vdupq_n_u16(alpha256); in blit_row_s32a_blend()
    253 vdst_scale = vmlsq_u16(vdupq_n_u16(0xFF00), vdst_scale, vsrc_scale); in blit_row_s32a_blend()
    255 vdst_scale = vsraq_n_u16(vdupq_n_u16(1), vdst_scale, 8); in blit_row_s32a_blend()
|
/external/skia/src/core/ |
D | SkBlitRow_D32.cpp | 166 vsrc_wide = vmulq_u16(vsrc_wide, vdupq_n_u16(src_scale)); in blit_row_s32_blend()
    188 vsrc_wide = vmulq_u16(vsrc_wide, vdupq_n_u16(src_scale)); in blit_row_s32_blend()
    243 vsrc_scale = vdupq_n_u16(alpha256); in blit_row_s32a_blend()
    252 vdst_scale = vmlsq_u16(vdupq_n_u16(0xFF00), vdst_scale, vsrc_scale); in blit_row_s32a_blend()
    254 vdst_scale = vsraq_n_u16(vdupq_n_u16(1), vdst_scale, 8); in blit_row_s32a_blend()
|
/external/zlib/ |
D | adler32_simd.c | 250 uint16x8_t v_column_sum_1 = vdupq_n_u16(0); in adler32_simd_()
    251 uint16x8_t v_column_sum_2 = vdupq_n_u16(0); in adler32_simd_()
    252 uint16x8_t v_column_sum_3 = vdupq_n_u16(0); in adler32_simd_()
    253 uint16x8_t v_column_sum_4 = vdupq_n_u16(0); in adler32_simd_()
|
/external/angle/third_party/zlib/ |
D | adler32_simd.c | 250 uint16x8_t v_column_sum_1 = vdupq_n_u16(0); in adler32_simd_()
    251 uint16x8_t v_column_sum_2 = vdupq_n_u16(0); in adler32_simd_()
    252 uint16x8_t v_column_sum_3 = vdupq_n_u16(0); in adler32_simd_()
    253 uint16x8_t v_column_sum_4 = vdupq_n_u16(0); in adler32_simd_()
|
/external/skqp/src/opts/ |
D | SkBlitMask_opts.h | 25 return vaddw_u8(vdupq_n_u16(1), alpha); in SkAlpha255To256_neon8()
    68 vscale = vsubw_u8(vdupq_n_u16(256), in D32_A8_Opaque_Color_neon()
    71 vscale = vsubw_u8(vdupq_n_u16(256), vmask); in D32_A8_Opaque_Color_neon()
    134 uint16x8_t vscale = vsubw_u8(vdupq_n_u16(256), vmask); in blit_mask_d32_a8_black()
|
/external/skia/src/opts/ |
D | SkBlitMask_opts.h | 25 return vaddw_u8(vdupq_n_u16(1), alpha); in SkAlpha255To256_neon8()
    68 vscale = vsubw_u8(vdupq_n_u16(256), in D32_A8_Opaque_Color_neon()
    71 vscale = vsubw_u8(vdupq_n_u16(256), vmask); in D32_A8_Opaque_Color_neon()
    134 uint16x8_t vscale = vsubw_u8(vdupq_n_u16(256), vmask); in blit_mask_d32_a8_black()
|
/external/libgav1/libgav1/src/dsp/arm/ |
D | cdef_neon.cc | 154 *partial_lo = *partial_hi = vdupq_n_u16(0); in AddPartial_D1_D3()
    200 const uint16x8_t v_zero = vdupq_n_u16(0); in AddPartial_D5_D7()
    213 *partial_hi = vdupq_n_u16(0); in AddPartial_D5_D7()
    268 partial_lo[2] = vsetq_lane_u16(SumVector(v_src[0]), vdupq_n_u16(0), 0); in AddPartial()
    483 vdupq_n_u16(static_cast<uint16_t>(~kCdefLargeValue)); in CdefFilter_NEON()
    484 const uint16x8_t primary_threshold = vdupq_n_u16(primary_strength); in CdefFilter_NEON()
    485 const uint16x8_t secondary_threshold = vdupq_n_u16(secondary_strength); in CdefFilter_NEON()
|
D | intrapred_filter_neon.cc | 114 uint16x8_t sum = vdupq_n_u16(0); in FilterIntraPredictor_NEON()
|
/external/libgav1/libgav1/src/utils/ |
D | entropy_decoder.cc | 203 const uint16x8_t cdf_max_probability = vdupq_n_u16(kCdfMaxProbability); in UpdateCdf7To9()
    206 const uint16x8_t symbol_vec = vdupq_n_u16(symbol); in UpdateCdf7To9()
    240 const uint16x8_t cdf_max_probability = vdupq_n_u16(kCdfMaxProbability); in UpdateCdf11()
    241 const uint16x8_t symbol_vec = vdupq_n_u16(symbol); in UpdateCdf11()
    274 const uint16x8_t cdf_max_probability = vdupq_n_u16(kCdfMaxProbability); in UpdateCdf13()
    275 const uint16x8_t symbol_vec = vdupq_n_u16(symbol); in UpdateCdf13()
    306 const uint16x8_t cdf_max_probability = vdupq_n_u16(kCdfMaxProbability); in UpdateCdf16()
    307 const uint16x8_t symbol_vec = vdupq_n_u16(symbol); in UpdateCdf16()
|
/external/libjpeg-turbo/simd/arm/ |
D | jdsample-neon.c | 69 const uint16x8_t one_u16 = vdupq_n_u16(1); in jsimd_h2v1_fancy_upsample_neon()
    214 const uint16x8_t seven_u16 = vdupq_n_u16(7); in jsimd_h2v2_fancy_upsample_neon()
    216 const uint16x8_t three_u16 = vdupq_n_u16(3); in jsimd_h2v2_fancy_upsample_neon()
    420 const uint16x8_t one_u16 = vdupq_n_u16(1); in jsimd_h1v2_fancy_upsample_neon()
|
/external/libvpx/libvpx/vp8/common/arm/neon/ |
D | dc_only_idct_add_neon.c | 25 qAdd = vdupq_n_u16(a1); in vp8_dc_only_idct_add_neon()
|
/external/zlib/contrib/optimizations/ |
D | slide_hash_neon.h | 25 const uint16x8_t v = vdupq_n_u16(w_size); in neon_slide_hash_update()
|
/external/angle/third_party/zlib/contrib/optimizations/ |
D | slide_hash_neon.h | 25 const uint16x8_t v = vdupq_n_u16(w_size); in neon_slide_hash_update()
|
/external/rust/crates/libz-sys/src/zlib-ng/arch/arm/ |
D | slide_neon.c | 29 v = vdupq_n_u16(window_size); in slide_hash_chain()
|
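The zlib, angle/zlib, and zlib-ng entries that close the listing use vdupq_n_u16 differently: the window size is broadcast once and then subtracted from every hash-table entry with a saturating subtract, so entries older than the window clamp to zero instead of wrapping. The sketch below illustrates that saturating-subtract idea under assumed names and layout; it is not the actual slide-hash code from those files.

/* Sketch of the slide-hash idiom hinted at by the zlib entries above
 * (hypothetical names, not the real neon_slide_hash_update/slide_hash_chain). */
#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

static void slide_hash_sketch(uint16_t *table, size_t entries,
                              uint16_t w_size) {
  /* Broadcast the window size once. */
  const uint16x8_t v_wsize = vdupq_n_u16(w_size);

  /* entries is assumed to be a multiple of 8 here for brevity. */
  for (size_t i = 0; i < entries; i += 8) {
    uint16x8_t m = vld1q_u16(table + i);
    m = vqsubq_u16(m, v_wsize);  /* saturating subtract: clamps at 0 */
    vst1q_u16(table + i, m);
  }
}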