Searched refs:vmull_u8 (Results 1 – 25 of 70) sorted by relevance

/external/libvpx/libvpx/vp8/common/arm/neon/
bilinearpredict_neon.c
84 d0 = vmull_u8(vreinterpret_u8_u32(c0.val[0]), filter0); in vp8_bilinear_predict4x4_neon()
85 d1 = vmull_u8(vreinterpret_u8_u32(c1.val[0]), filter0); in vp8_bilinear_predict4x4_neon()
86 d2 = vmull_u8(a4, filter0); in vp8_bilinear_predict4x4_neon()
105 uint16x8_t b0 = vmull_u8(e0, filter0); in vp8_bilinear_predict4x4_neon()
106 uint16x8_t b1 = vmull_u8(e1, filter0); in vp8_bilinear_predict4x4_neon()
155 q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8); in vp8_bilinear_predict8x4_neon()
156 q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8); in vp8_bilinear_predict8x4_neon()
157 q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8); in vp8_bilinear_predict8x4_neon()
158 q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8); in vp8_bilinear_predict8x4_neon()
159 q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8); in vp8_bilinear_predict8x4_neon()
[all …]
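Every bilinear kernel above follows the same shape: vmull_u8 starts a u8*u8 -> u16 accumulator with the first filter tap, vmlal_u8 folds in the second tap, and a rounding narrow brings the result back to 8 bits. A minimal sketch of that two-tap step, assuming VP8's 7-bit filter precision (taps summing to 128); the helper name is illustrative, not libvpx code:

#include <arm_neon.h>
#include <stdint.h>

/* Two-tap widening multiply-accumulate, as in the bilinear hits above. */
static inline uint8x8_t bilinear_2tap_u8(uint8x8_t a, uint8x8_t b,
                                         uint8_t tap0, uint8_t tap1) {
  uint16x8_t acc = vmull_u8(a, vdup_n_u8(tap0)); /* a * tap0, widened to u16        */
  acc = vmlal_u8(acc, b, vdup_n_u8(tap1));       /* + b * tap1                      */
  return vqrshrn_n_u16(acc, 7);                  /* round back to u8 (taps sum 128) */
}

The widening form is the whole point of vmull_u8 here: it avoids a separate zero-extend before the multiply, which is why all of these filter kernels reach for it rather than a plain 16-bit multiply.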
sixtappredict_neon.c
125 c0 = vmull_u8(b0, filter0); in yonly4x4()
126 c1 = vmull_u8(b2, filter0); in yonly4x4()
127 c2 = vmull_u8(b5, filter5); in yonly4x4()
128 c3 = vmull_u8(b7, filter5); in yonly4x4()
226 d0123 = vmull_u8(vreinterpret_u8_u32(s01_f5_q.val[0]), filter5); in vp8_sixtap_predict4x4_neon()
227 d4567 = vmull_u8(vreinterpret_u8_u32(s23_f5_q.val[0]), filter5); in vp8_sixtap_predict4x4_neon()
260 d0123_a = vmull_u8(vreinterpret_u8_u32(s01_f3_q.val[0]), filter3); in vp8_sixtap_predict4x4_neon()
261 d4567_a = vmull_u8(vreinterpret_u8_u32(s23_f3_q.val[0]), filter3); in vp8_sixtap_predict4x4_neon()
302 d0123 = vmull_u8(vreinterpret_u8_u32(s01_f5_q.val[0]), filter5); in vp8_sixtap_predict4x4_neon()
303 d4567 = vmull_u8(vreinterpret_u8_u32(s23_f5_q.val[0]), filter5); in vp8_sixtap_predict4x4_neon()
[all …]
/external/libhevc/common/arm/
ihevc_intra_pred_chroma_mode_27_to_33.s
179 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
182 vmlal.u8 q5,d9,d31 @(i row)vmull_u8(ref_main_idx_1, dup_const_fract)
190 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
194 vmlal.u8 q7,d13,d29 @(ii)vmull_u8(ref_main_idx_1, dup_const_fract)
205 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract)
208 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
222 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
226 vmlal.u8 q11,d21,d25 @(iv)vmull_u8(ref_main_idx_1, dup_const_fract)
240 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
243 vmlal.u8 q5,d9,d31 @(v)vmull_u8(ref_main_idx_1, dup_const_fract)
[all …]
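The assembly comments above spell out what each vmull.u8/vmlal.u8 pair computes: ref_main_idx*(32 - fract) + ref_main_idx_1*fract, i.e. HEVC's two-tap angular interpolation, rounded and shifted right by 5. A hedged intrinsic sketch of one row (variable names follow the comments; the helper itself is not from libhevc):

#include <arm_neon.h>
#include <stdint.h>

static inline uint8x8_t hevc_angular_interp(uint8x8_t ref_main_idx,
                                            uint8x8_t ref_main_idx_1,
                                            uint8_t fract) {
  const uint8x8_t dup_const_fract = vdup_n_u8(fract);
  const uint8x8_t dup_const_32_fract = vdup_n_u8((uint8_t)(32 - fract));
  uint16x8_t acc = vmull_u8(ref_main_idx, dup_const_32_fract); /* (32-fract)*ref       */
  acc = vmlal_u8(acc, ref_main_idx_1, dup_const_fract);        /* + fract*ref_next     */
  return vrshrn_n_u16(acc, 5);                                 /* (+16) >> 5, to u8    */
}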
ihevc_intra_pred_luma_mode_27_to_33.s
181 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
184 vmlal.u8 q5,d9,d31 @(i row)vmull_u8(ref_main_idx_1, dup_const_fract)
192 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
196 vmlal.u8 q7,d13,d29 @(ii)vmull_u8(ref_main_idx_1, dup_const_fract)
207 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract)
210 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
223 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
227 vmlal.u8 q11,d21,d25 @(iv)vmull_u8(ref_main_idx_1, dup_const_fract)
241 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
244 vmlal.u8 q5,d9,d31 @(v)vmull_u8(ref_main_idx_1, dup_const_fract)
[all …]
ihevc_intra_pred_filters_luma_mode_19_to_25.s
291 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
294 vmlal.u8 q5,d9,d31 @(i row)vmull_u8(ref_main_idx_1, dup_const_fract)
301 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
305 vmlal.u8 q7,d13,d29 @(ii)vmull_u8(ref_main_idx_1, dup_const_fract)
316 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract)
319 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
331 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
335 vmlal.u8 q11,d21,d25 @(iv)vmull_u8(ref_main_idx_1, dup_const_fract)
348 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
351 vmlal.u8 q5,d9,d31 @(v)vmull_u8(ref_main_idx_1, dup_const_fract)
[all …]
ihevc_intra_pred_filters_chroma_mode_19_to_25.s
288 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
291 vmlal.u8 q5,d9,d31 @(i row)vmull_u8(ref_main_idx_1, dup_const_fract)
298 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
302 vmlal.u8 q7,d13,d29 @(ii)vmull_u8(ref_main_idx_1, dup_const_fract)
313 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract)
316 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
329 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
333 vmlal.u8 q11,d21,d25 @(iv)vmull_u8(ref_main_idx_1, dup_const_fract)
346 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
349 vmlal.u8 q5,d9,d31 @(v)vmull_u8(ref_main_idx_1, dup_const_fract)
[all …]
ihevc_inter_pred_chroma_horz.s
175 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
212 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
238 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
250 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
297 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
311 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
319 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
349 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
355 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
405 vmull.u8 q4,d1,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
[all …]
ihevc_inter_pred_chroma_horz_w16out.s
194 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
225 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
253 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
266 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
306 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
320 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
328 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
351 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
357 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
402 vmull.u8 q4,d1,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
[all …]
ihevc_intra_pred_filters_neon_intr.c
742 prod_t1 = vmull_u8(const_nt_1_col_t, pu1_ref_two_nt_1_row_dup); in ihevc_intra_pred_luma_planar_neonintr()
746 prod_t2 = vmull_u8(const_col_1_t, pu1_ref_three_nt_1_dup); in ihevc_intra_pred_luma_planar_neonintr()
822 prod_t1 = vmull_u8(const_nt_1_col_t, pu1_ref_two_nt_1_row_dup); in ihevc_intra_pred_luma_planar_neonintr()
829 prod_t2 = vmull_u8(const_col_1_t, pu1_ref_three_nt_1_dup); in ihevc_intra_pred_luma_planar_neonintr()
2147 mul_res1 = vmull_u8(ref_main_idx, dup_const_32_fract); in ihevc_intra_pred_luma_mode_3_to_9_neonintr()
2148 mul_res2 = vmull_u8(ref_main_idx_1, dup_const_fract); in ihevc_intra_pred_luma_mode_3_to_9_neonintr()
2232 mul_res1 = vmull_u8(vreinterpret_u8_u32(pu1_ref_val1), dup_32_fract); in ihevc_intra_pred_luma_mode_3_to_9_neonintr()
2233 mul_res2 = vmull_u8(vreinterpret_u8_u32(pu1_ref_val2), dup_fract); in ihevc_intra_pred_luma_mode_3_to_9_neonintr()
2427 mul_res1 = vmull_u8(ref_main_idx, dup_const_32_fract); in ihevc_intra_pred_luma_mode_11_to_17_neonintr()
2428 mul_res2 = vmull_u8(ref_main_idx_1, dup_const_fract); in ihevc_intra_pred_luma_mode_11_to_17_neonintr()
[all …]
ihevc_inter_pred_filters_luma_vert.s
164 vmull.u8 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
182 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
247 vmull.u8 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
273 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
353 vmull.u8 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
366 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
440 …vmull.u8 q0,d5,d23 @mul_res1 = vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffa…
453 …vmull.u8 q1,d7,d25 @mul_res2 = vmull_u8(vreinterpret_u8_u32(src_tmp4), coeffa…
618 vmull.u8 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
636 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
[all …]
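For the vertical luma filter the comments again name vmull_u8 as the first product (mul_res1 = vmull_u8(src_tmp2, coeffabs_1)); the remaining taps are then accumulated on top of that u16 register. A rough sketch of the idea, assuming the usual vmlal_u8/vmlsl_u8 split for positive and negative coefficients (the sign pattern and coefficient handling here are illustrative, not libhevc's table):

#include <arm_neon.h>

/* Four taps of a vertical filter; returns the raw 16-bit accumulator
 * and leaves narrowing/clipping to the caller. */
static inline uint16x8_t vert_filter_4taps(uint8x8_t s0, uint8x8_t s1,
                                           uint8x8_t s2, uint8x8_t s3,
                                           uint8x8_t c0, uint8x8_t c1,
                                           uint8x8_t c2, uint8x8_t c3) {
  uint16x8_t acc = vmull_u8(s1, c1);  /* first product, widened          */
  acc = vmlsl_u8(acc, s0, c0);        /* negative tap via its |coeff|    */
  acc = vmlal_u8(acc, s2, c2);        /* positive tap                    */
  acc = vmlsl_u8(acc, s3, c3);        /* negative tap                    */
  return acc;
}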
ihevc_inter_pred_filters_luma_vert_w16inp.s
153 vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
169 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
225 vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
241 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
310 vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
324 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
/external/libhevc/encoder/arm/
ihevce_ssd_calculator_neon.c
75 sqabs_low = vmull_u8(vget_low_u8(abs), vget_low_u8(abs)); in ihevce_4x4_ssd_computer_neon()
76 sqabs_high = vmull_u8(vget_high_u8(abs), vget_high_u8(abs)); in ihevce_4x4_ssd_computer_neon()
102 sqabs = vmull_u8(abs, abs); in ihevce_1x8_ssd_computer_neon()
127 sqabs_low = vmull_u8(vget_low_u8(abs), vget_low_u8(abs)); in ihevce_1x16_ssd_computer_neon()
128 sqabs_high = vmull_u8(vget_high_u8(abs), vget_high_u8(abs)); in ihevce_1x16_ssd_computer_neon()
159 sqabs_0 = vmull_u8(vget_low_u8(abs_0), vget_low_u8(abs_0)); in ihevce_1x32_ssd_computer_neon()
160 sqabs_1 = vmull_u8(vget_high_u8(abs_0), vget_high_u8(abs_0)); in ihevce_1x32_ssd_computer_neon()
161 sqabs_2 = vmull_u8(vget_low_u8(abs_1), vget_low_u8(abs_1)); in ihevce_1x32_ssd_computer_neon()
162 sqabs_3 = vmull_u8(vget_high_u8(abs_1), vget_high_u8(abs_1)); in ihevce_1x32_ssd_computer_neon()
211 sqabs_0 = vmull_u8(vget_low_u8(abs_0), vget_low_u8(abs_0)); in ihevce_1x64_ssd_computer_neon()
[all …]
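The ihevce SSD kernels use vmull_u8 for squaring: the absolute difference of source and prediction is multiplied by itself, which widens the squares to 16 bits for free. A small self-contained sketch of that idiom for one 8-pixel row (function and variable names are illustrative):

#include <arm_neon.h>
#include <stdint.h>

static inline uint32_t ssd_row8_u8(const uint8_t *src, const uint8_t *pred) {
  const uint8x8_t s = vld1_u8(src);
  const uint8x8_t p = vld1_u8(pred);
  const uint8x8_t abs = vabd_u8(s, p);           /* |src - pred| per lane    */
  const uint16x8_t sqabs = vmull_u8(abs, abs);   /* squares, widened to u16  */
  const uint32x4_t sum4 = vpaddlq_u16(sqabs);    /* pairwise add into u32    */
  const uint32x2_t sum2 = vadd_u32(vget_low_u32(sum4), vget_high_u32(sum4));
  return vget_lane_u32(vpadd_u32(sum2, sum2), 0);
}

The same squaring trick appears again in libgav1's BoxSum/BoxFilter hits further down.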
ihevce_ssd_and_sad_calculator_neon.c
71 const uint16x8_t sq_abs_l = vmull_u8(abs_l, abs_l); in ihevce_ssd_and_sad_calculator_neon()
72 const uint16x8_t sq_abs_h = vmull_u8(abs_h, abs_h); in ihevce_ssd_and_sad_calculator_neon()
194 sqabs_l = vmull_u8(abs_l, abs_l); in ihevce_ssd_and_sad_calculator_neon()
195 sqabs_h = vmull_u8(abs_h, abs_h); in ihevce_ssd_and_sad_calculator_neon()
205 sqabs_l = vmull_u8(abs_l, abs_l); in ihevce_ssd_and_sad_calculator_neon()
206 sqabs_h = vmull_u8(abs_h, abs_h); in ihevce_ssd_and_sad_calculator_neon()
257 sqabs_l = vmull_u8(abs_l, abs_l); in ihevce_ssd_and_sad_calculator_neon()
258 sqabs_h = vmull_u8(abs_h, abs_h); in ihevce_ssd_and_sad_calculator_neon()
268 sqabs_l = vmull_u8(abs_l, abs_l); in ihevce_ssd_and_sad_calculator_neon()
269 sqabs_h = vmull_u8(abs_h, abs_h); in ihevce_ssd_and_sad_calculator_neon()
[all …]
ihevce_scale_by_2_neon.c
89 p = vreinterpretq_s16_u16(vmull_u8(c, wt_0)); // a[0] * 66 in ihevce_horz_scale_neon_w16()
162 p = vreinterpretq_s16_u16(vmull_u8(vget_low_u8(src[c]), wt_0)); in ihevce_vert_scale_neon_w16()
180 p = vreinterpretq_s16_u16(vmull_u8(vget_high_u8(src[c]), wt_0)); in ihevce_vert_scale_neon_w16()
/external/libgav1/libgav1/src/dsp/arm/
intrapred_smooth_neon.cc
98 const uint16x8_t weighted_bl = vmull_u8(scaled_weights_y, bottom_left_v); in Smooth4Or8xN_NEON()
100 const uint16x8_t weighted_top = vmull_u8(weights_y_v, top_v); in Smooth4Or8xN_NEON()
101 const uint16x8_t weighted_left = vmull_u8(weights_x_v, left_v); in Smooth4Or8xN_NEON()
102 const uint16x8_t weighted_tr = vmull_u8(scaled_weights_x, top_right_v); in Smooth4Or8xN_NEON()
123 const uint16x8_t weighted_top_low = vmull_u8(weights_y, vget_low_u8(top)); in CalculateWeightsAndPred()
124 const uint16x8_t weighted_left_low = vmull_u8(vget_low_u8(weights_x), left); in CalculateWeightsAndPred()
126 vmull_u8(vget_low_u8(scaled_weights_x), top_right); in CalculateWeightsAndPred()
135 const uint16x8_t weighted_top_high = vmull_u8(weights_y, vget_high_u8(top)); in CalculateWeightsAndPred()
136 const uint16x8_t weighted_left_high = vmull_u8(vget_high_u8(weights_x), left); in CalculateWeightsAndPred()
138 vmull_u8(vget_high_u8(scaled_weights_x), top_right); in CalculateWeightsAndPred()
[all …]
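libgav1's SMOOTH predictor blends four sources with complementary weights; every product is an 8-bit-by-8-bit widening multiply, so vmull_u8 keeps the whole blend inside 16 bits. A hedged sketch of the per-row arithmetic, assuming the weights and their 256-complements both fit in u8 and a final rounding shift by 8 (the halving-add detail reflects my reading of the file, not a verified copy):

#include <arm_neon.h>

static inline uint8x8_t smooth_blend_row(uint8x8_t top, uint8x8_t bottom_left,
                                         uint8x8_t left, uint8x8_t top_right,
                                         uint8x8_t weights_y, uint8x8_t scaled_weights_y,
                                         uint8x8_t weights_x, uint8x8_t scaled_weights_x) {
  const uint16x8_t weighted_top  = vmull_u8(weights_y, top);
  const uint16x8_t weighted_bl   = vmull_u8(scaled_weights_y, bottom_left);
  const uint16x8_t weighted_left = vmull_u8(weights_x, left);
  const uint16x8_t weighted_tr   = vmull_u8(scaled_weights_x, top_right);
  const uint16x8_t vert  = vaddq_u16(weighted_top, weighted_bl);   /* <= 256*255, fits in u16     */
  const uint16x8_t horiz = vaddq_u16(weighted_left, weighted_tr);
  const uint16x8_t avg   = vhaddq_u16(vert, horiz);                /* (vert+horiz)/2, no overflow */
  return vrshrn_n_u16(avg, 8);                                     /* /256 with rounding          */
}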
obmc_neon.cc
44 const uint16x8_t weighted_pred = vmull_u8(pred_mask, pred_val); in WriteObmcLine4()
80 const uint16x8_t weighted_pred = vmull_u8(pred_mask, pred_val); in OverlapBlend2xH_NEON()
129 const uint16x8_t weighted_pred = vmull_u8(pred_mask, pred_val); in OverlapBlendFromLeft8xH_NEON()
177 vmull_u8(vget_low_u8(pred_mask), vget_low_u8(pred_val)); in OverlapBlendFromLeft_NEON()
183 vmull_u8(vget_high_u8(pred_mask), vget_high_u8(pred_val)); in OverlapBlendFromLeft_NEON()
299 const uint16x8_t weighted_pred = vmull_u8(pred_mask, pred_val); in OverlapBlendFromTop8xH_NEON()
349 vmull_u8(pred_mask, vget_low_u8(pred_val)); in OverlapBlendFromTop_NEON()
355 vmull_u8(pred_mask, vget_high_u8(pred_val)); in OverlapBlendFromTop_NEON()
loop_restoration_neon.cc
915 sq[0] = vmull_u8(s[0], s[0]); in BoxSum()
921 sq[1] = vmull_u8(s[1], s[1]); in BoxSum()
952 sq[0] = vmull_u8(s[0], s[0]); in BoxSum()
958 sq[1] = vmull_u8(s[1], s[1]); in BoxSum()
1153 sq[0][0] = vmull_u8(vget_low_u8(s[0][0]), vget_low_u8(s[0][0])); in BoxFilterPreProcess5Lo()
1154 sq[1][0] = vmull_u8(vget_low_u8(s[1][0]), vget_low_u8(s[1][0])); in BoxFilterPreProcess5Lo()
1155 sq[0][1] = vmull_u8(vget_high_u8(s[0][0]), vget_high_u8(s[0][0])); in BoxFilterPreProcess5Lo()
1156 sq[1][1] = vmull_u8(vget_high_u8(s[1][0]), vget_high_u8(s[1][0])); in BoxFilterPreProcess5Lo()
1188 sq[0][2] = vmull_u8(vget_low_u8(s[0][1]), vget_low_u8(s[0][1])); in BoxFilterPreProcess5()
1189 sq[1][2] = vmull_u8(vget_low_u8(s[1][1]), vget_low_u8(s[1][1])); in BoxFilterPreProcess5()
[all …]
/external/libaom/libaom/av1/common/arm/
blend_a64_hmask_neon.c
51 res_low = vmull_u8(vget_low_u8(m_q), vget_low_u8(tmp0_q)); in aom_blend_a64_hmask_neon()
54 res_high = vmull_u8(vget_high_u8(m_q), vget_high_u8(tmp0_q)); in aom_blend_a64_hmask_neon()
78 res = vmull_u8(m, tmp0); in aom_blend_a64_hmask_neon()
97 res = vmull_u8(m, tmp0); in aom_blend_a64_hmask_neon()
121 res = vmull_u8(m, tmp0); in aom_blend_a64_hmask_neon()
blend_a64_vmask_neon.c
49 res_low = vmull_u8(m, vget_low_u8(tmp0_q)); in aom_blend_a64_vmask_neon()
51 res_high = vmull_u8(m, vget_high_u8(tmp0_q)); in aom_blend_a64_vmask_neon()
72 res = vmull_u8(m, tmp0); in aom_blend_a64_vmask_neon()
96 res = vmull_u8(m, tmp0); in aom_blend_a64_vmask_neon()
128 res = vmull_u8(m, tmp0); in aom_blend_a64_vmask_neon()
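Both aom_blend_a64_hmask_neon and aom_blend_a64_vmask_neon above compute the same 0..64 alpha blend: res = m*src0 + (64 - m)*src1, rounded and shifted right by 6. A compact sketch of that step (the helper and the standalone mask inversion are mine; the real kernels fold them into the load/store loops):

#include <arm_neon.h>

static inline uint8x8_t blend_a64_u8(uint8x8_t src0, uint8x8_t src1, uint8x8_t m) {
  const uint8x8_t m_inv = vsub_u8(vdup_n_u8(64), m);  /* 64 - m                  */
  uint16x8_t res = vmull_u8(m, src0);                 /* m * src0, widened       */
  res = vmlal_u8(res, m_inv, src1);                   /* + (64 - m) * src1       */
  return vrshrn_n_u16(res, 6);                        /* (+32) >> 6, back to u8  */
}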
/external/libvpx/libvpx/vpx_dsp/arm/
subpel_variance_neon.c
39 const uint16x8_t a = vmull_u8(src_0, f0); in var_filter_block2d_bil_w4()
61 const uint16x8_t a = vmull_u8(src_0, f0); in var_filter_block2d_bil_w8()
85 const uint16x8_t a = vmull_u8(vget_low_u8(src_0), f0); in var_filter_block2d_bil_w16()
88 const uint16x8_t c = vmull_u8(vget_high_u8(src_0), f0); in var_filter_block2d_bil_w16()
/external/libaom/libaom/aom_dsp/arm/
subpel_variance_neon.c
36 const uint16x8_t a = vmull_u8(src_0, f0); in var_filter_block2d_bil_w8()
60 const uint16x8_t a = vmull_u8(vget_low_u8(src_0), f0); in var_filter_block2d_bil_w16()
63 const uint16x8_t c = vmull_u8(vget_high_u8(src_0), f0); in var_filter_block2d_bil_w16()
/external/skqp/src/opts/
Sk4px_NEON.h
16 return Sk16h(vmull_u8(vget_low_u8 (this->fVec), vget_low_u8 (other.fVec)), in mulWiden()
17 vmull_u8(vget_high_u8(this->fVec), vget_high_u8(other.fVec))); in mulWiden()
/external/skia/src/opts/
Sk4px_NEON.h
16 return Sk16h(vmull_u8(vget_low_u8 (this->fVec), vget_low_u8 (other.fVec)), in mulWiden()
17 vmull_u8(vget_high_u8(this->fVec), vget_high_u8(other.fVec))); in mulWiden()
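Skia's Sk4px::mulWiden shows the other common reason for vmull_u8: widening a full 16-lane multiply. Since the intrinsic only handles 8 lanes, the 16-byte vector is split into low and high halves and the two u16 results are kept as a pair. A C-flavoured sketch of the same split (the struct is a stand-in for Skia's Sk16h):

#include <arm_neon.h>

typedef struct { uint16x8_t lo, hi; } u16x16;  /* stand-in for Sk16h */

static inline u16x16 mul_widen_u8x16(uint8x16_t a, uint8x16_t b) {
  u16x16 r;
  r.lo = vmull_u8(vget_low_u8(a),  vget_low_u8(b));   /* lanes 0..7  */
  r.hi = vmull_u8(vget_high_u8(a), vget_high_u8(b));  /* lanes 8..15 */
  return r;
}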
/external/webp/src/dsp/
alpha_processing_neon.c
27 const uint16x8_t r1 = vmull_u8((V).val[1], alpha); \
28 const uint16x8_t g1 = vmull_u8((V).val[2], alpha); \
29 const uint16x8_t b1 = vmull_u8((V).val[(OTHER)], alpha); \
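The libwebp hits come from an alpha-premultiply macro: each colour channel is widened against the alpha channel with vmull_u8 and then divided by 255. A hedged sketch of one channel, using the exact-division identity v/255 == (v + 1 + (v >> 8)) >> 8 for v <= 255*255 (the real macro processes R, G and B together and keeps the pixels interleaved):

#include <arm_neon.h>

static inline uint8x8_t premultiply_channel(uint8x8_t channel, uint8x8_t alpha) {
  const uint16x8_t prod = vmull_u8(channel, alpha);       /* c * a, widened to u16 */
  const uint16x8_t adj  = vsraq_n_u16(prod, prod, 8);     /* + (c*a >> 8)          */
  const uint16x8_t num  = vaddq_u16(adj, vdupq_n_u16(1)); /* + 1                   */
  return vshrn_n_u16(num, 8);                             /* >> 8, back to u8      */
}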
/external/rust/crates/libz-sys/src/zlib-ng/arch/arm/
adler32_neon.c
36 sum2 = vmull_u8( vget_low_u8(t0), vget_low_u8(d0)); in NEON_accum32()
51 sum2 = vmull_u8( vget_low_u8(t1), vget_low_u8(d0)); in NEON_accum32()
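Finally, zlib-ng's adler32 uses vmull_u8 for a weighted byte sum: the second Adler sum needs every byte multiplied by its distance from the end of the chunk, so a descending tap vector is multiplied against the data and accumulated. A rough, much-simplified sketch for a single 16-byte chunk (the real NEON_accum32 works on larger chunks and carries running sums; taps, names and the reduction here are illustrative):

#include <arm_neon.h>
#include <stdint.h>

static inline uint32_t weighted_sum_16(const uint8_t *buf) {
  static const uint8_t taps[16] = { 16, 15, 14, 13, 12, 11, 10, 9,
                                     8,  7,  6,  5,  4,  3,  2, 1 };
  const uint8x16_t d = vld1q_u8(buf);
  const uint8x16_t t = vld1q_u8(taps);
  uint16x8_t sum = vmull_u8(vget_low_u8(t), vget_low_u8(d));  /* taps[0..7]  * buf[0..7]  */
  sum = vmlal_u8(sum, vget_high_u8(t), vget_high_u8(d));      /* taps[8..15] * buf[8..15] */
  const uint32x4_t sum4 = vpaddlq_u16(sum);                   /* fold u16 lanes into u32  */
  const uint32x2_t sum2 = vadd_u32(vget_low_u32(sum4), vget_high_u32(sum4));
  return vget_lane_u32(vpadd_u32(sum2, sum2), 0);
}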
