
Searched refs:vreinterpret_u32_u8 (Results 1 – 25 of 69) sorted by relevance
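For reference, vreinterpret_u32_u8 is the ARM NEON intrinsic that reinterprets the 64 bits of a uint8x8_t as a uint32x2_t without moving or converting any data. Most of the hits below use it so that four 8-bit pixels can be loaded or stored through a single 32-bit lane access. A minimal sketch of that idiom (function and variable names here are illustrative, not taken from any listed file):

#include <arm_neon.h>
#include <stdint.h>

/* Store the low four bytes of an 8-byte pixel vector with one
 * 32-bit lane write -- the dominant pattern in the hits below. */
static inline void store_4_pixels(uint8_t *dst, uint8x8_t pixels) {
  vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(pixels), 0);
}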


/external/libaom/libaom/aom_dsp/arm/
loopfilter_neon.c
37 temp_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(mask_8x8))); in lpf_mask()
40 p0q0_p1q1 = vtrn_u32(vreinterpret_u32_u8(p0q0), vreinterpret_u32_u8(p1q1)); in lpf_mask()
67 temp_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(mask_8x8))); in lpf_mask2()
70 p0q0_p1q1 = vtrn_u32(vreinterpret_u32_u8(p0q0), vreinterpret_u32_u8(p1q1)); in lpf_mask2()
95 temp_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(flat_8x8))); in lpf_flat_mask4()
110 temp_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(flat_8x8))); in lpf_flat_mask3()
131 temp_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(mask_8x8))); in lpf_mask3_chroma()
134 p0q0_p1q1 = vtrn_u32(vreinterpret_u32_u8(p0q0), vreinterpret_u32_u8(p1q1)); in lpf_mask3_chroma()
192 temp1_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(temp0_8x8))); in lpf_14_neon()
225 q0p0 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p0q0))); in lpf_14_neon()
[all …]
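The loopfilter_neon.c hits above combine the reinterpret with vrev64_u32 (reverse the two 32-bit halves of one vector) and vtrn_u32 (transpose 32-bit lanes across two vectors). A hedged sketch of the half-swap idiom, with an illustrative name:

#include <arm_neon.h>

/* Swap the two 4-byte halves of an 8-byte vector, e.g. turning a
 * packed p0q0 vector into q0p0 as lpf_14_neon() does above. */
static inline uint8x8_t swap_u32_halves(uint8x8_t v) {
  return vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(v)));
}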
intrapred_neon.c
58 vst1_lane_u32((uint32_t *)(dst + i * stride), vreinterpret_u32_u8(dc), 0); in dc_4x4()
128 vst1_u32((uint32_t *)(dst + i * stride), vreinterpret_u32_u8(dc)); in dc_8x8()
336 const uint32x2_t r3 = vreinterpret_u32_u8(avg2); in aom_d135_predictor_4x4_neon()
401 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0); in aom_h_predictor_4x4_neon()
404 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0); in aom_h_predictor_4x4_neon()
407 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0); in aom_h_predictor_4x4_neon()
410 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0); in aom_h_predictor_4x4_neon()
/external/libvpx/libvpx/vpx_dsp/arm/
vpx_convolve_avg_neon.c
35 s01 = vzip_u32(vreinterpret_u32_u8(s0), vreinterpret_u32_u8(s1)); in vpx_convolve_avg_neon()
37 vld1_lane_u32((const uint32_t *)dst, vreinterpret_u32_u8(dd0), 0)); in vpx_convolve_avg_neon()
39 (const uint32_t *)(dst + dst_stride), vreinterpret_u32_u8(dd0), 1)); in vpx_convolve_avg_neon()
41 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(dd0), 0); in vpx_convolve_avg_neon()
43 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(dd0), 1); in vpx_convolve_avg_neon()
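The vpx_convolve_avg_neon.c hits above show the read-modify-write form: the current destination pixels are pulled in through a 32-bit lane load, averaged, and written back. A rough sketch of that pattern, assuming a single 4-pixel row and made-up names:

#include <arm_neon.h>
#include <stdint.h>

/* Average a 4-pixel row of src into dst via 32-bit lane accesses,
 * using a rounding halving add, mirroring the convolve-avg hits above. */
static inline void avg_4_pixels(uint8_t *dst, uint8x8_t src) {
  uint8x8_t d = vdup_n_u8(0);
  d = vreinterpret_u8_u32(
      vld1_lane_u32((const uint32_t *)dst, vreinterpret_u32_u8(d), 0));
  d = vrhadd_u8(src, d);
  vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d), 0);
}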
idct4x4_1_add_neon.c
27 vst1_lane_u32((uint32_t *)*dest, vreinterpret_u32_u8(b), 0); in idct4x4_1_add_kernel()
29 vst1_lane_u32((uint32_t *)*dest, vreinterpret_u32_u8(b), 1); in idct4x4_1_add_kernel()
vpx_scaled_convolve8_neon.c
66 vst1_lane_u32((uint32_t *)&temp[4 * z], vreinterpret_u32_u8(d), 0); in scaledconvolve_horiz_w4()
80 vreinterpret_u32_u8(d4.val[0]), 0); in scaledconvolve_horiz_w4()
82 vreinterpret_u32_u8(d4.val[1]), 0); in scaledconvolve_horiz_w4()
84 vreinterpret_u32_u8(d4.val[2]), 0); in scaledconvolve_horiz_w4()
86 vreinterpret_u32_u8(d4.val[3]), 0); in scaledconvolve_horiz_w4()
189 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d), 0); in scaledconvolve_vert_w4()
idct4x4_add_neon.c
56 vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d[1]), 1); in vpx_idct4x4_16_add_neon()
58 vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d[1]), 0); in vpx_idct4x4_16_add_neon()
vpx_convolve8_neon.c
130 vreinterpret_u32_u8(d01), 0); in vpx_convolve8_horiz_neon()
132 vreinterpret_u32_u8(d23), 0); in vpx_convolve8_horiz_neon()
134 vreinterpret_u32_u8(d01), 1); in vpx_convolve8_horiz_neon()
136 vreinterpret_u32_u8(d23), 1); in vpx_convolve8_horiz_neon()
204 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t0), 0); in vpx_convolve8_horiz_neon()
206 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t1), 0); in vpx_convolve8_horiz_neon()
208 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t2), 0); in vpx_convolve8_horiz_neon()
210 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t3), 0); in vpx_convolve8_horiz_neon()
212 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t0), 1); in vpx_convolve8_horiz_neon()
214 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t1), 1); in vpx_convolve8_horiz_neon()
[all …]
mem_neon.h
113 const uint32x2_t a_u32 = vreinterpret_u32_u8(a); in store_unaligned_u8()
175 uint32x2_t a_u32 = vreinterpret_u32_u8(a); in store_u8()
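The mem_neon.h helpers above reinterpret to uint32x2_t once and then extract lanes. A sketch of the unaligned 4-byte store variant (a plausible reconstruction under that assumption, not the exact libvpx code):

#include <arm_neon.h>
#include <stdint.h>
#include <string.h>

/* Pull 4 bytes out of lane 0 and store them with memcpy so the
 * destination does not need 4-byte alignment. */
static inline void store_unaligned_4(uint8_t *dst, uint8x8_t a) {
  const uint32_t a_u32 = vget_lane_u32(vreinterpret_u32_u8(a), 0);
  memcpy(dst, &a_u32, 4);
}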
/external/skqp/src/core/
SkBlitRow_D32.cpp
174 vst1_u32(dst, vreinterpret_u32_u8(vres)); in blit_row_s32_blend()
185 vsrc = vreinterpret_u8_u32(vld1_lane_u32(src, vreinterpret_u32_u8(vsrc), 0)); in blit_row_s32_blend()
186 vdst = vreinterpret_u8_u32(vld1_lane_u32(dst, vreinterpret_u32_u8(vdst), 0)); in blit_row_s32_blend()
194 vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0); in blit_row_s32_blend()
208 vsrc = vreinterpret_u8_u32(vld1_lane_u32(src, vreinterpret_u32_u8(vsrc), 0)); in blit_row_s32a_blend()
209 vdst = vreinterpret_u8_u32(vld1_lane_u32(dst, vreinterpret_u32_u8(vdst), 0)); in blit_row_s32a_blend()
223 vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0); in blit_row_s32a_blend()
266 vst1_u32(dst, vreinterpret_u32_u8(vres)); in blit_row_s32a_blend()
/external/skia/src/core/
SkBlitRow_D32.cpp
173 vst1_u32(dst, vreinterpret_u32_u8(vres)); in blit_row_s32_blend()
184 vsrc = vreinterpret_u8_u32(vld1_lane_u32(src, vreinterpret_u32_u8(vsrc), 0)); in blit_row_s32_blend()
185 vdst = vreinterpret_u8_u32(vld1_lane_u32(dst, vreinterpret_u32_u8(vdst), 0)); in blit_row_s32_blend()
193 vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0); in blit_row_s32_blend()
207 vsrc = vreinterpret_u8_u32(vld1_lane_u32(src, vreinterpret_u32_u8(vsrc), 0)); in blit_row_s32a_blend()
208 vdst = vreinterpret_u8_u32(vld1_lane_u32(dst, vreinterpret_u32_u8(vdst), 0)); in blit_row_s32a_blend()
222 vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0); in blit_row_s32a_blend()
265 vst1_u32(dst, vreinterpret_u32_u8(vres)); in blit_row_s32a_blend()
/external/libvpx/libvpx/vp8/common/arm/neon/
loopfiltersimpleverticaledge_neon.c
107 vtrn_u32(vreinterpret_u32_u8(a), vreinterpret_u32_u8(e)); in read_4x8()
109 vtrn_u32(vreinterpret_u32_u8(b), vreinterpret_u32_u8(f)); in read_4x8()
111 vtrn_u32(vreinterpret_u32_u8(c), vreinterpret_u32_u8(g)); in read_4x8()
113 vtrn_u32(vreinterpret_u32_u8(d), vreinterpret_u32_u8(h)); in read_4x8()
dc_only_idct_add_neon.c
36 vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 0); in vp8_dc_only_idct_add_neon()
38 vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 1); in vp8_dc_only_idct_add_neon()
shortidct4x4llm_neon.c
115 vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 0); in vp8_short_idct4x4llm_neon()
117 vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 1); in vp8_short_idct4x4llm_neon()
bilinearpredict_neon.c
75 c0 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(a01)), in vp8_bilinear_predict4x4_neon()
76 vreinterpret_u32_u8(vget_high_u8(a01))); in vp8_bilinear_predict4x4_neon()
77 c1 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(a23)), in vp8_bilinear_predict4x4_neon()
78 vreinterpret_u32_u8(vget_high_u8(a23))); in vp8_bilinear_predict4x4_neon()
79 c2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(b01)), in vp8_bilinear_predict4x4_neon()
80 vreinterpret_u32_u8(vget_high_u8(b01))); in vp8_bilinear_predict4x4_neon()
81 c3 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(b23)), in vp8_bilinear_predict4x4_neon()
82 vreinterpret_u32_u8(vget_high_u8(b23))); in vp8_bilinear_predict4x4_neon()
sixtappredict_neon.c
50 const uint32x2x2_t a_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(a)), in filter_add_accumulate()
51 vreinterpret_u32_u8(vget_high_u8(a))); in filter_add_accumulate()
52 const uint32x2x2_t b_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(b)), in filter_add_accumulate()
53 vreinterpret_u32_u8(vget_high_u8(b))); in filter_add_accumulate()
61 const uint32x2x2_t a_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(a)), in filter_sub_accumulate()
62 vreinterpret_u32_u8(vget_high_u8(a))); in filter_sub_accumulate()
63 const uint32x2x2_t b_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(b)), in filter_sub_accumulate()
64 vreinterpret_u32_u8(vget_high_u8(b))); in filter_sub_accumulate()
224 s01_f5_q = vzip_u32(vreinterpret_u32_u8(s0_f5), vreinterpret_u32_u8(s1_f5)); in vp8_sixtap_predict4x4_neon()
225 s23_f5_q = vzip_u32(vreinterpret_u32_u8(s2_f5), vreinterpret_u32_u8(s3_f5)); in vp8_sixtap_predict4x4_neon()
[all …]
/external/libhevc/common/arm/
ihevc_weighted_pred_neon_intr.c
170 vst1_lane_u32((uint32_t *)pu1_dst, vreinterpret_u32_u8(sto_res), 0); in ihevc_weighted_pred_uni_neonintr()
174 vst1_lane_u32((uint32_t *)pu1_dst_tmp, vreinterpret_u32_u8(sto_res), 0); in ihevc_weighted_pred_uni_neonintr()
317 vst1_lane_u32((uint32_t *)pu1_dst, vreinterpret_u32_u8(sto_res), 0); in ihevc_weighted_pred_chroma_uni_neonintr()
321 vst1_lane_u32((uint32_t *)pu1_dst_tmp, vreinterpret_u32_u8(sto_res), 0); in ihevc_weighted_pred_chroma_uni_neonintr()
481 vst1_lane_u32((uint32_t *)pu1_dst, vreinterpret_u32_u8(sto_res), 0); in ihevc_weighted_pred_bi_neonintr()
485 vst1_lane_u32((uint32_t *)pu1_dst_tmp, vreinterpret_u32_u8(sto_res), 0); in ihevc_weighted_pred_bi_neonintr()
667 vst1_lane_u32((uint32_t *)pu1_dst, vreinterpret_u32_u8(sto_res), 0); in ihevc_weighted_pred_chroma_bi_neonintr()
671 vst1_lane_u32((uint32_t *)pu1_dst_tmp, vreinterpret_u32_u8(sto_res), 0); in ihevc_weighted_pred_chroma_bi_neonintr()
816 vst1_lane_u32((uint32_t *)pu1_dst, vreinterpret_u32_u8(sto_res), 0); in ihevc_weighted_pred_bi_default_neonintr()
820 vst1_lane_u32((uint32_t *)pu1_dst_tmp, vreinterpret_u32_u8(sto_res), 0); in ihevc_weighted_pred_bi_default_neonintr()
[all …]
ihevc_intra_pred_filters_neon_intr.c
227 … vst1_lane_u32((uint32_t *)pu1_dst_const_nt, vreinterpret_u32_u8(dup_pu1_dst1), 0); in ihevc_intra_pred_luma_ref_substitution_neonintr()
278 … vst1_lane_u32((uint32_t *)pu1_dst_const_three_nt_1, vreinterpret_u32_u8(dup_pu1_dst3), 0); in ihevc_intra_pred_luma_ref_substitution_neonintr()
841 vst1_lane_u32((uint32_t *)pu1_dst, vreinterpret_u32_u8(sto_res), 0); in ihevc_intra_pred_luma_planar_neonintr()
844 vst1_lane_u32((uint32_t *)pu1_dst_tmp, vreinterpret_u32_u8(sto_res), 1); in ihevc_intra_pred_luma_planar_neonintr()
1106 vst1_lane_u32((uint32_t *)pu1_dst_tmp, vreinterpret_u32_u8(dc_val_t), 0); in ihevc_intra_pred_luma_dc_neonintr()
1126 vst1_lane_u32((uint32_t *)pu1_dst_tmp, vreinterpret_u32_u8(dc_val_t), 0); in ihevc_intra_pred_luma_dc_neonintr()
1255 vst1_lane_u32((uint32_t *)pu1_dst_4, vreinterpret_u32_u8(round_val), 0); in ihevc_intra_pred_luma_horz_neonintr()
1260 vst1_lane_u32((uint32_t *)pu1_dst_4, vreinterpret_u32_u8(dup_val), 0); in ihevc_intra_pred_luma_horz_neonintr()
1264 vst1_lane_u32((uint32_t *)pu1_dst_4, vreinterpret_u32_u8(dup_val), 0); in ihevc_intra_pred_luma_horz_neonintr()
1268 vst1_lane_u32((uint32_t *)pu1_dst_4, vreinterpret_u32_u8(dup_val), 0); in ihevc_intra_pred_luma_horz_neonintr()
[all …]
/external/libgav1/libgav1/src/dsp/arm/
common_neon.h
229 vld1_lane_u32(&temp, vreinterpret_u32_u8(val), lane)); in Load4()
250 ValueToMem<uint32_t>(buf, vget_lane_u32(vreinterpret_u32_u8(val), 0)); in StoreLo4()
255 ValueToMem<uint32_t>(buf, vget_lane_u32(vreinterpret_u32_u8(val), 1)); in StoreHi4()
344 vzip1_u32(vreinterpret_u32_u8(a), vreinterpret_u32_u8(b))); in InterleaveLow32()
348 vzip_u32(vreinterpret_u32_u8(a), vreinterpret_u32_u8(b)).val[0]); in InterleaveLow32()
366 vzip2_u32(vreinterpret_u32_u8(a), vreinterpret_u32_u8(b))); in InterleaveHigh32()
370 vzip_u32(vreinterpret_u32_u8(a), vreinterpret_u32_u8(b)).val[1]); in InterleaveHigh32()
428 const uint32x2_t a_32 = vreinterpret_u32_u8(a); in Interleave32()
429 const uint32x2_t b_32 = vreinterpret_u32_u8(b); in Interleave32()
438 const uint32x2_t b = vrev64_u32(vreinterpret_u32_u8(a)); in Transpose32()
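The common_neon.h hits above wrap the reinterpret in interleave helpers that differ between AArch64 (vzip1_u32/vzip2_u32) and 32-bit NEON (vzip_u32, which returns both halves). A hedged sketch of the low-half interleave, with an illustrative name:

#include <arm_neon.h>

/* Interleave the low 32-bit lanes of two byte vectors:
 * result = a[0..3] followed by b[0..3]. */
static inline uint8x8_t interleave_low_32(uint8x8_t a, uint8x8_t b) {
#if defined(__aarch64__)
  return vreinterpret_u8_u32(
      vzip1_u32(vreinterpret_u32_u8(a), vreinterpret_u32_u8(b)));
#else
  return vreinterpret_u8_u32(
      vzip_u32(vreinterpret_u32_u8(a), vreinterpret_u32_u8(b)).val[0]);
#endif
}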
/external/libaom/libaom/av1/common/arm/
convolve_neon.c
280 vreinterpret_u32_u8(d01), 0); in av1_convolve_x_sr_neon()
282 vreinterpret_u32_u8(d23), 0); in av1_convolve_x_sr_neon()
284 vreinterpret_u32_u8(d01), 1); in av1_convolve_x_sr_neon()
286 vreinterpret_u32_u8(d23), 1); in av1_convolve_x_sr_neon()
372 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t0), in av1_convolve_x_sr_neon()
375 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t1), in av1_convolve_x_sr_neon()
378 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t2), in av1_convolve_x_sr_neon()
381 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t3), in av1_convolve_x_sr_neon()
384 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t0), in av1_convolve_x_sr_neon()
387 vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t1), in av1_convolve_x_sr_neon()
[all …]
transpose_neon.h
152 vtrn_u32(vreinterpret_u32_u8(*a0), vreinterpret_u32_u8(a4)); in transpose_u8_4x8()
154 vtrn_u32(vreinterpret_u32_u8(*a1), vreinterpret_u32_u8(a5)); in transpose_u8_4x8()
156 vtrn_u32(vreinterpret_u32_u8(*a2), vreinterpret_u32_u8(a6)); in transpose_u8_4x8()
158 vtrn_u32(vreinterpret_u32_u8(*a3), vreinterpret_u32_u8(a7)); in transpose_u8_4x8()
jnt_convolve_neon.c
654 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), 0); in dist_wtd_convolve_2d_vert_neon()
656 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), 1); in dist_wtd_convolve_2d_vert_neon()
658 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t1), 0); in dist_wtd_convolve_2d_vert_neon()
660 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t1), 1); in dist_wtd_convolve_2d_vert_neon()
692 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), 0); in dist_wtd_convolve_2d_vert_neon()
855 vst1_lane_u32((uint32_t *)(dst8_1), vreinterpret_u32_u8(tmp_shift0), 0); in av1_dist_wtd_convolve_2d_copy_neon()
857 vst1_lane_u32((uint32_t *)(dst8_1), vreinterpret_u32_u8(tmp_shift0), 1); in av1_dist_wtd_convolve_2d_copy_neon()
859 vst1_lane_u32((uint32_t *)(dst8_1), vreinterpret_u32_u8(tmp_shift1), 0); in av1_dist_wtd_convolve_2d_copy_neon()
861 vst1_lane_u32((uint32_t *)(dst8_1), vreinterpret_u32_u8(tmp_shift1), 1); in av1_dist_wtd_convolve_2d_copy_neon()
1032 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), in av1_dist_wtd_convolve_x_neon()
[all …]
blend_a64_hmask_neon.c
101 vreinterpret_u32_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 0); in aom_blend_a64_hmask_neon()
104 vreinterpret_u32_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 1); in aom_blend_a64_hmask_neon()
blend_a64_vmask_neon.c
100 vreinterpret_u32_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 0); in aom_blend_a64_vmask_neon()
103 vreinterpret_u32_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 1); in aom_blend_a64_vmask_neon()
/external/libjpeg-turbo/simd/arm/
jcphuff-neon.c
244 uint32_t bitmap0 = vget_lane_u32(vreinterpret_u32_u8(bitmap_all), 0); in jsimd_encode_mcu_AC_first_prepare_neon()
245 uint32_t bitmap1 = vget_lane_u32(vreinterpret_u32_u8(bitmap_all), 1); in jsimd_encode_mcu_AC_first_prepare_neon()
489 uint32_t bitmap0 = vget_lane_u32(vreinterpret_u32_u8(bitmap_all), 0); in jsimd_encode_mcu_AC_refine_prepare_neon()
490 uint32_t bitmap1 = vget_lane_u32(vreinterpret_u32_u8(bitmap_all), 1); in jsimd_encode_mcu_AC_refine_prepare_neon()
530 bitmap0 = vget_lane_u32(vreinterpret_u32_u8(bitmap_all), 0); in jsimd_encode_mcu_AC_refine_prepare_neon()
531 bitmap1 = vget_lane_u32(vreinterpret_u32_u8(bitmap_all), 1); in jsimd_encode_mcu_AC_refine_prepare_neon()
579 bitmap0 = vget_lane_u32(vreinterpret_u32_u8(bitmap_all), 0); in jsimd_encode_mcu_AC_refine_prepare_neon()
580 bitmap1 = vget_lane_u32(vreinterpret_u32_u8(bitmap_all), 1); in jsimd_encode_mcu_AC_refine_prepare_neon()
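The jcphuff-neon.c hits above use the reinterpret to split an 8-byte flag vector into two scalar 32-bit bitmap words. A minimal sketch of that extraction (illustrative function name, not the libjpeg-turbo code itself):

#include <arm_neon.h>
#include <stdint.h>

/* Split an 8-byte flag vector into two 32-bit words, one per half. */
static inline void split_bitmap(uint8x8_t bitmap_all,
                                uint32_t *bitmap0, uint32_t *bitmap1) {
  *bitmap0 = vget_lane_u32(vreinterpret_u32_u8(bitmap_all), 0);
  *bitmap1 = vget_lane_u32(vreinterpret_u32_u8(bitmap_all), 1);
}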
/external/libvpx/libvpx/vp9/encoder/arm/neon/
vp9_frame_scale_neon.c
226 vst1_lane_u32((uint32_t *)(t + 0 * width_hor), vreinterpret_u32_u8(d[0]), in scale_plane_2_to_1_general()
228 vst1_lane_u32((uint32_t *)(t + 1 * width_hor), vreinterpret_u32_u8(d[1]), in scale_plane_2_to_1_general()
230 vst1_lane_u32((uint32_t *)(t + 2 * width_hor), vreinterpret_u32_u8(d[2]), in scale_plane_2_to_1_general()
232 vst1_lane_u32((uint32_t *)(t + 3 * width_hor), vreinterpret_u32_u8(d[3]), in scale_plane_2_to_1_general()
234 vst1_lane_u32((uint32_t *)(t + 4 * width_hor), vreinterpret_u32_u8(d[0]), in scale_plane_2_to_1_general()
236 vst1_lane_u32((uint32_t *)(t + 5 * width_hor), vreinterpret_u32_u8(d[1]), in scale_plane_2_to_1_general()
238 vst1_lane_u32((uint32_t *)(t + 6 * width_hor), vreinterpret_u32_u8(d[2]), in scale_plane_2_to_1_general()
240 vst1_lane_u32((uint32_t *)(t + 7 * width_hor), vreinterpret_u32_u8(d[3]), in scale_plane_2_to_1_general()
