
Searched refs:vec0 (Results 1 – 25 of 92) sorted by relevance


/external/libvpx/libvpx/vpx_dsp/mips/
idct32x32_msa.c
44 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; in idct32x8_row_even_process_store() local
53 BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0); in idct32x8_row_even_process_store()
54 DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3); in idct32x8_row_even_process_store()
61 BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0); in idct32x8_row_even_process_store()
62 BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4); in idct32x8_row_even_process_store()
72 vec0 = reg0 + reg4; in idct32x8_row_even_process_store()
80 reg3 = vec0; in idct32x8_row_even_process_store()
91 vec0 = reg0 - reg6; in idct32x8_row_even_process_store()
96 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1); in idct32x8_row_even_process_store()
128 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; in idct32x8_row_odd_process_store() local
[all …]
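
The idct32x32_msa.c hits above use vec0..vec3 as staging registers for BUTTERFLY_4 add/sub stages and DOTP_CONST_PAIR cosine rotations. As orientation only, here is a scalar sketch of what those two macros do per 16-bit lane; the operand ordering and sign conventions are assumptions for illustration, and the authoritative macro definitions live in vpx_dsp/mips/macros_msa.h.

#include <stdint.h>

#define DCT_CONST_BITS 14  /* fixed-point precision of the cospi constants */

/* Round-to-nearest shift applied after every rotation. */
static int32_t dct_const_round_shift(int32_t x) {
  return (x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS;
}

/* BUTTERFLY_4-style stage: pairwise sums and differences of four lanes. */
static void butterfly4(int16_t in0, int16_t in1, int16_t in2, int16_t in3,
                       int16_t out[4]) {
  out[0] = in0 + in3;
  out[1] = in1 + in2;
  out[2] = in1 - in2;
  out[3] = in0 - in3;
}

/* DOTP_CONST_PAIR-style rotation of a lane pair by a cospi constant pair.
 * Sign conventions here are an assumption made for illustration. */
static void rotate_pair(int16_t a, int16_t b, int16_t cospi_a, int16_t cospi_b,
                        int16_t *out0, int16_t *out1) {
  *out0 = (int16_t)dct_const_round_shift(a * cospi_b - b * cospi_a);
  *out1 = (int16_t)dct_const_round_shift(a * cospi_a + b * cospi_b);
}
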
vpx_convolve8_avg_horiz_msa.c
57 v8i16 filt, vec0, vec1, vec2, vec3; in common_hz_8t_and_aver_dst_4x8_msa() local
78 filt0, filt1, filt2, filt3, vec0, vec1); in common_hz_8t_and_aver_dst_4x8_msa()
83 SRARI_H4_SH(vec0, vec1, vec2, vec3, FILTER_BITS); in common_hz_8t_and_aver_dst_4x8_msa()
84 SAT_SH4_SH(vec0, vec1, vec2, vec3, 7); in common_hz_8t_and_aver_dst_4x8_msa()
85 PCKEV_B4_UB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, res0, res1, res2, in common_hz_8t_and_aver_dst_4x8_msa()
151 v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; in common_hz_8t_and_aver_dst_16w_msa() local
171 VSHF_B4_SH(src0, src0, mask0, mask1, mask2, mask3, vec0, vec4, vec8, vec12); in common_hz_8t_and_aver_dst_16w_msa()
177 DOTP_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, in common_hz_8t_and_aver_dst_16w_msa()
181 DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0, vec1, in common_hz_8t_and_aver_dst_16w_msa()
185 ADDS_SH4_SH(vec0, vec8, vec1, vec9, vec2, vec10, vec3, vec11, out0, out1, in common_hz_8t_and_aver_dst_16w_msa()
[all …]
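
The common_hz_8t_and_aver_dst_* hits run an 8-tap horizontal filter through vec0..vec7 (shuffle, dot product, SRARI rounding by FILTER_BITS, saturate, pack) and then average the result with the existing destination. A scalar sketch of that per-pixel pipeline, assuming FILTER_BITS is 7 and the usual (a + b + 1) >> 1 averaging; function names here are illustrative only.

#include <stdint.h>

#define FILTER_BITS 7

static uint8_t clip_pixel(int v) {
  return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

/* One output pixel: 8-tap filter, round by FILTER_BITS, clip, then
 * average with the pixel already in the destination. */
static uint8_t hz_8tap_avg_pixel(const uint8_t *src, const int16_t filt[8],
                                 uint8_t dst) {
  int32_t sum = 0;
  for (int k = 0; k < 8; ++k) sum += src[k] * filt[k];
  const int32_t res = (sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS;
  return (uint8_t)((clip_pixel(res) + dst + 1) >> 1);
}
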
vpx_convolve8_horiz_msa.c
321 v16u8 filt0, vec0, vec1, res0, res1; in common_hz_2t_4x4_msa() local
331 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); in common_hz_2t_4x4_msa()
332 DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3); in common_hz_2t_4x4_msa()
341 v16u8 vec0, vec1, vec2, vec3, filt0; in common_hz_2t_4x8_msa() local
353 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); in common_hz_2t_4x8_msa()
355 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5, in common_hz_2t_4x8_msa()
380 v8u16 vec0, vec1, vec2, vec3, filt; in common_hz_2t_8x4_msa() local
389 VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); in common_hz_2t_8x4_msa()
391 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, in common_hz_2t_8x4_msa()
393 SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); in common_hz_2t_8x4_msa()
[all …]
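
The common_hz_2t_* hits are the bilinear (2-tap) special case of the same pattern: vec0..vec3 hold adjacent byte pairs gathered by VSHF, DOTP multiplies them by the packed filter, and SRARI rounds by FILTER_BITS. Per pixel that reduces to the following sketch (again assuming FILTER_BITS is 7).

#include <stdint.h>

#define FILTER_BITS 7

/* Bilinear horizontal tap: weighted average of two adjacent source pixels. */
static uint8_t hz_2tap_pixel(const uint8_t *src, int16_t f0, int16_t f1) {
  const int32_t sum = src[0] * f0 + src[1] * f1;
  return (uint8_t)((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
}
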
sub_pixel_variance_msa.c
392 v8u16 vec0, vec1, vec2, vec3; in sub_pixel_sse_diff_4width_h_msa() local
405 VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); in sub_pixel_sse_diff_4width_h_msa()
407 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, in sub_pixel_sse_diff_4width_h_msa()
409 SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); in sub_pixel_sse_diff_4width_h_msa()
410 PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1, in sub_pixel_sse_diff_4width_h_msa()
431 v8u16 vec0, vec1, vec2, vec3; in sub_pixel_sse_diff_8width_h_msa() local
445 VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); in sub_pixel_sse_diff_8width_h_msa()
447 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, in sub_pixel_sse_diff_8width_h_msa()
449 SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); in sub_pixel_sse_diff_8width_h_msa()
450 PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1, in sub_pixel_sse_diff_8width_h_msa()
[all …]
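
sub_pixel_sse_diff_*width_h_msa filters the source bilinearly into vec0..vec3 as above, then accumulates the sum and squared error of the filtered block against the reference. A scalar sketch of that accumulation; names are illustrative.

#include <stdint.h>

/* Accumulate sum of differences and sum of squared differences between a
 * filtered prediction and the reference block. */
static void sse_diff_accumulate(const uint8_t *pred, const uint8_t *ref, int n,
                                int32_t *sum, uint32_t *sse) {
  for (int i = 0; i < n; ++i) {
    const int d = pred[i] - ref[i];
    *sum += d;
    *sse += (uint32_t)(d * d);
  }
}

/* The callers then derive variance as roughly sse - sum*sum/n
 * (exact rounding is up to the caller). */
static uint32_t variance_from(uint32_t sse, int32_t sum, int n) {
  return sse - (uint32_t)(((int64_t)sum * sum) / n);
}
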
vpx_convolve8_avg_msa.c
24 v8i16 hz_out7, hz_out8, hz_out9, res0, res1, vec0, vec1, vec2, vec3, vec4; in common_hv_8ht_8vt_and_aver_dst_4w_msa() local
55 ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
69 res0 = FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3, filt_vt0, filt_vt1, in common_hv_8ht_8vt_and_aver_dst_4w_msa()
87 vec0 = vec2; in common_hv_8ht_8vt_and_aver_dst_4w_msa()
233 v16u8 filt_hz, filt_vt, vec0, vec1; in common_hv_2ht_2vt_and_aver_dst_4x4_msa() local
253 ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); in common_hv_2ht_2vt_and_aver_dst_4x4_msa()
257 DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1); in common_hv_2ht_2vt_and_aver_dst_4x4_msa()
269 v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3, res0, res1; in common_hv_2ht_2vt_and_aver_dst_4x8_msa() local
301 ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); in common_hv_2ht_2vt_and_aver_dst_4x8_msa()
303 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, tmp0, in common_hv_2ht_2vt_and_aver_dst_4x8_msa()
[all …]
loopfilter_8_msa.c
163 v8i16 vec0, vec1, vec2, vec3, vec4; in vpx_lpf_vertical_8_msa() local
187 ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1); in vpx_lpf_vertical_8_msa()
188 ILVRL_H2_SH(vec1, vec0, vec2, vec3); in vpx_lpf_vertical_8_msa()
215 ILVR_B2_SH(p1, p2, q0, p0, vec0, vec1); in vpx_lpf_vertical_8_msa()
216 ILVRL_H2_SH(vec1, vec0, vec2, vec3); in vpx_lpf_vertical_8_msa()
243 v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; in vpx_lpf_vertical_8_dual_msa() local
257 vec0 = (v8i16)__msa_fill_b(*thresh1); in vpx_lpf_vertical_8_dual_msa()
258 thresh = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)thresh); in vpx_lpf_vertical_8_dual_msa()
261 vec0 = (v8i16)__msa_fill_b(*b_limit1); in vpx_lpf_vertical_8_dual_msa()
262 b_limit = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)b_limit); in vpx_lpf_vertical_8_dual_msa()
[all …]
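
In the dual loop-filter variants, vec0 is just a scratch register: __msa_fill_b splats the second edge's limit and __msa_ilvr_d packs it above the first edge's limit in one 128-bit register. That part uses raw intrinsics, so it can be sketched directly (MIPS target with MSA, e.g. -mmsa; the helper name is hypothetical).

#include <msa.h>     /* MIPS MSA; build with -mmsa */
#include <stdint.h>

/* Pack two per-edge limits into one vector: limit0 in the low 8 lanes,
 * limit1 in the high 8 lanes, as the dual 8-pixel filter consumes them. */
static v16u8 pack_dual_limit(uint8_t limit0, uint8_t limit1) {
  v16u8 limit = (v16u8)__msa_fill_b(limit0);
  v8i16 vec0 = (v8i16)__msa_fill_b(limit1);   /* same role as vec0 above */
  return (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)limit);
}
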
vpx_convolve8_msa.c
105 v16u8 mask0, mask1, mask2, mask3, vec0, vec1; in common_hv_8ht_8vt_8w_msa() local
179 vec0 = PCKEV_XORI128_UB(tmp0, tmp1); in common_hv_8ht_8vt_8w_msa()
181 ST8x4_UB(vec0, vec1, dst, dst_stride); in common_hv_8ht_8vt_8w_msa()
238 v16u8 filt_vt, filt_hz, vec0, vec1, res0, res1; in common_hv_2ht_2vt_4x4_msa() local
257 ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); in common_hv_2ht_2vt_4x4_msa()
258 DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1); in common_hv_2ht_2vt_4x4_msa()
270 v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3; in common_hv_2ht_2vt_4x8_msa() local
296 ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); in common_hv_2ht_2vt_4x8_msa()
298 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, vec4, in common_hv_2ht_2vt_4x8_msa()
326 v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3; in common_hv_2ht_2vt_8x4_msa() local
[all …]
fwd_txfm_msa.c
35 v8i16 vec0, vec1, vec2, vec3, vec4, vec5, cnst0, cnst1, cnst4, cnst5; in fdct8x16_1d_column() local
87 ILVRL_H2_SH(in15, in8, vec1, vec0); in fdct8x16_1d_column()
91 in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0); in fdct8x16_1d_column()
96 in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0); in fdct8x16_1d_column()
99 ILVRL_H2_SH(in14, in9, vec1, vec0); in fdct8x16_1d_column()
103 in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst1); in fdct8x16_1d_column()
108 in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0); in fdct8x16_1d_column()
123 ILVRL_H2_SH(in13, in10, vec1, vec0); in fdct8x16_1d_column()
126 in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0); in fdct8x16_1d_column()
131 in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0); in fdct8x16_1d_column()
[all …]
/external/libvpx/libvpx/third_party/libyuv/source/
row_msa.cc
378 v8i16 vec0, vec1, vec2; in I422ToARGBRow_MSA() local
392 vec0, vec1, vec2); in I422ToARGBRow_MSA()
393 STOREARGB(vec0, vec1, vec2, alpha, dst_argb); in I422ToARGBRow_MSA()
409 v8i16 vec0, vec1, vec2; in I422ToRGBARow_MSA() local
423 vec0, vec1, vec2); in I422ToRGBARow_MSA()
424 STOREARGB(alpha, vec0, vec1, vec2, dst_argb); in I422ToRGBARow_MSA()
442 v8i16 vec0, vec1, vec2; in I422AlphaToARGBRow_MSA() local
458 vec0, vec1, vec2); in I422AlphaToARGBRow_MSA()
460 STOREARGB(vec0, vec1, vec2, src3, dst_argb); in I422AlphaToARGBRow_MSA()
478 v8i16 vec0, vec1, vec2, vec3, vec4, vec5; in I422ToRGB24Row_MSA() local
[all …]
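
In the libyuv row kernels, vec0/vec1/vec2 carry the widened colour terms produced by the YUVTORGB macro before STOREARGB interleaves them with alpha. A scalar sketch of one pixel of that conversion follows; the fixed-point coefficients below are a commonly quoted BT.601 studio-range approximation, not necessarily the exact kYuvI601Constants values used by libyuv.

#include <stdint.h>

static uint8_t clamp8(int v) { return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v)); }

/* One pixel of I422 -> ARGB (bytes stored B, G, R, A), 8.8 fixed point. */
static void yuv_to_argb_pixel(uint8_t y, uint8_t u, uint8_t v, uint8_t argb[4]) {
  const int c = (y - 16) * 298;
  const int d = u - 128;
  const int e = v - 128;
  argb[0] = clamp8((c + 516 * d + 128) >> 8);           /* B */
  argb[1] = clamp8((c - 100 * d - 208 * e + 128) >> 8);  /* G */
  argb[2] = clamp8((c + 409 * e + 128) >> 8);            /* R */
  argb[3] = 255;                                         /* A */
}
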
compare_msa.cc
31 v2i64 vec0 = {0}, vec1 = {0}; in HammingDistance_MSA() local
40 vec0 += __msa_pcnt_d((v2i64)src0); in HammingDistance_MSA()
46 vec0 += vec1; in HammingDistance_MSA()
47 diff = (uint32_t)__msa_copy_u_w((v4i32)vec0, 0); in HammingDistance_MSA()
48 diff += (uint32_t)__msa_copy_u_w((v4i32)vec0, 2); in HammingDistance_MSA()
58 v8i16 vec0, vec1, vec2, vec3; in SumSquareError_MSA() local
67 vec0 = (v8i16)__msa_ilvr_b((v16i8)src2, (v16i8)src0); in SumSquareError_MSA()
71 vec0 = __msa_hsub_u_h((v16u8)vec0, (v16u8)vec0); in SumSquareError_MSA()
75 reg0 = __msa_dpadd_s_w(reg0, vec0, vec0); in SumSquareError_MSA()
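
HammingDistance_MSA accumulates per-64-bit-lane popcounts of the XORed inputs in vec0/vec1 and folds them with __msa_copy_u_w, while SumSquareError_MSA widens byte differences into vec0..vec3 and dot-product-accumulates them. The Hamming half maps almost directly onto intrinsics; a sketch (MIPS target with -mmsa, memcpy standing in for the project's LD_UB load macro):

#include <msa.h>     /* MIPS MSA vector types and intrinsics */
#include <stdint.h>
#include <string.h>

/* Count differing bits between two equally sized buffers (count % 32 == 0). */
static uint32_t hamming_distance_sketch(const uint8_t *a, const uint8_t *b,
                                        int count) {
  v2i64 vec0 = {0}, vec1 = {0};
  for (int i = 0; i < count; i += 32) {
    v16u8 src0, src1, src2, src3;
    memcpy(&src0, a + i, 16);
    memcpy(&src1, b + i, 16);
    memcpy(&src2, a + i + 16, 16);
    memcpy(&src3, b + i + 16, 16);
    vec0 += __msa_pcnt_d((v2i64)(src0 ^ src1));  /* popcount per 64-bit lane */
    vec1 += __msa_pcnt_d((v2i64)(src2 ^ src3));
  }
  vec0 += vec1;
  uint32_t diff = (uint32_t)__msa_copy_u_w((v4i32)vec0, 0);
  diff += (uint32_t)__msa_copy_u_w((v4i32)vec0, 2);
  return diff;
}
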
scale_msa.cc
55 v16u8 src0, src1, vec0, vec1, dst0; in ScaleARGBRowDown2Linear_MSA() local
61 vec0 = (v16u8)__msa_pckev_w((v4i32)src1, (v4i32)src0); in ScaleARGBRowDown2Linear_MSA()
63 dst0 = (v16u8)__msa_aver_u_b((v16u8)vec0, (v16u8)vec1); in ScaleARGBRowDown2Linear_MSA()
77 v16u8 src0, src1, src2, src3, vec0, vec1, vec2, vec3, dst0; in ScaleARGBRowDown2Box_MSA() local
86 vec0 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src0, (v16i8)src0); in ScaleARGBRowDown2Box_MSA()
90 reg0 = __msa_hadd_u_h(vec0, vec0); in ScaleARGBRowDown2Box_MSA()
140 v16u8 vec0, vec1, vec2, vec3; in ScaleARGBRowDownEvenBox_MSA() local
161 vec0 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src0); in ScaleARGBRowDownEvenBox_MSA()
165 reg0 = __msa_hadd_u_h(vec0, vec0); in ScaleARGBRowDownEvenBox_MSA()
211 v16u8 src0, src1, src2, src3, vec0, vec1, vec2, vec3, dst0, dst1; in ScaleRowDown2Linear_MSA() local
[all …]
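
ScaleARGBRowDown2Linear_MSA splits even and odd ARGB pixels into vec0/vec1 with pack-even/pack-odd word operations and averages them; the Box variants widen with __msa_hadd_u_h instead and divide by the box area. A sketch of the linear case (MIPS target with -mmsa; the pckod step for vec1 is inferred here and worth checking against the source):

#include <msa.h>     /* MIPS MSA; build with -mmsa */
#include <stdint.h>
#include <string.h>

/* Halve 8 ARGB pixels to 4 by averaging each horizontal pair of pixels. */
static void argb_down2_linear_sketch(const uint8_t *src_argb,
                                     uint8_t *dst_argb) {
  v16u8 src0, src1, vec0, vec1, dst0;
  memcpy(&src0, src_argb, 16);        /* pixels 0..3 */
  memcpy(&src1, src_argb + 16, 16);   /* pixels 4..7 */
  vec0 = (v16u8)__msa_pckev_w((v4i32)src1, (v4i32)src0);  /* pixels 0,2,4,6 */
  vec1 = (v16u8)__msa_pckod_w((v4i32)src1, (v4i32)src0);  /* pixels 1,3,5,7 */
  dst0 = __msa_aver_u_b(vec0, vec1);                      /* (a + b + 1) >> 1 */
  memcpy(dst_argb, &dst0, 16);
}
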
rotate_msa.cc
84 v16u8 src0, src1, src2, src3, dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3; in TransposeWx16_MSA() local
98 ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3); in TransposeWx16_MSA()
99 ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3); in TransposeWx16_MSA()
108 ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3); in TransposeWx16_MSA()
109 ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7); in TransposeWx16_MSA()
120 ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3); in TransposeWx16_MSA()
121 ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3); in TransposeWx16_MSA()
130 ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3); in TransposeWx16_MSA()
131 ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7); in TransposeWx16_MSA()
165 v16u8 src0, src1, src2, src3, dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3; in TransposeUVWx16_MSA() local
[all …]
/external/libyuv/files/source/
row_msa.cc
378 v8i16 vec0, vec1, vec2; in I422ToARGBRow_MSA() local
392 vec0, vec1, vec2); in I422ToARGBRow_MSA()
393 STOREARGB(vec0, vec1, vec2, alpha, dst_argb); in I422ToARGBRow_MSA()
409 v8i16 vec0, vec1, vec2; in I422ToRGBARow_MSA() local
423 vec0, vec1, vec2); in I422ToRGBARow_MSA()
424 STOREARGB(alpha, vec0, vec1, vec2, dst_argb); in I422ToRGBARow_MSA()
442 v8i16 vec0, vec1, vec2; in I422AlphaToARGBRow_MSA() local
458 vec0, vec1, vec2); in I422AlphaToARGBRow_MSA()
460 STOREARGB(vec0, vec1, vec2, src3, dst_argb); in I422AlphaToARGBRow_MSA()
478 v8i16 vec0, vec1, vec2, vec3, vec4, vec5; in I422ToRGB24Row_MSA() local
[all …]
compare_msa.cc
31 v2i64 vec0 = {0}, vec1 = {0}; in HammingDistance_MSA() local
40 vec0 += __msa_pcnt_d((v2i64)src0); in HammingDistance_MSA()
46 vec0 += vec1; in HammingDistance_MSA()
47 diff = (uint32_t)__msa_copy_u_w((v4i32)vec0, 0); in HammingDistance_MSA()
48 diff += (uint32_t)__msa_copy_u_w((v4i32)vec0, 2); in HammingDistance_MSA()
58 v8i16 vec0, vec1, vec2, vec3; in SumSquareError_MSA() local
67 vec0 = (v8i16)__msa_ilvr_b((v16i8)src2, (v16i8)src0); in SumSquareError_MSA()
71 vec0 = __msa_hsub_u_h((v16u8)vec0, (v16u8)vec0); in SumSquareError_MSA()
75 reg0 = __msa_dpadd_s_w(reg0, vec0, vec0); in SumSquareError_MSA()
scale_msa.cc
55 v16u8 src0, src1, vec0, vec1, dst0; in ScaleARGBRowDown2Linear_MSA() local
61 vec0 = (v16u8)__msa_pckev_w((v4i32)src1, (v4i32)src0); in ScaleARGBRowDown2Linear_MSA()
63 dst0 = (v16u8)__msa_aver_u_b((v16u8)vec0, (v16u8)vec1); in ScaleARGBRowDown2Linear_MSA()
77 v16u8 src0, src1, src2, src3, vec0, vec1, vec2, vec3, dst0; in ScaleARGBRowDown2Box_MSA() local
86 vec0 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src0, (v16i8)src0); in ScaleARGBRowDown2Box_MSA()
90 reg0 = __msa_hadd_u_h(vec0, vec0); in ScaleARGBRowDown2Box_MSA()
140 v16u8 vec0, vec1, vec2, vec3; in ScaleARGBRowDownEvenBox_MSA() local
161 vec0 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src0); in ScaleARGBRowDownEvenBox_MSA()
165 reg0 = __msa_hadd_u_h(vec0, vec0); in ScaleARGBRowDownEvenBox_MSA()
211 v16u8 src0, src1, src2, src3, vec0, vec1, vec2, vec3, dst0, dst1; in ScaleRowDown2Linear_MSA() local
[all …]
rotate_msa.cc
84 v16u8 src0, src1, src2, src3, dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3; in TransposeWx16_MSA() local
98 ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3); in TransposeWx16_MSA()
99 ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3); in TransposeWx16_MSA()
108 ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3); in TransposeWx16_MSA()
109 ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7); in TransposeWx16_MSA()
120 ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3); in TransposeWx16_MSA()
121 ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3); in TransposeWx16_MSA()
130 ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3); in TransposeWx16_MSA()
131 ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7); in TransposeWx16_MSA()
165 v16u8 src0, src1, src2, src3, dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3; in TransposeUVWx16_MSA() local
[all …]
/external/libvpx/libvpx/vp8/common/mips/msa/
bilinear_filter_msa.c
34 v16u8 filt0, vec0, vec1, res0, res1; in common_hz_2t_4x4_msa() local
43 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); in common_hz_2t_4x4_msa()
44 DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3); in common_hz_2t_4x4_msa()
53 v16u8 vec0, vec1, vec2, vec3, filt0; in common_hz_2t_4x8_msa() local
64 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); in common_hz_2t_4x8_msa()
66 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5, in common_hz_2t_4x8_msa()
91 v8u16 vec0, vec1, vec2, vec3, filt; in common_hz_2t_8x4_msa() local
99 VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); in common_hz_2t_8x4_msa()
101 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, in common_hz_2t_8x4_msa()
103 SRARI_H4_UH(vec0, vec1, vec2, vec3, VP8_FILTER_SHIFT); in common_hz_2t_8x4_msa()
[all …]
/external/libpng/mips/
filter_msa_intrinsics.c
664 v8i16 vec0, vec1, vec2; in png_read_filter_row_paeth4_msa() local
696 ILVR_B2_SH(src2, src6, src1, src6, vec0, vec1); in png_read_filter_row_paeth4_msa()
697 HSUB_UB2_SH(vec0, vec1, vec0, vec1); in png_read_filter_row_paeth4_msa()
698 vec2 = vec0 + vec1; in png_read_filter_row_paeth4_msa()
699 ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2); in png_read_filter_row_paeth4_msa()
700 CMP_AND_SELECT(vec0, vec1, vec2, src1, src2, src6, src10); in png_read_filter_row_paeth4_msa()
701 ILVR_B2_SH(src3, src7, src10, src7, vec0, vec1); in png_read_filter_row_paeth4_msa()
702 HSUB_UB2_SH(vec0, vec1, vec0, vec1); in png_read_filter_row_paeth4_msa()
703 vec2 = vec0 + vec1; in png_read_filter_row_paeth4_msa()
704 ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2); in png_read_filter_row_paeth4_msa()
[all …]
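
png_read_filter_row_paeth4_msa evaluates the PNG Paeth predictor on 4-byte pixels: vec0 and vec1 hold two widened byte differences, vec2 their sum, and ADD_ABS_H3_SH / CMP_AND_SELECT turn those into the |p-a|, |p-b|, |p-c| comparison that selects the predictor. Per byte, the predictor is the standard one from the PNG specification:

#include <stdint.h>
#include <stdlib.h>

/* Paeth predictor: a = left, b = above, c = upper-left. */
static uint8_t paeth_predictor(uint8_t a, uint8_t b, uint8_t c) {
  const int p = a + b - c;
  const int pa = abs(p - a);  /* == |b - c| */
  const int pb = abs(p - b);  /* == |a - c| */
  const int pc = abs(p - c);  /* == |a + b - 2c| */
  if (pa <= pb && pa <= pc) return a;
  if (pb <= pc) return b;
  return c;
}
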
/external/libaom/libaom/aom_dsp/mips/
aom_convolve8_horiz_msa.c
324 v16u8 filt0, vec0, vec1, res0, res1; in common_hz_2t_4x4_msa() local
334 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); in common_hz_2t_4x4_msa()
335 DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3); in common_hz_2t_4x4_msa()
344 v16u8 vec0, vec1, vec2, vec3, filt0; in common_hz_2t_4x8_msa() local
356 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); in common_hz_2t_4x8_msa()
358 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5, in common_hz_2t_4x8_msa()
383 v8u16 vec0, vec1, vec2, vec3, filt; in common_hz_2t_8x4_msa() local
392 VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); in common_hz_2t_8x4_msa()
394 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, in common_hz_2t_8x4_msa()
396 SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); in common_hz_2t_8x4_msa()
[all …]
sub_pixel_variance_msa.c
389 v8u16 vec0, vec1, vec2, vec3; in sub_pixel_sse_diff_4width_h_msa() local
402 VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); in sub_pixel_sse_diff_4width_h_msa()
404 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, in sub_pixel_sse_diff_4width_h_msa()
406 SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); in sub_pixel_sse_diff_4width_h_msa()
407 PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1, in sub_pixel_sse_diff_4width_h_msa()
428 v8u16 vec0, vec1, vec2, vec3; in sub_pixel_sse_diff_8width_h_msa() local
442 VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); in sub_pixel_sse_diff_8width_h_msa()
444 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, in sub_pixel_sse_diff_8width_h_msa()
446 SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); in sub_pixel_sse_diff_8width_h_msa()
447 PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1, in sub_pixel_sse_diff_8width_h_msa()
[all …]
loopfilter_8_msa.c
163 v8i16 vec0, vec1, vec2, vec3, vec4; in aom_lpf_vertical_8_msa() local
187 ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1); in aom_lpf_vertical_8_msa()
188 ILVRL_H2_SH(vec1, vec0, vec2, vec3); in aom_lpf_vertical_8_msa()
215 ILVR_B2_SH(p1, p2, q0, p0, vec0, vec1); in aom_lpf_vertical_8_msa()
216 ILVRL_H2_SH(vec1, vec0, vec2, vec3); in aom_lpf_vertical_8_msa()
243 v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; in aom_lpf_vertical_8_dual_msa() local
257 vec0 = (v8i16)__msa_fill_b(*thresh1); in aom_lpf_vertical_8_dual_msa()
258 thresh = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)thresh); in aom_lpf_vertical_8_dual_msa()
261 vec0 = (v8i16)__msa_fill_b(*b_limit1); in aom_lpf_vertical_8_dual_msa()
262 b_limit = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)b_limit); in aom_lpf_vertical_8_dual_msa()
[all …]
/external/llvm-project/llvm/test/CodeGen/AArch64/
arm64-copy-tuple.ll
17 %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
20 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
23 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
34 %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
37 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
40 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
51 %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
54 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
57 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
68 %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
[all …]
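
In these AArch64 tests, %vec0 and %vec1 are the two halves of a { <8 x i8>, <8 x i8> } tuple stored twice with llvm.aarch64.neon.st2, exercising copies of multi-register tuples. C code using the vld2/vst2 NEON intrinsics lowers to the same IR shape; a sketch (AArch64 target, names illustrative):

#include <arm_neon.h>
#include <stdint.h>

/* Load an interleaved pair of 8x8-bit vectors and store it to two places,
 * so the backend must keep (and copy) a two-register tuple between the
 * two st2 stores, as in the IR above. */
static void copy_tuple_sketch(const int8_t *src, int8_t *dst0, int8_t *dst1) {
  const int8x8x2_t vec = vld2_s8(src);  /* { %vec0, %vec1 } in the IR */
  vst2_s8(dst0, vec);
  vst2_s8(dst1, vec);
}
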
/external/llvm/test/CodeGen/AArch64/
arm64-copy-tuple.ll
17 %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
20 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
23 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
34 %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
37 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
40 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
51 %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
54 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
57 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
68 %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
[all …]
/external/llvm-project/llvm/test/CodeGen/AMDGPU/
packed-op-sel.ll
18 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
25 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %sca…
46 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
54 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %neg…
75 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
83 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %neg…
104 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
113 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %neg…
134 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
141 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %neg…
[all …]
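
Here %vec0 is a <2 x half> loaded from LDS and passed to llvm.fma.v2f16 together with negated or broadcast operands; the tests check that those modifiers fold into the packed FMA's op_sel/neg fields rather than separate instructions. The scalar shape of the computation is simply a fused multiply-add with a negated addend:

#include <math.h>

/* fma(a, b, -c): the negation of the addend is what the tests expect to be
 * folded into the packed f16 FMA's modifier bits. */
static float fma_neg_addend(float a, float b, float c) {
  return fmaf(a, b, -c);
}
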
bitcast-vector-extract.ll
15 …%vec0.bc = bitcast <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 8> to <8 x floa…
16 store volatile <8 x float> %vec0.bc, <8 x float> addrspace(1)* %out
31 %vec0.bc = bitcast <4 x i64> <i64 7, i64 7, i64 7, i64 8> to <8 x float>
32 store volatile <8 x float> %vec0.bc, <8 x float> addrspace(1)* %out
47 %vec0.bc = bitcast <4 x i64> <i64 7, i64 7, i64 7, i64 8> to <4 x double>
48 store volatile <4 x double> %vec0.bc, <4 x double> addrspace(1)* %out
63 …%vec0.bc = bitcast <16 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 …
64 store volatile <8 x float> %vec0.bc, <8 x float> addrspace(1)* %out
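
These hits build %vec0.bc by bitcasting a constant integer vector to a float or double vector and storing it volatile, checking that the bitcast itself costs no instructions. The C-level equivalent of such a reinterpretation is usually written with memcpy:

#include <stdint.h>
#include <string.h>

/* Reinterpret eight 32-bit integers as eight floats without changing bits,
 * then store them through a volatile pointer (mirroring the IR above). */
static void bitcast_store_sketch(const int32_t src[8], volatile float dst[8]) {
  float tmp[8];
  memcpy(tmp, src, sizeof(tmp));  /* the "bitcast": same bits, new type */
  for (int i = 0; i < 8; ++i) dst[i] = tmp[i];
}
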
