/external/libvpx/libvpx/vpx_dsp/mips/
fwd_dct32x32_msa.c
  61   v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in fdct8x32_1d_column_even_store() local
  71   BUTTERFLY_8(in4, in5, in6, in7, in8, in9, in10, in11, vec4, vec5, vec6, vec7,  in fdct8x32_1d_column_even_store()
  75   ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);  in fdct8x32_1d_column_even_store()
  87   SUB4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, vec7, vec6, vec5, vec4);  in fdct8x32_1d_column_even_store()
  88   DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);  in fdct8x32_1d_column_even_store()
  89   ADD2(vec4, vec5, vec7, vec6, vec0, vec1);  in fdct8x32_1d_column_even_store()
  95   SUB2(vec4, vec5, vec7, vec6, vec4, vec7);  in fdct8x32_1d_column_even_store()
  101  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);  in fdct8x32_1d_column_even_store()
  103  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);  in fdct8x32_1d_column_even_store()
  117  SUB2(in9, vec2, in14, vec5, vec2, vec5);  in fdct8x32_1d_column_even_store()
  [all …]
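These fwd_dct32x32_msa.c hits are the even half of the 32-point forward DCT column pass. Below is a rough scalar model of the two macros that dominate the matches; it is a sketch only, assuming libvpx's DCT_CONST_BITS == 14 and cospi_16_64 == 11585, and the function names are illustrative, not the real API.

```cpp
#include <cstdint>

static const int DCT_CONST_BITS = 14;
static const int16_t kCospi16_64 = 11585;  // round(cos(pi/4) * 2^14)

static int32_t round_shift(int32_t x) {
  return (x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS;
}

// BUTTERFLY_8 pairs the ends of the input: sums land in the first half,
// differences in the second half.
static void butterfly8(const int16_t in[8], int16_t out[8]) {
  for (int i = 0; i < 4; ++i) {
    out[i] = in[i] + in[7 - i];
    out[7 - i] = in[i] - in[7 - i];
  }
}

// DOTP_CONST_PAIR with cospi_16_64 in both slots is the pi/4 rotation:
// a scaled sum and difference with rounding (sign convention approximate).
static void rotate_pi4(int16_t a, int16_t b, int16_t* out0, int16_t* out1) {
  *out0 = (int16_t)round_shift((a + b) * kCospi16_64);
  *out1 = (int16_t)round_shift((a - b) * kCospi16_64);
}
```

The MSA versions perform the same arithmetic on eight int16 lanes at a time.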
vpx_convolve8_avg_horiz_msa.c
  147  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in common_hz_8t_and_aver_dst_16w_msa() local
  168  VSHF_B4_SH(src1, src1, mask0, mask1, mask2, mask3, vec1, vec5, vec9, vec13);  in common_hz_8t_and_aver_dst_16w_msa()
  177  DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0, vec1,  in common_hz_8t_and_aver_dst_16w_msa()
  201  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in common_hz_8t_and_aver_dst_32w_msa() local
  224  VSHF_B4_SH(src1, src1, mask0, mask1, mask2, mask3, vec1, vec5, vec9, vec13);  in common_hz_8t_and_aver_dst_32w_msa()
  233  DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0, vec1,  in common_hz_8t_and_aver_dst_32w_msa()
  256  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in common_hz_8t_and_aver_dst_64w_msa() local
  280  VSHF_B4_SH(src1, src1, mask0, mask1, mask2, mask3, vec1, vec5, vec9,  in common_hz_8t_and_aver_dst_64w_msa()
  290  DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0,  in common_hz_8t_and_aver_dst_64w_msa()
  340  v8u16 vec4, vec5, vec6, vec7, filt;  in common_hz_2t_and_aver_dst_4x8_msa() local
  [all …]
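The *_and_aver_dst_* helpers all follow one pattern: an 8-tap horizontal convolution (VSHF_B4_SH gathers the taps, DPADD_SB4_SH accumulates them), then a rounding average with the bytes already in dst. One output pixel in scalar form, as a sketch assuming vpx's FILTER_BITS == 7 and taps spanning src[-3..4]; clip_u8 and the function name are hypothetical.

```cpp
#include <cstdint>

static const int FILTER_BITS = 7;

static uint8_t clip_u8(int v) {
  return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

// 8-tap filter, round, clip, then average with the existing dst pixel.
static uint8_t convolve8_avg_pixel(const uint8_t* src, const int16_t filt[8],
                                   uint8_t dst) {
  int sum = 0;
  for (int k = 0; k < 8; ++k) sum += src[k - 3] * filt[k];
  uint8_t filtered = clip_u8((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
  return (uint8_t)((dst + filtered + 1) >> 1);  // rounding average
}
```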
vpx_convolve8_vert_msa.c
  394  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;  in common_vt_2t_8x8mult_msa() local
  412  ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5, vec6,  in common_vt_2t_8x8mult_msa()
  421  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, tmp0, tmp1,  in common_vt_2t_8x8mult_msa()
  447  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;  in common_vt_2t_16w_msa() local
  470  ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);  in common_vt_2t_16w_msa()
  476  DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);  in common_vt_2t_16w_msa()
  495  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;  in common_vt_2t_32w_msa() local
  523  ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);  in common_vt_2t_32w_msa()
  524  DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);  in common_vt_2t_32w_msa()
  543  ILVL_B2_UB(src8, src7, src9, src8, vec5, vec7);  in common_vt_2t_32w_msa()
  [all …]
vpx_convolve8_avg_vert_msa.c
  358  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;  in common_vt_2t_and_aver_dst_8x8mult_msa() local
  376  ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5, vec6,  in common_vt_2t_and_aver_dst_8x8mult_msa()
  385  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, tmp0, tmp1,  in common_vt_2t_and_aver_dst_8x8mult_msa()
  414  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in common_vt_2t_and_aver_dst_16w_msa() local
  437  ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);  in common_vt_2t_and_aver_dst_16w_msa()
  443  DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);  in common_vt_2t_and_aver_dst_16w_msa()
  464  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;  in common_vt_2t_and_aver_dst_32w_msa() local
  493  ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);  in common_vt_2t_and_aver_dst_32w_msa()
  494  DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);  in common_vt_2t_and_aver_dst_32w_msa()
  513  ILVL_B2_UB(src8, src7, src9, src8, vec5, vec7);  in common_vt_2t_and_aver_dst_32w_msa()
  [all …]
vpx_convolve8_horiz_msa.c
  344  v8u16 vec4, vec5, vec6, vec7, filt;  in common_hz_2t_4x8_msa() local
  355  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5,  in common_hz_2t_4x8_msa()
  357  SRARI_H4_UH(vec4, vec5, vec6, vec7, FILTER_BITS);  in common_hz_2t_4x8_msa()
  358  PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2,  in common_hz_2t_4x8_msa()
  476  v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in common_hz_2t_16w_msa() local
  493  VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);  in common_hz_2t_16w_msa()
  497  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,  in common_hz_2t_16w_msa()
  517  VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);  in common_hz_2t_16w_msa()
  521  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,  in common_hz_2t_16w_msa()
  541  v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in common_hz_2t_32w_msa() local
  [all …]
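The common_hz_2t_* matches are plain bilinear interpolation: VSHF_B2_UB pairs each pixel with its right neighbour, DOTP_UB4_UH takes the two-tap dot product, SRARI_H4_UH rounds, and PCKEV_B4_SB packs the 16-bit results back down to bytes. The scalar equivalent for one pixel, as a sketch with a made-up function name:

```cpp
#include <cstdint>

static const int FILTER_BITS = 7;

// Two-tap horizontal filter; f0 + f1 == 1 << FILTER_BITS for a valid
// bilinear kernel, so the result stays in byte range without clipping.
static uint8_t bilinear_h_pixel(const uint8_t* src, int f0, int f1) {
  int sum = src[0] * f0 + src[1] * f1;                              // DOTP
  return (uint8_t)((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);  // SRARI
}
```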
fwd_txfm_msa.c
  20   v8i16 vec0, vec1, vec2, vec3, vec4, vec5, cnst0, cnst1, cnst4, cnst5;  in fdct8x16_1d_column() local
  47   ILVR_H2_SH(in10, in13, in11, in12, vec3, vec5);  in fdct8x16_1d_column()
  55   stp24 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst4);  in fdct8x16_1d_column()
  56   stp23 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst5);  in fdct8x16_1d_column()
  62   ILVR_H2_SH(stp36, stp31, stp35, stp32, vec3, vec5);  in fdct8x16_1d_column()
  99   stp25 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst1);  in fdct8x16_1d_column()
  103  stp22 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst1);  in fdct8x16_1d_column()
loopfilter_8_msa.c
  242  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in vpx_lpf_vertical_8_dual_msa() local
  279  ILVRL_H2_SH(vec1, vec0, vec4, vec5);  in vpx_lpf_vertical_8_dual_msa()
  284  ST4x8_UB(vec4, vec5, src, pitch);  in vpx_lpf_vertical_8_dual_msa()
  317  ILVRL_B2_SH(q2, q1, vec2, vec5);  in vpx_lpf_vertical_8_dual_msa()
  327  ST2x4_UB(vec5, 0, src + 4, pitch);  in vpx_lpf_vertical_8_dual_msa()
  330  ST2x4_UB(vec5, 4, src + 4, pitch);  in vpx_lpf_vertical_8_dual_msa()
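In the loop-filter files the interleaves are not arithmetic: vpx_lpf_vertical_* filters rows but must write columns, and the ILVR/ILVL/ILVRL chains build that transpose. A scalar model of the two byte-interleave primitives follows; the lane order is my reading of the MSA spec, so treat it as an assumption.

```cpp
#include <cstdint>

// ILVR.B: interleave the low ("right") halves of two vectors;
// the second operand supplies the even lanes.
static void ilvr_b(const uint8_t ws[16], const uint8_t wt[16],
                   uint8_t dst[16]) {
  for (int i = 0; i < 8; ++i) {
    dst[2 * i] = wt[i];
    dst[2 * i + 1] = ws[i];
  }
}

// ILVL.B: the same for the high ("left") halves.
static void ilvl_b(const uint8_t ws[16], const uint8_t wt[16],
                   uint8_t dst[16]) {
  for (int i = 0; i < 8; ++i) {
    dst[2 * i] = wt[8 + i];
    dst[2 * i + 1] = ws[8 + i];
  }
}
```

Interleaving at byte, then halfword, then word width gives the classic log2(n)-step SIMD transpose, after which ST4x8_UB/ST2x4_UB scatter the columns back to memory.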
vpx_convolve8_msa.c
  272  v8u16 hz_out7, hz_out8, vec4, vec5, vec6, vec7, filt;  in common_hv_2ht_2vt_4x8_msa() local
  299  vec5, vec6, vec7);  in common_hv_2ht_2vt_4x8_msa()
  300  SRARI_H4_UH(vec4, vec5, vec6, vec7, FILTER_BITS);  in common_hv_2ht_2vt_4x8_msa()
  301  PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2,  in common_hv_2ht_2vt_4x8_msa()
sub_pixel_variance_msa.c
  471   v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in sub_pixel_sse_diff_16width_h_msa() local
  488   VSHF_B2_UH(src4, src4, src5, src5, mask, mask, vec4, vec5);  in sub_pixel_sse_diff_16width_h_msa()
  492   DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,  in sub_pixel_sse_diff_16width_h_msa()
  641   v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in sub_pixel_sse_diff_16width_v_msa() local
  666   ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);  in sub_pixel_sse_diff_16width_v_msa()
  671   DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);  in sub_pixel_sse_diff_16width_v_msa()
  1053  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in subpel_avg_ssediff_16w_h_msa() local
  1072  VSHF_B2_UH(src4, src4, src5, src5, mask, mask, vec4, vec5);  in subpel_avg_ssediff_16w_h_msa()
  1076  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,  in subpel_avg_ssediff_16w_h_msa()
  1252  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in subpel_avg_ssediff_16w_v_msa() local
  [all …]
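sub_pixel_variance_msa.c reuses the same VSHF/DOTP filtering, but the filtered prediction then feeds a sum and sum-of-squares accumulation against the reference block. The reduction in scalar form, as a sketch (vpx derives the variance as sse - sum*sum/N in the caller):

```cpp
#include <cstdint>

// Accumulate the signed error sum and the sum of squared errors between
// a filtered prediction and the reference block.
static void accumulate_sse_sum(const uint8_t* pred, const uint8_t* ref,
                               int n, uint32_t* sse, int32_t* sum) {
  for (int i = 0; i < n; ++i) {
    int d = (int)pred[i] - (int)ref[i];
    *sum += d;
    *sse += (uint32_t)(d * d);
  }
}
```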
loopfilter_16_msa.c
  1080  v8i16 vec0, vec1, vec2, vec3, vec4, vec5;  in vpx_vt_lpf_t4_and_t8_16w() local
  1101  ILVRL_H2_SH(vec1, vec0, vec4, vec5);  in vpx_vt_lpf_t4_and_t8_16w()
  1106  ST4x8_UB(vec4, vec5, src_org, pitch);  in vpx_vt_lpf_t4_and_t8_16w()
  1164  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in vpx_vt_lpf_t16_16w() local
  1173  ILVRL_B2_SH(q2, q1, vec2, vec5);  in vpx_vt_lpf_t16_16w()
  1183  ST2x4_UB(vec5, 0, (src_org + 4), pitch);  in vpx_vt_lpf_t16_16w()
  1186  ST2x4_UB(vec5, 4, (src_org + 4), pitch);  in vpx_vt_lpf_t16_16w()
/external/eigen/unsupported/test/
cxx11_tensor_simple.cpp
  96   Tensor<int, 1> vec5(vec1);  in test_1d() local
  98   VERIFY_IS_EQUAL((vec5(0)), 4);  in test_1d()
  99   VERIFY_IS_EQUAL((vec5(1)), 8);  in test_1d()
  100  VERIFY_IS_EQUAL((vec5(2)), 15);  in test_1d()
  101  VERIFY_IS_EQUAL((vec5(3)), 16);  in test_1d()
  102  VERIFY_IS_EQUAL((vec5(4)), 23);  in test_1d()
  103  VERIFY_IS_EQUAL((vec5(5)), 42);  in test_1d()
  105  VERIFY_IS_EQUAL((vec5.data()[0]), 4);  in test_1d()
  106  VERIFY_IS_EQUAL((vec5.data()[1]), 8);  in test_1d()
  107  VERIFY_IS_EQUAL((vec5.data()[2]), 15);  in test_1d()
  [all …]
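This test copy-constructs vec5 from vec1 and checks both operator() and the raw data() layout. A condensed standalone version of what it verifies, using assert in place of Eigen's VERIFY_IS_EQUAL (header path per the unsupported tensor module):

```cpp
#include <cassert>
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<int, 1> vec1(6);
  vec1.setValues({4, 8, 15, 16, 23, 42});
  Eigen::Tensor<int, 1> vec5(vec1);      // deep copy, same contents
  assert(vec5(0) == 4 && vec5(5) == 42);
  assert(vec5.data()[2] == 15);          // rank-1 tensors are stored densely
  return 0;
}
```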
cxx11_tensor_expr.cpp
  36  TensorMap<Tensor<float, 1, RowMajor>> vec5(data5, 6);  in test_1d() local
  37  vec5 = vec2.cube();  in test_1d()
  53  VERIFY_IS_APPROX(vec5(0), 0.0f);  in test_1d()
  54  VERIFY_IS_APPROX(vec5(1), 1.0f);  in test_1d()
  55  VERIFY_IS_APPROX(vec5(2), 2.0f * 2.0f * 2.0f);  in test_1d()
  56  VERIFY_IS_APPROX(vec5(3), 3.0f * 3.0f * 3.0f);  in test_1d()
  57  VERIFY_IS_APPROX(vec5(4), 4.0f * 4.0f * 4.0f);  in test_1d()
  58  VERIFY_IS_APPROX(vec5(5), 5.0f * 5.0f * 5.0f);  in test_1d()
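Here vec5 is a TensorMap over a raw float buffer, and assigning vec2.cube() evaluates the element-wise x^3 expression straight into that buffer. A condensed version (assert replaces VERIFY_IS_APPROX):

```cpp
#include <cassert>
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  float data2[6], data5[6];
  Eigen::TensorMap<Eigen::Tensor<float, 1, Eigen::RowMajor>> vec2(data2, 6);
  Eigen::TensorMap<Eigen::Tensor<float, 1, Eigen::RowMajor>> vec5(data5, 6);
  for (int i = 0; i < 6; ++i) vec2(i) = (float)i;
  vec5 = vec2.cube();                    // element-wise cube into data5
  assert(vec5(3) == 3.0f * 3.0f * 3.0f);
  return 0;
}
```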
/external/libyuv/files/source/
row_msa.cc
  478  v8i16 vec0, vec1, vec2, vec3, vec4, vec5;  in I422ToRGB24Row_MSA() local
  505  vec3, vec4, vec5);  in I422ToRGB24Row_MSA()
  508  reg3 = (v16u8)__msa_pckev_b((v16i8)vec5, (v16i8)vec2);  in I422ToRGB24Row_MSA()
  825  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;  in ARGBToUVRow_MSA() local
  849  vec5 = (v16u8)__msa_pckod_b((v16i8)src3, (v16i8)src2);  in ARGBToUVRow_MSA()
  854  vec4 = (v16u8)__msa_pckev_b((v16i8)vec5, (v16i8)vec4);  in ARGBToUVRow_MSA()
  855  vec5 = (v16u8)__msa_pckev_b((v16i8)vec7, (v16i8)vec6);  in ARGBToUVRow_MSA()
  861  reg3 = __msa_hadd_u_h(vec5, vec5);  in ARGBToUVRow_MSA()
  877  vec5 = (v16u8)__msa_pckod_b((v16i8)src3, (v16i8)src2);  in ARGBToUVRow_MSA()
  882  vec4 = (v16u8)__msa_pckev_b((v16i8)vec5, (v16i8)vec4);  in ARGBToUVRow_MSA()
  [all …]
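ARGBToUVRow_MSA splits the byte-interleaved pixel data with even/odd packs before the horizontal adds that average neighbouring pixels. A scalar model of the two builtins used above (lane order is my reading of the MSA spec, so treat it as an assumption):

```cpp
#include <cstdint>

// PCKEV.B: gather the even-indexed bytes of both operands
// (the second operand fills the low half of the result).
static void pckev_b(const uint8_t ws[16], const uint8_t wt[16],
                    uint8_t dst[16]) {
  for (int i = 0; i < 8; ++i) {
    dst[i] = wt[2 * i];
    dst[8 + i] = ws[2 * i];
  }
}

// PCKOD.B: the same for the odd-indexed bytes.
static void pckod_b(const uint8_t ws[16], const uint8_t wt[16],
                    uint8_t dst[16]) {
  for (int i = 0; i < 8; ++i) {
    dst[i] = wt[2 * i + 1];
    dst[8 + i] = ws[2 * i + 1];
  }
}
```

Chained pckev/pckod calls like the ones in the listing peel single colour channels out of the interleaved data for the U/V computation.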
scale_msa.cc
  388  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in ScaleRowDown38_2_Box_MSA() local
  413  vec5 = (v8u16)__msa_vshf_h(mask, zero, (v8i16)vec1);  in ScaleRowDown38_2_Box_MSA()
  420  tmp1 = __msa_hadd_u_w(vec5, vec5);  in ScaleRowDown38_2_Box_MSA()
  458  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in ScaleRowDown38_3_Box_MSA() local
  481  vec5 = (v8u16)__msa_ilvl_b((v16i8)zero, (v16i8)src4);  in ScaleRowDown38_3_Box_MSA()
  489  vec1 += __msa_hadd_u_h((v16u8)vec5, (v16u8)vec5);  in ScaleRowDown38_3_Box_MSA()
  493  vec5 = (v8u16)__msa_vshf_h(mask, (v8i16)zero, (v8i16)vec1);  in ScaleRowDown38_3_Box_MSA()
  500  tmp1 = __msa_hadd_u_w(vec5, vec5);  in ScaleRowDown38_3_Box_MSA()
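__msa_hadd_u_h(v, v) with the same register in both operands is a widening pairwise add, which is how the 3/8 box downscaler sums adjacent pixels before scaling by the reciprocal of the box area. Scalar model, with the same lane-order caveat as above:

```cpp
#include <cstdint>

// Widening pairwise add: with identical operands, __msa_hadd_u_h(v, v)
// reduces to out[i] = v[2*i] + v[2*i + 1], bytes widened to halfwords.
static void hadd_pairs_u8(const uint8_t in[16], uint16_t out[8]) {
  for (int i = 0; i < 8; ++i) {
    out[i] = (uint16_t)((uint16_t)in[2 * i] + in[2 * i + 1]);
  }
}
```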
/external/libvpx/libvpx/vp8/common/mips/msa/
bilinear_filter_msa.c
  56   v8u16 vec4, vec5, vec6, vec7, filt;  in common_hz_2t_4x8_msa() local
  66   DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5,  in common_hz_2t_4x8_msa()
  68   SRARI_H4_UH(vec4, vec5, vec6, vec7, VP8_FILTER_SHIFT);  in common_hz_2t_4x8_msa()
  69   PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2,  in common_hz_2t_4x8_msa()
  185  v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;  in common_hz_2t_16w_msa() local
  201  VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);  in common_hz_2t_16w_msa()
  205  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,  in common_hz_2t_16w_msa()
  225  VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);  in common_hz_2t_16w_msa()
  229  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,  in common_hz_2t_16w_msa()
  337  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;  in common_vt_2t_8x8mult_msa() local
  [all …]
/external/llvm/test/Transforms/ConstProp/
InsertElement.ll
  29  %vec5 = insertelement <4 x i64> %vec3, i64 -5, i32 4
  31  ret <4 x i64> %vec5
  (note: index 4 is out of range for a <4 x i64>; the test exercises constant folding of that case)
/external/llvm/test/CodeGen/X86/
avx512bwvl-intrinsics.ll
  46   %vec5 = insertelement <8 x i32> %vec4, i32 %res5, i32 5
  48   %vec6 = insertelement <8 x i32> %vec5, i32 %res6, i32 6
  95   %vec5 = insertelement <8 x i32> %vec4, i32 %res5, i32 5
  97   %vec6 = insertelement <8 x i32> %vec5, i32 %res6, i32 6
  145  %vec5 = insertelement <8 x i32> %vec4, i32 %res5, i32 5
  147  %vec6 = insertelement <8 x i32> %vec5, i32 %res6, i32 6
  194  %vec5 = insertelement <8 x i32> %vec4, i32 %res5, i32 5
  196  %vec6 = insertelement <8 x i32> %vec5, i32 %res6, i32 6
  243  %vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
  245  %vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
  [all …]
avx512vl-intrinsics.ll
  45   %vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
  47   %vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
  93   %vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
  95   %vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
  142  %vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
  144  %vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
  190  %vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
  192  %vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
  239  %vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
  241  %vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
  [all …]
avx512-intrinsics.ll
  935   %vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
  937   %vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
  983   %vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
  985   %vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
  1032  %vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
  1034  %vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
  1080  %vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
  1082  %vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
  1129  %vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
  1131  %vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
  [all …]
/external/hyphenation-patterns/en-GB/
hyph-en-gb.pat.txt
  479  .vec5
  (a TeX-style hyphenation pattern, not code: the leading dot anchors the word start and the odd digit 5 permits a break after "vec", as in "vec-tor")