/external/libyuv/files/source/ |
D | row_msa.cc |
  78 v16i8 zero_m = {0}; \
  80 vec0_m = (v8i16)__msa_ilvr_b((v16i8)in_y, (v16i8)in_y); \
  81 vec1_m = (v8i16)__msa_ilvr_b((v16i8)zero_m, (v16i8)in_uv); \
  128 vec0_m = (v8i16)__msa_ilvev_b((v16i8)in1, (v16i8)in0); \
  129 vec1_m = (v8i16)__msa_ilvev_b((v16i8)in3, (v16i8)in2); \
  154 y_out = (v16u8)__msa_pckev_b((v16i8)reg1_m, (v16i8)reg0_m); \
  166 src0_m = (v16u8)__msa_ld_b((v16i8*)s, 0); \
  167 src1_m = (v16u8)__msa_ld_b((v16i8*)s, 16); \
  168 src2_m = (v16u8)__msa_ld_b((v16i8*)s, 32); \
  169 src3_m = (v16u8)__msa_ld_b((v16i8*)s, 48); \
  [all …]
|
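The row_msa.cc matches revolve around two MSA shuffle intrinsics: __msa_ilvr_b interleaves the low halves of two byte vectors, and __msa_pckev_b packs their even-indexed bytes. Below is a minimal plain-C model of both, for orientation only; the helper names are invented, and the operand order follows my reading of how the code above passes arguments, so treat it as a sketch rather than a specification.

    #include <stdint.h>

    /* ilvr_b(a, b): interleave the low 8 bytes of b and a, with b's
       bytes landing in the even result lanes. */
    static void ilvr_b_model(const uint8_t a[16], const uint8_t b[16],
                             uint8_t out[16]) {
      for (int i = 0; i < 8; ++i) {
        out[2 * i] = b[i];
        out[2 * i + 1] = a[i];
      }
    }

    /* pckev_b(a, b): even-indexed bytes of b fill the low half of the
       result, even-indexed bytes of a fill the high half. */
    static void pckev_b_model(const uint8_t a[16], const uint8_t b[16],
                              uint8_t out[16]) {
      for (int i = 0; i < 8; ++i) {
        out[i] = b[2 * i];
        out[8 + i] = a[2 * i];
      }
    }

Under this model, ilvr_b(in_y, in_y) duplicates each luma byte, and ilvr_b(zero_m, in_uv) zero-extends bytes into 16-bit lanes on a little-endian target, which matches how the results are cast to v8i16 above.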
D | scale_msa.cc |
  33 src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0); in ScaleARGBRowDown2_MSA()
  34 src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16); in ScaleARGBRowDown2_MSA()
  51 src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0); in ScaleARGBRowDown2Linear_MSA()
  52 src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16); in ScaleARGBRowDown2Linear_MSA()
  71 v16i8 shuffler = {0, 4, 1, 5, 2, 6, 3, 7, 8, 12, 9, 13, 10, 14, 11, 15}; in ScaleARGBRowDown2Box_MSA()
  74 src0 = (v16u8)__msa_ld_b((v16i8*)s, 0); in ScaleARGBRowDown2Box_MSA()
  75 src1 = (v16u8)__msa_ld_b((v16i8*)s, 16); in ScaleARGBRowDown2Box_MSA()
  76 src2 = (v16u8)__msa_ld_b((v16i8*)t, 0); in ScaleARGBRowDown2Box_MSA()
  77 src3 = (v16u8)__msa_ld_b((v16i8*)t, 16); in ScaleARGBRowDown2Box_MSA()
  78 vec0 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src0, (v16i8)src0); in ScaleARGBRowDown2Box_MSA()
  [all …]
|
D | rotate_msa.cc |
  24 out0 = (v16u8)__msa_ilvr_b((v16i8)in1, (v16i8)in0); \
  25 out1 = (v16u8)__msa_ilvl_b((v16i8)in1, (v16i8)in0); \
  26 out2 = (v16u8)__msa_ilvr_b((v16i8)in3, (v16i8)in2); \
  27 out3 = (v16u8)__msa_ilvl_b((v16i8)in3, (v16i8)in2); \
  90 src0 = (v16u8)__msa_ld_b((v16i8*)s, 0); in TransposeWx16_MSA()
  92 src1 = (v16u8)__msa_ld_b((v16i8*)s, 0); in TransposeWx16_MSA()
  94 src2 = (v16u8)__msa_ld_b((v16i8*)s, 0); in TransposeWx16_MSA()
  96 src3 = (v16u8)__msa_ld_b((v16i8*)s, 0); in TransposeWx16_MSA()
  100 src0 = (v16u8)__msa_ld_b((v16i8*)s, 0); in TransposeWx16_MSA()
  102 src1 = (v16u8)__msa_ld_b((v16i8*)s, 0); in TransposeWx16_MSA()
  [all …]
|
/external/libvpx/libvpx/third_party/libyuv/source/ |
D | row_msa.cc |
  78 v16i8 zero_m = {0}; \
  80 vec0_m = (v8i16)__msa_ilvr_b((v16i8)in_y, (v16i8)in_y); \
  81 vec1_m = (v8i16)__msa_ilvr_b((v16i8)zero_m, (v16i8)in_uv); \
  128 vec0_m = (v8i16)__msa_ilvev_b((v16i8)in1, (v16i8)in0); \
  129 vec1_m = (v8i16)__msa_ilvev_b((v16i8)in3, (v16i8)in2); \
  154 y_out = (v16u8)__msa_pckev_b((v16i8)reg1_m, (v16i8)reg0_m); \
  166 src0_m = (v16u8)__msa_ld_b((v16i8*)s, 0); \
  167 src1_m = (v16u8)__msa_ld_b((v16i8*)s, 16); \
  168 src2_m = (v16u8)__msa_ld_b((v16i8*)s, 32); \
  169 src3_m = (v16u8)__msa_ld_b((v16i8*)s, 48); \
  [all …]
|
D | scale_msa.cc |
  41 src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0); in ScaleARGBRowDown2_MSA()
  42 src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16); in ScaleARGBRowDown2_MSA()
  59 src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0); in ScaleARGBRowDown2Linear_MSA()
  60 src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16); in ScaleARGBRowDown2Linear_MSA()
  79 v16i8 shuffler = {0, 4, 1, 5, 2, 6, 3, 7, 8, 12, 9, 13, 10, 14, 11, 15}; in ScaleARGBRowDown2Box_MSA()
  82 src0 = (v16u8)__msa_ld_b((v16i8*)s, 0); in ScaleARGBRowDown2Box_MSA()
  83 src1 = (v16u8)__msa_ld_b((v16i8*)s, 16); in ScaleARGBRowDown2Box_MSA()
  84 src2 = (v16u8)__msa_ld_b((v16i8*)t, 0); in ScaleARGBRowDown2Box_MSA()
  85 src3 = (v16u8)__msa_ld_b((v16i8*)t, 16); in ScaleARGBRowDown2Box_MSA()
  86 vec0 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src0, (v16i8)src0); in ScaleARGBRowDown2Box_MSA()
  [all …]
|
D | compare_msa.cc |
  34 src0 = (v16u8)__msa_ld_b((v16i8*)src_a, 0); in HammingDistance_MSA()
  35 src1 = (v16u8)__msa_ld_b((v16i8*)src_a, 16); in HammingDistance_MSA()
  36 src2 = (v16u8)__msa_ld_b((v16i8*)src_b, 0); in HammingDistance_MSA()
  37 src3 = (v16u8)__msa_ld_b((v16i8*)src_b, 16); in HammingDistance_MSA()
  63 src0 = (v16u8)__msa_ld_b((v16i8*)src_a, 0); in SumSquareError_MSA()
  64 src1 = (v16u8)__msa_ld_b((v16i8*)src_a, 16); in SumSquareError_MSA()
  65 src2 = (v16u8)__msa_ld_b((v16i8*)src_b, 0); in SumSquareError_MSA()
  66 src3 = (v16u8)__msa_ld_b((v16i8*)src_b, 16); in SumSquareError_MSA()
  67 vec0 = (v8i16)__msa_ilvr_b((v16i8)src2, (v16i8)src0); in SumSquareError_MSA()
  68 vec1 = (v8i16)__msa_ilvl_b((v16i8)src2, (v16i8)src0); in SumSquareError_MSA()
  [all …]
|
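HammingDistance_MSA and SumSquareError_MSA vectorize two byte-buffer metrics; the ilvr_b/ilvl_b pair interleaves the two inputs so differences can be formed and squared in 16-bit lanes without overflow. As a scalar reference for the second function, here is a sketch; the helper name and the explicit length parameter are mine, not libyuv's API:

    #include <stddef.h>
    #include <stdint.h>

    /* Sum of squared byte differences over n bytes. */
    static uint64_t sum_square_error_ref(const uint8_t *src_a,
                                         const uint8_t *src_b, size_t n) {
      uint64_t sse = 0;
      for (size_t i = 0; i < n; ++i) {
        int d = (int)src_a[i] - (int)src_b[i];
        sse += (uint64_t)(d * d);
      }
      return sse;
    }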
D | rotate_msa.cc |
  24 out0 = (v16u8)__msa_ilvr_b((v16i8)in1, (v16i8)in0); \
  25 out1 = (v16u8)__msa_ilvl_b((v16i8)in1, (v16i8)in0); \
  26 out2 = (v16u8)__msa_ilvr_b((v16i8)in3, (v16i8)in2); \
  27 out3 = (v16u8)__msa_ilvl_b((v16i8)in3, (v16i8)in2); \
  90 src0 = (v16u8)__msa_ld_b((v16i8*)s, 0); in TransposeWx16_MSA()
  92 src1 = (v16u8)__msa_ld_b((v16i8*)s, 0); in TransposeWx16_MSA()
  94 src2 = (v16u8)__msa_ld_b((v16i8*)s, 0); in TransposeWx16_MSA()
  96 src3 = (v16u8)__msa_ld_b((v16i8*)s, 0); in TransposeWx16_MSA()
  100 src0 = (v16u8)__msa_ld_b((v16i8*)s, 0); in TransposeWx16_MSA()
  102 src1 = (v16u8)__msa_ld_b((v16i8*)s, 0); in TransposeWx16_MSA()
  [all …]
|
/external/llvm/test/CodeGen/PowerPC/ |
D | vaddsplat.ll |
  10 %v16i8 = type <16 x i8>
  56 define void @test_v16i8_pos_even(%v16i8* %P, %v16i8* %S) {
  57 %p = load %v16i8, %v16i8* %P
  58 …%r = add %v16i8 %p, < i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16,…
  59 store %v16i8 %r, %v16i8* %S
  67 define void @test_v16i8_neg_even(%v16i8* %P, %v16i8* %S) {
  68 %p = load %v16i8, %v16i8* %P
  69 …%r = add %v16i8 %p, < i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -…
  70 store %v16i8 %r, %v16i8* %S
  126 define void @test_v16i8_pos_odd(%v16i8* %P, %v16i8* %S) {
  [all …]
|
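This test (duplicated below in the swiftshader copy of LLVM) feeds the PowerPC backend loads, splat-constant adds, and stores over %v16i8, with even/odd and positive/negative cases named separately, presumably so different splat-materialization strategies can be checked. A rough C analogue of test_v16i8_pos_even using GNU vector extensions, purely illustrative and not part of the test itself:

    typedef signed char v16i8_t __attribute__((vector_size(16)));

    /* Load, add 16 to every byte lane, store; the scalar 16 is
       splatted across all sixteen lanes by the vector extension. */
    void test_v16i8_pos_even_c(const v16i8_t *P, v16i8_t *S) {
      *S = *P + 16;
    }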
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/ |
D | vaddsplat.ll |
  10 %v16i8 = type <16 x i8>
  56 define void @test_v16i8_pos_even(%v16i8* %P, %v16i8* %S) {
  57 %p = load %v16i8, %v16i8* %P
  58 …%r = add %v16i8 %p, < i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16,…
  59 store %v16i8 %r, %v16i8* %S
  67 define void @test_v16i8_neg_even(%v16i8* %P, %v16i8* %S) {
  68 %p = load %v16i8, %v16i8* %P
  69 …%r = add %v16i8 %p, < i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -…
  70 store %v16i8 %r, %v16i8* %S
  126 define void @test_v16i8_pos_odd(%v16i8* %P, %v16i8* %S) {
  [all …]
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | loopfilter_16_msa.c |
  84 v16i8 zero = { 0 }; in hz_lpf_t16_16w()
  114 q0_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q0); in hz_lpf_t16_16w()
  133 q0_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q0); in hz_lpf_t16_16w()
  148 r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out); in hz_lpf_t16_16w()
  154 q1_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q1); in hz_lpf_t16_16w()
  161 q1_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q1); in hz_lpf_t16_16w()
  168 r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out); in hz_lpf_t16_16w()
  174 q2_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q2); in hz_lpf_t16_16w()
  181 q2_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q2); in hz_lpf_t16_16w()
  188 r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out); in hz_lpf_t16_16w()
  [all …]
|
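The recurring ilvr_b(zero, x) / ilvl_b(zero, x) pair above is the usual MSA idiom for zero-extending the sixteen u8 lanes of a pixel vector into two v8u16 halves; pckev_b later packs the filtered 16-bit results back down to bytes. A plain-C model of the widening half, assuming little-endian lane order:

    #include <stdint.h>

    /* Zero-extend 16 pixel bytes into a "right" (low) and a "left"
       (high) vector of eight u16 lanes, as ilvr_b/ilvl_b against a
       zero vector do. */
    static void widen_u8_to_u16(const uint8_t px[16], uint16_t r_out[8],
                                uint16_t l_out[8]) {
      for (int i = 0; i < 8; ++i) {
        r_out[i] = px[i];     /* models ilvr_b(zero, px) */
        l_out[i] = px[8 + i]; /* models ilvl_b(zero, px) */
      }
    }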
D | macros_msa.h |
  21 #define LD_SB(...) LD_V(v16i8, __VA_ARGS__)
  28 #define ST_SB(...) ST_V(v16i8, __VA_ARGS__)
  241 #define LD_SB2(...) LD_V2(v16i8, __VA_ARGS__)
  258 #define LD_SB4(...) LD_V4(v16i8, __VA_ARGS__)
  267 #define LD_SB5(...) LD_V5(v16i8, __VA_ARGS__)
  274 #define LD_SB7(...) LD_V7(v16i8, __VA_ARGS__)
  283 #define LD_SB8(...) LD_V8(v16i8, __VA_ARGS__)
  505 v16i8 zero_m = { 0 }; \
  506 out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val); \
  507 out1 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in1, slide_val); \
  [all …]
|
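macros_msa.h dispatches the raw load/store builtins by vector type: LD_SB loads one v16i8, and LD_SB2/4/5/7/8 load runs of vectors separated by a stride. A sketch of what the two-vector case boils down to, assuming a MIPS toolchain with MSA enabled (-mmsa); this is my approximation of the expansion, not the header's exact text:

    #include <stddef.h>
    #include <msa.h> /* MSA vector types and __msa_* builtins */

    /* Roughly LD_SB2(psrc, stride, out0, out1): two 16-byte vector
       loads, the second offset by a runtime stride. */
    static void ld_sb2_sketch(const int8_t *psrc, ptrdiff_t stride,
                              v16i8 *out0, v16i8 *out1) {
      *out0 = __msa_ld_b(psrc, 0);
      *out1 = __msa_ld_b(psrc + stride, 0);
    }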
D | vpx_convolve8_msa.c |
  29 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_hv_8ht_8vt_4w_msa()
  30 v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3; in common_hv_8ht_8vt_4w_msa()
  65 out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); in common_hv_8ht_8vt_4w_msa()
  74 hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8); in common_hv_8ht_8vt_4w_msa()
  75 out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); in common_hv_8ht_8vt_4w_msa()
  81 hz_out8 = (v8i16)__msa_sldi_b((v16i8)hz_out9, (v16i8)hz_out7, 8); in common_hv_8ht_8vt_4w_msa()
  82 out4 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8); in common_hv_8ht_8vt_4w_msa()
  103 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_hv_8ht_8vt_8w_msa()
  104 v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3; in common_hv_8ht_8vt_8w_msa()
  156 out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); in common_hv_8ht_8vt_8w_msa()
  [all …]
|
D | vpx_convolve8_avg_msa.c |
  20 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_hv_8ht_8vt_and_aver_dst_4w_msa()
  22 v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3; in common_hv_8ht_8vt_and_aver_dst_4w_msa()
  56 vec2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
  67 hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
  68 vec3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
  74 hz_out8 = (v8i16)__msa_sldi_b((v16i8)hz_out9, (v16i8)hz_out7, 8); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
  75 vec4 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
  98 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_hv_8ht_8vt_and_aver_dst_8w_msa()
  99 v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3; in common_hv_8ht_8vt_and_aver_dst_8w_msa()
  154 out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); in common_hv_8ht_8vt_and_aver_dst_8w_msa()
  [all …]
|
D | vpx_convolve_msa.h |
  24 tmp_dpadd_0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0); \
  25 tmp_dpadd_0 = __msa_dpadd_s_h(tmp_dpadd_0, (v16i8)vec1, (v16i8)filt1); \
  26 tmp_dpadd_1 = __msa_dotp_s_h((v16i8)vec2, (v16i8)filt2); \
  27 tmp_dpadd_1 = __msa_dpadd_s_h(tmp_dpadd_1, (v16i8)vec3, (v16i8)filt3); \
  36 v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \
  54 v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
  72 v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
  108 tmp_m = (v16u8)__msa_pckev_b((v16i8)in0, (v16i8)in1); \
|
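The DOTP/DPADD sequence in vpx_convolve_msa.h is how MSA evaluates an 8-tap convolution: __msa_dotp_s_h multiplies adjacent signed-byte pairs and sums each pair into a 16-bit lane, and __msa_dpadd_s_h accumulates further pair products on top, so four vec/filt pairs cover all eight taps. The arithmetic it implements, as a scalar sketch (byte shuffling, signedness handling, rounding, and saturation are all omitted):

    #include <stdint.h>

    /* One output sample of an 8-tap FIR filter; the four partial dot
       products of the macro above collapse into this single sum. */
    static int32_t eight_tap_sample(const uint8_t *src, const int8_t filt[8]) {
      int32_t acc = 0;
      for (int k = 0; k < 8; ++k) {
        acc += (int32_t)src[k] * filt[k];
      }
      return acc;
    }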
/external/libaom/libaom/aom_dsp/mips/ |
D | loopfilter_16_msa.c |
  83 v16i8 zero = { 0 }; in aom_hz_lpf_t16_16w()
  113 q0_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q0); in aom_hz_lpf_t16_16w()
  132 q0_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q0); in aom_hz_lpf_t16_16w()
  147 r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out); in aom_hz_lpf_t16_16w()
  153 q1_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q1); in aom_hz_lpf_t16_16w()
  160 q1_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q1); in aom_hz_lpf_t16_16w()
  167 r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out); in aom_hz_lpf_t16_16w()
  173 q2_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q2); in aom_hz_lpf_t16_16w()
  180 q2_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q2); in aom_hz_lpf_t16_16w()
  187 r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out); in aom_hz_lpf_t16_16w()
  [all …]
|
D | loopfilter_msa.h |
  20 v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \
  21 v16i8 filt, filt1, filt2, cnst4b, cnst3b; \
  24 p1_m = (v16i8)__msa_xori_b(p1_in, 0x80); \
  25 p0_m = (v16i8)__msa_xori_b(p0_in, 0x80); \
  26 q0_m = (v16i8)__msa_xori_b(q0_in, 0x80); \
  27 q1_m = (v16i8)__msa_xori_b(q1_in, 0x80); \
  30 filt = filt & (v16i8)hev_in; \
  36 q0_sub_p0_r = __msa_dotp_s_h((v16i8)q0_sub_p0_r, (v16i8)cnst3h); \
  42 filt = __msa_pckev_b((v16i8)filt_r, (v16i8)filt_r); \
  44 filt = filt & (v16i8)mask_in; \
  [all …]
|
D | macros_msa.h |
  23 #define LD_SB(...) LD_B(v16i8, __VA_ARGS__)
  34 #define ST_SB(...) ST_B(v16i8, __VA_ARGS__)
  301 #define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__)
  316 #define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__)
  324 #define LD_SB5(...) LD_B5(v16i8, __VA_ARGS__)
  331 #define LD_SB7(...) LD_B7(v16i8, __VA_ARGS__)
  340 #define LD_SB8(...) LD_B8(v16i8, __VA_ARGS__)
  633 v16i8 zero_m = { 0 }; \
  634 out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val); \
  635 out1 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in1, slide_val); \
  [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/PowerPC/ |
D | PPCInstrAltivec.td |
  34 // Since we canonicalize buildvectors to v16i8, all vnots "-1" operands will be
  37 (xor node:$in, (bitconvert (v16i8 immAllOnesV)))>;
  80 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  84 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  88 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  92 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  96 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  100 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  106 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  134 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  [all …]
|
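The comment on the vnot fragment explains the trick: build vectors are canonicalized to v16i8, so a vector NOT reaches instruction selection as an XOR against an all-ones v16i8, and the pattern fragment matches exactly that DAG shape. In C terms, a model of the matched operation (not of the TableGen machinery):

    typedef signed char v16i8_t __attribute__((vector_size(16)));

    /* vnot x is matched as x ^ (all-ones v16i8). */
    static v16i8_t vnot_model(v16i8_t x) {
      return x ^ (v16i8_t){-1, -1, -1, -1, -1, -1, -1, -1,
                           -1, -1, -1, -1, -1, -1, -1, -1};
    }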
/external/libvpx/libvpx/vp8/common/mips/msa/ |
D | vp8_macros_msa.h |
  21 #define LD_SB(...) LD_B(v16i8, __VA_ARGS__)
  33 #define ST_SB(...) ST_B(v16i8, __VA_ARGS__)
  274 #define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__)
  282 #define LD_SB3(...) LD_B3(v16i8, __VA_ARGS__)
  290 #define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__)
  298 #define LD_SB5(...) LD_B5(v16i8, __VA_ARGS__)
  307 #define LD_SB8(...) LD_B8(v16i8, __VA_ARGS__)
  358 #define ST_SB4(...) ST_B4(v16i8, __VA_ARGS__)
  514 v16i8 zero_m = { 0 }; \
  516 out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val); \
  [all …]
|
D | sixtap_filter_msa.c |
  38 v16i8 vec0_m, vec1_m, vec2_m; \
  55 v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m; \
  69 v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
  89 tmp0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0); \
  90 tmp0 = __msa_dpadd_s_h(tmp0, (v16i8)vec1, (v16i8)filt1); \
  97 v16i8 vec0_m, vec1_m; \
  112 v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \
  123 v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \
  138 v16i8 src0, src1, src2, src3, filt0, filt1, filt2; in common_hz_6t_4x4_msa()
  164 v16i8 src0, src1, src2, src3, filt0, filt1, filt2; in common_hz_6t_4x8_msa()
  [all …]
|
D | mfqe_msa.c |
  21 v16i8 src0 = { 0 }; in filter_by_weight8x8_msa()
  22 v16i8 src1 = { 0 }; in filter_by_weight8x8_msa()
  23 v16i8 dst0 = { 0 }; in filter_by_weight8x8_msa()
  24 v16i8 dst1 = { 0 }; in filter_by_weight8x8_msa()
  50 dst0 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r); in filter_by_weight8x8_msa()
  61 dst1 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r); in filter_by_weight8x8_msa()
  72 v16i8 src0, src1, src2, src3; in filter_by_weight16x16_msa()
  73 v16i8 dst0, dst1, dst2, dst3; in filter_by_weight16x16_msa()
|
/external/llvm/lib/Target/PowerPC/ |
D | PPCInstrAltivec.td |
  33 // Since we canonicalize buildvectors to v16i8, all vnots "-1" operands will be
  36 (xor node:$in, (bitconvert (v16i8 immAllOnesV)))>;
  79 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  83 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  87 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  91 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  95 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  99 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  105 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  133 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  [all …]
|
/external/libvpx/libvpx/vp8/encoder/mips/msa/ |
D | denoising_msa.c |
  116 temp2_h = (v8i16)__msa_pckev_b((v16i8)temp3_h, (v16i8)temp2_h); in vp8_denoiser_filter_msa()
  117 running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp1_h, (v16i8)temp0_h); in vp8_denoiser_filter_msa()
  164 temp2_h = (v8i16)__msa_pckev_b((v16i8)temp3_h, (v16i8)temp2_h); in vp8_denoiser_filter_msa()
  165 running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp1_h, (v16i8)temp0_h); in vp8_denoiser_filter_msa()
  238 running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust3, (v16i8)adjust2); in vp8_denoiser_filter_msa()
  272 running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust3, (v16i8)adjust2); in vp8_denoiser_filter_msa()
  331 v16i8 zero = { 0 }; in vp8_denoiser_filter_uv_msa()
  338 temp0_h = (v8i16)__msa_ilvr_b(zero, (v16i8)sig0); in vp8_denoiser_filter_uv_msa()
  341 temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig1); in vp8_denoiser_filter_uv_msa()
  344 temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig2); in vp8_denoiser_filter_uv_msa()
  [all …]
|
/external/swiftshader/third_party/LLVM/test/CodeGen/CellSPU/useful-harnesses/ |
D | vecoperations.c |
  3 typedef unsigned char v16i8 __attribute__((ext_vector_type(16))); typedef
  10 void print_v16i8(const char *str, const v16i8 v) { in print_v16i8()
  13 v16i8 vec; in print_v16i8()
  24 void print_v16i8_hex(const char *str, const v16i8 v) { in print_v16i8_hex()
  27 v16i8 vec; in print_v16i8_hex()
  68 v16i8 v16i8_mpy(v16i8 v1, v16i8 v2) { in v16i8_mpy()
  72 v16i8 v16i8_add(v16i8 v1, v16i8 v2) { in v16i8_add()
  137 v16i8 v00 = { 0xf4, 0xad, 0x01, 0xe9, 0x51, 0x78, 0xc1, 0x8a, in main()
  139 v16i8 va0 = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, in main()
  141 v16i8 va1 = { 0x11, 0x83, 0x4b, 0x63, 0xff, 0x90, 0x32, 0xe5, in main()
|
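vecoperations.c is a self-contained harness: it defines v16i8 with Clang's ext_vector_type, exercises element-wise arithmetic, and prints results through a union. A stripped-down version that compiles with clang alone, assuming nothing beyond the visible typedef:

    #include <stdio.h>

    typedef unsigned char v16i8 __attribute__((ext_vector_type(16)));

    int main(void) {
      v16i8 a = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
      v16i8 b = a + a; /* element-wise add, as in v16i8_add() */
      for (int i = 0; i < 16; ++i) {
        printf("%u ", (unsigned)b[i]); /* ext vectors support indexing */
      }
      printf("\n");
      return 0;
    }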
/external/libpng/mips/ |
D | filter_msa_intrinsics.c |
  42 #define MSA_SRLI_B(a, b) __msa_srli_b((v16i8) a, b)
  300 out0 = (RTYPE) __msa_ilvr_b((v16i8) in0, (v16i8) in1); \
  301 out1 = (RTYPE) __msa_ilvr_b((v16i8) in2, (v16i8) in3); \
  314 v16i8 zero_m = { 0 }; \
  315 out0 = (RTYPE) __msa_sldi_b((v16i8) zero_m, (v16i8) in0, slide_val); \
  316 out1 = (RTYPE) __msa_sldi_b((v16i8) zero_m, (v16i8) in1, slide_val); \
  322 v16i8 zero_m = { 0 }; \
  324 out2 = (RTYPE) __msa_sldi_b((v16i8) zero_m, (v16i8) in2, slide_val); \
  347 out0 = (RTYPE) __msa_vshf_b((v16i8) mask0, (v16i8) in1, (v16i8) in0); \
  348 out1 = (RTYPE) __msa_vshf_b((v16i8) mask1, (v16i8) in3, (v16i8) in2); \
  [all …]
|
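libpng's MSA filter path uses the same interleave, slide, and shuffle families seen throughout this list, plus a per-byte shift: MSA_SRLI_B wraps __msa_srli_b, which logically shifts each of the sixteen bytes right by the same immediate. A plain-C model of that one wrapper (a model only; the real macro operates on MSA vector registers):

    #include <stdint.h>

    /* Each byte is shifted logically right by `shift`, independently. */
    static void srli_b_model(const uint8_t in[16], int shift,
                             uint8_t out[16]) {
      for (int i = 0; i < 16; ++i) {
        out[i] = (uint8_t)(in[i] >> shift);
      }
    }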