/external/libvpx/libvpx/vpx_dsp/ppc/
D | fdct32x32_vsx.c |
    29 const int32x4_t sum_e = vec_add(ac_e, bc_e); in single_butterfly()
    30 const int32x4_t sum_o = vec_add(ac_o, bc_o); in single_butterfly()
    35 const int32x4_t rsum_o = vec_add(sum_o, vec_dct_const_rounding); in single_butterfly()
    36 const int32x4_t rsum_e = vec_add(sum_e, vec_dct_const_rounding); in single_butterfly()
    37 const int32x4_t rdiff_o = vec_add(diff_o, vec_dct_const_rounding); in single_butterfly()
    38 const int32x4_t rdiff_e = vec_add(diff_e, vec_dct_const_rounding); in single_butterfly()
    64 const int32x4_t sum_o = vec_add(ac1_o, bc2_o); in double_butterfly()
    65 const int32x4_t sum_e = vec_add(ac1_e, bc2_e); in double_butterfly()
    70 const int32x4_t rsum_o = vec_add(sum_o, vec_dct_const_rounding); in double_butterfly()
    71 const int32x4_t rsum_e = vec_add(sum_e, vec_dct_const_rounding); in double_butterfly()
    [all …]
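The sum_*/rsum_* pairs above are the two halves of libvpx's fixed-point butterfly: add the lanes, then add DCT_CONST_ROUNDING (2^13) before an arithmetic shift by DCT_CONST_BITS (14). A minimal sketch of that round-and-shift step, with names of my own choosing:

    #include <altivec.h>

    /* Round back to 14-bit fixed point; a sketch of the idiom behind
       rsum_e/rsum_o above, not the real single_butterfly(). */
    static inline vector signed int round14(vector signed int v) {
      const vector signed int rounding = vec_splats(1 << 13); /* DCT_CONST_ROUNDING */
      const vector unsigned int shift = vec_splats(14u);      /* DCT_CONST_BITS */
      return vec_sra(vec_add(v, rounding), shift);
    }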
|
D | hadamard_vsx.c |
    17 const int16x8_t b0 = vec_add(v[0], v[1]); in vpx_hadamard_s16_8x8_one_pass()
    19 const int16x8_t b2 = vec_add(v[2], v[3]); in vpx_hadamard_s16_8x8_one_pass()
    21 const int16x8_t b4 = vec_add(v[4], v[5]); in vpx_hadamard_s16_8x8_one_pass()
    23 const int16x8_t b6 = vec_add(v[6], v[7]); in vpx_hadamard_s16_8x8_one_pass()
    26 const int16x8_t c0 = vec_add(b0, b2); in vpx_hadamard_s16_8x8_one_pass()
    27 const int16x8_t c1 = vec_add(b1, b3); in vpx_hadamard_s16_8x8_one_pass()
    30 const int16x8_t c4 = vec_add(b4, b6); in vpx_hadamard_s16_8x8_one_pass()
    31 const int16x8_t c5 = vec_add(b5, b7); in vpx_hadamard_s16_8x8_one_pass()
    35 v[0] = vec_add(c0, c4); in vpx_hadamard_s16_8x8_one_pass()
    38 v[3] = vec_add(c2, c6); in vpx_hadamard_s16_8x8_one_pass()
    [all …]
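Every hit in this file is one arm of a Hadamard butterfly stage: pairwise vec_add (shown) mirrored by pairwise vec_sub on the lines in between. A condensed sketch of one stage, not the libvpx function itself:

    #include <altivec.h>

    /* One butterfly stage on 16-bit lanes: (a, b) -> (a + b, a - b). */
    static inline void butterfly_s16(vector signed short *a,
                                     vector signed short *b) {
      const vector signed short sum = vec_add(*a, *b);
      const vector signed short diff = vec_sub(*a, *b);
      *a = sum;
      *b = diff;
    }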
|
D | inv_txfm_vsx.c |
    145 #define DCT_CONST_ROUND_SHIFT(vec) vec = vec_sra(vec_add(vec, shift), shift14);
    151 #define PIXEL_ADD4(out, in) out = vec_sra(vec_add(in, add8), shift4);
    154 t0 = vec_add(in0, in1); \
    157 temp1 = vec_sra(vec_add(vec_mule(tmp16_0, cospi16_v), shift), shift14); \
    158 temp2 = vec_sra(vec_add(vec_mulo(tmp16_0, cospi16_v), shift), shift14); \
    163 temp4 = vec_add(vec_mule(tmp16_0, cospi8_v), vec_mulo(tmp16_0, cospi24_v)); \
    168 out0 = vec_add(step0, step1); \
    173 tmp16_0 = vec_add(vec_perm(d_u0, d_u1, tr8_mask0), v0); \
    174 tmp16_1 = vec_add(vec_perm(d_u2, d_u3, tr8_mask0), v1); \
    271 temp10 = vec_add(vec_mule(tmp16_0, cospi1), vec_mulo(tmp16_0, cospi0)); \
    [all …]
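The temp1/temp2 hits show the usual VSX recipe for full-precision 16-bit multiplies: vec_mule/vec_mulo produce 32-bit products for the even and odd lanes, each then rounded and shifted as in DCT_CONST_ROUND_SHIFT. A sketch under those assumptions, with `c` standing in for a splatted cospi constant:

    #include <altivec.h>

    /* Full 32-bit products of 16-bit lanes, rounded and shifted by
       DCT_CONST_BITS (14). Names are illustrative, not from the file. */
    static void mul_round_shift(vector signed short v, vector signed short c,
                                vector signed int *even,
                                vector signed int *odd) {
      const vector signed int rnd = vec_splats(1 << 13);
      const vector unsigned int sh = vec_splats(14u);
      *even = vec_sra(vec_add(vec_mule(v, c), rnd), sh); /* even lanes */
      *odd = vec_sra(vec_add(vec_mulo(v, c), rnd), sh);  /* odd lanes */
    }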
|
D | deblock_vsx.c |
    132 const int16x8_t sum1 = vec_add(x, vec_slo(x, vec_splats((int8_t)(2 << 3)))); in slide_sum_s16()
    134 const int16x8_t sum2 = vec_add(vec_slo(x, vec_splats((int8_t)(4 << 3))), in slide_sum_s16()
    138 const int16x8_t sum3 = vec_add(vec_slo(x, vec_splats((int8_t)(8 << 3))), in slide_sum_s16()
    142 const int16x8_t sum4 = vec_add(vec_slo(x, vec_splats((int8_t)(12 << 3))), in slide_sum_s16()
    145 return vec_add(vec_add(sum1, sum2), vec_add(sum3, sum4)); in slide_sum_s16()
    152 int32x4_t sumsq_1 = vec_add(vec_slo(xsq_even, vec_splats((int8_t)(4 << 3))), in slide_sumsq_s32()
    156 int32x4_t sumsq_2 = vec_add(vec_slo(xsq_even, vec_splats((int8_t)(8 << 3))), in slide_sumsq_s32()
    160 int32x4_t sumsq_3 = vec_add(vec_slo(xsq_even, vec_splats((int8_t)(12 << 3))), in slide_sumsq_s32()
    162 sumsq_1 = vec_add(sumsq_1, xsq_even); in slide_sumsq_s32()
    163 sumsq_2 = vec_add(sumsq_2, sumsq_3); in slide_sumsq_s32()
    [all …]
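slide_sum_s16() builds a sliding-window sum by sliding the whole register left by whole octets with vec_slo and accumulating with vec_add; vec_slo reads its octet count from bits inside the shift vector, which is why each count is splatted as (n << 3). One step of that idiom as a sketch (helper name is mine):

    #include <altivec.h>

    /* x plus a copy of x slid left by `bytes` octets (zeros shifted in). */
    static vector signed short slide_and_add(vector signed short x, int bytes) {
      const vector signed char slo = vec_splats((signed char)(bytes << 3));
      return vec_add(x, vec_slo(x, slo));
    }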
|
D | intrapred_vsx.c |
    251 val = vec_sub(vec_add(vec_splat(l, 0), a), tl);
    257 val = vec_sub(vec_add(vec_splat(l, 1), a), tl);
    263 val = vec_sub(vec_add(vec_splat(l, 2), a), tl);
    269 val = vec_sub(vec_add(vec_splat(l, 3), a), tl);
    281 val = vec_sub(vec_add(vec_splat(l, 0), a), tl);
    286 val = vec_sub(vec_add(vec_splat(l, 1), a), tl);
    291 val = vec_sub(vec_add(vec_splat(l, 2), a), tl);
    296 val = vec_sub(vec_add(vec_splat(l, 3), a), tl);
    301 val = vec_sub(vec_add(vec_splat(l, 4), a), tl);
    306 val = vec_sub(vec_add(vec_splat(l, 5), a), tl);
    [all …]
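Each hit is one row of the TrueMotion (TM) predictor: the row's left pixel is broadcast with vec_splat, added to the above-row, and the top-left pixel subtracted. A one-row sketch, assuming the 16-bit working lanes the VSX code appears to use:

    #include <altivec.h>

    /* One row of TM prediction: pred = left[row] + above - top_left.
       vec_splat's lane index must be a literal, hence one call per row. */
    static vector signed short tm_row0(vector signed short above,
                                       vector signed short left,
                                       vector signed short top_left) {
      return vec_sub(vec_add(vec_splat(left, 0), above), top_left);
    }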
|
D | quantize_vsx.c |
    20 return vec_xor(vec_add(a, mask), mask); in vec_sign()
    44 qcoeff = vec_add(qcoeff, rounded); in quantize_coeff()
    57 qcoeff = vec_add(qcoeff, rounded); in quantize_coeff_32()
    74 dqcoeffe = vec_add(dqcoeffe, vec_is_neg(dqcoeffe)); in dequantize_coeff_32()
    75 dqcoeffo = vec_add(dqcoeffo, vec_is_neg(dqcoeffo)); in dequantize_coeff_32()
    235 zbin = vec_sra(vec_add(zbin, vec_ones_s16), vec_ones_u16); in vpx_quantize_b_32x32_vsx()
    236 round = vec_sra(vec_add(round, vec_ones_s16), vec_ones_u16); in vpx_quantize_b_32x32_vsx()
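vec_sign() at line 20 is the classic branch-free conditional negation: with mask = 0 the value passes through ((a+0)^0 == a), with mask = -1 it is negated ((a-1)^-1 == -a). A standalone sketch with assumed 16-bit lanes:

    #include <altivec.h>

    /* Negate `a` wherever `sign` is negative, without branches. */
    static vector signed short conditional_negate(vector signed short a,
                                                  vector signed short sign) {
      /* mask: -1 where sign < 0, else 0 */
      const vector signed short mask =
          vec_sra(sign, vec_splats((unsigned short)15));
      return vec_xor(vec_add(a, mask), mask);
    }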
|
/external/fec/ |
D | sumsq_av.c |
    40 carries = vec_add(carries,vec_addc(sums,s1)); in sumsq_av()
    41 sums = vec_add(sums,s1); in sumsq_av()
    51 carries = vec_add(carries,vec_addc(sums,s1)); in sumsq_av()
    52 sums = vec_add(sums,s1); in sumsq_av()
    58 carries = vec_add(carries,vec_addc(sums,s1)); in sumsq_av()
    59 sums = vec_add(sums,s1); in sumsq_av()
    60 carries = vec_add(carries,s2); in sumsq_av()
    65 carries = vec_add(carries,vec_addc(sums,s1)); in sumsq_av()
    66 sums = vec_add(sums,s1); in sumsq_av()
    67 carries = vec_add(carries,s2); in sumsq_av()
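The sums/carries pairs extend 32-bit lane accumulators to 64 bits without widening: vec_addc() returns the carry-out (0 or 1 per lane) of the unsigned add, which is kept in a second register. The idiom as a helper (names mine):

    #include <altivec.h>

    /* 64-bit-safe accumulation in 32-bit lanes: low words in *sums,
       carry-outs counted in *carries. */
    static void add_with_carry(vector unsigned int *sums,
                               vector unsigned int *carries,
                               vector unsigned int s1) {
      *carries = vec_add(*carries, vec_addc(*sums, s1)); /* 0/1 per lane */
      *sums = vec_add(*sums, s1);
    }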
|
D | viterbi615_av.c |
    143 m0 = vec_add(vec_xor(Branchtab615[0].v[i],sym0v),vec_xor(Branchtab615[1].v[i],sym1v)); in update_viterbi615_blk_av()
    144 m1 = vec_add(vec_xor(Branchtab615[2].v[i],sym2v),vec_xor(Branchtab615[3].v[i],sym3v)); in update_viterbi615_blk_av()
    145 m2 = vec_add(vec_xor(Branchtab615[4].v[i],sym4v),vec_xor(Branchtab615[5].v[i],sym5v)); in update_viterbi615_blk_av()
    146 metric = vec_add(m0,m1); in update_viterbi615_blk_av()
    147 metric = vec_add(metric,m2); in update_viterbi615_blk_av()
    166 decisions = vec_add(decisions,decisions); /* Shift each byte 1 bit to the left */ in update_viterbi615_blk_av()
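Note the trick at line 166, which the source comment spells out: `vec_add(decisions, decisions)` doubles each byte, i.e. shifts it left one bit, without a separate shift intrinsic. Sketch:

    #include <altivec.h>

    /* v + v doubles every byte lane: a one-bit left shift via vec_add. */
    static vector unsigned char shift_left_one(vector unsigned char v) {
      return vec_add(v, v);
    }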
|
D | viterbi39_av.c |
    140 m0 = vec_add(vec_xor(Branchtab39[0].v[i],sym0v),vec_xor(Branchtab39[1].v[i],sym1v)); in update_viterbi39_blk_av()
    142 metric = vec_add(m0,m1); in update_viterbi39_blk_av()
    161 decisions = vec_add(decisions,decisions); /* Shift each byte 1 bit to the left */ in update_viterbi39_blk_av()
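Same structure as viterbi615_av.c, for a rate-1/3 code. Assuming, as in Karn's fec library, that branch-table bytes hold 0x00/0xFF expected symbols, vec_xor yields the per-bit mismatch against the received soft symbol, and vec_add sums the bit positions. A hypothetical sketch of that metric step:

    #include <altivec.h>

    /* Branch metric for two code bits: XOR distance, then summed. The
       original relies on known value ranges to avoid overflow. */
    static vector unsigned char branch_metric(vector unsigned char expect0,
                                              vector unsigned char sym0,
                                              vector unsigned char expect1,
                                              vector unsigned char sym1) {
      return vec_add(vec_xor(expect0, sym0), vec_xor(expect1, sym1));
    }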
|
/external/rust/crates/libz-sys/src/zlib-ng/arch/power/ |
D | adler32_power8.c |
    49 __b = vec_add(__b, __a); in vec_sumsu()
    51 __a = vec_add(__a, __b); in vec_sumsu()
    98 vs1_save = vec_add(vs1_save, vs1); in adler32_power8()
    100 vs1 = vec_add(vsum1, vs1); in adler32_power8()
    101 vs2 = vec_add(vsum2, vs2); in adler32_power8()
    108 vs2 = vec_add(vs1_save, vs2); in adler32_power8()
    133 vs1_save = vec_add(vs1_save, vs1); in adler32_power8()
    135 vs1 = vec_add(vsum1, vs1); in adler32_power8()
    136 vs2 = vec_add(vsum2, vs2); in adler32_power8()
    143 vs2 = vec_add(vs1_save, vs2); in adler32_power8()
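The vector loop maintains Adler-32's two sums: vs1 accumulates bytes, vs2 accumulates running vs1 values, and vs1_save collects one copy of vs1 per iteration so the block update's n*s1_in term can be folded in at the end. The scalar recurrence being vectorized, as a sketch (mod-65521 reductions omitted):

    /* Plain C form of the Adler-32 inner loop. */
    static void adler_block(unsigned int *s1, unsigned int *s2,
                            const unsigned char *p, unsigned long n) {
      unsigned long i;
      for (i = 0; i < n; i++) {
        *s1 += p[i]; /* vs1 = vec_add(vsum1, vs1) */
        *s2 += *s1;  /* vs2 accumulates; vs1_save supplies n * s1_in */
      }
    }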
|
/external/libpng/powerpc/ |
D | filter_vsx_intrinsics.c |
    81 rp_vec = vec_add(rp_vec,pp_vec); in png_read_filter_row_up_vsx()
    208 rp_vec = vec_add(rp_vec,part_vec); in png_read_filter_row_sub4_vsx()
    211 rp_vec = vec_add(rp_vec,part_vec); in png_read_filter_row_sub4_vsx()
    214 rp_vec = vec_add(rp_vec,part_vec); in png_read_filter_row_sub4_vsx()
    265 rp_vec = vec_add(rp_vec,part_vec); in png_read_filter_row_sub3_vsx()
    268 rp_vec = vec_add(rp_vec,part_vec); in png_read_filter_row_sub3_vsx()
    271 rp_vec = vec_add(rp_vec,part_vec); in png_read_filter_row_sub3_vsx()
    274 rp_vec = vec_add(rp_vec,part_vec); in png_read_filter_row_sub3_vsx()
    351 rp_vec = vec_add(rp_vec,avg_vec); in png_read_filter_row_avg4_vsx()
    357 rp_vec = vec_add(rp_vec,avg_vec); in png_read_filter_row_avg4_vsx()
    [all …]
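The line-81 hit is essentially the whole PNG Up filter kernel: output byte = raw byte + byte above, 16 lanes per vec_add, wrapping mod 256. A minimal sketch assuming 16-byte-aligned rows (the real code handles arbitrary alignment and row lengths):

    #include <altivec.h>

    /* PNG Up filter for one 16-byte chunk: row[i] += prev_row[i]. */
    static void up_filter16(unsigned char *row, const unsigned char *prev) {
      vector unsigned char rp_vec = vec_ld(0, row);
      const vector unsigned char pp_vec = vec_ld(0, prev);
      rp_vec = vec_add(rp_vec, pp_vec); /* per-byte add, wraps mod 256 */
      vec_st(rp_vec, 0, row);
    }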
|
/external/llvm-project/clang/lib/Headers/ppc_wrappers/ |
D | pmmintrin.h |
    51 return (__m128) vec_add (__X, even_neg_Y); in _mm_addsub_ps()
    59 return (__m128d) vec_add (__X, even_neg_Y); in _mm_addsub_pd()
    77 return (__m128) vec_add (vec_perm ((__v4sf) __X, (__v4sf) __Y, xform2), in _mm_hadd_ps()
    103 return (__m128d) vec_add (vec_mergeh ((__v2df) __X, (__v2df)__Y), in _mm_hadd_pd()
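_mm_hadd_ps is emulated by gathering the even-indexed lanes of X|Y with one vec_perm, the odd-indexed lanes with another, and adding. A sketch of the idea; the masks are my own and use big-endian byte numbering, not the header's actual xform constants:

    #include <altivec.h>

    /* hadd(x, y) = (x0+x1, x2+x3, y0+y1, y2+y3). */
    static vector float hadd_ps(vector float x, vector float y) {
      const vector unsigned char even = { 0, 1, 2, 3, 8, 9, 10, 11,
                                          16, 17, 18, 19, 24, 25, 26, 27 };
      const vector unsigned char odd = { 4, 5, 6, 7, 12, 13, 14, 15,
                                         20, 21, 22, 23, 28, 29, 30, 31 };
      return vec_add(vec_perm(x, y, even), vec_perm(x, y, odd));
    }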
|
D | tmmintrin.h |
    167 return (__m128i) vec_add (__C, __D); in _mm_hadd_epi16()
    180 return (__m128i) vec_add (__C, __D); in _mm_hadd_epi32()
    194 __C = vec_add (__C, __D); in _mm_hadd_pi16()
    209 __C = vec_add (__C, __D); in _mm_hadd_pi32()
    350 __v16qi __conv = vec_add (__selectneg, __selectpos); in _mm_sign_epi8()
    362 __v8hi __conv = vec_add (__selectneg, __selectpos); in _mm_sign_epi16()
    374 __v4si __conv = vec_add (__selectneg, __selectpos); in _mm_sign_epi32()
    466 __C = vec_add (__C, __ones); in _mm_mulhrs_epi16()
    468 __D = vec_add (__D, __ones); in _mm_mulhrs_epi16()
    485 __C = vec_add (__C, __ones); in _mm_mulhrs_pi16()
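The vec_add(__C, __ones) hits in _mm_mulhrs_epi16 are the +1 of SSSE3's round-to-nearest: the 32-bit product is shifted right by 14, incremented, then shifted right once more. Per-lane scalar equivalent as a sketch:

    #include <stdint.h>

    /* SSSE3 _mm_mulhrs_epi16, one lane. */
    static int16_t mulhrs_scalar(int16_t a, int16_t b) {
      int32_t t = ((int32_t)a * (int32_t)b) >> 14; /* scaled product, one rounding bit left */
      return (int16_t)((t + 1) >> 1);              /* the vec_add(__C, __ones), then >> 1 */
    }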
|
/external/llvm-project/clang-tools-extra/test/clang-tidy/checkers/ |
D | portability-simd-intrinsics-ppc.cpp |
    7 vector int vec_add(vector int, vector int);
    12 vec_add(i0, i1); in PPC()
|
/external/clang/test/CodeGen/ |
D | builtins-ppc-quadword.c |
    28 res_vlll = vec_add(vlll, vlll); in test1()
    33 res_vulll = vec_add(vulll, vulll); in test1()
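These tests cover the quadword overloads, where vec_add operates on a single 128-bit lane (the POWER8 vadduqm instruction). Sketch, assuming a POWER8 or later target:

    #include <altivec.h>

    /* One 128-bit addition per register. */
    static vector signed __int128 add_quad(vector signed __int128 a,
                                           vector signed __int128 b) {
      return vec_add(a, b);
    }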
|
D | builtins-ppc-altivec.c |
    111 res_vsc = vec_add(vsc, vsc); in test1()
    115 res_vsc = vec_add(vbc, vsc); in test1()
    119 res_vsc = vec_add(vsc, vbc); in test1()
    123 res_vuc = vec_add(vuc, vuc); in test1()
    127 res_vuc = vec_add(vbc, vuc); in test1()
    131 res_vuc = vec_add(vuc, vbc); in test1()
    135 res_vs = vec_add(vs, vs); in test1()
    139 res_vs = vec_add(vbs, vs); in test1()
    143 res_vs = vec_add(vs, vbs); in test1()
    147 res_vus = vec_add(vus, vus); in test1()
    [all …]
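The test enumerates vec_add's overload set: operands of the same element type, or one operand replaced by the matching bool vector. A compilable sampler of the char-typed cases shown above:

    #include <altivec.h>

    static void overload_demo(void) {
      vector signed char vsc = vec_splats((signed char)1);
      vector bool char vbc = (vector bool char)vec_splats((unsigned char)0xFF);
      vector signed char r0 = vec_add(vsc, vsc); /* sc + sc */
      vector signed char r1 = vec_add(vbc, vsc); /* bool char + sc */
      vector signed char r2 = vec_add(vsc, vbc); /* sc + bool char */
      (void)r0;
      (void)r1;
      (void)r2;
    }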
|
/external/libaom/libaom/av1/common/ppc/ |
D | cfl_ppc.c |
    67 int32x4_t sum_32x4 = vec_add(sum_32x4_0, sum_32x4_1); in subtract_average_vsx()
    70 sum_32x4 = vec_add(sum_32x4, perm_64); in subtract_average_vsx()
    72 sum_32x4 = vec_add(sum_32x4, perm_32); in subtract_average_vsx()
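The perm_64/perm_32 adds are a log2-step horizontal reduction of four 32-bit partial sums. An equivalent sketch using vec_sld rotations in place of the file's vec_perm masks (assumed equivalent):

    #include <altivec.h>

    /* Horizontal sum of four 32-bit lanes; total ends up in every lane. */
    static vector signed int hsum_s32(vector signed int v) {
      v = vec_add(v, vec_sld(v, v, 8)); /* rotate 8 bytes, add pairs */
      v = vec_add(v, vec_sld(v, v, 4)); /* rotate 4 bytes, add the rest */
      return v;
    }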
|
/external/llvm-project/clang/test/CodeGen/ |
D | builtins-ppc-quadword.c |
    34 res_vlll = vec_add(vlll, vlll); in test1()
    39 res_vulll = vec_add(vulll, vulll); in test1()
|
D | builtins-ppc-altivec.c |
    149 res_vsc = vec_add(vsc, vsc); in test1()
    153 res_vsc = vec_add(vbc, vsc); in test1()
    157 res_vsc = vec_add(vsc, vbc); in test1()
    161 res_vuc = vec_add(vuc, vuc); in test1()
    165 res_vuc = vec_add(vbc, vuc); in test1()
    169 res_vuc = vec_add(vuc, vbc); in test1()
    173 res_vs = vec_add(vs, vs); in test1()
    177 res_vs = vec_add(vbs, vs); in test1()
    181 res_vs = vec_add(vs, vbs); in test1()
    185 res_vus = vec_add(vus, vus); in test1()
    [all …]
|
/external/libvpx/libvpx/vp9/encoder/ppc/ |
D | vp9_quantize_vsx.c |
    30 return vec_xor(vec_add(a, mask), mask); in vec_sign()
    160 dqcoeffe = vec_add(dqcoeffe, vec_is_neg(dqcoeffe)); in dequantize_coeff_32()
    161 dqcoeffo = vec_add(dqcoeffo, vec_is_neg(dqcoeffo)); in dequantize_coeff_32()
    202 round = vec_sra(vec_add(round, vec_ones_s16), vec_ones_u16); in vp9_quantize_fp_32x32_vsx()
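Line 202 is the vector form of libvpx's ROUND_POWER_OF_TWO(x, 1): the 32x32 quantizer halves its rounding constants with round-to-nearest. Scalar equivalent:

    #include <stdint.h>

    /* (x + 1) >> 1: halve with round-to-nearest. */
    static int16_t halve_rounded(int16_t x) {
      return (int16_t)((x + 1) >> 1);
    }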
|
/external/eigen/Eigen/src/Core/arch/AltiVec/ |
D | MathFunctions.h |
    189 emm0 = vec_add(emm0, p4i_0x7f);
    287 emm0 = vec_add(emm0, p2l_1023);
    297 emm04i = vec_add(emm04i, p4i_1023);
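In Eigen's exp() kernels these adds apply the IEEE-754 exponent bias (127 for binary32 via p4i_0x7f, 1023 for binary64) to an integer exponent before it is shifted into the exponent field to materialize 2^n for the final scaling. A float sketch of that construction:

    #include <altivec.h>

    /* Build 2^n per lane by biasing the exponent and shifting it into
       bits 30..23; valid for the normal range of binary32. */
    static vector float pow2n(vector signed int n) {
      const vector signed int bias = vec_splats(127);
      const vector unsigned int mant_bits = vec_splats(23u);
      return (vector float)vec_sl(vec_add(n, bias), mant_bits);
    }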
|
/external/pffft/simd/ |
D | pf_altivec_float.h | 54 # define VADD(a,b) vec_add(a,b)
|
/external/llvm-project/clang-tools-extra/docs/clang-tidy/checks/ |
D | portability-simd-intrinsics.rst | 14 vec_add(a, b); // Power
|
/external/clang/test/Parser/ |
D | cxx-altivec.cpp | 174 result.xyzw = vec_add(lhs.xyzw, rhs.xyzw); in Add()
|
/external/llvm-project/clang/test/Parser/ |
D | cxx-altivec.cpp | 190 result.xyzw = vec_add(lhs.xyzw, rhs.xyzw); in Add()
|