/third_party/ffmpeg/libavcodec/ppc/
h264dsp.c:
     91: vec_u8 va_u8;  in h264_idct_add_altivec()
     95: vec_u8 vdst, vdst_orig;  in h264_idct_add_altivec()
     96: vec_u8 vdst_mask = vec_lvsl(0, dst);  in h264_idct_add_altivec()
    185: vec_u8 hv = vec_ld( 0, d ); \
    186: vec_u8 lv = vec_ld( 7, d ); \
    187: vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv );
    189: vec_u8 edgehv; \
    190: vec_u8 bodyv = vec_perm( idstsum8, idstsum8, perm_stv ); \
    191: vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv ); \
    201: #define dstv_load(d) vec_u8 dstv = vec_vsx_ld(0, d)
    [all …]
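Lines 185–187 and line 201 above are the same destination load written for two ISA levels: classic AltiVec needs two aligned loads plus a permute, while VSX can load from an unaligned address directly. A minimal sketch of the contrast (the function names are illustrative, not ffmpeg's; `__VSX__` is the compiler's predefined VSX macro):

```c
#include <altivec.h>
#include <stdint.h>

#define vec_u8 vector unsigned char

/* Classic AltiVec: vec_ld rounds the address down to a 16-byte
 * boundary, so two loads bracketing the wanted bytes plus a
 * vec_lvsl()-derived permute are needed, as on lines 185-187. */
static vec_u8 load_dst_altivec(const uint8_t *d)
{
    vec_u8 perm_ldv = vec_lvsl(0, d);
    vec_u8 hv = vec_ld(0, d);
    vec_u8 lv = vec_ld(7, d);
    return vec_perm(hv, lv, perm_ldv);
}

#ifdef __VSX__
/* VSX: vec_vsx_ld tolerates unaligned addresses, so one load
 * replaces the whole sequence -- which is what the dstv_load()
 * macro on line 201 does. */
static vec_u8 load_dst_vsx(const uint8_t *d)
{
    return vec_vsx_ld(0, d);
}
#endif
```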
h264chroma_template.c:
     27: vsrc2ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc2uc);\
     28: vsrc3ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc3uc);\
     38: ppsum = (vec_u8)vec_pack(psum, psum);\
     53: vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);\
     54: vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);\
     61: ppsum = (vec_u8)vec_pack(psum, psum);\
     76: vec_u8 vsrcCuc, vsrcDuc; \
     86: vec_u8 vsrcCuc, vsrcDuc; \
    122: vec_u8 fperm;  in PREFIX_h264_chroma_mc8_altivec()
    132: vec_u8 vsrcperm0, vsrcperm1;  in PREFIX_h264_chroma_mc8_altivec()
    [all …]
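The VEC_MERGEH(zero_u8v, …) casts above are the standard AltiVec way to zero-extend unsigned bytes to signed 16-bit lanes before filter arithmetic: merging a zero vector in as every other byte yields 0x00NN per 16-bit lane. A minimal sketch, assuming big-endian lane order (ffmpeg's VEC_MERGEH wrapper exists precisely to swap the operands on little-endian builds):

```c
#include <altivec.h>

#define vec_u8  vector unsigned char
#define vec_s16 vector signed short

/* Zero-extend the high 8 bytes of src to 8 signed shorts.  On
 * big-endian AltiVec, vec_mergeh(zero, src) interleaves
 * zero[0], src[0], zero[1], src[1], ..., so each 16-bit lane reads
 * as 0x00NN -- the byte value, zero-extended. */
static inline vec_s16 widen_u8_to_s16(vec_u8 src)
{
    const vec_u8 zero = vec_splat_u8(0);
    return (vec_s16) vec_mergeh(zero, src);
}
```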
pixblockdsp.c:
     67: const vec_u8 zero = (const vec_u8)vec_splat_u8(0);  in get_pixels_altivec()
     70: vec_u8 perm = vec_lvsl(0, pixels);  in get_pixels_altivec()
     74: vec_u8 pixl = vec_ld(0, pixels);  in get_pixels_altivec()
     75: vec_u8 pixr = vec_ld(7, pixels);  in get_pixels_altivec()
     76: vec_u8 bytes = vec_perm(pixl, pixr, perm);  in get_pixels_altivec()
    157: vec_u8 perm;  in diff_pixels_altivec()
    158: const vec_u8 zero = (const vec_u8)vec_splat_u8(0);  in diff_pixels_altivec()
    166: vec_u8 pixl = vec_ld(0, s1);  in diff_pixels_altivec()
    167: vec_u8 pixr = vec_ld(15, s1);  in diff_pixels_altivec()
    168: vec_u8 bytes = vec_perm(pixl, pixr, perm);  in diff_pixels_altivec()
vp8dsp_altivec.c:
    103: vec_u8 align_vec0, align_vec8, permh0, permh8;  in put_vp8_epel_h_altivec_core()
    104: vec_u8 perm_6tap0, perm_6tap8, perml0, perml8;  in put_vp8_epel_h_altivec_core()
    105: vec_u8 b;  in put_vp8_epel_h_altivec_core()
    107: vec_u8 filt, a, pixh, pixl, outer;  in put_vp8_epel_h_altivec_core()
    111: vec_u8 perm_inner6 = { 1,2,3,4, 2,3,4,5, 3,4,5,6, 4,5,6,7 };  in put_vp8_epel_h_altivec_core()
    112: vec_u8 perm_inner4 = { 0,1,2,3, 1,2,3,4, 2,3,4,5, 3,4,5,6 };  in put_vp8_epel_h_altivec_core()
    113: vec_u8 perm_inner = is6tap ? perm_inner6 : perm_inner4;  in put_vp8_epel_h_altivec_core()
    114: vec_u8 perm_outer = { 4,9, 0,5, 5,10, 1,6, 6,11, 2,7, 7,12, 3,8 };  in put_vp8_epel_h_altivec_core()
    150: static const vec_u8 v_subpel_filters[7] =
    162: vec_u8 subpel_filter = v_subpel_filters[i]; \
    [all …]
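The perm_inner4/perm_inner6 constants on lines 111–112 gather four overlapping filter windows out of one source vector, so a single multiply-accumulate pass evaluates the FIR filter at four pixel positions at once. A runnable sketch of what the 4-tap permute produces (the sample values are made up):

```c
#include <altivec.h>
#include <stdio.h>

#define vec_u8 vector unsigned char

int main(void)
{
    vec_u8 src = { 10,11,12,13, 14,15,16,17, 18,19,20,21, 22,23,24,25 };
    /* Four overlapping 4-byte windows: bytes 0..3, 1..4, 2..5, 3..6. */
    vec_u8 perm_inner4 = { 0,1,2,3, 1,2,3,4, 2,3,4,5, 3,4,5,6 };
    vec_u8 windows = vec_perm(src, src, perm_inner4);

    /* Prints: 10 11 12 13 | 11 12 13 14 | 12 13 14 15 | 13 14 15 16 */
    for (int i = 0; i < 16; i++)
        printf("%d%s", ((unsigned char *) &windows)[i],
               (i % 4 == 3 && i != 15) ? " | " : " ");
    printf("\n");
    return 0;
}
```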
h264qpel_template.c:
     34: vec_u8 srcR1 = vec_ld(-2, s);\
     35: vec_u8 srcR2 = vec_ld(14, s);\
     54: vec_u8 srcR3 = vec_ld(30, s);\
     63: vec_u8 srcR3 = vec_ld(30, s);\
     72: vec_u8 srcR3 = vec_ld(30, s);\
     81: vec_u8 srcR3 = vec_ld(30, s);\
    111: vec_u8 permM2, permM1, permP0, permP1, permP2, permP3;  in PREFIX_h264_qpel16_h_lowpass_altivec()
    117: vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;  in PREFIX_h264_qpel16_h_lowpass_altivec()
    128: vec_u8 sum, fsum;  in PREFIX_h264_qpel16_h_lowpass_altivec()
    202: vec_u8 perm;  in PREFIX_h264_qpel16_v_lowpass_altivec()
    [all …]
idctdsp.c:
    174: vec_u8 tmp;  in idct_put_altivec()
    203: vec_u8 tmp;  in idct_add_altivec()
    205: vec_u8 perm0;  in idct_add_altivec()
    206: vec_u8 perm1;  in idct_add_altivec()
    207: vec_u8 p0, p1, p;  in idct_add_altivec()
    222: tmp2 = (vec_s16) vec_perm(tmp, (vec_u8) zero, prm);  in idct_add_altivec()
    226: tmp2 = (vec_s16) vec_mergeh(tmp, (vec_u8) zero)  in idct_add_altivec()
vp3dsp_altivec.c:
     37: static const vec_u8 interleave_high =
     40: static const vec_u8 interleave_high =
    125: vec_u8 t;  in vp3_idct_put_altivec()
    155: vec_u8 t, vdst;  in vp3_idct_add_altivec()
    157: vec_u8 vdst_mask = vec_mergeh(vec_splat_u8(-1), vec_lvsl(0, dst));  in vp3_idct_add_altivec()
h264qpel.c:
    218: vec_u8 a, b, d, mask_;  in put_pixels16_l2_altivec()
    220: vec_u8 tmp1, tmp2, mask, edges, align;  in put_pixels16_l2_altivec()
    258: vec_u8 a, b, d, mask_;  in avg_pixels16_l2_altivec()
    261: vec_u8 tmp1, tmp2, mask, edges, align;  in avg_pixels16_l2_altivec()
lossless_audiodsp_altivec.c:
     56: register vec_u8 align = vec_lvsl(0, v2);  in scalarproduct_and_madd_int16_altivec()
hevcdsp.c:
     39: static const vec_u8 mask[2] = {
/third_party/ffmpeg/libavutil/ppc/
util_altivec.h:
     34: #define vec_u8 vector unsigned char  (macro)
     45: #define LOAD_ZERO const vec_u8 zerov = vec_splat_u8( 0 )
     47: #define zero_u8v (vec_u8) zerov
     67: #define vcprm(a,b,c,d) (const vec_u8){WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d}
    129: static inline vec_u8 unaligned_load(int offset, const uint8_t *src)  in unaligned_load()
    131: register vec_u8 first = vec_ld(offset, src);  in unaligned_load()
    132: register vec_u8 second = vec_ld(offset + 15, src);  in unaligned_load()
    133: register vec_u8 mask = vec_lvsl(offset, src);  in unaligned_load()
    136: static inline vec_u8 load_with_perm_vec(int offset, const uint8_t *src, vec_u8 perm_vec)  in load_with_perm_vec()
    138: vec_u8 a = vec_ld(offset, src);  in load_with_perm_vec()
    [all …]
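unaligned_load() above is the canonical pre-VSX read from a possibly misaligned address: two aligned loads that bracket the wanted 16 bytes, then a permute built by vec_lvsl() that shifts them into place. A self-contained version (the listing truncates the body; the closing vec_perm is the only line the mask can be for):

```c
#include <altivec.h>
#include <stdint.h>

#define vec_u8 vector unsigned char

/* Two aligned loads covering offset..offset+15, then a permute that
 * shifts the wanted 16 bytes to the front.  vec_lvsl() encodes the
 * misalignment of src + offset as a shift mask. */
static inline vec_u8 unaligned_load(int offset, const uint8_t *src)
{
    register vec_u8 first  = vec_ld(offset, src);
    register vec_u8 second = vec_ld(offset + 15, src);
    register vec_u8 mask   = vec_lvsl(offset, src);
    return vec_perm(first, second, mask);
}
```

load_with_perm_vec() on line 136 is the same idea with the mask passed in by the caller, which pays off when many loads in a loop share one misalignment.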
float_dsp_vsx.c:
     45: const vec_u8 reverse = vcprm(3, 2, 1, 0);  in ff_vector_fmul_window_vsx()
float_dsp_altivec.c:
     44: const vec_u8 reverse = vcprm(3, 2, 1, 0);  in ff_vector_fmul_window_altivec()
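Both fmul_window variants build the same word-reversal constant with vcprm(3, 2, 1, 0) from util_altivec.h above. A self-contained sketch of what that constant is and does, assuming the WORD_n helpers expand to the byte indices 4n..4n+3 (their evident role next to vcprm):

```c
#include <altivec.h>

#define vec_u8 vector unsigned char
#define vec_f  vector float

/* WORD_n names the four bytes of 32-bit word n; vcprm() strings four
 * of them together into a byte-level permute vector that reorders
 * whole words. */
#define WORD_0 0x00, 0x01, 0x02, 0x03
#define WORD_1 0x04, 0x05, 0x06, 0x07
#define WORD_2 0x08, 0x09, 0x0a, 0x0b
#define WORD_3 0x0c, 0x0d, 0x0e, 0x0f
#define vcprm(a, b, c, d) \
    (const vec_u8){ WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d }

/* vcprm(3,2,1,0) therefore reverses the four floats of a vector,
 * letting ff_vector_fmul_window_*() traverse one operand backward. */
static inline vec_f reverse_floats(vec_f v)
{
    return vec_perm(v, v, vcprm(3, 2, 1, 0));
}
```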
/third_party/ffmpeg/libswscale/ppc/
swscale_vsx.c:
    109: vec_u8 vd;  in yuv2plane1_8_vsx()
    220: const vec_u8 vperm = (vec_u8) {0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15};  in yuv2planeX_nbps_vsx()
    486: tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
    488: tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
    497: tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
    499: tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
    508: tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
    510: tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
    519: tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
    521: tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
    [all …]
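The repeated mergeh/mergel pairs in the write-out macros above interleave the 16-bit lanes of two result vectors before they are stored. A minimal sketch of the pattern (the helper name is illustrative; vec_u16 is assumed to be vector unsigned short, matching the vec_u8 convention in util_altivec.h):

```c
#include <altivec.h>

#define vec_u8  vector unsigned char
#define vec_u16 vector unsigned short

/* vec_mergeh pairs the high halves of the two vectors
 * (out0[0], out1[0], out0[1], out1[1], ...) and vec_mergel the low
 * halves, so together they weave two result vectors into one
 * interleaved byte stream ready to store. */
static inline void interleave_u16(vec_u16 out0, vec_u16 out1,
                                  vec_u8 *lo, vec_u8 *hi)
{
    *lo = (vec_u8) vec_mergeh(out0, out1);
    *hi = (vec_u8) vec_mergel(out0, out1);
}
```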