Searched refs: vec_vsx_ld (Results 1 – 13 of 13) sorted by relevance

/external/libvpx/libvpx/vpx_dsp/ppc/
intrapred_vsx.c
16 const uint8x16_t d = vec_vsx_ld(0, above); in vpx_v_predictor_16x16_vsx()
27 const uint8x16_t d0 = vec_vsx_ld(0, above); in vpx_v_predictor_32x32_vsx()
28 const uint8x16_t d1 = vec_vsx_ld(16, above); in vpx_v_predictor_32x32_vsx()
42 const uint8x16_t d = vec_vsx_ld(0, left); in vpx_h_predictor_4x4_vsx()
50 vec_vsx_st(vec_sel(v0, vec_vsx_ld(0, dst), (uint8x16_t)mask4), 0, dst); in vpx_h_predictor_4x4_vsx()
52 vec_vsx_st(vec_sel(v1, vec_vsx_ld(0, dst), (uint8x16_t)mask4), 0, dst); in vpx_h_predictor_4x4_vsx()
54 vec_vsx_st(vec_sel(v2, vec_vsx_ld(0, dst), (uint8x16_t)mask4), 0, dst); in vpx_h_predictor_4x4_vsx()
56 vec_vsx_st(vec_sel(v3, vec_vsx_ld(0, dst), (uint8x16_t)mask4), 0, dst); in vpx_h_predictor_4x4_vsx()
61 const uint8x16_t d = vec_vsx_ld(0, left); in vpx_h_predictor_8x8_vsx()
74 vec_vsx_st(xxpermdi(v0, vec_vsx_ld(0, dst), 1), 0, dst); in vpx_h_predictor_8x8_vsx()
[all …]
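
The pattern behind these intrapred matches is compact enough to reconstruct: a vertical predictor loads the row above the block once with vec_vsx_ld and replicates it down every destination row with vec_vsx_st. A minimal sketch, assuming libvpx's uint8x16_t typedef from vpx_dsp/ppc/types_vsx.h; the function and parameter names are illustrative rather than copied from the file:

#include <altivec.h> /* build with -maltivec -mvsx (POWER7 or later) */
#include <stddef.h>
#include <stdint.h>

typedef __vector unsigned char uint8x16_t; /* as in types_vsx.h */

/* Vertical 16x16 prediction: copy the 16 pixels above the block into every
   row. vec_vsx_ld/vec_vsx_st tolerate unaligned addresses, unlike vec_ld. */
static void v_predictor_16x16(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above) {
  const uint8x16_t d = vec_vsx_ld(0, above);
  int i;
  for (i = 0; i < 16; i++, dst += stride) {
    vec_vsx_st(d, 0, dst);
  }
}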
variance_vsx.c
17 const uint32x4_t a0 = (uint32x4_t)vec_vsx_ld(0, a); in read4x2()
18 const uint32x4_t a1 = (uint32x4_t)vec_vsx_ld(0, a + stride); in read4x2()
47 const int16x8_t v = vec_vsx_ld(0, a + i); in vpx_get_mb_ss_vsx()
67 const uint8x16_t v = vec_avg(vec_vsx_ld(j, pred), vec_vsx_ld(j, ref)); in vpx_comp_avg_pred_vsx()
77 const uint8x16_t r0 = vec_vsx_ld(0, ref); in vpx_comp_avg_pred_vsx()
78 const uint8x16_t r1 = vec_vsx_ld(0, ref + ref_stride); in vpx_comp_avg_pred_vsx()
80 const uint8x16_t v = vec_avg(vec_vsx_ld(0, pred), r); in vpx_comp_avg_pred_vsx()
90 const uint32x4_t r0 = (uint32x4_t)vec_vsx_ld(0, ref); in vpx_comp_avg_pred_vsx()
91 const uint32x4_t r1 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride); in vpx_comp_avg_pred_vsx()
92 const uint32x4_t r2 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride * 2); in vpx_comp_avg_pred_vsx()
[all …]
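
The variance_vsx.c matches revolve around vec_avg, which computes the rounded byte-wise average (a + b + 1) >> 1. A sketch of the 16-pixel-wide case of the vpx_comp_avg_pred_vsx() pattern shown above, under the same typedef assumption as the previous sketch and with a simplified signature:

#include <altivec.h>
#include <stdint.h>

typedef __vector unsigned char uint8x16_t;

/* comp[i] = (pred[i] + ref[i] + 1) >> 1, sixteen pixels per iteration.
   pred and comp are assumed contiguous here; ref advances by its stride. */
static void comp_avg_pred_w16(uint8_t *comp, const uint8_t *pred,
                              const uint8_t *ref, int ref_stride, int height) {
  int i;
  for (i = 0; i < height; i++) {
    const uint8x16_t v = vec_avg(vec_vsx_ld(0, pred), vec_vsx_ld(0, ref));
    vec_vsx_st(v, 0, comp);
    comp += 16;
    pred += 16;
    ref += ref_stride;
  }
}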
vpx_convolve_vsx.c
22 vec_vsx_st(vec_vsx_ld(0, src), 0, dst); in copy_w16()
33 vec_vsx_st(vec_vsx_ld(0, src), 0, dst); in copy_w32()
34 vec_vsx_st(vec_vsx_ld(16, src), 16, dst); in copy_w32()
45 vec_vsx_st(vec_vsx_ld(0, src), 0, dst); in copy_w64()
46 vec_vsx_st(vec_vsx_ld(16, src), 16, dst); in copy_w64()
47 vec_vsx_st(vec_vsx_ld(32, src), 32, dst); in copy_w64()
48 vec_vsx_st(vec_vsx_ld(48, src), 48, dst); in copy_w64()
94 const uint8x16_t v = vec_avg(vec_vsx_ld(0, src), vec_vsx_ld(0, dst)); in avg_w16()
106 const uint8x16_t v0 = vec_avg(vec_vsx_ld(0, src), vec_vsx_ld(0, dst)); in avg_w32()
107 const uint8x16_t v1 = vec_avg(vec_vsx_ld(16, src), vec_vsx_ld(16, dst)); in avg_w32()
[all …]
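
vpx_convolve_vsx.c uses the same load/store pairing for straight copies (copy_w16/w32/w64) and for averaging (avg_w16/w32). The per-row stride handling is elided from the matches, so the loop below is an assumption; otherwise this mirrors the copy_w32() lines directly:

#include <altivec.h>
#include <stddef.h>
#include <stdint.h>

/* 32-pixel-wide copy: two unaligned 16-byte loads and stores per row. */
static void copy_w32(const uint8_t *src, ptrdiff_t src_stride,
                     uint8_t *dst, ptrdiff_t dst_stride, int h) {
  int i;
  for (i = 0; i < h; i++) {
    vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
    vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
    src += src_stride;
    dst += dst_stride;
  }
}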
hadamard_vsx.c
49 v[0] = vec_vsx_ld(0, src_diff); in vpx_hadamard_8x8_vsx()
50 v[1] = vec_vsx_ld(0, src_diff + src_stride); in vpx_hadamard_8x8_vsx()
51 v[2] = vec_vsx_ld(0, src_diff + (2 * src_stride)); in vpx_hadamard_8x8_vsx()
52 v[3] = vec_vsx_ld(0, src_diff + (3 * src_stride)); in vpx_hadamard_8x8_vsx()
53 v[4] = vec_vsx_ld(0, src_diff + (4 * src_stride)); in vpx_hadamard_8x8_vsx()
54 v[5] = vec_vsx_ld(0, src_diff + (5 * src_stride)); in vpx_hadamard_8x8_vsx()
55 v[6] = vec_vsx_ld(0, src_diff + (6 * src_stride)); in vpx_hadamard_8x8_vsx()
56 v[7] = vec_vsx_ld(0, src_diff + (7 * src_stride)); in vpx_hadamard_8x8_vsx()
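
These eight lines are the load preamble of vpx_hadamard_8x8_vsx(): one unaligned 16-byte load per row gathers an 8x8 block of 16-bit residuals ahead of the butterfly stages. libvpx writes the loads out flat; an equivalent loop, assuming the int16x8_t typedef from types_vsx.h and a stride measured in elements:

#include <altivec.h>
#include <stddef.h>
#include <stdint.h>

typedef __vector signed short int16x8_t; /* as in types_vsx.h */

/* Gather the eight rows of an 8x8 residual block into v[0..7]. */
static void load_8x8_rows(const int16_t *src_diff, ptrdiff_t src_stride,
                          int16x8_t v[8]) {
  int i;
  for (i = 0; i < 8; i++) {
    v[i] = vec_vsx_ld(0, src_diff + i * src_stride);
  }
}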
bitdepth_conversion_vsx.h
23 int32x4_t u = vec_vsx_ld(c, s); in load_tran_low()
24 int32x4_t v = vec_vsx_ld(c, s + 4); in load_tran_low()
27 return vec_vsx_ld(c, s); in load_tran_low()
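
bitdepth_conversion_vsx.h reconciles tran_low_t with the 16-bit vector pipeline: when CONFIG_VP9_HIGHBITDEPTH makes tran_low_t 32-bit, the two int32x4 loads (lines 23-24) must be narrowed into one int16x8; otherwise line 27 returns a single 16-bit load directly. The narrowing step itself is elided from the matches, so the vec_packs below is an assumption, though coefficients fit in 16 bits and a saturating pack is the natural fit:

#include <altivec.h>
#include <stdint.h>

typedef __vector signed int int32x4_t;
typedef __vector signed short int16x8_t;

/* High-bitdepth variant: load four 32-bit coefficients twice, then
   saturating-pack the eight values down to 16-bit lanes. */
static int16x8_t load_tran_low_hbd(int c, const int32_t *s) {
  const int32x4_t u = vec_vsx_ld(c, s);
  const int32x4_t v = vec_vsx_ld(c, s + 4);
  return vec_packs(u, v);
}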
sad_vsx.c
18 v_a = vec_vsx_ld(offset, a); \
19 v_b = vec_vsx_ld(offset, b); \
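
sad_vsx.c hides the two loads in a macro so each unrolled step can difference and accumulate one 16-byte chunk. The macro body beyond the loads is elided above, so the absolute-difference idiom and the vec_sum4s accumulator below are assumptions about the rest of the step:

#include <altivec.h>
#include <stdint.h>

typedef __vector unsigned char uint8x16_t;
typedef __vector unsigned int uint32x4_t;

/* One 16-pixel SAD step: per-byte |a - b| via max/min (no dedicated
   absolute-difference instruction required), then vec_sum4s folds the
   16 byte differences into four running 32-bit partial sums. */
static uint32x4_t sad16_step(const uint8_t *a, const uint8_t *b,
                             int offset, uint32x4_t acc) {
  const uint8x16_t v_a = vec_vsx_ld(offset, a);
  const uint8x16_t v_b = vec_vsx_ld(offset, b);
  const uint8x16_t ad = vec_sub(vec_max(v_a, v_b), vec_min(v_a, v_b));
  return vec_sum4s(ad, acc);
}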
/external/clang/test/CodeGen/
builtins-ppc-vsx.c
339 res_vbi = vec_vsx_ld(0, &vbi); in test1()
343 res_vsi = vec_vsx_ld(0, &vsi); in test1()
347 res_vsi = vec_vsx_ld(0, asi); in test1()
351 res_vui = vec_vsx_ld(0, &vui); in test1()
355 res_vui = vec_vsx_ld(0, aui); in test1()
359 res_vf = vec_vsx_ld (0, &vf); in test1()
363 res_vf = vec_vsx_ld (0, af); in test1()
367 res_vsll = vec_vsx_ld(0, &vsll); in test1()
371 res_vull = vec_vsx_ld(0, &vull); in test1()
375 res_vd = vec_vsx_ld(0, &vd); in test1()
[all …]
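
This test exercises the whole overload set rather than a single function: the same vec_vsx_ld spelling type-checks against int, float, long long, and double operands, and clang lowers the 4-byte-element forms to the @llvm.ppc.vsx.lxvw4x intrinsic and the 8-byte-element forms to @llvm.ppc.vsx.lxvd2x. A minimal compilable illustration of the dispatch (these wrappers are hypothetical, not the test's CHECK lines):

#include <altivec.h>

/* One name, three element types; resolution follows the pointer type. */
vector signed int load_vsi(const signed int *p) { return vec_vsx_ld(0, p); }
vector float load_vf(const float *p) { return vec_vsx_ld(0, p); }
vector double load_vd(const double *p) { return vec_vsx_ld(0, p); }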
/external/libjpeg-turbo/simd/
jcgryext-altivec.c
139 rgb0 = vec_vsx_ld(0, inptr); in jsimd_rgb_gray_convert_altivec()
141 rgb1 = vec_vsx_ld(16, inptr); in jsimd_rgb_gray_convert_altivec()
143 rgb2 = vec_vsx_ld(32, inptr); in jsimd_rgb_gray_convert_altivec()
146 rgb3 = vec_vsx_ld(48, inptr); in jsimd_rgb_gray_convert_altivec()
jsimd_altivec.h
94 #define VEC_LD(a, b) vec_vsx_ld(a, b)
jquanti-altivec.c
45 in##row = vec_vsx_ld(0, elemptr); \
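
libjpeg-turbo funnels loads through the VEC_LD macro, so kernels such as jquanti-altivec.c's row loader stay agnostic about which intrinsic sits underneath; line 94 shows the VSX mapping. The other arm of the abstraction does not appear in these matches, so the gating condition below is an assumption about how such a fallback is typically written:

#include <altivec.h>

/* Unaligned vec_vsx_ld where VSX exists; otherwise the classic AltiVec
   vec_ld, which requires (and silently enforces) 16-byte alignment.
   The #ifdef is illustrative, not libjpeg-turbo's actual condition. */
#ifdef __VSX__
#define VEC_LD(a, b) vec_vsx_ld(a, b)
#else
#define VEC_LD(a, b) vec_ld(a, b)
#endif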
/external/eigen/Eigen/src/Core/arch/AltiVec/
PacketMath.h
243 return vec_vsx_ld(0, from);
253 return vec_vsx_ld(0, from);
441 return (Packet4i) vec_vsx_ld((long)from & 15, (const int*) _EIGEN_ALIGNED_PTR(from));
446 return (Packet4f) vec_vsx_ld((long)from & 15, (const float*) _EIGEN_ALIGNED_PTR(from));
849 return vec_vsx_ld(0, from);
932 return (Packet2d) vec_vsx_ld((long)from & 15, (const double*) _EIGEN_ALIGNED_PTR(from));
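
Two distinct uses appear in Eigen's PacketMath.h: straightforward offset-0 loads (lines 243/253/849) and an unaligned-load idiom at lines 441/446/932. The idiom works because vec_vsx_ld adds its integer offset to the pointer without masking any low bits, so splitting an arbitrary address into a 16-byte-aligned base (what _EIGEN_ALIGNED_PTR yields) plus addr & 15 reconstructs the original address exactly. A standalone sketch with a stand-in for the Eigen macro:

#include <altivec.h>
#include <stdint.h>

/* Stand-in for _EIGEN_ALIGNED_PTR: round the address down to 16 bytes. */
#define ALIGNED_DOWN_F(p) ((const float *)((uintptr_t)(p) & ~(uintptr_t)15))

/* Unaligned load of four floats: base + (addr & 15) == addr, and
   vec_vsx_ld performs the unaligned access directly. */
static __vector float ploadu_float(const float *from) {
  return vec_vsx_ld((int)((uintptr_t)from & 15), ALIGNED_DOWN_F(from));
}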
/external/boringssl/src/crypto/fipsmodule/sha/
sha1-altivec.c
109 vec_vsx_ld(0, (const unsigned char*) data); in sched_00_15()
/external/clang/lib/Headers/
altivec.h
10264 vec_vsx_ld(int __a, const vector bool int *__b) { in vec_vsx_ld() function
10269 vec_vsx_ld(int __a, const vector signed int *__b) { in vec_vsx_ld() function
10274 vec_vsx_ld(int __a, const signed int *__b) { in vec_vsx_ld() function
10279 vec_vsx_ld(int __a, const vector unsigned int *__b) { in vec_vsx_ld() function
10284 vec_vsx_ld(int __a, const unsigned int *__b) { in vec_vsx_ld() function
10289 vec_vsx_ld(int __a, const vector float *__b) { in vec_vsx_ld() function
10293 static __inline__ vector float __ATTRS_o_ai vec_vsx_ld(int __a, in vec_vsx_ld() function
10299 vec_vsx_ld(int __a, const vector signed long long *__b) { in vec_vsx_ld() function
10304 vec_vsx_ld(int __a, const vector unsigned long long *__b) { in vec_vsx_ld() function
10309 vec_vsx_ld(int __a, const vector double *__b) { in vec_vsx_ld() function
[all …]
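
The header achieves this C-level overloading with clang's overloadable attribute: __ATTRS_o_ai expands to __attribute__((__overloadable__, __always_inline__)), which is what lets a dozen definitions share the vec_vsx_ld name and still inline away. The same mechanism, shown on a hypothetical wrapper (clang-only):

#include <altivec.h>

#define MY_ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))

/* Two wrappers sharing one name; clang picks the definition whose
   parameter types match the call site. */
static __inline__ vector signed int MY_ATTRS_o_ai
my_vsx_ld(int a, const signed int *b) {
  return vec_vsx_ld(a, b);
}

static __inline__ vector double MY_ATTRS_o_ai
my_vsx_ld(int a, const double *b) {
  return vec_vsx_ld(a, b);
}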