Searched refs:vec_vsx_ld (Results 1 – 22 of 22) sorted by relevance
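
All hits below share the same calling convention: vec_vsx_ld(offset, base) loads 16 bytes from base + offset and, unlike the older vec_ld, does not round the effective address down to a 16-byte boundary, so unaligned access is fine. A minimal sketch (load16_any is an illustrative name; assumes a VSX-enabled compile, e.g. -mvsx):

#include <altivec.h>
#include <stdint.h>

/* Load 16 bytes starting at src + offset; the effective address
   need not be 16-byte aligned. */
static vector unsigned char load16_any(const uint8_t *src, int offset) {
  return vec_vsx_ld(offset, src);
}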

/external/libaom/libaom/av1/common/ppc/
cfl_ppc.c 51 sum_32x4_0 = vec_sum4s(vec_vsx_ld(OFF_0, sum_buf), sum_32x4_0); in subtract_average_vsx()
52 sum_32x4_1 = vec_sum4s(vec_vsx_ld(OFF_0 + CFL_LINE_1, sum_buf), sum_32x4_1); in subtract_average_vsx()
54 sum_32x4_0 = vec_sum4s(vec_vsx_ld(OFF_1, sum_buf), sum_32x4_0); in subtract_average_vsx()
56 vec_sum4s(vec_vsx_ld(OFF_1 + CFL_LINE_1, sum_buf), sum_32x4_1); in subtract_average_vsx()
59 sum_32x4_0 = vec_sum4s(vec_vsx_ld(OFF_2, sum_buf), sum_32x4_0); in subtract_average_vsx()
61 vec_sum4s(vec_vsx_ld(OFF_2 + CFL_LINE_1, sum_buf), sum_32x4_1); in subtract_average_vsx()
62 sum_32x4_0 = vec_sum4s(vec_vsx_ld(OFF_3, sum_buf), sum_32x4_0); in subtract_average_vsx()
64 vec_sum4s(vec_vsx_ld(OFF_3 + CFL_LINE_1, sum_buf), sum_32x4_1); in subtract_average_vsx()
76 vec_vsx_st(vec_sub(vec_vsx_ld(OFF_0, dst), vec_avg), OFF_0, dst); in subtract_average_vsx()
77 vec_vsx_st(vec_sub(vec_vsx_ld(OFF_0 + CFL_LINE_1, dst), vec_avg), in subtract_average_vsx()
[all …]
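
The cfl_ppc.c hits accumulate byte sums with vec_sum4s, which adds each group of four adjacent bytes into the matching 32-bit lane of an accumulator. A hedged sketch of that reduction pattern (simplified, not libaom's actual kernel; sum_bytes_vsx is an illustrative name):

#include <altivec.h>
#include <stdint.h>

/* Sum n bytes (n a multiple of 16) via per-lane partial sums. */
static uint32_t sum_bytes_vsx(const uint8_t *buf, int n) {
  vector unsigned int acc = vec_splat_u32(0);
  int i;
  for (i = 0; i < n; i += 16) {
    acc = vec_sum4s(vec_vsx_ld(i, buf), acc);
  }
  /* Horizontal add of the four 32-bit partial sums. */
  return acc[0] + acc[1] + acc[2] + acc[3];
}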
/external/libvpx/libvpx/vpx_dsp/ppc/
intrapred_vsx.c 16 const uint8x16_t d = vec_vsx_ld(0, above); in vpx_v_predictor_16x16_vsx()
27 const uint8x16_t d0 = vec_vsx_ld(0, above); in vpx_v_predictor_32x32_vsx()
28 const uint8x16_t d1 = vec_vsx_ld(16, above); in vpx_v_predictor_32x32_vsx()
42 const uint8x16_t d = vec_vsx_ld(0, left); in vpx_h_predictor_4x4_vsx()
50 vec_vsx_st(vec_sel(v0, vec_vsx_ld(0, dst), (uint8x16_t)mask4), 0, dst); in vpx_h_predictor_4x4_vsx()
52 vec_vsx_st(vec_sel(v1, vec_vsx_ld(0, dst), (uint8x16_t)mask4), 0, dst); in vpx_h_predictor_4x4_vsx()
54 vec_vsx_st(vec_sel(v2, vec_vsx_ld(0, dst), (uint8x16_t)mask4), 0, dst); in vpx_h_predictor_4x4_vsx()
56 vec_vsx_st(vec_sel(v3, vec_vsx_ld(0, dst), (uint8x16_t)mask4), 0, dst); in vpx_h_predictor_4x4_vsx()
61 const uint8x16_t d = vec_vsx_ld(0, left); in vpx_h_predictor_8x8_vsx()
74 vec_vsx_st(xxpermdi(v0, vec_vsx_ld(0, dst), 1), 0, dst); in vpx_h_predictor_8x8_vsx()
[all …]
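
The intrapred hits show how a store narrower than 16 bytes is emulated: load the destination, blend the new lanes in with vec_sel, and store all 16 bytes back. A hedged sketch of the idea behind vpx_h_predictor_4x4_vsx (the mask layout and store4_vsx are illustrative, not libvpx's exact code):

#include <altivec.h>
#include <stdint.h>

typedef vector unsigned char uint8x16_t;

/* Emulated 4-byte store: lanes where the mask is all-ones keep the
   original dst bytes; lanes 0-3 take the new values from v. */
static void store4_vsx(uint8_t *dst, uint8x16_t v) {
  const uint8x16_t keep_old = {0,    0,    0,    0,
                               0xff, 0xff, 0xff, 0xff,
                               0xff, 0xff, 0xff, 0xff,
                               0xff, 0xff, 0xff, 0xff};
  const uint8x16_t old = vec_vsx_ld(0, dst);
  vec_vsx_st(vec_sel(v, old, keep_old), 0, dst);
}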
vpx_convolve_vsx.c 25 vec_vsx_st(vec_vsx_ld(0, src), 0, dst); in copy_w16()
37 vec_vsx_st(vec_vsx_ld(0, src), 0, dst); in copy_w32()
38 vec_vsx_st(vec_vsx_ld(16, src), 16, dst); in copy_w32()
50 vec_vsx_st(vec_vsx_ld(0, src), 0, dst); in copy_w64()
51 vec_vsx_st(vec_vsx_ld(16, src), 16, dst); in copy_w64()
52 vec_vsx_st(vec_vsx_ld(32, src), 32, dst); in copy_w64()
53 vec_vsx_st(vec_vsx_ld(48, src), 48, dst); in copy_w64()
100 const uint8x16_t v = vec_avg(vec_vsx_ld(0, src), vec_vsx_ld(0, dst)); in avg_w16()
113 const uint8x16_t v0 = vec_avg(vec_vsx_ld(0, src), vec_vsx_ld(0, dst)); in avg_w32()
114 const uint8x16_t v1 = vec_avg(vec_vsx_ld(16, src), vec_vsx_ld(16, dst)); in avg_w32()
[all …]
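
The copy_wN helpers are plain load/store pairs, one vector per 16 bytes of row width. A minimal sketch of the copy_w16 shape (copy_w16_sketch is an illustrative name; strides assumed to be in bytes):

#include <altivec.h>
#include <stdint.h>

/* 16-byte-wide block copy, one vector load/store per row. */
static void copy_w16_sketch(const uint8_t *src, int src_stride,
                            uint8_t *dst, int dst_stride, int h) {
  int i;
  for (i = 0; i < h; i++) {
    vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
    src += src_stride;
    dst += dst_stride;
  }
}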
subtract_vsx.c 24 const int16x8_t d0 = vec_vsx_ld(0, diff); in subtract_block4x4()
25 const int16x8_t d1 = vec_vsx_ld(0, diff + diff_stride); in subtract_block4x4()
26 const int16x8_t d2 = vec_vsx_ld(0, diff1); in subtract_block4x4()
27 const int16x8_t d3 = vec_vsx_ld(0, diff1 + diff_stride); in subtract_block4x4()
54 const uint8x16_t s0 = vec_vsx_ld(0, src + c); in vpx_subtract_block_vsx()
55 const uint8x16_t s1 = vec_vsx_ld(16, src + c); in vpx_subtract_block_vsx()
56 const uint8x16_t p0 = vec_vsx_ld(0, pred + c); in vpx_subtract_block_vsx()
57 const uint8x16_t p1 = vec_vsx_ld(16, pred + c); in vpx_subtract_block_vsx()
78 const uint8x16_t s0 = vec_vsx_ld(0, src); in vpx_subtract_block_vsx()
79 const uint8x16_t p0 = vec_vsx_ld(0, pred); in vpx_subtract_block_vsx()
[all …]
variance_vsx.c 43 const int16x8_t v = vec_vsx_ld(0, src_ptr + i); in vpx_get_mb_ss_vsx()
63 const uint8x16_t v = vec_avg(vec_vsx_ld(j, pred), vec_vsx_ld(j, ref)); in vpx_comp_avg_pred_vsx()
73 const uint8x16_t r0 = vec_vsx_ld(0, ref); in vpx_comp_avg_pred_vsx()
74 const uint8x16_t r1 = vec_vsx_ld(0, ref + ref_stride); in vpx_comp_avg_pred_vsx()
76 const uint8x16_t v = vec_avg(vec_vsx_ld(0, pred), r); in vpx_comp_avg_pred_vsx()
86 const uint32x4_t r0 = (uint32x4_t)vec_vsx_ld(0, ref); in vpx_comp_avg_pred_vsx()
87 const uint32x4_t r1 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride); in vpx_comp_avg_pred_vsx()
88 const uint32x4_t r2 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride * 2); in vpx_comp_avg_pred_vsx()
89 const uint32x4_t r3 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride * 3); in vpx_comp_avg_pred_vsx()
92 const uint8x16_t v = vec_avg(vec_vsx_ld(0, pred), r); in vpx_comp_avg_pred_vsx()
[all …]
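
vpx_comp_avg_pred_vsx pairs each load with vec_avg, which computes the rounded byte-wise average (a + b + 1) >> 1 per lane. A hedged one-row sketch (avg_row16 is an illustrative name):

#include <altivec.h>
#include <stdint.h>

/* comp[i] = (pred[i] + ref[i] + 1) >> 1 for one 16-byte row. */
static void avg_row16(uint8_t *comp, const uint8_t *pred,
                      const uint8_t *ref) {
  const vector unsigned char v =
      vec_avg(vec_vsx_ld(0, pred), vec_vsx_ld(0, ref));
  vec_vsx_st(v, 0, comp);
}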
quantize_vsx.c 83 int16x8_t scan = vec_vsx_ld(index, iscan_ptr); in nonzero_scanindex()
108 int16x8_t zbin = vec_vsx_ld(0, zbin_ptr); in vpx_quantize_b_vsx()
109 int16x8_t round = vec_vsx_ld(0, round_ptr); in vpx_quantize_b_vsx()
110 int16x8_t quant = vec_vsx_ld(0, quant_ptr); in vpx_quantize_b_vsx()
111 int16x8_t dequant = vec_vsx_ld(0, dequant_ptr); in vpx_quantize_b_vsx()
112 int16x8_t quant_shift = vec_vsx_ld(0, quant_shift_ptr); in vpx_quantize_b_vsx()
114 int16x8_t coeff0 = vec_vsx_ld(0, coeff_ptr); in vpx_quantize_b_vsx()
115 int16x8_t coeff1 = vec_vsx_ld(16, coeff_ptr); in vpx_quantize_b_vsx()
155 coeff0 = vec_vsx_ld(off0, coeff_ptr); in vpx_quantize_b_vsx()
156 coeff1 = vec_vsx_ld(off1, coeff_ptr); in vpx_quantize_b_vsx()
[all …]
fdct32x32_vsx.c 91 const int16x8_t l0 = vec_vsx_ld(0, a); in load()
92 const int16x8_t l1 = vec_vsx_ld(0, a + stride); in load()
93 const int16x8_t l2 = vec_vsx_ld(0, a + 2 * stride); in load()
94 const int16x8_t l3 = vec_vsx_ld(0, a + 3 * stride); in load()
95 const int16x8_t l4 = vec_vsx_ld(0, a + 4 * stride); in load()
96 const int16x8_t l5 = vec_vsx_ld(0, a + 5 * stride); in load()
97 const int16x8_t l6 = vec_vsx_ld(0, a + 6 * stride); in load()
98 const int16x8_t l7 = vec_vsx_ld(0, a + 7 * stride); in load()
100 const int16x8_t l8 = vec_vsx_ld(0, a + 8 * stride); in load()
101 const int16x8_t l9 = vec_vsx_ld(0, a + 9 * stride); in load()
[all …]
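
The fdct load() keeps the byte offset at 0 and advances the base pointer instead, because its stride is counted in int16 elements rather than bytes. A sketch of that strided-row load (load_8x8 is an illustrative name):

#include <altivec.h>
#include <stdint.h>

typedef vector signed short int16x8_t;

/* Load 8 rows of 8 coefficients; stride is in elements, so row k
   starts at a + k * stride and the byte offset stays 0. */
static void load_8x8(const int16_t *a, int stride, int16x8_t out[8]) {
  int k;
  for (k = 0; k < 8; k++) {
    out[k] = vec_vsx_ld(0, a + k * stride);
  }
}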
deblock_vsx.c 39 ctx[0] = vec_vsx_ld(col - 2 * stride, src); in vert_ctx()
40 ctx[1] = vec_vsx_ld(col - stride, src); in vert_ctx()
41 ctx[2] = vec_vsx_ld(col + stride, src); in vert_ctx()
42 ctx[3] = vec_vsx_ld(col + 2 * stride, src); in vert_ctx()
77 const uint8x16_t filter = vec_vsx_ld(col, f); in vpx_post_proc_down_and_across_mb_row_vsx()
78 v = vec_vsx_ld(col, src_ptr); in vpx_post_proc_down_and_across_mb_row_vsx()
84 const uint8x16_t filter = vec_vsx_ld(col, f); in vpx_post_proc_down_and_across_mb_row_vsx()
85 v = vec_vsx_ld(col, src_ptr); in vpx_post_proc_down_and_across_mb_row_vsx()
93 v = vec_vsx_ld(0, dst_ptr); in vpx_post_proc_down_and_across_mb_row_vsx()
95 const uint8x16_t filter = vec_vsx_ld(col, f); in vpx_post_proc_down_and_across_mb_row_vsx()
[all …]
hadamard_vsx.c 49 v[0] = vec_vsx_ld(0, src_diff); in vpx_hadamard_8x8_vsx()
50 v[1] = vec_vsx_ld(0, src_diff + src_stride); in vpx_hadamard_8x8_vsx()
51 v[2] = vec_vsx_ld(0, src_diff + (2 * src_stride)); in vpx_hadamard_8x8_vsx()
52 v[3] = vec_vsx_ld(0, src_diff + (3 * src_stride)); in vpx_hadamard_8x8_vsx()
53 v[4] = vec_vsx_ld(0, src_diff + (4 * src_stride)); in vpx_hadamard_8x8_vsx()
54 v[5] = vec_vsx_ld(0, src_diff + (5 * src_stride)); in vpx_hadamard_8x8_vsx()
55 v[6] = vec_vsx_ld(0, src_diff + (6 * src_stride)); in vpx_hadamard_8x8_vsx()
56 v[7] = vec_vsx_ld(0, src_diff + (7 * src_stride)); in vpx_hadamard_8x8_vsx()
bitdepth_conversion_vsx.h 23 int32x4_t u = vec_vsx_ld(c, s); in load_tran_low()
24 int32x4_t v = vec_vsx_ld(c, s + 4); in load_tran_low()
27 return vec_vsx_ld(c, s); in load_tran_low()
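
load_tran_low has two branches: with CONFIG_VP9_HIGHBITDEPTH, tran_low_t is 32-bit, so two int32x4 loads are narrowed to one int16x8; otherwise the single 16-bit load at line 27 suffices. A hedged reconstruction of the high-bit-depth shape (load_coeffs_32to16 is an illustrative name; the saturating vec_packs narrowing is an assumption based on the lane types shown):

#include <altivec.h>
#include <stdint.h>

typedef vector signed int int32x4_t;
typedef vector signed short int16x8_t;

/* Narrow eight 32-bit coefficients to 16 bits with saturation. */
static int16x8_t load_coeffs_32to16(int c, const int32_t *s) {
  const int32x4_t u = vec_vsx_ld(c, s);
  const int32x4_t v = vec_vsx_ld(c, s + 4);
  return vec_packs(u, v);
}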
sad_vsx.c 21 v_a = vec_vsx_ld(offset, a); \
22 v_b = vec_vsx_ld(offset, b); \
156 v_b = vec_vsx_ld(offset, ref); \
167 v_a = vec_vsx_ld(offset, src); \
types_vsx.h 72 const uint32x4_t a0 = (uint32x4_t)vec_vsx_ld(0, a); in read4x2()
73 const uint32x4_t a1 = (uint32x4_t)vec_vsx_ld(0, a + stride); in read4x2()
inv_txfm_vsx.c 184 uint8x16_t dest0 = vec_vsx_ld(0, dest); in vpx_round_store4x4_vsx()
185 uint8x16_t dest1 = vec_vsx_ld(stride, dest); in vpx_round_store4x4_vsx()
186 uint8x16_t dest2 = vec_vsx_ld(2 * stride, dest); in vpx_round_store4x4_vsx()
187 uint8x16_t dest3 = vec_vsx_ld(3 * stride, dest); in vpx_round_store4x4_vsx()
347 uint8x16_t dest0 = vec_vsx_ld(0, dest); in vpx_round_store8x8_vsx()
348 uint8x16_t dest1 = vec_vsx_ld(stride, dest); in vpx_round_store8x8_vsx()
349 uint8x16_t dest2 = vec_vsx_ld(2 * stride, dest); in vpx_round_store8x8_vsx()
350 uint8x16_t dest3 = vec_vsx_ld(3 * stride, dest); in vpx_round_store8x8_vsx()
351 uint8x16_t dest4 = vec_vsx_ld(4 * stride, dest); in vpx_round_store8x8_vsx()
352 uint8x16_t dest5 = vec_vsx_ld(5 * stride, dest); in vpx_round_store8x8_vsx()
[all …]
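
Because the offset argument is a byte count, the inv_txfm hits pass multiples of the row stride directly as load offsets over uint8_t data. A sketch of that shape (load_4_rows is an illustrative name):

#include <altivec.h>
#include <stdint.h>

typedef vector unsigned char uint8x16_t;

/* Four row loads using the stride (in bytes) as the load offset. */
static void load_4_rows(const uint8_t *dest, int stride,
                        uint8x16_t d[4]) {
  d[0] = vec_vsx_ld(0, dest);
  d[1] = vec_vsx_ld(stride, dest);
  d[2] = vec_vsx_ld(2 * stride, dest);
  d[3] = vec_vsx_ld(3 * stride, dest);
}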
/external/libvpx/libvpx/vp9/encoder/ppc/
vp9_quantize_vsx.c 50 int16x8_t round = vec_vsx_ld(0, round_ptr); in vp9_quantize_fp_vsx()
51 int16x8_t quant = vec_vsx_ld(0, quant_ptr); in vp9_quantize_fp_vsx()
52 int16x8_t dequant = vec_vsx_ld(0, dequant_ptr); in vp9_quantize_fp_vsx()
53 int16x8_t coeff0 = vec_vsx_ld(0, coeff_ptr); in vp9_quantize_fp_vsx()
54 int16x8_t coeff1 = vec_vsx_ld(16, coeff_ptr); in vp9_quantize_fp_vsx()
55 int16x8_t scan0 = vec_vsx_ld(0, iscan); in vp9_quantize_fp_vsx()
56 int16x8_t scan1 = vec_vsx_ld(16, iscan); in vp9_quantize_fp_vsx()
103 coeff0 = vec_vsx_ld(off0, coeff_ptr); in vp9_quantize_fp_vsx()
104 coeff1 = vec_vsx_ld(off1, coeff_ptr); in vp9_quantize_fp_vsx()
105 coeff2 = vec_vsx_ld(off2, coeff_ptr); in vp9_quantize_fp_vsx()
[all …]
/external/clang/test/CodeGen/
builtins-ppc-vsx.c 339 res_vbi = vec_vsx_ld(0, &vbi); in test1()
343 res_vsi = vec_vsx_ld(0, &vsi); in test1()
347 res_vsi = vec_vsx_ld(0, asi); in test1()
351 res_vui = vec_vsx_ld(0, &vui); in test1()
355 res_vui = vec_vsx_ld(0, aui); in test1()
359 res_vf = vec_vsx_ld (0, &vf); in test1()
363 res_vf = vec_vsx_ld (0, af); in test1()
367 res_vsll = vec_vsx_ld(0, &vsll); in test1()
371 res_vull = vec_vsx_ld(0, &vull); in test1()
375 res_vd = vec_vsx_ld(0, &vd); in test1()
[all …]
/external/libjpeg-turbo/simd/powerpc/
jcgryext-altivec.c 140 rgb0 = vec_vsx_ld(0, inptr); in jsimd_rgb_gray_convert_altivec()
142 rgb1 = vec_vsx_ld(16, inptr); in jsimd_rgb_gray_convert_altivec()
144 rgb2 = vec_vsx_ld(32, inptr); in jsimd_rgb_gray_convert_altivec()
147 rgb3 = vec_vsx_ld(48, inptr); in jsimd_rgb_gray_convert_altivec()
jsimd_altivec.h 93 #define VEC_LD(a, b) vec_vsx_ld(a, b)
jquanti-altivec.c 45 in##row = vec_vsx_ld(0, elemptr); \
/external/eigen/Eigen/src/Core/arch/AltiVec/
PacketMath.h 243 return vec_vsx_ld(0, from);
253 return vec_vsx_ld(0, from);
441 return (Packet4i) vec_vsx_ld((long)from & 15, (const int*) _EIGEN_ALIGNED_PTR(from));
446 return (Packet4f) vec_vsx_ld((long)from & 15, (const float*) _EIGEN_ALIGNED_PTR(from));
849 return vec_vsx_ld(0, from);
932 return (Packet2d) vec_vsx_ld((long)from & 15, (const double*) _EIGEN_ALIGNED_PTR(from));
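
Eigen's lines 441, 446, and 932 split an unaligned pointer into an aligned base plus a 0-15 byte offset: _EIGEN_ALIGNED_PTR masks the address down to a 16-byte boundary and (long)from & 15 restores the remainder, so the effective address vec_vsx_ld computes is the original from. A standalone sketch of the same trick (ploadu_sketch is an illustrative name):

#include <altivec.h>
#include <stdint.h>

/* base + offset reconstructs the original, possibly unaligned, from. */
static vector float ploadu_sketch(const float *from) {
  const uintptr_t addr = (uintptr_t)from;
  const float *base = (const float *)(addr & ~(uintptr_t)15);
  return vec_vsx_ld((int)(addr & 15), base);
}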
/external/boringssl/src/crypto/fipsmodule/sha/
sha1-altivec.c 109 vec_vsx_ld(0, (const unsigned char*) data); in sched_00_15()
/external/clang/lib/Headers/
altivec.h 10264 vec_vsx_ld(int __a, const vector bool int *__b) { in vec_vsx_ld() function
10269 vec_vsx_ld(int __a, const vector signed int *__b) { in vec_vsx_ld() function
10274 vec_vsx_ld(int __a, const signed int *__b) { in vec_vsx_ld() function
10279 vec_vsx_ld(int __a, const vector unsigned int *__b) { in vec_vsx_ld() function
10284 vec_vsx_ld(int __a, const unsigned int *__b) { in vec_vsx_ld() function
10289 vec_vsx_ld(int __a, const vector float *__b) { in vec_vsx_ld() function
10293 static __inline__ vector float __ATTRS_o_ai vec_vsx_ld(int __a, in vec_vsx_ld() function
10299 vec_vsx_ld(int __a, const vector signed long long *__b) { in vec_vsx_ld() function
10304 vec_vsx_ld(int __a, const vector unsigned long long *__b) { in vec_vsx_ld() function
10309 vec_vsx_ld(int __a, const vector double *__b) { in vec_vsx_ld() function
[all …]
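
altivec.h declares vec_vsx_ld as an overload set (__ATTRS_o_ai marks clang's overloadable always-inline functions), so the element type of the result follows the pointer argument. A short demonstration (assumes a VSX-enabled compile):

#include <altivec.h>

void overload_demo(const signed int *si, const float *f, const double *d) {
  vector signed int vi = vec_vsx_ld(0, si); /* four 32-bit ints */
  vector float vf = vec_vsx_ld(0, f);       /* four floats */
  vector double vd = vec_vsx_ld(0, d);      /* two doubles */
  (void)vi; (void)vf; (void)vd;
}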
/external/libpng/powerpc/
filter_vsx_intrinsics.c 28 #define vec_ld_unaligned(vec,data) vec = vec_vsx_ld(0,data)