/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/ppc/types_vsx.h"
#include "vpx_dsp/ppc/transpose_vsx.h"
#include "vpx_dsp/ppc/bitdepth_conversion_vsx.h"

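/* One butterfly pass of the 8x8 Hadamard transform.  An 8-point
 * add/subtract butterfly network is applied across v[0]..v[7],
 * independently in each of the eight 16-bit lanes; the results are
 * written back in the permuted order expected by the coefficient
 * layout.  Running this twice, with a transpose in between, yields the
 * full 2-D transform. */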
static void vpx_hadamard_s16_8x8_one_pass(int16x8_t v[8]) {
  /* Stage 1: butterflies on adjacent input pairs. */
  const int16x8_t b0 = vec_add(v[0], v[1]);
  const int16x8_t b1 = vec_sub(v[0], v[1]);
  const int16x8_t b2 = vec_add(v[2], v[3]);
  const int16x8_t b3 = vec_sub(v[2], v[3]);
  const int16x8_t b4 = vec_add(v[4], v[5]);
  const int16x8_t b5 = vec_sub(v[4], v[5]);
  const int16x8_t b6 = vec_add(v[6], v[7]);
  const int16x8_t b7 = vec_sub(v[6], v[7]);

  /* Stage 2: butterflies on pairs of stage-1 results. */
  const int16x8_t c0 = vec_add(b0, b2);
  const int16x8_t c1 = vec_add(b1, b3);
  const int16x8_t c2 = vec_sub(b0, b2);
  const int16x8_t c3 = vec_sub(b1, b3);
  const int16x8_t c4 = vec_add(b4, b6);
  const int16x8_t c5 = vec_add(b5, b7);
  const int16x8_t c6 = vec_sub(b4, b6);
  const int16x8_t c7 = vec_sub(b5, b7);

  /* Stage 3: final butterflies, written back in permuted order. */
  v[0] = vec_add(c0, c4);
  v[1] = vec_sub(c2, c6);
  v[2] = vec_sub(c0, c4);
  v[3] = vec_add(c2, c6);
  v[4] = vec_add(c3, c7);
  v[5] = vec_sub(c3, c7);
  v[6] = vec_sub(c1, c5);
  v[7] = vec_add(c1, c5);
}

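/* 8x8 Hadamard transform: load the eight rows of the residual block,
 * apply one butterfly pass, transpose so the second pass covers the
 * other dimension, apply the second pass, then store all 64
 * coefficients contiguously.
 *
 * Usage sketch (the buffers and stride below are illustrative, not part
 * of this file):
 *
 *   int16_t diff[8 * 8];   // residual samples, stride of 8
 *   tran_low_t coeff[64];  // transform output
 *   vpx_hadamard_8x8_vsx(diff, 8, coeff);
 */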
void vpx_hadamard_8x8_vsx(const int16_t *src_diff, ptrdiff_t src_stride,
                          tran_low_t *coeff) {
  int16x8_t v[8];

  /* Load the eight rows of the residual block. */
  v[0] = vec_vsx_ld(0, src_diff);
  v[1] = vec_vsx_ld(0, src_diff + src_stride);
  v[2] = vec_vsx_ld(0, src_diff + (2 * src_stride));
  v[3] = vec_vsx_ld(0, src_diff + (3 * src_stride));
  v[4] = vec_vsx_ld(0, src_diff + (4 * src_stride));
  v[5] = vec_vsx_ld(0, src_diff + (5 * src_stride));
  v[6] = vec_vsx_ld(0, src_diff + (6 * src_stride));
  v[7] = vec_vsx_ld(0, src_diff + (7 * src_stride));

  /* First pass: one column per vector lane. */
  vpx_hadamard_s16_8x8_one_pass(v);

  /* Transpose so the second pass covers the other dimension. */
  vpx_transpose_s16_8x8(v);

  vpx_hadamard_s16_8x8_one_pass(v);

  /* Store the 64 coefficients contiguously. */
  store_tran_low(v[0], 0, coeff);
  store_tran_low(v[1], 0, coeff + 8);
  store_tran_low(v[2], 0, coeff + 16);
  store_tran_low(v[3], 0, coeff + 24);
  store_tran_low(v[4], 0, coeff + 32);
  store_tran_low(v[5], 0, coeff + 40);
  store_tran_low(v[6], 0, coeff + 48);
  store_tran_low(v[7], 0, coeff + 56);
}

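/* 16x16 Hadamard transform, composed of four 8x8 transforms over the
 * quadrants of the block plus a combining butterfly across the four
 * results.  Each intermediate value is shifted right by one before
 * combining so the sums still fit in int16_t. */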
void vpx_hadamard_16x16_vsx(const int16_t *src_diff, ptrdiff_t src_stride,
                            tran_low_t *coeff) {
  int i;
  const uint16x8_t ones = vec_splat_u16(1);

  /* Rearrange 16x16 to 8x32 and remove stride.
   * Top left first. */
  vpx_hadamard_8x8_vsx(src_diff, src_stride, coeff);
  /* Top right. */
  vpx_hadamard_8x8_vsx(src_diff + 8 + 0 * src_stride, src_stride, coeff + 64);
  /* Bottom left. */
  vpx_hadamard_8x8_vsx(src_diff + 0 + 8 * src_stride, src_stride, coeff + 128);
  /* Bottom right. */
  vpx_hadamard_8x8_vsx(src_diff + 8 + 8 * src_stride, src_stride, coeff + 192);

  /* Overlay the 8x8 blocks and combine. */
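  /* Each iteration loads one row of eight coefficients from each of the
   * four sub-blocks and applies a 4-point butterfly across them. */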
  for (i = 0; i < 64; i += 8) {
    const int16x8_t a0 = load_tran_low(0, coeff);
    const int16x8_t a1 = load_tran_low(0, coeff + 64);
    const int16x8_t a2 = load_tran_low(0, coeff + 128);
    const int16x8_t a3 = load_tran_low(0, coeff + 192);

    /* Prevent the result from escaping int16_t. */
    const int16x8_t b0 = vec_sra(a0, ones);
    const int16x8_t b1 = vec_sra(a1, ones);
    const int16x8_t b2 = vec_sra(a2, ones);
    const int16x8_t b3 = vec_sra(a3, ones);

    const int16x8_t c0 = vec_add(b0, b1);
    const int16x8_t c2 = vec_add(b2, b3);
    const int16x8_t c1 = vec_sub(b0, b1);
    const int16x8_t c3 = vec_sub(b2, b3);

    const int16x8_t d0 = vec_add(c0, c2);
    const int16x8_t d1 = vec_add(c1, c3);
    const int16x8_t d2 = vec_sub(c0, c2);
    const int16x8_t d3 = vec_sub(c1, c3);

    store_tran_low(d0, 0, coeff);
    store_tran_low(d1, 0, coeff + 64);
    store_tran_low(d2, 0, coeff + 128);
    store_tran_low(d3, 0, coeff + 192);

    coeff += 8;
  }
}