/*
 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include <stdlib.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_ports/mem.h"

// Returns the rounded average of an 8x8 pixel block; p is the row stride.
unsigned int vpx_avg_8x8_c(const uint8_t *s, int p) {
  int i, j;
  int sum = 0;
  for (i = 0; i < 8; ++i, s += p)
    for (j = 0; j < 8; sum += s[j], ++j) {
    }

  return (sum + 32) >> 6;
}

// Returns the rounded average of a 4x4 pixel block; p is the row stride.
unsigned int vpx_avg_4x4_c(const uint8_t *s, int p) {
  int i, j;
  int sum = 0;
  for (i = 0; i < 4; ++i, s += p)
    for (j = 0; j < 4; sum += s[j], ++j) {
    }

  return (sum + 8) >> 4;
}
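
// Usage sketch (illustrative only, not part of the library): both functions
// return the round-to-nearest mean of the block, so a constant block maps to
// the constant itself.
#if 0
static void avg_example(void) {
  uint8_t block[8 * 8];
  int i;
  for (i = 0; i < 8 * 8; ++i) block[i] = 7;
  {
    unsigned int a8 = vpx_avg_8x8_c(block, 8);  // 7: (64 * 7 + 32) >> 6
    unsigned int a4 = vpx_avg_4x4_c(block, 8);  // 7: top-left 4x4 sub-block
    (void)a8;
    (void)a4;
  }
}
#endif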

// src_diff: first pass, 9 bit, dynamic range [-255, 255]
//           second pass, 12 bit, dynamic range [-2040, 2040]
static void hadamard_col8(const int16_t *src_diff, int src_stride,
                          int16_t *coeff) {
  int16_t b0 = src_diff[0 * src_stride] + src_diff[1 * src_stride];
  int16_t b1 = src_diff[0 * src_stride] - src_diff[1 * src_stride];
  int16_t b2 = src_diff[2 * src_stride] + src_diff[3 * src_stride];
  int16_t b3 = src_diff[2 * src_stride] - src_diff[3 * src_stride];
  int16_t b4 = src_diff[4 * src_stride] + src_diff[5 * src_stride];
  int16_t b5 = src_diff[4 * src_stride] - src_diff[5 * src_stride];
  int16_t b6 = src_diff[6 * src_stride] + src_diff[7 * src_stride];
  int16_t b7 = src_diff[6 * src_stride] - src_diff[7 * src_stride];

  int16_t c0 = b0 + b2;
  int16_t c1 = b1 + b3;
  int16_t c2 = b0 - b2;
  int16_t c3 = b1 - b3;
  int16_t c4 = b4 + b6;
  int16_t c5 = b5 + b7;
  int16_t c6 = b4 - b6;
  int16_t c7 = b5 - b7;

  coeff[0] = c0 + c4;
  coeff[7] = c1 + c5;
  coeff[3] = c2 + c6;
  coeff[4] = c3 + c7;
  coeff[2] = c0 - c4;
  coeff[6] = c1 - c5;
  coeff[1] = c2 - c6;
  coeff[5] = c3 - c7;
}
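
// The three butterfly stages above implement an 8 point Walsh-Hadamard
// transform (matrix entries +/-1), with the outputs stored in a permuted
// order. A naive O(n^2) reference of the same 1D transform, for intuition
// only (illustrative sketch; hadamard8_reference is not part of the library,
// and __builtin_popcount is a GCC/Clang builtin):
#if 0
static void hadamard8_reference(const int16_t *in, int stride, int *out) {
  int k, n;
  for (k = 0; k < 8; ++k) {
    int acc = 0;
    for (n = 0; n < 8; ++n) {
      // Natural-order Hadamard sign: (-1)^popcount(k & n).
      const int sign = (__builtin_popcount(k & n) & 1) ? -1 : 1;
      acc += sign * in[n * stride];
    }
    out[k] = acc;  // equals a hadamard_col8 output, up to permutation
  }
}
#endif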

// The order of the output coeff of the hadamard is not important. For
// optimization purposes the final transpose may be skipped.
void vpx_hadamard_8x8_c(const int16_t *src_diff, int src_stride,
                        tran_low_t *coeff) {
  int idx;
  int16_t buffer[64];
  int16_t buffer2[64];
  int16_t *tmp_buf = &buffer[0];
  for (idx = 0; idx < 8; ++idx) {
    hadamard_col8(src_diff, src_stride, tmp_buf);  // src_diff: 9 bit
                                                   // dynamic range [-255, 255]
    tmp_buf += 8;
    ++src_diff;
  }

  tmp_buf = &buffer[0];
  for (idx = 0; idx < 8; ++idx) {
    hadamard_col8(tmp_buf, 8, buffer2 + 8 * idx);  // tmp_buf: 12 bit
                                                   // dynamic range [-2040, 2040]
                                                   // buffer2: 15 bit
                                                   // dynamic range [-16320, 16320]
    ++tmp_buf;
  }

  for (idx = 0; idx < 64; ++idx) coeff[idx] = (tran_low_t)buffer2[idx];
}
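
// Usage sketch (illustrative only; residual and stride are placeholders for
// a caller's source-minus-prediction buffer):
#if 0
static void hadamard_8x8_example(const int16_t *residual, int stride) {
  tran_low_t coeff[64];
  vpx_hadamard_8x8_c(residual, stride, coeff);
  // coeff now holds the 2D transform, 15 bit, in a permuted scan order.
  (void)coeff;
}
#endif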

// 16x16 2D Hadamard transform. The four 8x8 sub-transforms are written to
// coeff first; the cross-block combine then runs in place on coeff.
void vpx_hadamard_16x16_c(const int16_t *src_diff, int src_stride,
                          tran_low_t *coeff) {
  int idx;
  for (idx = 0; idx < 4; ++idx) {
    // src_diff: 9 bit, dynamic range [-255, 255]
    const int16_t *src_ptr =
        src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8;
    vpx_hadamard_8x8_c(src_ptr, src_stride, coeff + idx * 64);
  }

  // coeff: 15 bit, dynamic range [-16320, 16320]
  for (idx = 0; idx < 64; ++idx) {
    tran_low_t a0 = coeff[0];
    tran_low_t a1 = coeff[64];
    tran_low_t a2 = coeff[128];
    tran_low_t a3 = coeff[192];

    tran_low_t b0 = (a0 + a1) >> 1;  // (a0 + a1): 16 bit, [-32640, 32640]
    tran_low_t b1 = (a0 - a1) >> 1;  // b0-b3: 15 bit, dynamic range
    tran_low_t b2 = (a2 + a3) >> 1;  // [-16320, 16320]
    tran_low_t b3 = (a2 - a3) >> 1;

    coeff[0] = b0 + b2;  // 16 bit, [-32640, 32640]
    coeff[64] = b1 + b3;
    coeff[128] = b0 - b2;
    coeff[192] = b1 - b3;

    ++coeff;
  }
}
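
// Why the combine stage halves its butterflies: without the >> 1 the final
// outputs could reach 4 * 16320 = 65280, past the 16 bit signed limit; with
// it they stay within [-32640, 32640]. Worst-case arithmetic (illustrative
// only):
#if 0
static void hadamard_16x16_range_check(void) {
  const int a = 16320;          // worst-case 15 bit sub-band coefficient
  const int b0 = (a + a) >> 1;  // 16320: halving restores 15 bit range
  const int out = b0 + b0;      // 32640 <= INT16_MAX (32767)
  (void)out;
}
#endif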

// coeff: 16 bits, dynamic range [-32640, 32640].
// length: value range {16, 64, 256, 1024}.
int vpx_satd_c(const tran_low_t *coeff, int length) {
  int i;
  int satd = 0;
  for (i = 0; i < length; ++i) satd += abs(coeff[i]);

  // satd: sum of absolute values, 26 bits, dynamic range [0, 32640 * 1024].
  return satd;
}
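
// Usage sketch (illustrative only): the usual pairing is Hadamard followed
// by SATD; summing absolute values is permutation invariant, which is why
// the coefficient order of vpx_hadamard_8x8_c is allowed to differ from the
// natural scan.
#if 0
static int satd_8x8_example(const int16_t *residual, int stride) {
  tran_low_t coeff[64];
  vpx_hadamard_8x8_c(residual, stride, coeff);
  return vpx_satd_c(coeff, 64);  // transform-domain distortion estimate
}
#endif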

// Integer projection onto row vectors.
// height: value range {16, 32, 64}.
void vpx_int_pro_row_c(int16_t hbuf[16], const uint8_t *ref,
                       const int ref_stride, const int height) {
  int idx;
  const int norm_factor = height >> 1;
  for (idx = 0; idx < 16; ++idx) {
    int i;
    hbuf[idx] = 0;
    // hbuf[idx]: 14 bit, dynamic range [0, 16320].
    for (i = 0; i < height; ++i) hbuf[idx] += ref[i * ref_stride];
    // hbuf[idx]: 9 bit, dynamic range [0, 510].
    hbuf[idx] /= norm_factor;
    ++ref;
  }
}
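
// Each hbuf entry is a column sum divided by height / 2, i.e. twice the
// column mean, so the projection stays in [0, 510] for every supported
// height. Quick check (illustrative only): a constant block of value v
// projects to 2 * v in every entry.
#if 0
static void int_pro_row_example(void) {
  uint8_t blk[16 * 16];
  int16_t hbuf[16];
  int i;
  for (i = 0; i < 16 * 16; ++i) blk[i] = 100;
  vpx_int_pro_row_c(hbuf, blk, 16, 16);  // hbuf[idx] == 1600 / 8 == 200
}
#endif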

// width: value range {16, 32, 64}.
int16_t vpx_int_pro_col_c(const uint8_t *ref, const int width) {
  int idx;
  int16_t sum = 0;
  // sum: 14 bit, dynamic range [0, 16320]
  for (idx = 0; idx < width; ++idx) sum += ref[idx];
  return sum;
}
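
// Usage sketch (illustrative only): the two projections reduce a 2D block
// to short 1D profiles, so candidate offsets can be compared cheaply before
// any full 2D SAD is computed.
#if 0
static void int_pro_example(const uint8_t *blk, int stride) {
  int16_t hbuf[16];       // vertical projection over 16 columns
  int16_t first_row_sum;  // horizontal sum of one row
  vpx_int_pro_row_c(hbuf, blk, stride, 16);
  first_row_sum = vpx_int_pro_col_c(blk, 16);
  (void)hbuf;
  (void)first_row_sum;
}
#endif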

// ref: [0 - 510]
// src: [0 - 510]
// bwl: {2, 3, 4}
int vpx_vector_var_c(const int16_t *ref, const int16_t *src, const int bwl) {
  int i;
  int width = 4 << bwl;
  int sse = 0, mean = 0, var;

  for (i = 0; i < width; ++i) {
    int diff = ref[i] - src[i];  // diff: dynamic range [-510, 510], 10 bits.
    mean += diff;                // mean: dynamic range 16 bits.
    sse += diff * diff;          // sse: dynamic range 26 bits.
  }

  // (mean * mean): dynamic range 31 bits.
  var = sse - ((mean * mean) >> (bwl + 2));
  return var;
}
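
// The return value is sum(d^2) - (sum(d))^2 / n with n = 4 << bwl, i.e. n
// times the population variance of the differences (the shift by bwl + 2
// divides by n). An equivalent two-pass form, for intuition (illustrative
// only; matches up to integer rounding):
#if 0
static int vector_var_two_pass(const int16_t *ref, const int16_t *src,
                               int bwl) {
  const int n = 4 << bwl;
  int i, sum = 0, acc = 0;
  for (i = 0; i < n; ++i) sum += ref[i] - src[i];
  for (i = 0; i < n; ++i) {
    const int d = (ref[i] - src[i]) - sum / n;  // deviation from the mean
    acc += d * d;
  }
  return acc;  // ~ vpx_vector_var_c(ref, src, bwl)
}
#endif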

// Computes the minimum and maximum absolute difference between the 8x8
// source block s and predictor block d.
void vpx_minmax_8x8_c(const uint8_t *s, int p, const uint8_t *d, int dp,
                      int *min, int *max) {
  int i, j;
  *min = 255;
  *max = 0;
  for (i = 0; i < 8; ++i, s += p, d += dp) {
    for (j = 0; j < 8; ++j) {
      int diff = abs(s[j] - d[j]);
      *min = diff < *min ? diff : *min;
      *max = diff > *max ? diff : *max;
    }
  }
}
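
// Usage sketch (illustrative only): identical blocks yield min == max == 0,
// and the spread max - min is a cheap contrast measure for an 8x8 block.
#if 0
static int minmax_spread_example(const uint8_t *s, int p, const uint8_t *d,
                                 int dp) {
  int mn, mx;
  vpx_minmax_8x8_c(s, p, d, dp, &mn, &mx);
  return mx - mn;  // 0 when the two 8x8 blocks match exactly
}
#endif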

#if CONFIG_VP9_HIGHBITDEPTH
unsigned int vpx_highbd_avg_8x8_c(const uint8_t *s8, int p) {
  int i, j;
  int sum = 0;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  for (i = 0; i < 8; ++i, s += p)
    for (j = 0; j < 8; sum += s[j], ++j) {
    }

  return (sum + 32) >> 6;
}

unsigned int vpx_highbd_avg_4x4_c(const uint8_t *s8, int p) {
  int i, j;
  int sum = 0;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  for (i = 0; i < 4; ++i, s += p)
    for (j = 0; j < 4; sum += s[j], ++j) {
    }

  return (sum + 8) >> 4;
}

void vpx_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8,
                             int dp, int *min, int *max) {
  int i, j;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  const uint16_t *d = CONVERT_TO_SHORTPTR(d8);
  *min = 255;
  *max = 0;
  for (i = 0; i < 8; ++i, s += p, d += dp) {
    for (j = 0; j < 8; ++j) {
      int diff = abs(s[j] - d[j]);
      *min = diff < *min ? diff : *min;
      *max = diff > *max ? diff : *max;
    }
  }
}
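
// Usage sketch (illustrative only): high bit depth buffers travel through
// the 8 bit pointer type; CONVERT_TO_BYTEPTR and CONVERT_TO_SHORTPTR from
// vpx_ports/mem.h translate between the two views.
#if 0
static unsigned int highbd_avg_example(const uint16_t *pixels, int stride) {
  return vpx_highbd_avg_8x8_c(CONVERT_TO_BYTEPTR(pixels), stride);
}
#endif  // illustrative sketch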
#endif  // CONFIG_VP9_HIGHBITDEPTH