/*
 *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <math.h>
#include <assert.h>
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/psnr.h"
#include "vpx_scale/yv12config.h"

double vpx_sse_to_psnr(double samples, double peak, double sse) {
  if (sse > 0.0) {
    const double psnr = 10.0 * log10(samples * peak * peak / sse);
    return psnr > MAX_PSNR ? MAX_PSNR : psnr;
  } else {
    return MAX_PSNR;
  }
}
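
/* The value computed by vpx_sse_to_psnr() above is
 * 10 * log10(samples * peak^2 / sse), i.e. MSE = sse / samples and
 * PSNR = 10 * log10(peak^2 / MSE), capped at MAX_PSNR (the sse == 0 case
 * returns MAX_PSNR directly). As a rough worked example with made-up numbers:
 * an 8-bit 16x16 block has peak = 255 and samples = 256, so sse = 1024 gives
 * MSE = 4 and vpx_sse_to_psnr(256, 255.0, 1024.0) ~= 42.1 dB.
 */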

/* TODO(yaowu): The block_variance calls the unoptimized versions of variance()
 * and highbd_8_variance(). It should not.
 */
static void encoder_variance(const uint8_t *a, int a_stride, const uint8_t *b,
                             int b_stride, int w, int h, unsigned int *sse,
                             int *sum) {
  int i, j;

  *sum = 0;
  *sse = 0;

  for (i = 0; i < h; i++) {
    for (j = 0; j < w; j++) {
      const int diff = a[j] - b[j];
      *sum += diff;
      *sse += diff * diff;
    }

    a += a_stride;
    b += b_stride;
  }
}
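
/* Note: the *sum output of encoder_variance() mirrors the interface of the
 * optimized variance kernels mentioned in the TODO above; get_sse() below only
 * consumes *sse. If an actual variance were wanted, it would be derived from
 * these two outputs as, roughly, sse - (sum * sum) / (w * h) (a sketch of the
 * usual relation, not a helper provided by this file).
 */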

#if CONFIG_VP9_HIGHBITDEPTH
static void encoder_highbd_variance64(const uint8_t *a8, int a_stride,
                                      const uint8_t *b8, int b_stride, int w,
                                      int h, uint64_t *sse, int64_t *sum) {
  int i, j;

  uint16_t *a = CONVERT_TO_SHORTPTR(a8);
  uint16_t *b = CONVERT_TO_SHORTPTR(b8);
  *sum = 0;
  *sse = 0;

  for (i = 0; i < h; i++) {
    for (j = 0; j < w; j++) {
      const int diff = a[j] - b[j];
      *sum += diff;
      *sse += diff * diff;
    }
    a += a_stride;
    b += b_stride;
  }
}
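
/* With up-to-12-bit input a single squared difference can approach 2^24, so a
 * 32-bit accumulator could overflow on large planes; hence the 64-bit sse/sum
 * accumulators above. encoder_highbd_8_variance() below narrows the totals
 * back to the 32-bit interface shared with the 8-bit code paths (the casts
 * assume the strip totals fit in 32 bits).
 */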

static void encoder_highbd_8_variance(const uint8_t *a8, int a_stride,
                                      const uint8_t *b8, int b_stride, int w,
                                      int h, unsigned int *sse, int *sum) {
  uint64_t sse_long = 0;
  int64_t sum_long = 0;
  encoder_highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long,
                            &sum_long);
  *sse = (unsigned int)sse_long;
  *sum = (int)sum_long;
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

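/* get_sse() covers the plane with 16x16 tiles handled by the optimized
 * vpx_mse16x16() kernel, then mops up the right-edge strip (width % 16
 * columns, full height) and the bottom-edge strip (height % 16 rows,
 * width - dw columns) with the plain C routine, so every pixel is counted
 * exactly once. For a made-up example, a 36x22 plane splits into two 16x16
 * tiles, a 4x22 right strip and a 32x6 bottom strip.
 */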
static int64_t get_sse(const uint8_t *a, int a_stride, const uint8_t *b,
                       int b_stride, int width, int height) {
  const int dw = width % 16;
  const int dh = height % 16;
  int64_t total_sse = 0;
  unsigned int sse = 0;
  int sum = 0;
  int x, y;

  if (dw > 0) {
    encoder_variance(&a[width - dw], a_stride, &b[width - dw], b_stride, dw,
                     height, &sse, &sum);
    total_sse += sse;
  }

  if (dh > 0) {
    encoder_variance(&a[(height - dh) * a_stride], a_stride,
                     &b[(height - dh) * b_stride], b_stride, width - dw, dh,
                     &sse, &sum);
    total_sse += sse;
  }

  for (y = 0; y < height / 16; ++y) {
    const uint8_t *pa = a;
    const uint8_t *pb = b;
    for (x = 0; x < width / 16; ++x) {
      vpx_mse16x16(pa, a_stride, pb, b_stride, &sse);
      total_sse += sse;

      pa += 16;
      pb += 16;
    }

    a += 16 * a_stride;
    b += 16 * b_stride;
  }

  return total_sse;
}

#if CONFIG_VP9_HIGHBITDEPTH
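/* When the codec's working bit depth exceeds the source bit depth
 * (bit_depth > in_bit_depth in vpx_calc_highbd_psnr() below), both buffers are
 * shifted down by input_shift before differencing, so PSNR is measured at the
 * precision of the original source.
 */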
static int64_t highbd_get_sse_shift(const uint8_t *a8, int a_stride,
                                    const uint8_t *b8, int b_stride, int width,
                                    int height, unsigned int input_shift) {
  const uint16_t *a = CONVERT_TO_SHORTPTR(a8);
  const uint16_t *b = CONVERT_TO_SHORTPTR(b8);
  int64_t total_sse = 0;
  int x, y;
  for (y = 0; y < height; ++y) {
    for (x = 0; x < width; ++x) {
      int64_t diff;
      diff = (a[x] >> input_shift) - (b[x] >> input_shift);
      total_sse += diff * diff;
    }
    a += a_stride;
    b += b_stride;
  }
  return total_sse;
}

static int64_t highbd_get_sse(const uint8_t *a, int a_stride, const uint8_t *b,
                              int b_stride, int width, int height) {
  int64_t total_sse = 0;
  int x, y;
  const int dw = width % 16;
  const int dh = height % 16;
  unsigned int sse = 0;
  int sum = 0;
  if (dw > 0) {
    encoder_highbd_8_variance(&a[width - dw], a_stride, &b[width - dw],
                              b_stride, dw, height, &sse, &sum);
    total_sse += sse;
  }
  if (dh > 0) {
    encoder_highbd_8_variance(&a[(height - dh) * a_stride], a_stride,
                              &b[(height - dh) * b_stride], b_stride,
                              width - dw, dh, &sse, &sum);
    total_sse += sse;
  }
  for (y = 0; y < height / 16; ++y) {
    const uint8_t *pa = a;
    const uint8_t *pb = b;
    for (x = 0; x < width / 16; ++x) {
      vpx_highbd_8_mse16x16(pa, a_stride, pb, b_stride, &sse);
      total_sse += sse;
      pa += 16;
      pb += 16;
    }
    a += 16 * a_stride;
    b += 16 * b_stride;
  }
  return total_sse;
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

int64_t vpx_get_y_sse(const YV12_BUFFER_CONFIG *a,
                      const YV12_BUFFER_CONFIG *b) {
  assert(a->y_crop_width == b->y_crop_width);
  assert(a->y_crop_height == b->y_crop_height);

  return get_sse(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride,
                 a->y_crop_width, a->y_crop_height);
}

#if CONFIG_VP9_HIGHBITDEPTH
int64_t vpx_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
                             const YV12_BUFFER_CONFIG *b) {
  assert(a->y_crop_width == b->y_crop_width);
  assert(a->y_crop_height == b->y_crop_height);
  assert((a->flags & YV12_FLAG_HIGHBITDEPTH) != 0);
  assert((b->flags & YV12_FLAG_HIGHBITDEPTH) != 0);

  return highbd_get_sse(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride,
                        a->y_crop_width, a->y_crop_height);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

#if CONFIG_VP9_HIGHBITDEPTH
void vpx_calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
                          const YV12_BUFFER_CONFIG *b, PSNR_STATS *psnr,
                          uint32_t bit_depth, uint32_t in_bit_depth) {
  const int widths[3] = { a->y_crop_width, a->uv_crop_width, a->uv_crop_width };
  const int heights[3] = { a->y_crop_height, a->uv_crop_height,
                           a->uv_crop_height };
  const uint8_t *a_planes[3] = { a->y_buffer, a->u_buffer, a->v_buffer };
  const int a_strides[3] = { a->y_stride, a->uv_stride, a->uv_stride };
  const uint8_t *b_planes[3] = { b->y_buffer, b->u_buffer, b->v_buffer };
  const int b_strides[3] = { b->y_stride, b->uv_stride, b->uv_stride };
  int i;
  uint64_t total_sse = 0;
  uint32_t total_samples = 0;
  const double peak = (double)((1 << in_bit_depth) - 1);
  const unsigned int input_shift = bit_depth - in_bit_depth;

  for (i = 0; i < 3; ++i) {
    const int w = widths[i];
    const int h = heights[i];
    const uint32_t samples = w * h;
    uint64_t sse;
    if (a->flags & YV12_FLAG_HIGHBITDEPTH) {
      if (input_shift) {
        sse = highbd_get_sse_shift(a_planes[i], a_strides[i], b_planes[i],
                                   b_strides[i], w, h, input_shift);
      } else {
        sse = highbd_get_sse(a_planes[i], a_strides[i], b_planes[i],
                             b_strides[i], w, h);
      }
    } else {
      sse = get_sse(a_planes[i], a_strides[i], b_planes[i], b_strides[i], w, h);
    }
    psnr->sse[1 + i] = sse;
    psnr->samples[1 + i] = samples;
    psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse);

    total_sse += sse;
    total_samples += samples;
  }

  psnr->sse[0] = total_sse;
  psnr->samples[0] = total_samples;
  psnr->psnr[0] =
      vpx_sse_to_psnr((double)total_samples, peak, (double)total_sse);
}

#endif  // CONFIG_VP9_HIGHBITDEPTH

void vpx_calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
                   PSNR_STATS *psnr) {
  static const double peak = 255.0;
  const int widths[3] = { a->y_crop_width, a->uv_crop_width, a->uv_crop_width };
  const int heights[3] = { a->y_crop_height, a->uv_crop_height,
                           a->uv_crop_height };
  const uint8_t *a_planes[3] = { a->y_buffer, a->u_buffer, a->v_buffer };
  const int a_strides[3] = { a->y_stride, a->uv_stride, a->uv_stride };
  const uint8_t *b_planes[3] = { b->y_buffer, b->u_buffer, b->v_buffer };
  const int b_strides[3] = { b->y_stride, b->uv_stride, b->uv_stride };
  int i;
  uint64_t total_sse = 0;
  uint32_t total_samples = 0;

  for (i = 0; i < 3; ++i) {
    const int w = widths[i];
    const int h = heights[i];
    const uint32_t samples = w * h;
    const uint64_t sse =
        get_sse(a_planes[i], a_strides[i], b_planes[i], b_strides[i], w, h);
    psnr->sse[1 + i] = sse;
    psnr->samples[1 + i] = samples;
    psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse);

    total_sse += sse;
    total_samples += samples;
  }

  psnr->sse[0] = total_sse;
  psnr->samples[0] = total_samples;
  psnr->psnr[0] =
      vpx_sse_to_psnr((double)total_samples, peak, (double)total_sse);
}
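
/* Usage sketch (hypothetical caller, not part of this file): given two
 * already-allocated YV12_BUFFER_CONFIG frames "src" and "recon" with matching
 * dimensions,
 *
 *   PSNR_STATS stats;
 *   vpx_calc_psnr(&src, &recon, &stats);
 *   printf("overall %.2f dB (Y %.2f, U %.2f, V %.2f)\n", stats.psnr[0],
 *          stats.psnr[1], stats.psnr[2], stats.psnr[3]);
 *
 * Index 0 holds the combined result over all three planes; indices 1..3 hold
 * the per-plane Y, U and V values.
 */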