1 /*
2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
/* MFQE: Multiframe Quality Enhancement
 * In rate limited situations keyframes may cause significant visual artifacts
 * commonly referred to as "popping." This file implements a postprocessing
 * algorithm which blends data from the preceding frame when there is no
 * motion and the q from the previous frame is lower, which indicates that it
 * is higher quality.
 */
18
19 #include "./vp8_rtcd.h"
20 #include "./vpx_dsp_rtcd.h"
21 #include "vp8/common/postproc.h"
22 #include "vpx_dsp/variance.h"
23 #include "vpx_mem/vpx_mem.h"
24 #include "vpx_scale/yv12config.h"
25
26 #include <limits.h>
27 #include <stdlib.h>
28
filter_by_weight(unsigned char * src,int src_stride,unsigned char * dst,int dst_stride,int block_size,int src_weight)29 static void filter_by_weight(unsigned char *src, int src_stride,
30 unsigned char *dst, int dst_stride, int block_size,
31 int src_weight) {
32 int dst_weight = (1 << MFQE_PRECISION) - src_weight;
33 int rounding_bit = 1 << (MFQE_PRECISION - 1);
34 int r, c;
35
36 for (r = 0; r < block_size; ++r) {
37 for (c = 0; c < block_size; ++c) {
38 dst[c] = (src[c] * src_weight + dst[c] * dst_weight + rounding_bit) >>
39 MFQE_PRECISION;
40 }
41 src += src_stride;
42 dst += dst_stride;
43 }
44 }
45
/* RTCD C reference: weighted blend of a 16x16 block (see filter_by_weight). */
void vp8_filter_by_weight16x16_c(unsigned char *src, int src_stride,
                                 unsigned char *dst, int dst_stride,
                                 int src_weight) {
  filter_by_weight(src, src_stride, dst, dst_stride, 16, src_weight);
}
51
/* RTCD C reference: weighted blend of an 8x8 block (see filter_by_weight). */
void vp8_filter_by_weight8x8_c(unsigned char *src, int src_stride,
                               unsigned char *dst, int dst_stride,
                               int src_weight) {
  filter_by_weight(src, src_stride, dst, dst_stride, 8, src_weight);
}
57
/* RTCD C reference: weighted blend of a 4x4 block (see filter_by_weight). */
void vp8_filter_by_weight4x4_c(unsigned char *src, int src_stride,
                               unsigned char *dst, int dst_stride,
                               int src_weight) {
  filter_by_weight(src, src_stride, dst, dst_stride, 4, src_weight);
}
63
/* Blend one luma block plus its two chroma blocks from src into dst with
 * weight src_weight.  block_size selects the luma size (16 or 8); chroma
 * blocks are half that size in each dimension (4:2:0).
 */
static void apply_ifactor(unsigned char *y_src, int y_src_stride,
                          unsigned char *y_dst, int y_dst_stride,
                          unsigned char *u_src, unsigned char *v_src,
                          int uv_src_stride, unsigned char *u_dst,
                          unsigned char *v_dst, int uv_dst_stride,
                          int block_size, int src_weight) {
  if (block_size == 16) {
    /* 16x16 luma macroblock -> 8x8 chroma planes. */
    vp8_filter_by_weight16x16(y_src, y_src_stride, y_dst, y_dst_stride,
                              src_weight);
    vp8_filter_by_weight8x8(u_src, uv_src_stride, u_dst, uv_dst_stride,
                            src_weight);
    vp8_filter_by_weight8x8(v_src, uv_src_stride, v_dst, uv_dst_stride,
                            src_weight);
    return;
  }
  /* 8x8 luma sub-block -> 4x4 chroma planes. */
  vp8_filter_by_weight8x8(y_src, y_src_stride, y_dst, y_dst_stride, src_weight);
  vp8_filter_by_weight4x4(u_src, uv_src_stride, u_dst, uv_dst_stride,
                          src_weight);
  vp8_filter_by_weight4x4(v_src, uv_src_stride, v_dst, uv_dst_stride,
                          src_weight);
}
86
/* Integer square root of x, rounded to the nearest integer.
 *
 * Computes the floor square root bit by bit, then adds one when x is closer
 * to (guess + 1)^2 than to guess^2.  The squarings are done in 64 bits:
 * for x near UINT_MAX the candidate guess reaches 1 << 16, whose 32-bit
 * square wraps to 0 and corrupted the result in the previous version.
 */
static unsigned int int_sqrt(unsigned int x) {
  unsigned int y = x;
  unsigned int guess;
  int p = 1;
  /* p becomes 1 + index of the highest set bit of x. */
  while (y >>= 1) p++;
  p >>= 1; /* highest possible bit position of sqrt(x) */

  guess = 0;
  while (p >= 0) {
    /* Tentatively set bit p; keep it only if guess^2 does not exceed x. */
    guess |= (1u << p);
    if ((unsigned long long)x < (unsigned long long)guess * guess) {
      guess -= (1u << p);
    }
    p--;
  }
  /* choose between guess or guess+1 */
  return guess + ((unsigned long long)guess * guess + guess + 1 <= x);
}
103
104 #define USE_SSD
/* Conditionally blend one block of the current frame into the previous
 * postprocessed frame.
 *
 * blksize            : luma block size, 16 or 8 (chroma is half that)
 * qcurr, qprev       : base Q index of the current and previous frames
 * y/u/v, *_stride    : current-frame plane pointers and strides
 * yd/ud/vd, *d_stride: destination plane pointers and strides
 *
 * When the two blocks are similar (inter-frame distortion below a threshold
 * derived from the q step, the destination activity and qprev) and blending
 * poses no "activity risk", the destination is blended toward the current
 * block with strength proportional to the distortion.  Otherwise the current
 * block is copied to the destination unchanged.
 */
static void multiframe_quality_enhance_block(
    int blksize, /* Currently only values supported are 16, 8 */
    int qcurr, int qprev, unsigned char *y, unsigned char *u, unsigned char *v,
    int y_stride, int uv_stride, unsigned char *yd, unsigned char *ud,
    unsigned char *vd, int yd_stride, int uvd_stride) {
  /* Zero reference block that turns vpx_variance into an activity measure. */
  static const unsigned char VP8_ZEROS[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
                                               0, 0, 0, 0, 0, 0, 0, 0 };
  int uvblksize = blksize >> 1; /* 4:2:0 chroma block size */
  int qdiff = qcurr - qprev;

  int i;
  unsigned char *up;
  unsigned char *udp;
  unsigned char *vp;
  unsigned char *vdp;

  unsigned int act, actd, sad, usad, vsad, sse, thr, thrsq, actrisk;

  if (blksize == 16) {
    /* Per-pixel activity of destination and source (+128 >> 8 rounds /256). */
    actd = (vpx_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse) + 128) >> 8;
    act = (vpx_variance16x16(y, y_stride, VP8_ZEROS, 0, &sse) + 128) >> 8;
#ifdef USE_SSD
    /* Per-pixel distortion between the two frames, SSE based. */
    vpx_variance16x16(y, y_stride, yd, yd_stride, &sse);
    sad = (sse + 128) >> 8;
    vpx_variance8x8(u, uv_stride, ud, uvd_stride, &sse);
    usad = (sse + 32) >> 6;
    vpx_variance8x8(v, uv_stride, vd, uvd_stride, &sse);
    vsad = (sse + 32) >> 6;
#else
    /* Per-pixel distortion between the two frames, SAD based. */
    sad = (vpx_sad16x16(y, y_stride, yd, yd_stride) + 128) >> 8;
    usad = (vpx_sad8x8(u, uv_stride, ud, uvd_stride) + 32) >> 6;
    vsad = (vpx_sad8x8(v, uv_stride, vd, uvd_stride) + 32) >> 6;
#endif
  } else {
    /* Same measurements for an 8x8 luma / 4x4 chroma block. */
    actd = (vpx_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse) + 32) >> 6;
    act = (vpx_variance8x8(y, y_stride, VP8_ZEROS, 0, &sse) + 32) >> 6;
#ifdef USE_SSD
    vpx_variance8x8(y, y_stride, yd, yd_stride, &sse);
    sad = (sse + 32) >> 6;
    vpx_variance4x4(u, uv_stride, ud, uvd_stride, &sse);
    usad = (sse + 8) >> 4;
    vpx_variance4x4(v, uv_stride, vd, uvd_stride, &sse);
    vsad = (sse + 8) >> 4;
#else
    sad = (vpx_sad8x8(y, y_stride, yd, yd_stride) + 32) >> 6;
    usad = (vpx_sad4x4(u, uv_stride, ud, uvd_stride) + 8) >> 4;
    vsad = (vpx_sad4x4(v, uv_stride, vd, uvd_stride) + 8) >> 4;
#endif
  }

  /* Blending is risky when the destination is much busier than the source:
   * it would inject high-frequency energy absent from the current frame. */
  actrisk = (actd > act * 5);

  /* thr = qdiff/16 + log2(act) + log4(qprev) */
  /* NOTE(review): the formula comment above says log2(act), but the code
   * consumes actd (destination activity) -- confirm which was intended. */
  thr = (qdiff >> 4);
  while (actd >>= 1) thr++;  /* + floor(log2(actd)); destroys actd */
  while (qprev >>= 2) thr++; /* + floor(log4(qprev)); destroys qprev */

#ifdef USE_SSD
  thrsq = thr * thr; /* SSE-based metrics compare against thr squared */
  if (sad < thrsq &&
      /* additional checks for color mismatch and excessive addition of
       * high-frequencies */
      4 * usad < thrsq && 4 * vsad < thrsq && !actrisk)
#else
  if (sad < thr &&
      /* additional checks for color mismatch and excessive addition of
       * high-frequencies */
      2 * usad < thr && 2 * vsad < thr && !actrisk)
#endif
  {
    int ifactor;
#ifdef USE_SSD
    /* TODO: optimize this later to not need sqr root */
    sad = int_sqrt(sad);
#endif
    /* Blend strength grows with distortion and shrinks with the q step. */
    ifactor = (sad << MFQE_PRECISION) / thr;
    ifactor >>= (qdiff >> 5);

    if (ifactor) {
      apply_ifactor(y, y_stride, yd, yd_stride, u, v, uv_stride, ud, vd,
                    uvd_stride, blksize, ifactor);
    }
  } else { /* else implicitly copy from previous frame */
    if (blksize == 16) {
      vp8_copy_mem16x16(y, y_stride, yd, yd_stride);
      vp8_copy_mem8x8(u, uv_stride, ud, uvd_stride);
      vp8_copy_mem8x8(v, uv_stride, vd, uvd_stride);
    } else {
      vp8_copy_mem8x8(y, y_stride, yd, yd_stride);
      /* No 4x4 copy helper exists: copy the chroma rows by hand. */
      for (up = u, udp = ud, i = 0; i < uvblksize;
           ++i, up += uv_stride, udp += uvd_stride) {
        memcpy(udp, up, uvblksize);
      }
      for (vp = v, vdp = vd, i = 0; i < uvblksize;
           ++i, vp += uv_stride, vdp += uvd_stride) {
        memcpy(vdp, vp, uvblksize);
      }
    }
  }
}
205
qualify_inter_mb(const MODE_INFO * mode_info_context,int * map)206 static int qualify_inter_mb(const MODE_INFO *mode_info_context, int *map) {
207 if (mode_info_context->mbmi.mb_skip_coeff) {
208 map[0] = map[1] = map[2] = map[3] = 1;
209 } else if (mode_info_context->mbmi.mode == SPLITMV) {
210 static int ndx[4][4] = {
211 { 0, 1, 4, 5 }, { 2, 3, 6, 7 }, { 8, 9, 12, 13 }, { 10, 11, 14, 15 }
212 };
213 int i, j;
214 for (i = 0; i < 4; ++i) {
215 map[i] = 1;
216 for (j = 0; j < 4 && map[j]; ++j) {
217 map[i] &= (mode_info_context->bmi[ndx[i][j]].mv.as_mv.row <= 2 &&
218 mode_info_context->bmi[ndx[i][j]].mv.as_mv.col <= 2);
219 }
220 }
221 } else {
222 map[0] = map[1] = map[2] = map[3] =
223 (mode_info_context->mbmi.mode > B_PRED &&
224 abs(mode_info_context->mbmi.mv.as_mv.row) <= 2 &&
225 abs(mode_info_context->mbmi.mv.as_mv.col) <= 2);
226 }
227 return (map[0] + map[1] + map[2] + map[3]);
228 }
229
/* Run MFQE over the frame to be shown, writing the enhanced result into
 * cm->post_proc_buffer.
 *
 * Walks the frame macroblock by macroblock.  Inter MBs are qualified per
 * 8x8 quadrant by qualify_inter_mb(); key-frame MBs always qualify fully.
 * Qualifying blocks go through multiframe_quality_enhance_block() (16x16
 * when all four quadrants qualify, otherwise per 8x8 quadrant); the rest
 * are copied through unchanged.
 */
void vp8_multiframe_quality_enhance(VP8_COMMON *cm) {
  YV12_BUFFER_CONFIG *show = cm->frame_to_show;
  YV12_BUFFER_CONFIG *dest = &cm->post_proc_buffer;

  FRAME_TYPE frame_type = cm->frame_type;
  /* Point at base of Mb MODE_INFO list has motion vectors etc */
  const MODE_INFO *mode_info_context = cm->show_frame_mi;
  int mb_row;
  int mb_col;
  int totmap, map[4]; /* per-quadrant qualification flags and their count */
  int qcurr = cm->base_qindex;
  int qprev = cm->postproc_state.last_base_qindex;

  unsigned char *y_ptr, *u_ptr, *v_ptr;
  unsigned char *yd_ptr, *ud_ptr, *vd_ptr;

  /* Set up the buffer pointers */
  y_ptr = show->y_buffer;
  u_ptr = show->u_buffer;
  v_ptr = show->v_buffer;
  yd_ptr = dest->y_buffer;
  ud_ptr = dest->u_buffer;
  vd_ptr = dest->v_buffer;

  /* postprocess each macro block */
  for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
    for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
      /* if motion is high there will likely be no benefit */
      if (frame_type == INTER_FRAME) {
        totmap = qualify_inter_mb(mode_info_context, map);
      } else {
        /* Key frames always qualify fully; other frame types never do. */
        totmap = (frame_type == KEY_FRAME ? 4 : 0);
      }
      if (totmap) {
        if (totmap < 4) {
          /* Partially qualifying MB: process each 8x8 quadrant on its own.
           * (i, j) index the quadrant row/column; luma offsets step by 8,
           * chroma by 4. */
          int i, j;
          for (i = 0; i < 2; ++i) {
            for (j = 0; j < 2; ++j) {
              if (map[i * 2 + j]) {
                multiframe_quality_enhance_block(
                    8, qcurr, qprev, y_ptr + 8 * (i * show->y_stride + j),
                    u_ptr + 4 * (i * show->uv_stride + j),
                    v_ptr + 4 * (i * show->uv_stride + j), show->y_stride,
                    show->uv_stride, yd_ptr + 8 * (i * dest->y_stride + j),
                    ud_ptr + 4 * (i * dest->uv_stride + j),
                    vd_ptr + 4 * (i * dest->uv_stride + j), dest->y_stride,
                    dest->uv_stride);
              } else {
                /* copy a 8x8 block */
                int k;
                unsigned char *up = u_ptr + 4 * (i * show->uv_stride + j);
                unsigned char *udp = ud_ptr + 4 * (i * dest->uv_stride + j);
                unsigned char *vp = v_ptr + 4 * (i * show->uv_stride + j);
                unsigned char *vdp = vd_ptr + 4 * (i * dest->uv_stride + j);
                vp8_copy_mem8x8(
                    y_ptr + 8 * (i * show->y_stride + j), show->y_stride,
                    yd_ptr + 8 * (i * dest->y_stride + j), dest->y_stride);
                /* 4x4 chroma copies done by hand (no 4x4 copy helper). */
                for (k = 0; k < 4; ++k, up += show->uv_stride,
                    udp += dest->uv_stride, vp += show->uv_stride,
                    vdp += dest->uv_stride) {
                  memcpy(udp, up, 4);
                  memcpy(vdp, vp, 4);
                }
              }
            }
          }
        } else { /* totmap = 4 */
          multiframe_quality_enhance_block(
              16, qcurr, qprev, y_ptr, u_ptr, v_ptr, show->y_stride,
              show->uv_stride, yd_ptr, ud_ptr, vd_ptr, dest->y_stride,
              dest->uv_stride);
        }
      } else {
        /* No quadrant qualified: pass the MB through unchanged. */
        vp8_copy_mem16x16(y_ptr, show->y_stride, yd_ptr, dest->y_stride);
        vp8_copy_mem8x8(u_ptr, show->uv_stride, ud_ptr, dest->uv_stride);
        vp8_copy_mem8x8(v_ptr, show->uv_stride, vd_ptr, dest->uv_stride);
      }
      /* Advance one MB to the right: 16 luma pixels, 8 chroma pixels. */
      y_ptr += 16;
      u_ptr += 8;
      v_ptr += 8;
      yd_ptr += 16;
      ud_ptr += 8;
      vd_ptr += 8;
      mode_info_context++; /* step to next MB */
    }

    /* Rewind to the left edge and drop down one MB row. */
    y_ptr += show->y_stride * 16 - 16 * cm->mb_cols;
    u_ptr += show->uv_stride * 8 - 8 * cm->mb_cols;
    v_ptr += show->uv_stride * 8 - 8 * cm->mb_cols;
    yd_ptr += dest->y_stride * 16 - 16 * cm->mb_cols;
    ud_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols;
    vd_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols;

    mode_info_context++; /* Skip border mb */
  }
}
326