/*
 *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <math.h>

#include "vpx_ports/mem.h"
#include "vpx_ports/system_state.h"

#include "vp9/encoder/vp9_aq_variance.h"

#include "vp9/common/vp9_seg_common.h"

#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_segmentation.h"

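// Variance-based adaptive quantization: each block is assigned an "energy"
// value in [ENERGY_MIN, ENERGY_MAX] derived from its variance. The energy
// selects a segment via segment_id[], and each segment is given a Q index
// delta sized to hit the corresponding rate_ratio[] relative to the base Q.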
#define ENERGY_MIN (-4)
#define ENERGY_MAX (1)
#define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN + 1)
#define ENERGY_IN_BOUNDS(energy) \
  assert((energy) >= ENERGY_MIN && (energy) <= ENERGY_MAX)

static const double rate_ratio[MAX_SEGMENTS] = { 2.5,  2.0, 1.5, 1.0,
                                                 0.75, 1.0, 1.0, 1.0 };
static const int segment_id[ENERGY_SPAN] = { 0, 1, 1, 2, 3, 4 };

#define SEGMENT_ID(i) segment_id[(i)-ENERGY_MIN]

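// All-zero "reference" blocks: comparing the source against these (with a
// stride of 0) makes the variance functions below return the variance of the
// source block itself.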
DECLARE_ALIGNED(16, static const uint8_t, vp9_64_zeros[64]) = { 0 };
#if CONFIG_VP9_HIGHBITDEPTH
DECLARE_ALIGNED(16, static const uint16_t, vp9_highbd_64_zeros[64]) = { 0 };
#endif

unsigned int vp9_vaq_segment_id(int energy) {
  ENERGY_IN_BOUNDS(energy);
  return SEGMENT_ID(energy);
}

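// Per-frame setup: on frames that refresh the segmentation (intra-only or
// error-resilient frames, altref/golden updates, or a forced update), enable
// segmentation and program a SEG_LVL_ALT_Q index delta for every segment
// whose rate_ratio differs from 1.0.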
void vp9_vaq_frame_setup(VP9_COMP *cpi) {
  VP9_COMMON *cm = &cpi->common;
  struct segmentation *seg = &cm->seg;
  int i;

  if (frame_is_intra_only(cm) || cm->error_resilient_mode ||
      cpi->refresh_alt_ref_frame || cpi->force_update_segmentation ||
      (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
    vp9_enable_segmentation(seg);
    vp9_clearall_segfeatures(seg);

    seg->abs_delta = SEGMENT_DELTADATA;

    vpx_clear_system_state();

    for (i = 0; i < MAX_SEGMENTS; ++i) {
      int qindex_delta =
          vp9_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
                                     rate_ratio[i], cm->bit_depth);

      // We don't allow qindex 0 in a segment if the base value is not 0.
      // Q index 0 (lossless) implies 4x4 encoding only and in AQ mode a segment
      // Q delta is sometimes applied without going back around the rd loop.
      // This could lead to an illegal combination of partition size and q.
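      // For example, a base_qindex of 40 with a computed qindex_delta of -40
      // would land on q index 0, so the delta is adjusted to -39 and the
      // segment stays at q index 1.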
      if ((cm->base_qindex != 0) && ((cm->base_qindex + qindex_delta) == 0)) {
        qindex_delta = -cm->base_qindex + 1;
      }

      // No need to enable SEG_LVL_ALT_Q for this segment.
      if (rate_ratio[i] == 1.0) {
        continue;
      }

      vp9_set_segdata(seg, i, SEG_LVL_ALT_Q, qindex_delta);
      vp9_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
    }
  }
}

/* TODO(agrange, paulwilkins): The block_variance calls the unoptimized versions
 * of variance() and highbd_8_variance(). It should not.
 */
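// Sum and sum-of-squares of the pixel differences between two w x h blocks;
// with an all-zero second block this yields the raw source statistics from
// which the block variance is formed.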
static void aq_variance(const uint8_t *a, int a_stride, const uint8_t *b,
                        int b_stride, int w, int h, unsigned int *sse,
                        int *sum) {
  int i, j;

  *sum = 0;
  *sse = 0;

  for (i = 0; i < h; i++) {
    for (j = 0; j < w; j++) {
      const int diff = a[j] - b[j];
      *sum += diff;
      *sse += diff * diff;
    }

    a += a_stride;
    b += b_stride;
  }
}

#if CONFIG_VP9_HIGHBITDEPTH
static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
                                 const uint8_t *b8, int b_stride, int w, int h,
                                 uint64_t *sse, uint64_t *sum) {
  int i, j;

  uint16_t *a = CONVERT_TO_SHORTPTR(a8);
  uint16_t *b = CONVERT_TO_SHORTPTR(b8);
  *sum = 0;
  *sse = 0;

  for (i = 0; i < h; i++) {
    for (j = 0; j < w; j++) {
      const int diff = a[j] - b[j];
      *sum += diff;
      *sse += diff * diff;
    }
    a += a_stride;
    b += b_stride;
  }
}

static void aq_highbd_8_variance(const uint8_t *a8, int a_stride,
                                 const uint8_t *b8, int b_stride, int w, int h,
                                 unsigned int *sse, int *sum) {
  uint64_t sse_long = 0;
  uint64_t sum_long = 0;
  aq_highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
  *sse = (unsigned int)sse_long;
  *sum = (int)sum_long;
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

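// Average per-pixel variance of the source block, scaled by 256. Blocks that
// extend past the right or bottom edge of the frame are cropped to the
// visible area and measured with the plain C helpers above; full blocks use
// the optimized variance function pointer.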
static unsigned int block_variance(VP9_COMP *cpi, MACROBLOCK *x,
                                   BLOCK_SIZE bs) {
  MACROBLOCKD *xd = &x->e_mbd;
  unsigned int var, sse;
  int right_overflow =
      (xd->mb_to_right_edge < 0) ? ((-xd->mb_to_right_edge) >> 3) : 0;
  int bottom_overflow =
      (xd->mb_to_bottom_edge < 0) ? ((-xd->mb_to_bottom_edge) >> 3) : 0;

  if (right_overflow || bottom_overflow) {
    const int bw = 8 * num_8x8_blocks_wide_lookup[bs] - right_overflow;
    const int bh = 8 * num_8x8_blocks_high_lookup[bs] - bottom_overflow;
    int avg;
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      aq_highbd_8_variance(x->plane[0].src.buf, x->plane[0].src.stride,
                           CONVERT_TO_BYTEPTR(vp9_highbd_64_zeros), 0, bw, bh,
                           &sse, &avg);
      sse >>= 2 * (xd->bd - 8);
      avg >>= (xd->bd - 8);
    } else {
      aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp9_64_zeros, 0,
                  bw, bh, &sse, &avg);
    }
#else
    aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp9_64_zeros, 0,
                bw, bh, &sse, &avg);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    var = sse - (unsigned int)(((int64_t)avg * avg) / (bw * bh));
    return (unsigned int)(((uint64_t)256 * var) / (bw * bh));
  } else {
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      var =
          cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
                             CONVERT_TO_BYTEPTR(vp9_highbd_64_zeros), 0, &sse);
    } else {
      var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
                               vp9_64_zeros, 0, &sse);
    }
#else
    var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
                             vp9_64_zeros, 0, &sse);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    return (unsigned int)(((uint64_t)256 * var) >> num_pels_log2_lookup[bs]);
  }
}

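// Natural log of the (scaled) block variance; the +1.0 keeps log() defined
// for zero-variance blocks.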
double vp9_log_block_var(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
  unsigned int var = block_variance(cpi, x, bs);
  vpx_clear_system_state();
  return log(var + 1.0);
}

// Get the min and max energy over the 8x8 sub-blocks of the given block.
// Blocks that straddle the frame edge are evaluated as a single block.
void vp9_get_sub_block_energy(VP9_COMP *cpi, MACROBLOCK *mb, int mi_row,
                              int mi_col, BLOCK_SIZE bsize, int *min_e,
                              int *max_e) {
  VP9_COMMON *const cm = &cpi->common;
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  const int bh = num_8x8_blocks_high_lookup[bsize];
  const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
  const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
  int x, y;

  if (xmis < bw || ymis < bh) {
    vp9_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
    *min_e = vp9_block_energy(cpi, mb, bsize);
    *max_e = *min_e;
  } else {
    int energy;
    *min_e = ENERGY_MAX;
    *max_e = ENERGY_MIN;

    for (y = 0; y < ymis; ++y) {
      for (x = 0; x < xmis; ++x) {
        vp9_setup_src_planes(mb, cpi->Source, mi_row + y, mi_col + x);
        energy = vp9_block_energy(cpi, mb, BLOCK_8X8);
        *min_e = VPXMIN(*min_e, energy);
        *max_e = VPXMAX(*max_e, energy);
      }
    }
  }

  // Re-instate source pointers back to what they should have been on entry.
  vp9_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
}

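// Block energy: the log block variance relative to a midpoint (the two-pass
// average MB energy when available, otherwise DEFAULT_E_MIDPOINT), rounded
// and clamped to [ENERGY_MIN, ENERGY_MAX].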
#define DEFAULT_E_MIDPOINT 10.0
int vp9_block_energy(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
  double energy;
  double energy_midpoint;
  vpx_clear_system_state();
  energy_midpoint =
      (cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT;
  energy = vp9_log_block_var(cpi, x, bs) - energy_midpoint;
  return clamp((int)round(energy), ENERGY_MIN, ENERGY_MAX);
}