1 // Copyright 2011 Google Inc.
2 //
3 // This code is licensed under the same terms as WebM:
4 // Software License Agreement: http://www.webmproject.org/license/software/
5 // Additional IP Rights Grant: http://www.webmproject.org/license/additional/
6 // -----------------------------------------------------------------------------
7 //
8 // Macroblock analysis
9 //
10 // Author: Skal (pascal.massimino@gmail.com)
11
12 #include <stdlib.h>
13 #include <string.h>
14 #include <assert.h>
15
16 #include "vp8enci.h"
17 #include "cost.h"
18
19 #if defined(__cplusplus) || defined(c_plusplus)
20 extern "C" {
21 #endif
22
23 #define MAX_ITERS_K_MEANS 6
24
// Clamp a susceptibility value to the representable [0, 255] range.
static int ClipAlpha(int alpha) {
  if (alpha < 0) return 0;
  if (alpha > 255) return 255;
  return alpha;
}
28
29 //-----------------------------------------------------------------------------
30 // Smooth the segment map by replacing isolated block by the majority of its
31 // neighbours.
32
// Replaces each interior macroblock's segment id by the majority value among
// its 8 neighbours, when at least 5 of them agree. Border macroblocks
// (x == 0, x == w-1, y == 0, y == h-1) are left untouched. Uses a scratch
// buffer so the second pass reads only original (un-smoothed) values.
static void SmoothSegmentMap(VP8Encoder* const enc) {
  int n, x, y;
  const int w = enc->mb_w_;
  const int h = enc->mb_h_;
  const int majority_cnt_3_x_3_grid = 5;  // strict majority of the 8 neighbours
  uint8_t* tmp = (uint8_t*)malloc(w * h * sizeof(uint8_t));

  if (tmp == NULL) return;  // out of memory: silently skip smoothing (best-effort)
  // Pass 1: compute the smoothed segment for every interior macroblock into tmp[].
  for (y = 1; y < h - 1; ++y) {
    for (x = 1; x < w - 1; ++x) {
      int cnt[NUM_MB_SEGMENTS] = { 0 };
      const VP8MBInfo* const mb = &enc->mb_info_[x + w * y];
      int majority_seg = mb->segment_;  // default: keep the current segment
      // Check the 8 neighbouring segment values.
      cnt[mb[-w - 1].segment_]++;  // top-left
      cnt[mb[-w + 0].segment_]++;  // top
      cnt[mb[-w + 1].segment_]++;  // top-right
      cnt[mb[ - 1].segment_]++;  // left
      cnt[mb[ + 1].segment_]++;  // right
      cnt[mb[ w - 1].segment_]++;  // bottom-left
      cnt[mb[ w + 0].segment_]++;  // bottom
      cnt[mb[ w + 1].segment_]++;  // bottom-right
      for (n = 0; n < NUM_MB_SEGMENTS; ++n) {
        if (cnt[n] >= majority_cnt_3_x_3_grid) {
          majority_seg = n;  // at most one segment can reach 5 of 8 votes
        }
      }
      tmp[x + y * w] = majority_seg;
    }
  }
  // Pass 2: commit the smoothed map back into mb_info_.
  for (y = 1; y < h - 1; ++y) {
    for (x = 1; x < w - 1; ++x) {
      VP8MBInfo* const mb = &enc->mb_info_[x + w * y];
      mb->segment_ = tmp[x + y * w];
    }
  }
  free(tmp);
}
71
72 //-----------------------------------------------------------------------------
73 // Finalize Segment probability based on the coding tree
74
// Returns the rounded probability (in [0, 255]) of picking 'a' over 'b',
// given their respective occurrence counts. 255 is the codec's default
// probability, returned when no sample was observed.
static int GetProba(int a, int b) {
  const int total = a + b;
  if (total == 0) return 255;   // that's the default probability.
  return (255 * a + total / 2) / total;   // rounded to nearest
}
82
// Computes the probabilities of the 3 nodes of the segment-id coding tree
// from the per-segment macroblock counts, and the total bit-cost of coding
// the whole segment map. Also exports the per-segment sizes to stats.
static void SetSegmentProbas(VP8Encoder* const enc) {
  int count[NUM_MB_SEGMENTS] = { 0 };
  int n;

  // Histogram the segment ids over all macroblocks.
  for (n = 0; n < enc->mb_w_ * enc->mb_h_; ++n) {
    count[enc->mb_info_[n].segment_]++;
  }
  if (enc->pic_->stats != NULL) {
    for (n = 0; n < NUM_MB_SEGMENTS; ++n) {
      enc->pic_->stats->segment_size[n] = count[n];
    }
  }
  if (enc->segment_hdr_.num_segments_ > 1) {
    uint8_t* const probas = enc->proba_.segments_;
    // Tree: root splits {0,1} vs {2,3}; leaves split within each pair.
    probas[0] = GetProba(count[0] + count[1], count[2] + count[3]);
    probas[1] = GetProba(count[0], count[1]);
    probas[2] = GetProba(count[2], count[3]);

    // The map only needs transmitting if some proba differs from the default.
    enc->segment_hdr_.update_map_ =
        (probas[0] != 255) || (probas[1] != 255) || (probas[2] != 255);
    // Total cost: each segment id costs two binary decisions down the tree.
    enc->segment_hdr_.size_ =
        count[0] * (VP8BitCost(0, probas[0]) + VP8BitCost(0, probas[1])) +
        count[1] * (VP8BitCost(0, probas[0]) + VP8BitCost(1, probas[1])) +
        count[2] * (VP8BitCost(1, probas[0]) + VP8BitCost(0, probas[2])) +
        count[3] * (VP8BitCost(1, probas[0]) + VP8BitCost(1, probas[2]));
  } else {
    enc->segment_hdr_.update_map_ = 0;
    enc->segment_hdr_.size_ = 0;
  }
}
114
// Clamp 'v' into the closed interval [m, M].
static inline int clip(int v, int m, int M) {
  if (v < m) return m;
  if (v > M) return M;
  return v;
}
118
// Derives each segment's alpha_ (signed deviation from 'mid', in [-127, 127])
// and beta_ (position within the centers' span, in [0, 255]) from the final
// k-means centers.
static void SetSegmentAlphas(VP8Encoder* const enc,
                             const int centers[NUM_MB_SEGMENTS],
                             int mid) {
  const int num_segments = enc->segment_hdr_.num_segments_;
  int lo = centers[0], hi = centers[0];
  int n;

  // Locate the extrema of the active centers.
  if (num_segments > 1) {
    for (n = 0; n < num_segments; ++n) {
      if (centers[n] < lo) lo = centers[n];
      if (centers[n] > hi) hi = centers[n];
    }
  }
  if (hi == lo) hi = lo + 1;   // avoid a zero divisor below
  assert(mid <= hi && mid >= lo);
  for (n = 0; n < num_segments; ++n) {
    const int alpha = 255 * (centers[n] - mid) / (hi - lo);
    const int beta = 255 * (centers[n] - lo) / (hi - lo);
    enc->dqm_[n].alpha_ = clip(alpha, -127, 127);
    enc->dqm_[n].beta_ = clip(beta, 0, 255);
  }
}
141
142 //-----------------------------------------------------------------------------
143 // Simplified k-Means, to assign Nb segments based on alpha-histogram
144
// Runs a simplified 1-D k-means over the alpha histogram to pick 'nb'
// segment centers, assigns each macroblock to its nearest center, optionally
// smooths the resulting segment map, then finalizes segment probabilities
// and per-segment alpha/beta quantization hints.
static void AssignSegments(VP8Encoder* const enc, const int alphas[256]) {
  const int nb = enc->segment_hdr_.num_segments_;
  int centers[NUM_MB_SEGMENTS];
  int weighted_average;           // overall centroid, used as the 'mid' anchor
  int map[256];                   // alpha value -> segment index
  int a, n, k;
  int min_a = 0, max_a = 255, range_a;
  // 'int' type is ok for histo, and won't overflow
  int accum[NUM_MB_SEGMENTS], dist_accum[NUM_MB_SEGMENTS];

  // bracket the input: [min_a, max_a] is the non-empty range of the histogram
  for (n = 0; n < 256 && alphas[n] == 0; ++n) {}
  min_a = n;
  for (n = 255; n > min_a && alphas[n] == 0; --n) {}
  max_a = n;
  range_a = max_a - min_a;

  // Spread initial centers evenly (at odd 2k+1 positions of a 2*nb split)
  for (n = 1, k = 0; n < 2 * nb; n += 2) {
    centers[k++] = min_a + (n * range_a) / (2 * nb);
  }

  for (k = 0; k < MAX_ITERS_K_MEANS; ++k) {     // few iters are enough
    int total_weight;
    int displaced;    // total center motion this iteration (convergence test)
    // Reset stats
    for (n = 0; n < nb; ++n) {
      accum[n] = 0;
      dist_accum[n] = 0;
    }
    // Assign nearest center for each 'a'. Since 'a' is scanned in increasing
    // order and centers are ordered, 'n' only ever needs to move forward.
    n = 0;    // track the nearest center for current 'a'
    for (a = min_a; a <= max_a; ++a) {
      if (alphas[a]) {
        while (n < nb - 1 && abs(a - centers[n + 1]) < abs(a - centers[n])) {
          n++;
        }
        map[a] = n;
        // accumulate contribution into best centroid
        dist_accum[n] += a * alphas[a];
        accum[n] += alphas[a];
      }
    }
    // All point are classified. Move the centroids to the
    // center of their respective cloud. Empty clusters keep their old center.
    displaced = 0;
    weighted_average = 0;
    total_weight = 0;
    for (n = 0; n < nb; ++n) {
      if (accum[n]) {
        const int new_center = (dist_accum[n] + accum[n] / 2) / accum[n];
        displaced += abs(centers[n] - new_center);
        centers[n] = new_center;
        weighted_average += new_center * accum[n];
        total_weight += accum[n];
      }
    }
    // NOTE(review): total_weight is non-zero as long as alphas[] has at least
    // one non-zero bin (which MBAnalyze guarantees) — verify against callers.
    weighted_average = (weighted_average + total_weight / 2) / total_weight;
    if (displaced < 5) break;   // no need to keep on looping...
  }

  // Map each original value to the closest centroid
  for (n = 0; n < enc->mb_w_ * enc->mb_h_; ++n) {
    VP8MBInfo* const mb = &enc->mb_info_[n];
    const int a = mb->alpha_;
    mb->segment_ = map[a];
    mb->alpha_ = centers[map[a]];     // just for the record.
  }

  if (nb > 1) {
    // Preprocessing bit #0 requests segment-map smoothing.
    const int smooth = (enc->config_->preprocessing & 1);
    if (smooth) SmoothSegmentMap(enc);
  }

  SetSegmentProbas(enc);                              // Assign final proba
  SetSegmentAlphas(enc, centers, weighted_average);   // pick some alphas.
}
222
223 //-----------------------------------------------------------------------------
224 // Macroblock analysis: collect histogram for each mode, deduce the maximal
225 // susceptibility and set best modes for this macroblock.
226 // Segment assignment is done later.
227
228 // Number of modes to inspect for alpha_ evaluation. For high-quality settings,
229 // we don't need to test all the possible modes during the analysis phase.
230 #define MAX_INTRA16_MODE 2
231 #define MAX_INTRA4_MODE 2
232 #define MAX_UV_MODE 2
233
// Evaluates the intra16 luma prediction modes and keeps the one with the
// highest histogram-based susceptibility score. Records the winning mode in
// the iterator and returns its score.
static int MBAnalyzeBestIntra16Mode(VP8EncIterator* const it) {
  // High-quality methods probe only the first few modes during analysis.
  const int max_mode = (it->enc_->method_ >= 3) ? MAX_INTRA16_MODE : 4;
  int best_alpha = -1;
  int best_mode = 0;
  int mode;

  VP8MakeLuma16Preds(it);   // build all intra16 predictions first
  for (mode = 0; mode < max_mode; ++mode) {
    const int alpha = VP8CollectHistogram(
        it->yuv_in_ + Y_OFF, it->yuv_p_ + VP8I16ModeOffsets[mode], 0, 16);
    if (alpha > best_alpha) {
      best_alpha = alpha;
      best_mode = mode;
    }
  }
  VP8SetIntra16Mode(it, best_mode);
  return best_alpha;
}
253
// Evaluates intra4 prediction per 4x4 sub-block, accumulating the best
// per-block susceptibility. If the summed intra4 score beats 'best_alpha'
// (the intra16 score), the intra4 modes are committed and the clipped sum
// is returned; otherwise 'best_alpha' is returned unchanged.
static int MBAnalyzeBestIntra4Mode(VP8EncIterator* const it,
                                   int best_alpha) {
  int modes[16];
  const int max_mode = (it->enc_->method_ >= 3) ? MAX_INTRA4_MODE : NUM_BMODES;
  int i4_alpha = 0;
  VP8IteratorStartI4(it);
  do {
    int m;
    int sub_best = -1;   // best score for this 4x4 block
    const uint8_t* const src = it->yuv_in_ + Y_OFF + VP8Scan[it->i4_];

    VP8MakeIntra4Preds(it);
    for (m = 0; m < max_mode; ++m) {
      const int alpha =
          VP8CollectHistogram(src, it->yuv_p_ + VP8I4ModeOffsets[m], 0, 1);
      if (alpha > sub_best) {
        sub_best = alpha;
        modes[it->i4_] = m;
      }
    }
    i4_alpha += sub_best;
    // Note: we reuse the original samples for predictors
  } while (VP8IteratorRotateI4(it, it->yuv_in_ + Y_OFF));

  if (i4_alpha > best_alpha) {
    VP8SetIntra4Mode(it, modes);
    best_alpha = ClipAlpha(i4_alpha);
  }
  return best_alpha;
}
285
// Evaluates the chroma (UV) prediction modes and keeps the one with the
// highest histogram-based susceptibility score. Records the winning mode in
// the iterator and returns its score.
static int MBAnalyzeBestUVMode(VP8EncIterator* const it) {
  const int max_mode = (it->enc_->method_ >= 3) ? MAX_UV_MODE : 4;
  int best_alpha = -1;
  int best_mode = 0;
  int mode;

  VP8MakeChroma8Preds(it);   // build all chroma predictions first
  for (mode = 0; mode < max_mode; ++mode) {
    const int alpha = VP8CollectHistogram(
        it->yuv_in_ + U_OFF, it->yuv_p_ + VP8UVModeOffsets[mode],
        16, 16 + 4 + 4);
    if (alpha > best_alpha) {
      best_alpha = alpha;
      best_mode = mode;
    }
  }
  VP8SetIntraUVMode(it, best_mode);
  return best_alpha;
}
304
// Analyzes one macroblock: picks provisional intra modes, mixes the luma and
// chroma susceptibility scores, and records the result into the 'alphas'
// histogram and the running '*uv_alpha' accumulator.
static void MBAnalyze(VP8EncIterator* const it,
                      int alphas[256], int* const uv_alpha) {
  const VP8Encoder* const enc = it->enc_;
  int luma_alpha, chroma_alpha, final_alpha;

  // Seed the macroblock with neutral defaults before analysis.
  VP8SetIntra16Mode(it, 0);    // default: Intra16, DC_PRED
  VP8SetSkip(it, 0);           // not skipped
  VP8SetSegment(it, 0);        // default segment, spec-wise.

  luma_alpha = MBAnalyzeBestIntra16Mode(it);
  if (enc->method_ != 3) {
    // We go and make a fast decision for intra4/intra16.
    // It's usually not a good and definitive pick, but helps seeding the stats
    // about level bit-cost.
    // TODO(skal): improve criterion.
    luma_alpha = MBAnalyzeBestIntra4Mode(it, luma_alpha);
  }
  chroma_alpha = MBAnalyzeBestUVMode(it);

  // Final susceptibility mix: rounded mean of luma and chroma scores.
  final_alpha = (luma_alpha + chroma_alpha + 1) / 2;
  alphas[final_alpha]++;
  *uv_alpha += chroma_alpha;
  it->mb_->alpha_ = final_alpha;   // Informative only.
}
330
331 //-----------------------------------------------------------------------------
332 // Main analysis loop:
333 // Collect all susceptibilities for each macroblock and record their
334 // distribution in alphas[]. Segments is assigned a-posteriori, based on
335 // this histogram.
336 // We also pick an intra16 prediction mode, which shouldn't be considered
337 // final except for fast-encode settings. We can also pick some intra4 modes
338 // and decide intra4/intra16, but that's usually almost always a bad choice at
339 // this stage.
340
// Main analysis entry point: scans every macroblock, builds the alpha
// histogram, averages the chroma susceptibility, then assigns segments from
// the histogram. Always returns 1 (success).
int VP8EncAnalyze(VP8Encoder* const enc) {
  int alpha_histogram[256] = { 0 };
  VP8EncIterator iterator;

  VP8IteratorInit(enc, &iterator);
  enc->uv_alpha_ = 0;
  do {
    VP8IteratorImport(&iterator);
    MBAnalyze(&iterator, alpha_histogram, &enc->uv_alpha_);
    // Let's pretend we have perfect lossless reconstruction.
  } while (VP8IteratorNext(&iterator, iterator.yuv_in_));
  enc->uv_alpha_ /= enc->mb_w_ * enc->mb_h_;   // average over all macroblocks
  AssignSegments(enc, alpha_histogram);

  return 1;
}
357
358 #if defined(__cplusplus) || defined(c_plusplus)
359 } // extern "C"
360 #endif
361