// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Macroblock analysis
//
// Author: Skal (pascal.massimino@gmail.com)

#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "src/enc/vp8i_enc.h"
#include "src/enc/cost_enc.h"
#include "src/utils/utils.h"

#define MAX_ITERS_K_MEANS 6

//------------------------------------------------------------------------------
// Smooth the segment map by replacing isolated blocks by the majority of their
// neighbours.

static void SmoothSegmentMap(VP8Encoder* const enc) {
  int n, x, y;
  const int w = enc->mb_w_;
  const int h = enc->mb_h_;
  const int majority_cnt_3_x_3_grid = 5;
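  // A neighbouring segment value is adopted only if it appears in at least
  // 5 of the 8 surrounding blocks, i.e. a majority of the 3x3 neighbourhood.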
  uint8_t* const tmp = (uint8_t*)WebPSafeMalloc(w * h, sizeof(*tmp));
  assert((uint64_t)(w * h) == (uint64_t)w * h);   // no overflow, as per spec

  if (tmp == NULL) return;
  for (y = 1; y < h - 1; ++y) {
    for (x = 1; x < w - 1; ++x) {
      int cnt[NUM_MB_SEGMENTS] = { 0 };
      const VP8MBInfo* const mb = &enc->mb_info_[x + w * y];
      int majority_seg = mb->segment_;
      // Check the 8 neighbouring segment values.
      cnt[mb[-w - 1].segment_]++;  // top-left
      cnt[mb[-w + 0].segment_]++;  // top
      cnt[mb[-w + 1].segment_]++;  // top-right
      cnt[mb[   - 1].segment_]++;  // left
      cnt[mb[   + 1].segment_]++;  // right
      cnt[mb[ w - 1].segment_]++;  // bottom-left
      cnt[mb[ w + 0].segment_]++;  // bottom
      cnt[mb[ w + 1].segment_]++;  // bottom-right
      for (n = 0; n < NUM_MB_SEGMENTS; ++n) {
        if (cnt[n] >= majority_cnt_3_x_3_grid) {
          majority_seg = n;
          break;
        }
      }
      tmp[x + y * w] = majority_seg;
    }
  }
  for (y = 1; y < h - 1; ++y) {
    for (x = 1; x < w - 1; ++x) {
      VP8MBInfo* const mb = &enc->mb_info_[x + w * y];
      mb->segment_ = tmp[x + y * w];
    }
  }
  WebPSafeFree(tmp);
}

//------------------------------------------------------------------------------
// set segment susceptibility alpha_ / beta_

static WEBP_INLINE int clip(int v, int m, int M) {
  return (v < m) ? m : (v > M) ? M : v;
}

static void SetSegmentAlphas(VP8Encoder* const enc,
                             const int centers[NUM_MB_SEGMENTS],
                             int mid) {
  const int nb = enc->segment_hdr_.num_segments_;
  int min = centers[0], max = centers[0];
  int n;

  if (nb > 1) {
    for (n = 0; n < nb; ++n) {
      if (min > centers[n]) min = centers[n];
      if (max < centers[n]) max = centers[n];
    }
  }
  if (max == min) max = min + 1;
  assert(mid <= max && mid >= min);
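  // alpha_ is signed and centered on 'mid'; beta_ is the position of the
  // center within [min, max], rescaled to [0, 255].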
  for (n = 0; n < nb; ++n) {
    const int alpha = 255 * (centers[n] - mid) / (max - min);
    const int beta = 255 * (centers[n] - min) / (max - min);
    enc->dqm_[n].alpha_ = clip(alpha, -127, 127);
    enc->dqm_[n].beta_ = clip(beta, 0, 255);
  }
}

//------------------------------------------------------------------------------
// Compute susceptibility based on DCT-coeff histograms:
// the higher, the "easier" the macroblock is to compress.

#define MAX_ALPHA 255                // 8b of precision for susceptibilities.
#define ALPHA_SCALE (2 * MAX_ALPHA)  // scaling factor for alpha.
#define DEFAULT_ALPHA (-1)
#define IS_BETTER_ALPHA(alpha, best_alpha) ((alpha) > (best_alpha))

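// Invert the raw alpha so that, per the note above, a larger final value
// corresponds to an "easier" (more compressible) macroblock.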
static int FinalAlphaValue(int alpha) {
  alpha = MAX_ALPHA - alpha;
  return clip(alpha, 0, MAX_ALPHA);
}

static int GetAlpha(const VP8Histogram* const histo) {
  // 'alpha' will later be clipped to [0..MAX_ALPHA] range, clamping outer
  // values which happen to be mostly noise. This leaves the maximum precision
  // for handling the useful small values which contribute most.
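  // The raw alpha grows with the spread of the histogram (last_non_zero)
  // relative to its peak count (max_value).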
  const int max_value = histo->max_value;
  const int last_non_zero = histo->last_non_zero;
  const int alpha =
      (max_value > 1) ? ALPHA_SCALE * last_non_zero / max_value : 0;
  return alpha;
}

static void InitHistogram(VP8Histogram* const histo) {
  histo->max_value = 0;
  histo->last_non_zero = 1;
}

static void MergeHistograms(const VP8Histogram* const in,
                            VP8Histogram* const out) {
  if (in->max_value > out->max_value) {
    out->max_value = in->max_value;
  }
  if (in->last_non_zero > out->last_non_zero) {
    out->last_non_zero = in->last_non_zero;
  }
}

//------------------------------------------------------------------------------
// Simplified k-Means, to assign Nb segments based on alpha-histogram

static void AssignSegments(VP8Encoder* const enc,
                           const int alphas[MAX_ALPHA + 1]) {
  // 'num_segments_' is previously validated and <= NUM_MB_SEGMENTS, but an
  // explicit check is needed to avoid spurious warning about 'n + 1' exceeding
  // array bounds of 'centers' with some compilers (noticed with gcc-4.9).
  const int nb = (enc->segment_hdr_.num_segments_ < NUM_MB_SEGMENTS) ?
                 enc->segment_hdr_.num_segments_ : NUM_MB_SEGMENTS;
  int centers[NUM_MB_SEGMENTS];
  int weighted_average = 0;
  int map[MAX_ALPHA + 1];
  int a, n, k;
  int min_a = 0, max_a = MAX_ALPHA, range_a;
  // 'int' type is ok for histo, and won't overflow
  int accum[NUM_MB_SEGMENTS], dist_accum[NUM_MB_SEGMENTS];

  assert(nb >= 1);
  assert(nb <= NUM_MB_SEGMENTS);

  // bracket the input
  for (n = 0; n <= MAX_ALPHA && alphas[n] == 0; ++n) {}
  min_a = n;
  for (n = MAX_ALPHA; n > min_a && alphas[n] == 0; --n) {}
  max_a = n;
  range_a = max_a - min_a;

  // Spread initial centers evenly
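  // The k-th center is placed at the midpoint of the k-th of 'nb' equal-width
  // bins covering [min_a, max_a].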
  for (k = 0, n = 1; k < nb; ++k, n += 2) {
    assert(n < 2 * nb);
    centers[k] = min_a + (n * range_a) / (2 * nb);
  }

  for (k = 0; k < MAX_ITERS_K_MEANS; ++k) {     // few iters are enough
    int total_weight;
    int displaced;
    // Reset stats
    for (n = 0; n < nb; ++n) {
      accum[n] = 0;
      dist_accum[n] = 0;
    }
    // Assign nearest center for each 'a'
    n = 0;    // track the nearest center for current 'a'
    for (a = min_a; a <= max_a; ++a) {
      if (alphas[a]) {
        while (n + 1 < nb && abs(a - centers[n + 1]) < abs(a - centers[n])) {
          n++;
        }
        map[a] = n;
        // accumulate contribution into best centroid
        dist_accum[n] += a * alphas[a];
        accum[n] += alphas[a];
      }
    }
    // All points are classified. Move the centroids to the
    // center of their respective cloud.
    displaced = 0;
    weighted_average = 0;
    total_weight = 0;
    for (n = 0; n < nb; ++n) {
      if (accum[n]) {
        const int new_center = (dist_accum[n] + accum[n] / 2) / accum[n];
        displaced += abs(centers[n] - new_center);
        centers[n] = new_center;
        weighted_average += new_center * accum[n];
        total_weight += accum[n];
      }
    }
    weighted_average = (weighted_average + total_weight / 2) / total_weight;
    if (displaced < 5) break;   // no need to keep on looping...
  }

  // Map each original value to the closest centroid
  for (n = 0; n < enc->mb_w_ * enc->mb_h_; ++n) {
    VP8MBInfo* const mb = &enc->mb_info_[n];
    const int alpha = mb->alpha_;
    mb->segment_ = map[alpha];
    mb->alpha_ = centers[map[alpha]];     // for the record.
  }

  if (nb > 1) {
    const int smooth = (enc->config_->preprocessing & 1);
    if (smooth) SmoothSegmentMap(enc);
  }

  SetSegmentAlphas(enc, centers, weighted_average);  // pick some alphas.
}

//------------------------------------------------------------------------------
// Macroblock analysis: collect histogram for each mode, deduce the maximal
// susceptibility and set best modes for this macroblock.
// Segment assignment is done later.

// Number of modes to inspect for alpha_ evaluation. We don't need to test all
// the possible modes during the analysis phase: we risk falling into a local
// optimum, or being subject to boundary effects.
#define MAX_INTRA16_MODE 2
#define MAX_INTRA4_MODE  2
#define MAX_UV_MODE      2

static int MBAnalyzeBestIntra16Mode(VP8EncIterator* const it) {
  const int max_mode = MAX_INTRA16_MODE;
  int mode;
  int best_alpha = DEFAULT_ALPHA;
  int best_mode = 0;

  VP8MakeLuma16Preds(it);
  for (mode = 0; mode < max_mode; ++mode) {
    VP8Histogram histo;
    int alpha;

    InitHistogram(&histo);
    VP8CollectHistogram(it->yuv_in_ + Y_OFF_ENC,
                        it->yuv_p_ + VP8I16ModeOffsets[mode],
                        0, 16, &histo);
    alpha = GetAlpha(&histo);
    if (IS_BETTER_ALPHA(alpha, best_alpha)) {
      best_alpha = alpha;
      best_mode = mode;
    }
  }
  VP8SetIntra16Mode(it, best_mode);
  return best_alpha;
}

static int FastMBAnalyze(VP8EncIterator* const it) {
  // Empirical cut-off value, should be around 16 (~=block size). We use the
  // [8-17] range and favor intra4 at high quality, intra16 for low quality.
  const int q = (int)it->enc_->config_->quality;
  const uint32_t kThreshold = 8 + (17 - 8) * q / 100;
  int k;
  uint32_t dc[16], m, m2;
  for (k = 0; k < 16; k += 4) {
    VP8Mean16x4(it->yuv_in_ + Y_OFF_ENC + k * BPS, &dc[k]);
  }
  for (m = 0, m2 = 0, k = 0; k < 16; ++k) {
    m += dc[k];
    m2 += dc[k] * dc[k];
  }
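  // 'm' is the sum of the 16 4x4 DC means and 'm2' the sum of their squares,
  // so m * m / m2 is at most 16 and largest when the DC values are all equal
  // (flat luma). A ratio above the quality-dependent threshold picks DC16.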
  if (kThreshold * m2 < m * m) {
    VP8SetIntra16Mode(it, 0);   // DC16
  } else {
    const uint8_t modes[16] = { 0 };  // DC4
    VP8SetIntra4Mode(it, modes);
  }
  return 0;
}

static int MBAnalyzeBestIntra4Mode(VP8EncIterator* const it,
                                   int best_alpha) {
  uint8_t modes[16];
  const int max_mode = MAX_INTRA4_MODE;
  int i4_alpha;
  VP8Histogram total_histo;
  int cur_histo = 0;
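  // histos[] below is used as a double buffer: one slot holds the best
  // histogram found so far, the other is scratch for the current mode.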
  InitHistogram(&total_histo);

  VP8IteratorStartI4(it);
  do {
    int mode;
    int best_mode_alpha = DEFAULT_ALPHA;
    VP8Histogram histos[2];
    const uint8_t* const src = it->yuv_in_ + Y_OFF_ENC + VP8Scan[it->i4_];

    VP8MakeIntra4Preds(it);
    for (mode = 0; mode < max_mode; ++mode) {
      int alpha;

      InitHistogram(&histos[cur_histo]);
      VP8CollectHistogram(src, it->yuv_p_ + VP8I4ModeOffsets[mode],
                          0, 1, &histos[cur_histo]);
      alpha = GetAlpha(&histos[cur_histo]);
      if (IS_BETTER_ALPHA(alpha, best_mode_alpha)) {
        best_mode_alpha = alpha;
        modes[it->i4_] = mode;
        cur_histo ^= 1;   // keep track of best histo so far.
      }
    }
    // accumulate best histogram
    MergeHistograms(&histos[cur_histo ^ 1], &total_histo);
    // Note: we reuse the original samples for predictors
  } while (VP8IteratorRotateI4(it, it->yuv_in_ + Y_OFF_ENC));

  i4_alpha = GetAlpha(&total_histo);
  if (IS_BETTER_ALPHA(i4_alpha, best_alpha)) {
    VP8SetIntra4Mode(it, modes);
    best_alpha = i4_alpha;
  }
  return best_alpha;
}

static int MBAnalyzeBestUVMode(VP8EncIterator* const it) {
  int best_alpha = DEFAULT_ALPHA;
  int smallest_alpha = 0;
  int best_mode = 0;
  const int max_mode = MAX_UV_MODE;
  int mode;

  VP8MakeChroma8Preds(it);
  for (mode = 0; mode < max_mode; ++mode) {
    VP8Histogram histo;
    int alpha;
    InitHistogram(&histo);
    VP8CollectHistogram(it->yuv_in_ + U_OFF_ENC,
                        it->yuv_p_ + VP8UVModeOffsets[mode],
                        16, 16 + 4 + 4, &histo);
    alpha = GetAlpha(&histo);
    if (IS_BETTER_ALPHA(alpha, best_alpha)) {
      best_alpha = alpha;
    }
    // The best prediction mode tends to be the one with the smallest alpha.
    if (mode == 0 || alpha < smallest_alpha) {
      smallest_alpha = alpha;
      best_mode = mode;
    }
  }
  VP8SetIntraUVMode(it, best_mode);
  return best_alpha;
}

static void MBAnalyze(VP8EncIterator* const it,
                      int alphas[MAX_ALPHA + 1],
                      int* const alpha, int* const uv_alpha) {
  const VP8Encoder* const enc = it->enc_;
  int best_alpha, best_uv_alpha;

  VP8SetIntra16Mode(it, 0);  // default: Intra16, DC_PRED
  VP8SetSkip(it, 0);         // not skipped
  VP8SetSegment(it, 0);      // default segment, spec-wise.

  if (enc->method_ <= 1) {
    best_alpha = FastMBAnalyze(it);
  } else {
    best_alpha = MBAnalyzeBestIntra16Mode(it);
    if (enc->method_ >= 5) {
      // We go and make a fast decision for intra4/intra16.
      // It's usually not a good and definitive pick, but helps seeding the
      // stats about level bit-cost.
      // TODO(skal): improve criterion.
      best_alpha = MBAnalyzeBestIntra4Mode(it, best_alpha);
    }
  }
  best_uv_alpha = MBAnalyzeBestUVMode(it);

  // Final susceptibility mix
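  // 3/4 luma + 1/4 chroma, with rounding.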
  best_alpha = (3 * best_alpha + best_uv_alpha + 2) >> 2;
  best_alpha = FinalAlphaValue(best_alpha);
  alphas[best_alpha]++;
  it->mb_->alpha_ = best_alpha;   // for later remapping.

  // Accumulate for later complexity analysis.
  *alpha += best_alpha;      // mixed susceptibility (not just luma)
  *uv_alpha += best_uv_alpha;
}

static void DefaultMBInfo(VP8MBInfo* const mb) {
  mb->type_ = 1;     // I16x16
  mb->uv_mode_ = 0;
  mb->skip_ = 0;     // not skipped
  mb->segment_ = 0;  // default segment
  mb->alpha_ = 0;
}

//------------------------------------------------------------------------------
// Main analysis loop:
// Collect all susceptibilities for each macroblock and record their
// distribution in alphas[]. Segments are assigned a posteriori, based on
// this histogram.
// We also pick an intra16 prediction mode, which shouldn't be considered
// final except for fast-encode settings. We can also pick some intra4 modes
// and decide intra4/intra16, but that's almost always a bad choice at
// this stage.

static void ResetAllMBInfo(VP8Encoder* const enc) {
  int n;
  for (n = 0; n < enc->mb_w_ * enc->mb_h_; ++n) {
    DefaultMBInfo(&enc->mb_info_[n]);
  }
  // Default susceptibilities.
  enc->dqm_[0].alpha_ = 0;
  enc->dqm_[0].beta_ = 0;
  // Note: we can't compute this alpha_ / uv_alpha_ -> set to default value.
  enc->alpha_ = 0;
  enc->uv_alpha_ = 0;
  WebPReportProgress(enc->pic_, enc->percent_ + 20, &enc->percent_);
}

// struct used to collect job result
typedef struct {
  WebPWorker worker;
  int alphas[MAX_ALPHA + 1];
  int alpha, uv_alpha;
  VP8EncIterator it;
  int delta_progress;
} SegmentJob;

// main work call
static int DoSegmentsJob(void* arg1, void* arg2) {
  SegmentJob* const job = (SegmentJob*)arg1;
  VP8EncIterator* const it = (VP8EncIterator*)arg2;
  int ok = 1;
  if (!VP8IteratorIsDone(it)) {
    uint8_t tmp[32 + WEBP_ALIGN_CST];
    uint8_t* const scratch = (uint8_t*)WEBP_ALIGN(tmp);
    do {
      // Let's pretend we have perfect lossless reconstruction.
      VP8IteratorImport(it, scratch);
      MBAnalyze(it, job->alphas, &job->alpha, &job->uv_alpha);
      ok = VP8IteratorProgress(it, job->delta_progress);
    } while (ok && VP8IteratorNext(it));
  }
  return ok;
}

static void MergeJobs(const SegmentJob* const src, SegmentJob* const dst) {
  int i;
  for (i = 0; i <= MAX_ALPHA; ++i) dst->alphas[i] += src->alphas[i];
  dst->alpha += src->alpha;
  dst->uv_alpha += src->uv_alpha;
}

// initialize the job struct with some tasks to perform
static void InitSegmentJob(VP8Encoder* const enc, SegmentJob* const job,
                           int start_row, int end_row) {
  WebPGetWorkerInterface()->Init(&job->worker);
  job->worker.data1 = job;
  job->worker.data2 = &job->it;
  job->worker.hook = DoSegmentsJob;
  VP8IteratorInit(enc, &job->it);
  VP8IteratorSetRow(&job->it, start_row);
  VP8IteratorSetCountDown(&job->it, (end_row - start_row) * enc->mb_w_);
  memset(job->alphas, 0, sizeof(job->alphas));
  job->alpha = 0;
  job->uv_alpha = 0;
  // only one of the two jobs can record the progress, since we don't
  // expect the user's hook to be multi-thread safe
  job->delta_progress = (start_row == 0) ? 20 : 0;
}

// main entry point
int VP8EncAnalyze(VP8Encoder* const enc) {
  int ok = 1;
  const int do_segments =
      enc->config_->emulate_jpeg_size ||   // We need the complexity evaluation.
      (enc->segment_hdr_.num_segments_ > 1) ||
      (enc->method_ <= 1);  // for method 0 - 1, we need preds_[] to be filled.
  if (do_segments) {
    const int last_row = enc->mb_h_;
    // We give a little more than half of the work to the main thread.
    const int split_row = (9 * last_row + 15) >> 4;
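    // i.e. split_row = ceil(9 * last_row / 16), roughly 9/16th of the rows.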
    const int total_mb = last_row * enc->mb_w_;
#ifdef WEBP_USE_THREAD
    const int kMinSplitRow = 2;  // minimal rows needed for mt to be worth it
    const int do_mt = (enc->thread_level_ > 0) && (split_row >= kMinSplitRow);
#else
    const int do_mt = 0;
#endif
    const WebPWorkerInterface* const worker_interface =
        WebPGetWorkerInterface();
    SegmentJob main_job;
    if (do_mt) {
      SegmentJob side_job;
      // Note the use of '&' instead of '&&' because we must call the functions
      // no matter what.
      InitSegmentJob(enc, &main_job, 0, split_row);
      InitSegmentJob(enc, &side_job, split_row, last_row);
      // we don't need to call Reset() on main_job.worker, since we're calling
      // WebPWorkerExecute() on it
      ok &= worker_interface->Reset(&side_job.worker);
      // launch the two jobs in parallel
      if (ok) {
        worker_interface->Launch(&side_job.worker);
        worker_interface->Execute(&main_job.worker);
        ok &= worker_interface->Sync(&side_job.worker);
        ok &= worker_interface->Sync(&main_job.worker);
      }
      worker_interface->End(&side_job.worker);
      if (ok) MergeJobs(&side_job, &main_job);  // merge results together
    } else {
      // Even for single-thread case, we use the generic Worker tools.
      InitSegmentJob(enc, &main_job, 0, last_row);
      worker_interface->Execute(&main_job.worker);
      ok &= worker_interface->Sync(&main_job.worker);
    }
    worker_interface->End(&main_job.worker);
    if (ok) {
      enc->alpha_ = main_job.alpha / total_mb;
      enc->uv_alpha_ = main_job.uv_alpha / total_mb;
      AssignSegments(enc, main_job.alphas);
    }
  } else {   // Use only one default segment.
    ResetAllMBInfo(enc);
  }
  return ok;
}