1 // Copyright 2011 Google Inc.
2 //
3 // This code is licensed under the same terms as WebM:
4 // Software License Agreement: http://www.webmproject.org/license/software/
5 // Additional IP Rights Grant: http://www.webmproject.org/license/additional/
6 // -----------------------------------------------------------------------------
7 //
8 // Quantization
9 //
10 // Author: Skal (pascal.massimino@gmail.com)
11
12 #include <assert.h>
13 #include <math.h>
14
15 #include "vp8enci.h"
16 #include "cost.h"
17
18 #define DO_TRELLIS_I4 1
19 #define DO_TRELLIS_I16 1 // not a huge gain, but ok at low bitrate.
20 #define DO_TRELLIS_UV 0 // disable trellis for UV. Risky. Not worth.
21 #define USE_TDISTO 1
22
23 #define MID_ALPHA 64 // neutral value for susceptibility
24 #define MIN_ALPHA 30 // lowest usable value for susceptibility
#define MAX_ALPHA 100   // highest meaningful value for susceptibility
26
27 #define SNS_TO_DQ 0.9 // Scaling constant between the sns value and the QP
28 // power-law modulation. Must be strictly less than 1.
29
30 #define MULT_8B(a, b) (((a) * (b) + 128) >> 8)
31
32 #if defined(__cplusplus) || defined(c_plusplus)
33 extern "C" {
34 #endif
35
36 //-----------------------------------------------------------------------------
37
// Clamp 'v' to the inclusive range [m, M].
static inline int clip(int v, int m, int M) {
  if (v < m) return m;
  if (v > M) return M;
  return v;
}
41
// Zigzag scan order: maps a coding position (0..15) to the raster position
// of the coefficient within the 4x4 block.
const uint8_t VP8Zigzag[16] = {
  0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
};
45
// DC quantizer steps, indexed by the clipped quantizer index [0..127].
static const uint8_t kDcTable[128] = {
  4, 5, 6, 7, 8, 9, 10, 10,
  11, 12, 13, 14, 15, 16, 17, 17,
  18, 19, 20, 20, 21, 21, 22, 22,
  23, 23, 24, 25, 25, 26, 27, 28,
  29, 30, 31, 32, 33, 34, 35, 36,
  37, 37, 38, 39, 40, 41, 42, 43,
  44, 45, 46, 46, 47, 48, 49, 50,
  51, 52, 53, 54, 55, 56, 57, 58,
  59, 60, 61, 62, 63, 64, 65, 66,
  67, 68, 69, 70, 71, 72, 73, 74,
  75, 76, 76, 77, 78, 79, 80, 81,
  82, 83, 84, 85, 86, 87, 88, 89,
  91, 93, 95, 96, 98, 100, 101, 102,
  104, 106, 108, 110, 112, 114, 116, 118,
  122, 124, 126, 128, 130, 132, 134, 136,
  138, 140, 143, 145, 148, 151, 154, 157
};

// AC quantizer steps, indexed by the clipped quantizer index [0..127].
static const uint16_t kAcTable[128] = {
  4, 5, 6, 7, 8, 9, 10, 11,
  12, 13, 14, 15, 16, 17, 18, 19,
  20, 21, 22, 23, 24, 25, 26, 27,
  28, 29, 30, 31, 32, 33, 34, 35,
  36, 37, 38, 39, 40, 41, 42, 43,
  44, 45, 46, 47, 48, 49, 50, 51,
  52, 53, 54, 55, 56, 57, 58, 60,
  62, 64, 66, 68, 70, 72, 74, 76,
  78, 80, 82, 84, 86, 88, 90, 92,
  94, 96, 98, 100, 102, 104, 106, 108,
  110, 112, 114, 116, 119, 122, 125, 128,
  131, 134, 137, 140, 143, 146, 149, 152,
  155, 158, 161, 164, 167, 170, 173, 177,
  181, 185, 189, 193, 197, 201, 205, 209,
  213, 217, 221, 225, 229, 234, 239, 245,
  249, 254, 259, 264, 269, 274, 279, 284
};

// Alternate AC quantizer steps (used for the y2_ / luma-DC WHT matrix,
// see SetupMatrices), indexed by the clipped quantizer index [0..127].
static const uint16_t kAcTable2[128] = {
  8, 8, 9, 10, 12, 13, 15, 17,
  18, 20, 21, 23, 24, 26, 27, 29,
  31, 32, 34, 35, 37, 38, 40, 41,
  43, 44, 46, 48, 49, 51, 52, 54,
  55, 57, 58, 60, 62, 63, 65, 66,
  68, 69, 71, 72, 74, 75, 77, 79,
  80, 82, 83, 85, 86, 88, 89, 93,
  96, 99, 102, 105, 108, 111, 114, 117,
  120, 124, 127, 130, 133, 136, 139, 142,
  145, 148, 151, 155, 158, 161, 164, 167,
  170, 173, 176, 179, 184, 189, 193, 198,
  203, 207, 212, 217, 221, 226, 230, 235,
  240, 244, 249, 254, 258, 263, 268, 274,
  280, 286, 292, 299, 305, 311, 317, 323,
  330, 336, 342, 348, 354, 362, 370, 379,
  385, 393, 401, 409, 416, 424, 432, 440
};

// Per-coefficient thresholds (currently unused except in a commented-out
// expression in ExpandMatrix — see the TODO there).
static const uint16_t kCoeffThresh[16] = {
  0, 10, 20, 30,
  10, 20, 30, 30,
  20, 30, 30, 30,
  30, 30, 30, 30
};

// Per-coefficient quantization bias, in 1/256th units.
// TODO(skal): tune more. Coeff thresholding?
static const uint8_t kBiasMatrices[3][16] = {  // [3] = [luma-ac,luma-dc,chroma]
  { 96, 96, 96, 96,
    96, 96, 96, 96,
    96, 96, 96, 96,
    96, 96, 96, 96 },
  { 96, 96, 96, 96,
    96, 96, 96, 96,
    96, 96, 96, 96,
    96, 96, 96, 96 },
  { 96, 96, 96, 96,
    96, 96, 96, 96,
    96, 96, 96, 96,
    96, 96, 96, 96 }
};

// Sharpening by (slightly) raising the hi-frequency coeffs (only for trellis).
// Hack-ish but helpful for mid-bitrate range. Use with care.
static const uint8_t kFreqSharpening[16] = {
  0, 30, 60, 90,
  30, 60, 90, 90,
  60, 90, 90, 90,
  90, 90, 90, 90
};
134
135 //-----------------------------------------------------------------------------
136 // Initialize quantization parameters in VP8Matrix
137
138 // Returns the average quantizer
// Fills in the derived quantization fields of 'm' (inverse quantizer 'iq_',
// rounding bias 'bias_', zero-threshold 'zthresh_' and trellis 'sharpen_')
// from the two base levels q_[0] (DC) and q_[1] (AC). 'type' selects the
// bias table in kBiasMatrices. Returns the average quantizer step.
static int ExpandMatrix(VP8Matrix* const m, int type) {
  int k;
  int sum = 0;
  // All AC coefficients (positions 2..15) share the base AC quantizer.
  for (k = 2; k < 16; ++k) {
    m->q_[k] = m->q_[1];
  }
  for (k = 0; k < 16; ++k) {
    const int pos = VP8Zigzag[k];
    const int bias = kBiasMatrices[type][pos];
    m->iq_[pos] = (1 << QFIX) / m->q_[pos];
    m->bias_[pos] = BIAS(bias);
    // TODO(skal): tune kCoeffThresh[]
    m->zthresh_[pos] =
        ((256 /*+ kCoeffThresh[pos]*/ - bias) * m->q_[pos] + 127) >> 8;
    m->sharpen_[pos] = (kFreqSharpening[pos] * m->q_[pos]) >> 11;
    sum += m->q_[pos];
  }
  return (sum + 8) >> 4;   // rounded average over the 16 coefficients
}
157
// Computes, for every active segment, the three quantization matrices
// (y1 = luma, y2 = luma-DC/WHT, uv = chroma) and the lambda values used
// by RD-scoring and the trellis passes.
static void SetupMatrices(VP8Encoder* enc) {
  int s;
  // Texture-distortion weighting is only enabled for slower methods.
  const int tlambda_scale =
      (enc->method_ >= 4) ? enc->config_->sns_strength : 0;
  const int nb_segments = enc->segment_hdr_.num_segments_;

  for (s = 0; s < nb_segments; ++s) {
    VP8SegmentInfo* const dqm = &enc->dqm_[s];
    const int q = dqm->quant_;
    int q4, q16, quv;

    dqm->y1_.q_[0] = kDcTable[clip(q + enc->dq_y1_dc_, 0, 127)];
    dqm->y1_.q_[1] = kAcTable[clip(q, 0, 127)];

    dqm->y2_.q_[0] = kDcTable[clip(q + enc->dq_y2_dc_, 0, 127)] * 2;
    dqm->y2_.q_[1] = kAcTable2[clip(q + enc->dq_y2_ac_, 0, 127)];

    // NOTE(review): the DC-UV index is clipped to 117 (not 127) in the
    // original code; preserved as-is.
    dqm->uv_.q_[0] = kDcTable[clip(q + enc->dq_uv_dc_, 0, 117)];
    dqm->uv_.q_[1] = kAcTable[clip(q + enc->dq_uv_ac_, 0, 127)];

    q4  = ExpandMatrix(&dqm->y1_, 0);
    q16 = ExpandMatrix(&dqm->y2_, 1);
    quv = ExpandMatrix(&dqm->uv_, 2);

    // TODO: Switch to kLambda*[] tables?
    dqm->lambda_i4_          = (3 * q4 * q4) >> 7;
    dqm->lambda_i16_         = (3 * q16 * q16);
    dqm->lambda_uv_          = (3 * quv * quv) >> 6;
    dqm->lambda_mode_        = (1 * q4 * q4) >> 7;
    dqm->lambda_trellis_i4_  = (7 * q4 * q4) >> 3;
    dqm->lambda_trellis_i16_ = (q16 * q16) >> 2;
    dqm->lambda_trellis_uv_  = (quv * quv) << 1;
    dqm->tlambda_            = (tlambda_scale * q4) >> 5;
  }
}
194
195 //-----------------------------------------------------------------------------
196 // Initialize filtering parameters
197
198 // Very small filter-strength values have close to no visual effect. So we can
199 // save a little decoding-CPU by turning filtering off for these.
200 #define FSTRENGTH_CUTOFF 3
201
// Derives each segment's loop-filter strength from the configured strength
// and the segment's quantizer/susceptibility, then seeds the filter header.
static void SetupFilterStrength(VP8Encoder* const enc) {
  const int level0 = enc->config_->filter_strength;
  int s;
  for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
    VP8SegmentInfo* const dqm = &enc->dqm_[s];
    // Segments with lower quantizer will be less filtered. TODO: tune (wrt SNS)
    const int level = level0 * 256 * dqm->quant_ / 128;
    const int f = level / (256 + dqm->beta_);
    // Strengths below FSTRENGTH_CUTOFF are visually negligible: force to 0.
    // Cap at the syntax maximum of 63.
    dqm->fstrength_ = (f < FSTRENGTH_CUTOFF) ? 0 : (f > 63) ? 63 : f;
  }
  // We record the initial strength (mainly for the case of 1-segment only).
  enc->filter_hdr_.level_ = enc->dqm_[0].fstrength_;
  enc->filter_hdr_.simple_ = (enc->config_->filter_type == 0);
  enc->filter_hdr_.sharpness_ = enc->config_->filter_sharpness;
}
216
217 //-----------------------------------------------------------------------------
218
219 // Note: if you change the values below, remember that the max range
220 // allowed by the syntax for DQ_UV is [-16,16].
221 #define MAX_DQ_UV (6)
222 #define MIN_DQ_UV (-4)
223
224 // We want to emulate jpeg-like behaviour where the expected "good" quality
225 // is around q=75. Internally, our "good" middle is around c=50. So we
226 // map accordingly using linear piece-wise function
// Maps the user quality q in [0..100] to an internal compression factor
// in [0..1], using a piece-wise linear curve so that q=75 lands on the
// internal "good" middle point c=0.5 (jpeg-like behaviour).
static double QualityToCompression(double q) {
  const double c = q / 100.;
  if (c < 0.75) {
    return c * (2. / 3.);
  }
  return 2. * c - 1.;
}
231
// Distributes quantizer values among the segments according to the target
// 'quality' and each segment's susceptibility (alpha_), derives the UV
// quantizer deltas, then finalizes the matrices and filter strengths.
void VP8SetSegmentParams(VP8Encoder* const enc, float quality) {
  int i;
  int dq_uv_ac, dq_uv_dc;
  const int num_segments = enc->config_->segments;
  const double amp = SNS_TO_DQ * enc->config_->sns_strength / 100. / 128.;
  const double c_base = QualityToCompression(quality);
  for (i = 0; i < num_segments; ++i) {
    // The file size roughly scales as pow(quantizer, 3.). Actually, the
    // exponent is somewhere between 2.8 and 3.2, but we're mostly interested
    // in the mid-quant range. So we scale the compressibility inversely to
    // this power-law: quant ~= compression ^ 1/3. This law holds well for
    // low quant. Finer modelling for high-quant would make use of kAcTable[]
    // more explicitly.
    // Additionally, we modulate the base exponent 1/3 to accommodate for the
    // quantization susceptibility and allow denser segments to be quantized
    // more.
    const double expn = (1. - amp * enc->dqm_[i].alpha_) / 3.;
    const double c = pow(c_base, expn);
    const int q = (int)(127. * (1. - c));
    assert(expn > 0.);
    enc->dqm_[i].quant_ = clip(q, 0, 127);
  }

  // purely indicative in the bitstream (except for the 1-segment case)
  enc->base_quant_ = enc->dqm_[0].quant_;

  // fill-in values for the unused segments (required by the syntax)
  for (i = num_segments; i < NUM_MB_SEGMENTS; ++i) {
    enc->dqm_[i].quant_ = enc->base_quant_;
  }

  // uv_alpha_ is normally spread around ~60. The useful range is
  // typically ~30 (quite bad) to ~100 (ok to decimate UV more).
  // We map it to the safe maximal range of MAX/MIN_DQ_UV for dq_uv.
  dq_uv_ac = (enc->uv_alpha_ - MID_ALPHA) * (MAX_DQ_UV - MIN_DQ_UV)
           / (MAX_ALPHA - MIN_ALPHA);
  // we rescale by the user-defined strength of adaptation
  dq_uv_ac = dq_uv_ac * enc->config_->sns_strength / 100;
  // and make it safe.
  dq_uv_ac = clip(dq_uv_ac, MIN_DQ_UV, MAX_DQ_UV);
  // We also boost the dc-uv-quant a little, based on sns-strength, since
  // U/V channels are quite more reactive to high quants (flat DC-blocks
  // tend to appear, and are displeasant).
  dq_uv_dc = -4 * enc->config_->sns_strength / 100;
  dq_uv_dc = clip(dq_uv_dc, -15, 15);   // 4bit-signed max allowed

  enc->dq_y1_dc_ = 0;   // TODO(skal): dq-lum
  enc->dq_y2_dc_ = 0;
  enc->dq_y2_ac_ = 0;
  enc->dq_uv_dc_ = dq_uv_dc;
  enc->dq_uv_ac_ = dq_uv_ac;

  SetupMatrices(enc);

  SetupFilterStrength(enc);   // initialize segments' filtering, eventually
}
288
289 //-----------------------------------------------------------------------------
290 // Form the predictions in cache
291
// Offsets (within the yuv_p_ prediction buffer) of the samples produced
// for each intra prediction mode.
// Must be ordered using {DC_PRED, TM_PRED, V_PRED, H_PRED} as index
const int VP8I16ModeOffsets[4] = { I16DC16, I16TM16, I16VE16, I16HE16 };
const int VP8UVModeOffsets[4] = { C8DC8, C8TM8, C8VE8, C8HE8 };

// Must be indexed using {B_DC_PRED -> B_HU_PRED} as index
const int VP8I4ModeOffsets[NUM_BMODES] = {
  I4DC4, I4TM4, I4VE4, I4HE4, I4RD4, I4VR4, I4LD4, I4VL4, I4HD4, I4HU4
};
300
VP8MakeLuma16Preds(const VP8EncIterator * const it)301 void VP8MakeLuma16Preds(const VP8EncIterator* const it) {
302 VP8Encoder* const enc = it->enc_;
303 const uint8_t* left = it->x_ ? enc->y_left_ : NULL;
304 const uint8_t* top = it->y_ ? enc->y_top_ + it->x_ * 16 : NULL;
305 VP8EncPredLuma16(it->yuv_p_, left, top);
306 }
307
VP8MakeChroma8Preds(const VP8EncIterator * const it)308 void VP8MakeChroma8Preds(const VP8EncIterator* const it) {
309 VP8Encoder* const enc = it->enc_;
310 const uint8_t* left = it->x_ ? enc->u_left_ : NULL;
311 const uint8_t* top = it->y_ ? enc->uv_top_ + it->x_ * 16 : NULL;
312 VP8EncPredChroma8(it->yuv_p_, left, top);
313 }
314
// Builds the ten 4x4 intra predictors for the current sub-block into
// it->yuv_p_, using the top samples cached in it->i4_top_.
void VP8MakeIntra4Preds(const VP8EncIterator* const it) {
  VP8EncPredLuma4(it->yuv_p_, it->i4_top_);
}
318
319 //-----------------------------------------------------------------------------
320 // Quantize
321
322 // Layout:
323 // +----+
324 // |YYYY| 0
325 // |YYYY| 4
326 // |YYYY| 8
327 // |YYYY| 12
328 // +----+
329 // |UUVV| 16
330 // |UUVV| 20
331 // +----+
332
// Byte offset, within the work buffers, of each 4x4 sub-block following
// the layout described above: 16 luma blocks, then 4 U and 4 V blocks.
const int VP8Scan[16 + 4 + 4] = {
  // Luma
  0 +  0 * BPS,  4 +  0 * BPS, 8 +  0 * BPS, 12 +  0 * BPS,
  0 +  4 * BPS,  4 +  4 * BPS, 8 +  4 * BPS, 12 +  4 * BPS,
  0 +  8 * BPS,  4 +  8 * BPS, 8 +  8 * BPS, 12 +  8 * BPS,
  0 + 12 * BPS,  4 + 12 * BPS, 8 + 12 * BPS, 12 + 12 * BPS,

  0 + 0 * BPS,   4 + 0 * BPS, 0 + 4 * BPS,  4 + 4 * BPS,    // U
  8 + 0 * BPS,  12 + 0 * BPS, 8 + 4 * BPS, 12 + 4 * BPS     // V
};
343
344 //-----------------------------------------------------------------------------
345 // Distortion measurement
346
// Frequency weighting used by the texture-distortion metric (TDisto) for
// luma: low frequencies are weighted more heavily.
static const uint16_t kWeightY[16] = {
  38, 32, 20, 9, 32, 28, 17, 7, 20, 17, 10, 4, 9, 7, 4, 2
};

// Frequency weighting of the trellis error metric. Flat (all 16) when
// USE_TDISTO is disabled.
static const uint16_t kWeightTrellis[16] = {
#if USE_TDISTO == 0
  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
#else
  30, 27, 19, 11,
  27, 24, 17, 10,
  19, 17, 12,  8,
  11, 10,  8,  6
#endif
};
361
362 // Init/Copy the common fields in score.
// Resets the accumulated rate/distortion fields of 'rd' and sets the score
// to the worst possible value, so any real candidate will beat it.
static void InitScore(VP8ModeScore* const rd) {
  rd->D = rd->SD = rd->R = 0;
  rd->nz = 0;
  rd->score = MAX_COST;
}
370
// Copies the rate/distortion fields of 'src' into 'dst'. Only the scoring
// fields are copied — levels/modes stored in VP8ModeScore are untouched.
static void CopyScore(VP8ModeScore* const dst, const VP8ModeScore* const src) {
  dst->D = src->D;
  dst->SD = src->SD;
  dst->R = src->R;
  dst->nz = src->nz;      // note that nz is not accumulated, but just copied.
  dst->score = src->score;
}
378
// Accumulates the rate/distortion fields of 'src' into 'dst'.
static void AddScore(VP8ModeScore* const dst, const VP8ModeScore* const src) {
  dst->D += src->D;
  dst->SD += src->SD;
  dst->R += src->R;
  dst->nz |= src->nz;     // here, new nz bits are accumulated.
  dst->score += src->score;
}
386
387 //-----------------------------------------------------------------------------
388 // Performs trellis-optimized quantization.
389
390 // Trellis
391
// One trellis state: the best known way to reach a given
// (coefficient position, level delta) pair during the scan.
typedef struct {
  int prev;        // best previous node's delta index
  int level;       // quantized level at this position
  int sign;        // sign of coeff_i
  score_t cost;    // bit cost accumulated along the best path
  score_t error;   // distortion = sum of (|coeff_i| - level_i * Q_i)^2
  int ctx;         // context (only depends on 'level'. Could be spared.)
} Node;
400
401 // If a coefficient was quantized to a value Q (using a neutral bias),
402 // we test all alternate possibilities between [Q-MIN_DELTA, Q+MAX_DELTA]
403 // We don't test negative values though.
404 #define MIN_DELTA 0 // how much lower level to try
405 #define MAX_DELTA 1 // how much higher
406 #define NUM_NODES (MIN_DELTA + 1 + MAX_DELTA)
407 #define NODE(n, l) (nodes[(n) + 1][(l) + MIN_DELTA])
408
// Finalizes rd->score by combining rate (R) and distortion (D + SD),
// traded off by 'lambda'.
static inline void SetRDScore(int lambda, VP8ModeScore* const rd) {
  // TODO: incorporate the "* 256" in the tables?
  rd->score = 256 * (rd->D + rd->SD) + rd->R * lambda;
}
413
RDScoreTrellis(int lambda,score_t rate,score_t distortion)414 static inline score_t RDScoreTrellis(int lambda, score_t rate,
415 score_t distortion) {
416 return rate * lambda + 256 * distortion;
417 }
418
// Trellis-optimized quantization of one 16-coefficient block.
// 'in' holds the transform coefficients (natural order) and is overwritten
// with their dequantized reconstruction; 'out' receives the chosen levels in
// scan order. 'ctx0' is the initial coding context, 'coeff_type' selects the
// probability/cost tables (type 0 skips the DC coefficient), 'mtx' is the
// quantization matrix and 'lambda' the rate-distortion trade-off.
// Returns non-zero if any quantized level is non-zero.
static int TrellisQuantizeBlock(const VP8EncIterator* const it,
                                int16_t in[16], int16_t out[16],
                                int ctx0, int coeff_type,
                                const VP8Matrix* const mtx,
                                int lambda) {
  ProbaArray* const last_costs = it->enc_->proba_.coeffs_[coeff_type];
  CostArray* const costs = it->enc_->proba_.level_cost_[coeff_type];
  // coeff_type 0 codes the DC separately, so start at position 1.
  const int first = (coeff_type == 0) ? 1 : 0;
  Node nodes[17][NUM_NODES];
  int best_path[3] = {-1, -1, -1};   // store best-last/best-level/best-previous
  score_t best_score;
  int best_node;
  int last = first - 1;
  int n, m, p, nz;

  {
    score_t cost;
    score_t max_error;
    const int thresh = mtx->q_[1] * mtx->q_[1] / 4;
    const int last_proba = last_costs[VP8EncBands[first]][ctx0][0];

    // compute maximal distortion.
    // max_error is the distortion of coding everything as zero; each coded
    // level later subtracts its contribution from it.
    max_error = 0;
    for (n = first; n < 16; ++n) {
      const int j = VP8Zigzag[n];
      const int err = in[j] * in[j];
      max_error += kWeightTrellis[j] * err;
      if (err > thresh) last = n;
    }
    // we don't need to go inspect up to n = 16 coeffs. We can just go up
    // to last + 1 (inclusive) without losing much.
    if (last < 15) ++last;

    // compute 'skip' score. This is the max score one can do.
    cost = VP8BitCost(0, last_proba);
    best_score = RDScoreTrellis(lambda, cost, max_error);

    // initialize source node.
    n = first - 1;
    for (m = -MIN_DELTA; m <= MAX_DELTA; ++m) {
      NODE(n, m).cost = 0;
      NODE(n, m).error = max_error;
      NODE(n, m).ctx = ctx0;
    }
  }

  // traverse trellis.
  for (n = first; n <= last; ++n) {
    const int j = VP8Zigzag[n];
    const int Q = mtx->q_[j];
    const int iQ = mtx->iq_[j];
    const int B = BIAS(0x00);   // neutral bias
    // note: it's important to take sign of the _original_ coeff,
    // so we don't have to consider level < 0 afterward.
    const int sign = (in[j] < 0);
    int coeff0 = (sign ? -in[j] : in[j]) + mtx->sharpen_[j];
    int level0;
    if (coeff0 > 2047) coeff0 = 2047;

    level0 = QUANTDIV(coeff0, iQ, B);
    // test all alternate level values around level0.
    for (m = -MIN_DELTA; m <= MAX_DELTA; ++m) {
      Node* const cur = &NODE(n, m);
      int delta_error, new_error;
      score_t cur_score = MAX_COST;
      int level = level0 + m;
      int last_proba;

      cur->sign = sign;
      cur->level = level;
      // context for the next coefficient only depends on the level class.
      cur->ctx = (level == 0) ? 0 : (level == 1) ? 1 : 2;
      if (level >= 2048 || level < 0) {   // node is dead?
        cur->cost = MAX_COST;
        continue;
      }
      last_proba = last_costs[VP8EncBands[n + 1]][cur->ctx][0];

      // Compute delta_error = how much coding this level will
      // subtract as distortion to max_error
      new_error = coeff0 - level * Q;
      delta_error =
          kWeightTrellis[j] * (coeff0 * coeff0 - new_error * new_error);

      // Inspect all possible non-dead predecessors. Retain only the best one.
      for (p = -MIN_DELTA; p <= MAX_DELTA; ++p) {
        const Node* const prev = &NODE(n - 1, p);
        const int prev_ctx = prev->ctx;
        const uint16_t* const tcost = costs[VP8EncBands[n]][prev_ctx];
        const score_t total_error = prev->error - delta_error;
        score_t cost, base_cost, score;

        if (prev->cost >= MAX_COST) {   // dead node?
          continue;
        }

        // Base cost of both terminal/non-terminal
        base_cost = prev->cost + VP8LevelCost(tcost, level);

        // Examine node assuming it's a non-terminal one.
        cost = base_cost;
        if (level && n < 15) {
          // add the cost of signalling "more coefficients follow"
          cost += VP8BitCost(1, last_proba);
        }
        score = RDScoreTrellis(lambda, cost, total_error);
        if (score < cur_score) {
          cur_score = score;
          cur->cost = cost;
          cur->error = total_error;
          cur->prev = p;
        }

        // Now, record best terminal node (and thus best entry in the graph).
        if (level) {
          cost = base_cost;
          if (n < 15) cost += VP8BitCost(0, last_proba);
          score = RDScoreTrellis(lambda, cost, total_error);
          if (score < best_score) {
            best_score = score;
            best_path[0] = n;   // best eob position
            best_path[1] = m;   // best level
            best_path[2] = p;   // best predecessor
          }
        }
      }
    }
  }

  // Fresh start
  memset(in + first, 0, (16 - first) * sizeof(*in));
  memset(out + first, 0, (16 - first) * sizeof(*out));
  if (best_path[0] == -1) {
    return 0;   // skip! (coding nothing beat every coded alternative)
  }

  // Unwind the best path.
  // Note: best-prev on terminal node is not necessarily equal to the
  // best_prev for non-terminal. So we patch best_path[2] in.
  n = best_path[0];
  best_node = best_path[1];
  NODE(n, best_node).prev = best_path[2];   // force best-prev for terminal
  nz = 0;

  for (; n >= first; --n) {
    const Node* const node = &NODE(n, best_node);
    const int j = VP8Zigzag[n];
    out[n] = node->sign ? -node->level : node->level;
    nz |= (node->level != 0);
    in[j] = out[n] * mtx->q_[j];   // dequantized reconstruction
    best_node = node->prev;
  }
  return nz;
}
571
572 #undef NODE
573
574 //-----------------------------------------------------------------------------
575 // Performs: difference, transform, quantize, back-transform, add
576 // all at once. Output is the reconstructed block in *yuv_out, and the
577 // quantized levels in *levels.
578
// Reconstructs the whole 16x16 luma macroblock for prediction 'mode':
// forward-transforms the residual, quantizes (trellis or plain), then
// inverse-transforms into 'yuv_out'. Levels are stored in rd->y_dc_levels /
// rd->y_ac_levels. Returns the non-zero flags: bit n for AC block n,
// bit 24 for the DC (WHT) block.
static int ReconstructIntra16(VP8EncIterator* const it,
                              VP8ModeScore* const rd,
                              uint8_t* const yuv_out,
                              int mode) {
  const VP8Encoder* const enc = it->enc_;
  const uint8_t* const ref = it->yuv_p_ + VP8I16ModeOffsets[mode];
  const uint8_t* const src = it->yuv_in_ + Y_OFF;
  const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
  int nz = 0;
  int n;
  int16_t tmp[16][16], dc_tmp[16];

  for (n = 0; n < 16; ++n) {
    VP8FTransform(src + VP8Scan[n], ref + VP8Scan[n], tmp[n]);
  }
  // The 16 DC coefficients go through a separate Walsh-Hadamard transform
  // and use the y2_ matrix; their nz flag lives at bit 24.
  VP8FTransformWHT(tmp[0], dc_tmp);
  nz |= VP8EncQuantizeBlock(dc_tmp, rd->y_dc_levels, 0, &dqm->y2_) << 24;

  if (DO_TRELLIS_I16 && it->do_trellis_) {
    int x, y;
    // refresh top_nz_/left_nz_ so trellis contexts match the bitstream.
    VP8IteratorNzToBytes(it);
    for (y = 0, n = 0; y < 4; ++y) {
      for (x = 0; x < 4; ++x, ++n) {
        const int ctx = it->top_nz_[x] + it->left_nz_[y];
        const int non_zero =
            TrellisQuantizeBlock(it, tmp[n], rd->y_ac_levels[n], ctx, 0,
                                 &dqm->y1_, dqm->lambda_trellis_i16_);
        it->top_nz_[x] = it->left_nz_[y] = non_zero;
        nz |= non_zero << n;
      }
    }
  } else {
    for (n = 0; n < 16; ++n) {
      nz |= VP8EncQuantizeBlock(tmp[n], rd->y_ac_levels[n], 1, &dqm->y1_) << n;
    }
  }

  // Transform back
  VP8ITransformWHT(dc_tmp, tmp[0]);
  for (n = 0; n < 16; n += 2) {
    // last arg non-zero: presumably processes two adjacent blocks per call
    // (matches the n += 2 stride) — see VP8ITransform.
    VP8ITransform(ref + VP8Scan[n], tmp[n], yuv_out + VP8Scan[n], 1);
  }

  return nz;
}
624
// Reconstructs one 4x4 luma sub-block for intra4 prediction 'mode':
// forward transform of (src - ref), quantization (trellis or plain) into
// 'levels', then inverse transform into 'yuv_out'.
// Returns non-zero if any quantized level is non-zero.
static int ReconstructIntra4(VP8EncIterator* const it,
                             int16_t levels[16],
                             const uint8_t* const src,
                             uint8_t* const yuv_out,
                             int mode) {
  const VP8Encoder* const enc = it->enc_;
  const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
  const uint8_t* const ref = it->yuv_p_ + VP8I4ModeOffsets[mode];
  int16_t coeffs[16];
  int nz;

  VP8FTransform(src, ref, coeffs);
  if (DO_TRELLIS_I4 && it->do_trellis_) {
    const int x = it->i4_ & 3;
    const int y = it->i4_ >> 2;
    const int ctx = it->top_nz_[x] + it->left_nz_[y];
    nz = TrellisQuantizeBlock(it, coeffs, levels, ctx, 3, &dqm->y1_,
                              dqm->lambda_trellis_i4_);
  } else {
    nz = VP8EncQuantizeBlock(coeffs, levels, 0, &dqm->y1_);
  }
  VP8ITransform(ref, coeffs, yuv_out, 0);
  return nz;
}
648
// Reconstructs the 8x8 U and V blocks for chroma prediction 'mode'.
// Levels go into rd->uv_levels. Returns the 8 per-block non-zero flags
// shifted into bits [16..23], matching the macroblock-level nz layout.
static int ReconstructUV(VP8EncIterator* const it, VP8ModeScore* const rd,
                         uint8_t* const yuv_out, int mode) {
  const VP8Encoder* const enc = it->enc_;
  const uint8_t* const ref = it->yuv_p_ + VP8UVModeOffsets[mode];
  const uint8_t* const src = it->yuv_in_ + U_OFF;
  const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
  int nz = 0;
  int n;
  int16_t tmp[8][16];

  // Blocks 0..3 are U, 4..7 are V (see VP8Scan[16..23]).
  for (n = 0; n < 8; ++n) {
    VP8FTransform(src + VP8Scan[16 + n], ref + VP8Scan[16 + n], tmp[n]);
  }
  if (DO_TRELLIS_UV && it->do_trellis_) {   // disabled by default (see top)
    int ch, x, y;
    for (ch = 0, n = 0; ch <= 2; ch += 2) {   // ch = 0 (U), then 2 (V)
      for (y = 0; y < 2; ++y) {
        for (x = 0; x < 2; ++x, ++n) {
          const int ctx = it->top_nz_[4 + ch + x] + it->left_nz_[4 + ch + y];
          const int non_zero =
              TrellisQuantizeBlock(it, tmp[n], rd->uv_levels[n], ctx, 2,
                                   &dqm->uv_, dqm->lambda_trellis_uv_);
          it->top_nz_[4 + ch + x] = it->left_nz_[4 + ch + y] = non_zero;
          nz |= non_zero << n;
        }
      }
    }
  } else {
    for (n = 0; n < 8; ++n) {
      nz |= VP8EncQuantizeBlock(tmp[n], rd->uv_levels[n], 0, &dqm->uv_) << n;
    }
  }

  for (n = 0; n < 8; n += 2) {
    VP8ITransform(ref + VP8Scan[16 + n], tmp[n], yuv_out + VP8Scan[16 + n], 1);
  }
  return (nz << 16);
}
687
688 //-----------------------------------------------------------------------------
// RD-opt decision. Reconstruct each mode, evaluate distortion and bit-cost.
// Pick the mode with the lowest RD-cost = Rate + lambda * Distortion.
691
// Exchanges the two pointers.
static void SwapPtr(uint8_t** a, uint8_t** b) {
  uint8_t* const saved = *b;
  *b = *a;
  *a = saved;
}
697
// Promotes the scratch output to current output by swapping the
// yuv_out_ / yuv_out2_ buffer pointers.
static void SwapOut(VP8EncIterator* const it) {
  SwapPtr(&it->yuv_out_, &it->yuv_out2_);
}
701
// Evaluates the four Intra16x16 luma modes, reconstructing each into the
// scratch buffer and keeping the one with the lowest RD score in *rd.
// The winning reconstruction ends up in it->yuv_out_ (via SwapOut).
static void PickBestIntra16(VP8EncIterator* const it, VP8ModeScore* const rd) {
  VP8Encoder* const enc = it->enc_;
  const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
  const int lambda = dqm->lambda_i16_;
  const int tlambda = dqm->tlambda_;
  const uint8_t* const src = it->yuv_in_ + Y_OFF;
  VP8ModeScore rd16;
  int mode;

  rd->mode_i16 = -1;
  for (mode = 0; mode < 4; ++mode) {
    uint8_t* const tmp_dst = it->yuv_out2_ + Y_OFF;   // scratch buffer
    int nz;

    // Reconstruct
    nz = ReconstructIntra16(it, &rd16, tmp_dst, mode);

    // Measure RD-score: SSE distortion, optional texture distortion (SD),
    // and the coding rate of levels + mode signalling.
    rd16.D = VP8SSE16x16(src, tmp_dst);
    rd16.SD = tlambda ? MULT_8B(tlambda, VP8TDisto16x16(src, tmp_dst, kWeightY))
            : 0;
    rd16.R = VP8GetCostLuma16(it, &rd16);
    rd16.R += VP8FixedCostsI16[mode];

    // Since we always examine Intra16 first, we can overwrite *rd directly.
    SetRDScore(lambda, &rd16);
    if (mode == 0 || rd16.score < rd->score) {
      CopyScore(rd, &rd16);
      rd->mode_i16 = mode;
      rd->nz = nz;
      memcpy(rd->y_ac_levels, rd16.y_ac_levels, sizeof(rd16.y_ac_levels));
      memcpy(rd->y_dc_levels, rd16.y_dc_levels, sizeof(rd16.y_dc_levels));
      // keep this reconstruction: scratch becomes the current output.
      SwapOut(it);
    }
  }
  SetRDScore(dqm->lambda_mode_, rd);   // finalize score for mode decision.
  VP8SetIntra16Mode(it, rd->mode_i16);
}
740
741 //-----------------------------------------------------------------------------
742
743 // return the cost array corresponding to the surrounding prediction modes.
GetCostModeI4(VP8EncIterator * const it,const int modes[16])744 static const uint16_t* GetCostModeI4(VP8EncIterator* const it,
745 const int modes[16]) {
746 const int preds_w = it->enc_->preds_w_;
747 const int x = (it->i4_ & 3), y = it->i4_ >> 2;
748 const int left = (x == 0) ? it->preds_[y * preds_w - 1] : modes[it->i4_ - 1];
749 const int top = (y == 0) ? it->preds_[-preds_w + x] : modes[it->i4_ - 4];
750 return VP8FixedCostsI4[top][left];
751 }
752
// Evaluates Intra4x4 coding: for each of the 16 sub-blocks, tries all
// NUM_BMODES predictors and accumulates the best per-block scores.
// Bails out early (returning 0, keeping the Intra16 decision in *rd) as soon
// as the running total exceeds the Intra16 score; otherwise commits the
// Intra4 result to *rd and it->yuv_out_ and returns 1.
static int PickBestIntra4(VP8EncIterator* const it, VP8ModeScore* const rd) {
  VP8Encoder* const enc = it->enc_;
  const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
  const int lambda = dqm->lambda_i4_;
  const int tlambda = dqm->tlambda_;
  const uint8_t* const src0 = it->yuv_in_ + Y_OFF;
  uint8_t* const best_blocks = it->yuv_out2_ + Y_OFF;
  VP8ModeScore rd_best;

  InitScore(&rd_best);
  rd_best.score = 0;   // accumulated from zero, one sub-block at a time
  VP8IteratorStartI4(it);
  do {
    VP8ModeScore rd_i4;
    int mode;
    int best_mode = -1;
    const uint8_t* const src = src0 + VP8Scan[it->i4_];
    const uint16_t* const mode_costs = GetCostModeI4(it, rd->modes_i4);
    uint8_t* best_block = best_blocks + VP8Scan[it->i4_];
    uint8_t* tmp_dst = it->yuv_p_ + I4TMP;   // scratch buffer.

    InitScore(&rd_i4);
    VP8MakeIntra4Preds(it);
    for (mode = 0; mode < NUM_BMODES; ++mode) {
      VP8ModeScore rd_tmp;
      int16_t tmp_levels[16];

      // Reconstruct
      rd_tmp.nz =
          ReconstructIntra4(it, tmp_levels, src, tmp_dst, mode) << it->i4_;

      // Compute RD-score
      rd_tmp.D = VP8SSE4x4(src, tmp_dst);
      rd_tmp.SD =
          tlambda ? MULT_8B(tlambda, VP8TDisto4x4(src, tmp_dst, kWeightY))
                  : 0;
      rd_tmp.R = VP8GetCostLuma4(it, tmp_levels);
      rd_tmp.R += mode_costs[mode];

      SetRDScore(lambda, &rd_tmp);
      if (best_mode < 0 || rd_tmp.score < rd_i4.score) {
        CopyScore(&rd_i4, &rd_tmp);
        best_mode = mode;
        // the candidate becomes the best block; the old best becomes scratch.
        SwapPtr(&tmp_dst, &best_block);
        memcpy(rd_best.y_ac_levels[it->i4_], tmp_levels, sizeof(tmp_levels));
      }
    }
    SetRDScore(dqm->lambda_mode_, &rd_i4);
    AddScore(&rd_best, &rd_i4);
    if (rd_best.score >= rd->score) {
      return 0;   // Intra16 already cheaper: abandon Intra4.
    }
    // Copy selected samples if not in the right place already.
    if (best_block != best_blocks + VP8Scan[it->i4_])
      VP8Copy4x4(best_block, best_blocks + VP8Scan[it->i4_]);
    rd->modes_i4[it->i4_] = best_mode;
    it->top_nz_[it->i4_ & 3] = it->left_nz_[it->i4_ >> 2] = (rd_i4.nz ? 1 : 0);
  } while (VP8IteratorRotateI4(it, best_blocks));

  // finalize state
  CopyScore(rd, &rd_best);
  VP8SetIntra4Mode(it, rd->modes_i4);
  SwapOut(it);
  memcpy(rd->y_ac_levels, rd_best.y_ac_levels, sizeof(rd->y_ac_levels));
  return 1;   // select intra4x4 over intra16x16
}
819
820 //-----------------------------------------------------------------------------
821
// Evaluates the four chroma prediction modes, keeps the best one in
// rd->mode_uv / rd->uv_levels and its reconstruction in it->yuv_out_,
// and adds the winning chroma score into *rd.
static void PickBestUV(VP8EncIterator* const it, VP8ModeScore* const rd) {
  VP8Encoder* const enc = it->enc_;
  const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
  const int lambda = dqm->lambda_uv_;
  const uint8_t* const src = it->yuv_in_ + U_OFF;
  uint8_t* const tmp_dst = it->yuv_out2_ + U_OFF;   // scratch buffer
  uint8_t* const dst0 = it->yuv_out_ + U_OFF;
  VP8ModeScore rd_best;
  int mode;

  rd->mode_uv = -1;
  InitScore(&rd_best);
  for (mode = 0; mode < 4; ++mode) {
    VP8ModeScore rd_uv;

    // Reconstruct
    rd_uv.nz = ReconstructUV(it, &rd_uv, tmp_dst, mode);

    // Compute RD-score
    rd_uv.D = VP8SSE16x8(src, tmp_dst);
    rd_uv.SD = 0;   // TODO: should we call TDisto? it tends to flatten areas.
    rd_uv.R = VP8GetCostUV(it, &rd_uv);
    rd_uv.R += VP8FixedCostsUV[mode];

    SetRDScore(lambda, &rd_uv);
    if (mode == 0 || rd_uv.score < rd_best.score) {
      CopyScore(&rd_best, &rd_uv);
      rd->mode_uv = mode;
      memcpy(rd->uv_levels, rd_uv.uv_levels, sizeof(rd->uv_levels));
      memcpy(dst0, tmp_dst, UV_SIZE);   // TODO: SwapUVOut() ?
    }
  }
  VP8SetIntraUVMode(it, rd->mode_uv);
  AddScore(rd, &rd_best);   // accumulate chroma into the macroblock score.
}
857
858 //-----------------------------------------------------------------------------
859 // Final reconstruction and quantization.
860
// Final quantization pass: reconstructs the macroblock using the intra modes
// already stored in it->mb_ / it->preds_, accumulating the non-zero flags
// into rd->nz. Honors it->do_trellis_ inside the Reconstruct* helpers.
static void SimpleQuantize(VP8EncIterator* const it, VP8ModeScore* const rd) {
  const VP8Encoder* const enc = it->enc_;
  const int i16 = (it->mb_->type_ == 1);   // type_ 1 = Intra16 — presumably; confirm against mb type enum
  int nz = 0;

  if (i16) {
    nz = ReconstructIntra16(it, rd, it->yuv_out_ + Y_OFF, it->preds_[0]);
  } else {
    VP8IteratorStartI4(it);
    do {
      // per-sub-block mode, read back from the prediction-mode map.
      const int mode =
          it->preds_[(it->i4_ & 3) + (it->i4_ >> 2) * enc->preds_w_];
      const uint8_t* const src = it->yuv_in_ + Y_OFF + VP8Scan[it->i4_];
      uint8_t* const dst = it->yuv_out_ + Y_OFF + VP8Scan[it->i4_];
      VP8MakeIntra4Preds(it);
      nz |= ReconstructIntra4(it, rd->y_ac_levels[it->i4_],
                              src, dst, mode) << it->i4_;
    } while (VP8IteratorRotateI4(it, it->yuv_out_ + Y_OFF));
  }

  nz |= ReconstructUV(it, rd, it->yuv_out_ + U_OFF, it->mb_->uv_mode_);
  rd->nz = nz;
}
884
885 //-----------------------------------------------------------------------------
886 // Entry point
887
// Entry point: picks the best intra coding modes for the current macroblock
// and reconstructs it. 'rd_opt' controls how much rate-distortion search is
// performed. Returns 1 when the whole macroblock is skippable (all-zero).
int VP8Decimate(VP8EncIterator* const it, VP8ModeScore* const rd, int rd_opt) {
  int skipped;

  InitScore(rd);

  // We can perform predictions for Luma16x16 and Chroma8x8 already.
  // Luma4x4 predictions needs to be done as-we-go.
  VP8MakeLuma16Preds(it);
  VP8MakeChroma8Preds(it);

  if (rd_opt > 0) {
    // for rd_opt = 2, we perform trellis-quant on the final decision only.
    // for rd_opt > 2, we use it for every scoring (=much slower).
    it->do_trellis_ = (rd_opt > 2);
    PickBestIntra16(it, rd);
    if (it->enc_->method_ >= 2) {
      PickBestIntra4(it, rd);
    }
    PickBestUV(it, rd);
    if (rd_opt == 2) {
      it->do_trellis_ = 1;
      SimpleQuantize(it, rd);
    }
  } else {
    // TODO: for method_ == 2, pick the best intra4/intra16 based on SSE
    it->do_trellis_ = (it->enc_->method_ == 2);
    SimpleQuantize(it, rd);
  }

  skipped = (rd->nz == 0);
  VP8SetSkip(it, skipped);
  return skipped;
}
920
921 #if defined(__cplusplus) || defined(c_plusplus)
922 } // extern "C"
923 #endif
924