/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <math.h>
#include "vpx_mem/vpx_mem.h"

#include "onyx_int.h"
#include "vp8/encoder/quantize.h"
#include "vp8/common/quant_common.h"

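/* Fast path: quantize all 16 coefficients of a 4x4 block in zig-zag order.
 * Each coefficient is rounded and then scaled by a Q16 reciprocal of the
 * quantizer step (the (1 << 16) / quant_val values built in
 * vp8cx_init_quantizer below); there is no zero-bin test here. The index of
 * the last nonzero coefficient plus one is stored as the end-of-block.
 */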
void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d) {
  int i, rc, eob;
  int x, y, z, sz;
  short *coeff_ptr = b->coeff;
  short *round_ptr = b->round;
  short *quant_ptr = b->quant_fast;
  short *qcoeff_ptr = d->qcoeff;
  short *dqcoeff_ptr = d->dqcoeff;
  short *dequant_ptr = d->dequant;

  eob = -1;
  for (i = 0; i < 16; ++i) {
    rc = vp8_default_zig_zag1d[i];
    z = coeff_ptr[rc];

    sz = (z >> 31);    /* sign of z */
    x = (z ^ sz) - sz; /* x = abs(z) */

    y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16; /* quantize (x) */
    x = (y ^ sz) - sz;                               /* get the sign back */
    qcoeff_ptr[rc] = x;                              /* write to destination */
    dqcoeff_ptr[rc] = x * dequant_ptr[rc];           /* dequantized value */

    if (y) {
      eob = i; /* last nonzero coeffs */
    }
  }
  *d->eob = (char)(eob + 1);
}

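/* Regular path: same zig-zag scan, but a coefficient is quantized only if
 * its magnitude clears the zero bin. The bin is the base zbin for the
 * position plus a boost that grows with the run of preceding zeroed
 * coefficients (zrun_zbin_boost) plus the per-macroblock zbin_extra; the
 * boost resets whenever a nonzero coefficient is emitted.
 */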
void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d) {
  int i, rc, eob;
  int zbin;
  int x, y, z, sz;
  short *zbin_boost_ptr = b->zrun_zbin_boost;
  short *coeff_ptr = b->coeff;
  short *zbin_ptr = b->zbin;
  short *round_ptr = b->round;
  short *quant_ptr = b->quant;
  short *quant_shift_ptr = b->quant_shift;
  short *qcoeff_ptr = d->qcoeff;
  short *dqcoeff_ptr = d->dqcoeff;
  short *dequant_ptr = d->dequant;
  short zbin_oq_value = b->zbin_extra;

  memset(qcoeff_ptr, 0, 32);
  memset(dqcoeff_ptr, 0, 32);

  eob = -1;

  for (i = 0; i < 16; ++i) {
    rc = vp8_default_zig_zag1d[i];
    z = coeff_ptr[rc];

    zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;

    zbin_boost_ptr++;
    sz = (z >> 31);    /* sign of z */
    x = (z ^ sz) - sz; /* x = abs(z) */

    if (x >= zbin) {
      x += round_ptr[rc];
      y = ((((x * quant_ptr[rc]) >> 16) + x) * quant_shift_ptr[rc]) >>
          16;                                /* quantize (x) */
      x = (y ^ sz) - sz;                     /* get the sign back */
      qcoeff_ptr[rc] = x;                    /* write to destination */
      dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */

      if (y) {
        eob = i;                             /* last nonzero coeffs */
        zbin_boost_ptr = b->zrun_zbin_boost; /* reset zero runlength */
      }
    }
  }

  *d->eob = (char)(eob + 1);
}

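/* Block indices within a macroblock: 0-15 are the luma (Y) 4x4 blocks,
 * 16-23 are the chroma (U, V) blocks, and 24 is the second-order (Y2)
 * block. The Y2 block only exists when the mode is neither B_PRED nor
 * SPLITMV, which is what has_2nd_order checks below.
 */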
void vp8_quantize_mby(MACROBLOCK *x) {
  int i;
  int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&
                       x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);

  for (i = 0; i < 16; ++i) x->quantize_b(&x->block[i], &x->e_mbd.block[i]);

  if (has_2nd_order) x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
}

void vp8_quantize_mb(MACROBLOCK *x) {
  int i;
  int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&
                       x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);

  for (i = 0; i < 24 + has_2nd_order; ++i) {
    x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
  }
}

void vp8_quantize_mbuv(MACROBLOCK *x) {
  int i;

  for (i = 16; i < 24; ++i) x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}

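/* Per-Q scaling factors used in vp8cx_init_quantizer below. They are applied
 * to the dequant step with a >> 7 (divide by 128), so a rounding factor of
 * 48 means "round by 48/128 of a step" and zbin factors of 84 or 80 place
 * the zero bin at 84/128 or 80/128 of a step.
 */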
static const int qrounding_factors[129] = {
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48
};

static const int qzbin_factors[129] = {
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80
};

static const int qrounding_factors_y2[129] = {
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48
};

static const int qzbin_factors_y2[129] = {
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80
};

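/* Build a multiplier/shift-factor pair that approximates division by the
 * dequant step d, so the quantizer can use multiplies and constant 16-bit
 * shifts instead of a divide. With improved_quant, quant holds a Q16
 * correction term and shift holds 1 << (16 - floor(log2(d))). For example
 * (a worked sketch, not from the source), d = 4 gives quant = 1 and
 * shift = 16384, and the two-step multiply in vp8_regular_quantize_b_c then
 * yields roughly x / 4. Without improved_quant it simply stores
 * (1 << 16) / d.
 */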
static void invert_quant(int improved_quant, short *quant, short *shift,
                         short d) {
  if (improved_quant) {
    unsigned t;
    int l, m;
    t = d;
    for (l = 0; t > 1; ++l) t >>= 1;
    m = 1 + (1 << (16 + l)) / d;
    *quant = (short)(m - (1 << 16));
    *shift = l;
    /* use multiplication and constant shift by 16 */
    *shift = 1 << (16 - *shift);
  } else {
    *quant = (1 << 16) / d;
    *shift = 0;
    /* use multiplication and constant shift by 16 */
    *shift = 1 << (16 - *shift);
  }
}

void vp8cx_init_quantizer(VP8_COMP *cpi) {
  int i;
  int quant_val;
  int Q;

  int zbin_boost[16] = { 0,  0,  8,  10, 12, 14, 16, 20,
                         24, 28, 32, 36, 40, 44, 44, 44 };

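  /* For every frame-level quantizer index, precompute the DC ([0]) and AC
   * ([1]) quantizer, zero-bin, rounding, and dequant values for the Y1, Y2,
   * and UV planes, then copy the AC values to positions 2..15. Only the
   * zero-run boost (zrun_zbin_boost) differs per zig-zag position, scaled by
   * the AC dequant step.
   */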
  for (Q = 0; Q < QINDEX_RANGE; ++Q) {
    /* dc values */
    quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
    cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
                 cpi->Y1quant_shift[Q] + 0, quant_val);
    cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.Y1dequant[Q][0] = quant_val;
    cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;

    quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
    cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
                 cpi->Y2quant_shift[Q] + 0, quant_val);
    cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
    cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
    cpi->common.Y2dequant[Q][0] = quant_val;
    cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;

    quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
    cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
                 cpi->UVquant_shift[Q] + 0, quant_val);
    cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.UVdequant[Q][0] = quant_val;
    cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;

    /* ac values */
    quant_val = vp8_ac_yquant(Q);
    cpi->Y1quant_fast[Q][1] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 1,
                 cpi->Y1quant_shift[Q] + 1, quant_val);
    cpi->Y1zbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->Y1round[Q][1] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.Y1dequant[Q][1] = quant_val;
    cpi->zrun_zbin_boost_y1[Q][1] = (quant_val * zbin_boost[1]) >> 7;

    quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
    cpi->Y2quant_fast[Q][1] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 1,
                 cpi->Y2quant_shift[Q] + 1, quant_val);
    cpi->Y2zbin[Q][1] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
    cpi->Y2round[Q][1] = (qrounding_factors_y2[Q] * quant_val) >> 7;
    cpi->common.Y2dequant[Q][1] = quant_val;
    cpi->zrun_zbin_boost_y2[Q][1] = (quant_val * zbin_boost[1]) >> 7;

    quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
    cpi->UVquant_fast[Q][1] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 1,
                 cpi->UVquant_shift[Q] + 1, quant_val);
    cpi->UVzbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->UVround[Q][1] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.UVdequant[Q][1] = quant_val;
    cpi->zrun_zbin_boost_uv[Q][1] = (quant_val * zbin_boost[1]) >> 7;

    for (i = 2; i < 16; ++i) {
      cpi->Y1quant_fast[Q][i] = cpi->Y1quant_fast[Q][1];
      cpi->Y1quant[Q][i] = cpi->Y1quant[Q][1];
      cpi->Y1quant_shift[Q][i] = cpi->Y1quant_shift[Q][1];
      cpi->Y1zbin[Q][i] = cpi->Y1zbin[Q][1];
      cpi->Y1round[Q][i] = cpi->Y1round[Q][1];
      cpi->zrun_zbin_boost_y1[Q][i] =
          (cpi->common.Y1dequant[Q][1] * zbin_boost[i]) >> 7;

      cpi->Y2quant_fast[Q][i] = cpi->Y2quant_fast[Q][1];
      cpi->Y2quant[Q][i] = cpi->Y2quant[Q][1];
      cpi->Y2quant_shift[Q][i] = cpi->Y2quant_shift[Q][1];
      cpi->Y2zbin[Q][i] = cpi->Y2zbin[Q][1];
      cpi->Y2round[Q][i] = cpi->Y2round[Q][1];
      cpi->zrun_zbin_boost_y2[Q][i] =
          (cpi->common.Y2dequant[Q][1] * zbin_boost[i]) >> 7;

      cpi->UVquant_fast[Q][i] = cpi->UVquant_fast[Q][1];
      cpi->UVquant[Q][i] = cpi->UVquant[Q][1];
      cpi->UVquant_shift[Q][i] = cpi->UVquant_shift[Q][1];
      cpi->UVzbin[Q][i] = cpi->UVzbin[Q][1];
      cpi->UVround[Q][i] = cpi->UVround[Q][1];
      cpi->zrun_zbin_boost_uv[Q][i] =
          (cpi->common.UVdequant[Q][1] * zbin_boost[i]) >> 7;
    }
  }
}

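/* Extra zero-bin width for the current macroblock: the sum of the encoder's
 * over-quant, mode boost, and activity adjustments, scaled by the AC dequant
 * step and divided by 128 (>> 7). The Y2 block uses only half of the
 * over-quant term.
 */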
#define ZBIN_EXTRA_Y                                                \
  ((cpi->common.Y1dequant[QIndex][1] *                              \
    (x->zbin_over_quant + x->zbin_mode_boost + x->act_zbin_adj)) >> \
   7)

#define ZBIN_EXTRA_UV                                               \
  ((cpi->common.UVdequant[QIndex][1] *                              \
    (x->zbin_over_quant + x->zbin_mode_boost + x->act_zbin_adj)) >> \
   7)

#define ZBIN_EXTRA_Y2                                                     \
  ((cpi->common.Y2dequant[QIndex][1] *                                    \
    ((x->zbin_over_quant / 2) + x->zbin_mode_boost + x->act_zbin_adj)) >> \
   7)

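/* Point this macroblock's quantizer at the tables precomputed in
 * vp8cx_init_quantizer, using the segment-adjusted Q index, and refresh
 * zbin_extra. When ok_to_skip is set, the full setup is skipped if neither
 * the Q index nor any zbin adjustment has changed since the last call.
 */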
void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip) {
  int i;
  int QIndex;
  MACROBLOCKD *xd = &x->e_mbd;
  int zbin_extra;

  /* Select the baseline MB Q index. */
  if (xd->segmentation_enabled) {
    /* Abs Value */
    if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA) {
      QIndex = xd->segment_feature_data[MB_LVL_ALT_Q]
                                       [xd->mode_info_context->mbmi.segment_id];
      /* Delta Value */
    } else {
      QIndex = cpi->common.base_qindex +
               xd->segment_feature_data[MB_LVL_ALT_Q]
                                       [xd->mode_info_context->mbmi.segment_id];
      /* Clamp to valid range */
      QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;
    }
  } else {
    QIndex = cpi->common.base_qindex;
  }

  /* This initialization should be called at least once. Use ok_to_skip to
   * decide if it is ok to skip.
   * Before encoding a frame, this function is always called with
   * ok_to_skip=0, which means no skipping of calculations. The "last"
   * values are initialized at that time.
   */
  if (!ok_to_skip || QIndex != x->q_index) {
    xd->dequant_y1_dc[0] = 1;
    xd->dequant_y1[0] = cpi->common.Y1dequant[QIndex][0];
    xd->dequant_y2[0] = cpi->common.Y2dequant[QIndex][0];
    xd->dequant_uv[0] = cpi->common.UVdequant[QIndex][0];

    for (i = 1; i < 16; ++i) {
      xd->dequant_y1_dc[i] = xd->dequant_y1[i] =
          cpi->common.Y1dequant[QIndex][1];
      xd->dequant_y2[i] = cpi->common.Y2dequant[QIndex][1];
      xd->dequant_uv[i] = cpi->common.UVdequant[QIndex][1];
    }
#if 1
    /*TODO: Remove dequant from BLOCKD. This is a temporary solution until
     * the quantizer code uses a passed in pointer to the dequant constants.
     * This will also require modifications to the x86 and neon assembly.
     * */
    for (i = 0; i < 16; ++i) x->e_mbd.block[i].dequant = xd->dequant_y1;
    for (i = 16; i < 24; ++i) x->e_mbd.block[i].dequant = xd->dequant_uv;
    x->e_mbd.block[24].dequant = xd->dequant_y2;
#endif

    /* Y */
    zbin_extra = ZBIN_EXTRA_Y;

    for (i = 0; i < 16; ++i) {
      x->block[i].quant = cpi->Y1quant[QIndex];
      x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
      x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
      x->block[i].zbin = cpi->Y1zbin[QIndex];
      x->block[i].round = cpi->Y1round[QIndex];
      x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
      x->block[i].zbin_extra = (short)zbin_extra;
    }

    /* UV */
    zbin_extra = ZBIN_EXTRA_UV;

    for (i = 16; i < 24; ++i) {
      x->block[i].quant = cpi->UVquant[QIndex];
      x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
      x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
      x->block[i].zbin = cpi->UVzbin[QIndex];
      x->block[i].round = cpi->UVround[QIndex];
      x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
      x->block[i].zbin_extra = (short)zbin_extra;
    }

    /* Y2 */
    zbin_extra = ZBIN_EXTRA_Y2;

    x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
    x->block[24].quant = cpi->Y2quant[QIndex];
    x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
    x->block[24].zbin = cpi->Y2zbin[QIndex];
    x->block[24].round = cpi->Y2round[QIndex];
    x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
    x->block[24].zbin_extra = (short)zbin_extra;

    /* save this macroblock QIndex for vp8_update_zbin_extra() */
    x->q_index = QIndex;

    x->last_zbin_over_quant = x->zbin_over_quant;
    x->last_zbin_mode_boost = x->zbin_mode_boost;
    x->last_act_zbin_adj = x->act_zbin_adj;

  } else if (x->last_zbin_over_quant != x->zbin_over_quant ||
             x->last_zbin_mode_boost != x->zbin_mode_boost ||
             x->last_act_zbin_adj != x->act_zbin_adj) {
    /* Y */
    zbin_extra = ZBIN_EXTRA_Y;

    for (i = 0; i < 16; ++i) x->block[i].zbin_extra = (short)zbin_extra;

    /* UV */
    zbin_extra = ZBIN_EXTRA_UV;

    for (i = 16; i < 24; ++i) x->block[i].zbin_extra = (short)zbin_extra;

    /* Y2 */
    zbin_extra = ZBIN_EXTRA_Y2;
    x->block[24].zbin_extra = (short)zbin_extra;

    x->last_zbin_over_quant = x->zbin_over_quant;
    x->last_zbin_mode_boost = x->zbin_mode_boost;
    x->last_act_zbin_adj = x->act_zbin_adj;
  }
}

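/* Refresh only the zbin_extra values for the saved q_index; the quantizer
 * tables themselves are left untouched.
 */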
void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) {
  int i;
  int QIndex = x->q_index;
  int zbin_extra;

  /* Y */
  zbin_extra = ZBIN_EXTRA_Y;

  for (i = 0; i < 16; ++i) x->block[i].zbin_extra = (short)zbin_extra;

  /* UV */
  zbin_extra = ZBIN_EXTRA_UV;

  for (i = 16; i < 24; ++i) x->block[i].zbin_extra = (short)zbin_extra;

  /* Y2 */
  zbin_extra = ZBIN_EXTRA_Y2;
  x->block[24].zbin_extra = (short)zbin_extra;
}
#undef ZBIN_EXTRA_Y
#undef ZBIN_EXTRA_UV
#undef ZBIN_EXTRA_Y2

void vp8cx_frame_init_quantizer(VP8_COMP *cpi) {
  /* Clear Zbin mode boost for default case */
  cpi->mb.zbin_mode_boost = 0;

  /* MB level quantizer setup */
  vp8cx_mb_init_quantizer(cpi, &cpi->mb, 0);
}

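/* Set the frame-level base Q index and delta-q values. Y2 DC is raised for
 * very low Q, and for screen content the UV deltas are lowered when Q is
 * above 40. The per-Q tables are rebuilt only when one of the delta values
 * actually changes.
 */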
void vp8_set_quantizer(struct VP8_COMP *cpi, int Q) {
  VP8_COMMON *cm = &cpi->common;
  MACROBLOCKD *mbd = &cpi->mb.e_mbd;
  int update = 0;
  int new_delta_q;
  int new_uv_delta_q;
  cm->base_qindex = Q;

  /* If any of the delta_q values change, the update flag has to be set. */
  /* Only y2dc_delta_q and, for screen content, the UV delta_q values may
   * change here. */

  cm->y1dc_delta_q = 0;
  cm->y2ac_delta_q = 0;

  if (Q < 4) {
    new_delta_q = 4 - Q;
  } else {
    new_delta_q = 0;
  }

  update |= cm->y2dc_delta_q != new_delta_q;
  cm->y2dc_delta_q = new_delta_q;

  new_uv_delta_q = 0;
  // For screen content, lower the q value for UV channel. For now, select
  // conservative delta; same delta for dc and ac, and decrease it with lower
  // Q, and set to 0 below some threshold. May want to condition this in
  // future on the variance/energy in UV channel.
  if (cpi->oxcf.screen_content_mode && Q > 40) {
    new_uv_delta_q = -(int)(0.15 * Q);
    // Check range: magnitude of delta is 4 bits.
    if (new_uv_delta_q < -15) {
      new_uv_delta_q = -15;
    }
  }
  update |= cm->uvdc_delta_q != new_uv_delta_q;
  cm->uvdc_delta_q = new_uv_delta_q;
  cm->uvac_delta_q = new_uv_delta_q;

  /* Set segment-specific quantizers */
  mbd->segment_feature_data[MB_LVL_ALT_Q][0] =
      cpi->segment_feature_data[MB_LVL_ALT_Q][0];
  mbd->segment_feature_data[MB_LVL_ALT_Q][1] =
      cpi->segment_feature_data[MB_LVL_ALT_Q][1];
  mbd->segment_feature_data[MB_LVL_ALT_Q][2] =
      cpi->segment_feature_data[MB_LVL_ALT_Q][2];
  mbd->segment_feature_data[MB_LVL_ALT_Q][3] =
      cpi->segment_feature_data[MB_LVL_ALT_Q][3];

  /* quantizer has to be reinitialized for any delta_q changes */
  if (update) vp8cx_init_quantizer(cpi);
}