/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <math.h>
#include "vpx_mem/vpx_mem.h"

#include "onyx_int.h"
#include "vp8/encoder/quantize.h"
#include "vp8/common/quant_common.h"

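/* Fast-path quantizer for one 4x4 block of 16 coefficients, visited in
 * zig-zag order. b->quant_fast[] is built as (1 << 16) / dequant (see
 * vp8cx_init_quantizer below), so the multiply-and-shift approximates a
 * division by the quantizer step:
 *
 *   qcoeff  = sign(z) * (((abs(z) + round) * quant_fast) >> 16)
 *          ~= sign(z) * ((abs(z) + round) / dequant)
 *   dqcoeff = qcoeff * dequant
 *
 * Illustrative example (values chosen for readability, not taken from the
 * tables): z = 37, round = 3, dequant = 8 gives quant_fast = 8192, so
 * qcoeff = (40 * 8192) >> 16 = 5 and dqcoeff = 40.
 */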
void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d) {
  int i, rc, eob;
  int x, y, z, sz;
  short *coeff_ptr = b->coeff;
  short *round_ptr = b->round;
  short *quant_ptr = b->quant_fast;
  short *qcoeff_ptr = d->qcoeff;
  short *dqcoeff_ptr = d->dqcoeff;
  short *dequant_ptr = d->dequant;

  eob = -1;
  for (i = 0; i < 16; ++i) {
    rc = vp8_default_zig_zag1d[i];
    z = coeff_ptr[rc];

    sz = (z >> 31);    /* sign of z */
    x = (z ^ sz) - sz; /* x = abs(z) */

    y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16; /* quantize (x) */
    x = (y ^ sz) - sz;                               /* get the sign back */
    qcoeff_ptr[rc] = x;                              /* write to destination */
    dqcoeff_ptr[rc] = x * dequant_ptr[rc];           /* dequantized value */

    if (y) {
      eob = i; /* last nonzero coeffs */
    }
  }
  *d->eob = (char)(eob + 1);
}

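/* Regular quantizer: same arithmetic as the fast path, but a coefficient
 * is only quantized when abs(z) clears a dead-zone ("zbin") threshold.
 * The threshold is the base zbin, plus a boost that grows with the run of
 * zeros since the last coded coefficient (zrun_zbin_boost, reset whenever
 * a nonzero value is produced), plus a per-macroblock offset (zbin_extra),
 * which biases isolated small coefficients toward zero. The memsets
 * assume 16 two-byte coefficients per block (32 bytes).
 */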
void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d) {
  int i, rc, eob;
  int zbin;
  int x, y, z, sz;
  short *zbin_boost_ptr = b->zrun_zbin_boost;
  short *coeff_ptr = b->coeff;
  short *zbin_ptr = b->zbin;
  short *round_ptr = b->round;
  short *quant_ptr = b->quant;
  short *quant_shift_ptr = b->quant_shift;
  short *qcoeff_ptr = d->qcoeff;
  short *dqcoeff_ptr = d->dqcoeff;
  short *dequant_ptr = d->dequant;
  short zbin_oq_value = b->zbin_extra;

  memset(qcoeff_ptr, 0, 32);
  memset(dqcoeff_ptr, 0, 32);

  eob = -1;

  for (i = 0; i < 16; ++i) {
    rc = vp8_default_zig_zag1d[i];
    z = coeff_ptr[rc];

    zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;

    zbin_boost_ptr++;
    sz = (z >> 31);    /* sign of z */
    x = (z ^ sz) - sz; /* x = abs(z) */

    if (x >= zbin) {
      x += round_ptr[rc];
      y = ((((x * quant_ptr[rc]) >> 16) + x) * quant_shift_ptr[rc]) >>
          16;                                /* quantize (x) */
      x = (y ^ sz) - sz;                     /* get the sign back */
      qcoeff_ptr[rc] = x;                    /* write to destination */
      dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */

      if (y) {
        eob = i;                             /* last nonzero coeffs */
        zbin_boost_ptr = b->zrun_zbin_boost; /* reset zero runlength */
      }
    }
  }

  *d->eob = (char)(eob + 1);
}

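/* Macroblock-level wrappers. Blocks 0-15 are the luma 4x4s, 16-23 the
 * chroma 4x4s, and block 24 is the second-order (Y2) block, which only
 * exists when the mode is neither B_PRED nor SPLITMV. x->quantize_b is a
 * function pointer, typically one of the two quantizers above.
 */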
void vp8_quantize_mby(MACROBLOCK *x) {
  int i;
  int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&
                       x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);

  for (i = 0; i < 16; ++i) x->quantize_b(&x->block[i], &x->e_mbd.block[i]);

  if (has_2nd_order) x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
}

void vp8_quantize_mb(MACROBLOCK *x) {
  int i;
  int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&
                       x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);

  for (i = 0; i < 24 + has_2nd_order; ++i) {
    x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
  }
}

void vp8_quantize_mbuv(MACROBLOCK *x) {
  int i;

  for (i = 16; i < 24; ++i) x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}

static const int qrounding_factors[129] = {
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48
};

static const int qzbin_factors[129] = {
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80
};

static const int qrounding_factors_y2[129] = {
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48
};

static const int qzbin_factors_y2[129] = {
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80
};

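/* Compute the fixed-point reciprocal used by the regular quantizer. With
 * improved_quant, *quant and *shift are chosen so that, in
 * vp8_regular_quantize_b_c above,
 *   y = ((((x * quant) >> 16) + x) * quant_shift) >> 16
 * approximates x / d (quant_shift is stored pre-expanded as 1 << (16 - l)
 * so the final shift can stay a constant 16).
 * Illustrative example: d = 4 gives l = 2, m = 1 + (1 << 18) / 4 = 65537,
 * so quant = 1 and quant_shift = 1 << 14; for x < (1 << 16) the expression
 * reduces to (x * (1 << 14)) >> 16 = x / 4.
 */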
static void invert_quant(int improved_quant, short *quant, short *shift,
                         short d) {
  if (improved_quant) {
    unsigned t;
    int l, m;
    t = d;
    for (l = 0; t > 1; ++l) t >>= 1;
    m = 1 + (1 << (16 + l)) / d;
    *quant = (short)(m - (1 << 16));
    *shift = l;
    /* use multiplication and constant shift by 16 */
    *shift = 1 << (16 - *shift);
  } else {
    *quant = (1 << 16) / d;
    *shift = 0;
  }
}

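/* Precompute, for every base Q index, the per-coefficient quantizer tables
 * used above. Roughly (all in integer arithmetic, >> 7 ~= / 128):
 *   zbin            ~= quant_val * qzbin_factors[Q] / 128      (~0.63-0.66 * step)
 *   round           ~= quant_val * qrounding_factors[Q] / 128  (~0.375 * step)
 *   zrun_zbin_boost ~= quant_val * zbin_boost[i] / 128
 * Index [0] of each table is the DC entry; index [1] is the first AC entry,
 * which is then copied to positions 2..15.
 */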
void vp8cx_init_quantizer(VP8_COMP *cpi) {
  int i;
  int quant_val;
  int Q;

  int zbin_boost[16] = { 0,  0,  8,  10, 12, 14, 16, 20,
                         24, 28, 32, 36, 40, 44, 44, 44 };

  for (Q = 0; Q < QINDEX_RANGE; ++Q) {
    /* dc values */
    quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
    cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
                 cpi->Y1quant_shift[Q] + 0, quant_val);
    cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.Y1dequant[Q][0] = quant_val;
    cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;

    quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
    cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
                 cpi->Y2quant_shift[Q] + 0, quant_val);
    cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
    cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
    cpi->common.Y2dequant[Q][0] = quant_val;
    cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;

    quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
    cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
                 cpi->UVquant_shift[Q] + 0, quant_val);
    cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.UVdequant[Q][0] = quant_val;
    cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;

    /* all the ac values */
    quant_val = vp8_ac_yquant(Q);
    cpi->Y1quant_fast[Q][1] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 1,
                 cpi->Y1quant_shift[Q] + 1, quant_val);
    cpi->Y1zbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->Y1round[Q][1] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.Y1dequant[Q][1] = quant_val;
    cpi->zrun_zbin_boost_y1[Q][1] = (quant_val * zbin_boost[1]) >> 7;

    quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
    cpi->Y2quant_fast[Q][1] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 1,
                 cpi->Y2quant_shift[Q] + 1, quant_val);
    cpi->Y2zbin[Q][1] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
    cpi->Y2round[Q][1] = (qrounding_factors_y2[Q] * quant_val) >> 7;
    cpi->common.Y2dequant[Q][1] = quant_val;
    cpi->zrun_zbin_boost_y2[Q][1] = (quant_val * zbin_boost[1]) >> 7;

    quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
    cpi->UVquant_fast[Q][1] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 1,
                 cpi->UVquant_shift[Q] + 1, quant_val);
    cpi->UVzbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->UVround[Q][1] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.UVdequant[Q][1] = quant_val;
    cpi->zrun_zbin_boost_uv[Q][1] = (quant_val * zbin_boost[1]) >> 7;

    for (i = 2; i < 16; ++i) {
      cpi->Y1quant_fast[Q][i] = cpi->Y1quant_fast[Q][1];
      cpi->Y1quant[Q][i] = cpi->Y1quant[Q][1];
      cpi->Y1quant_shift[Q][i] = cpi->Y1quant_shift[Q][1];
      cpi->Y1zbin[Q][i] = cpi->Y1zbin[Q][1];
      cpi->Y1round[Q][i] = cpi->Y1round[Q][1];
      cpi->zrun_zbin_boost_y1[Q][i] =
          (cpi->common.Y1dequant[Q][1] * zbin_boost[i]) >> 7;

      cpi->Y2quant_fast[Q][i] = cpi->Y2quant_fast[Q][1];
      cpi->Y2quant[Q][i] = cpi->Y2quant[Q][1];
      cpi->Y2quant_shift[Q][i] = cpi->Y2quant_shift[Q][1];
      cpi->Y2zbin[Q][i] = cpi->Y2zbin[Q][1];
      cpi->Y2round[Q][i] = cpi->Y2round[Q][1];
      cpi->zrun_zbin_boost_y2[Q][i] =
          (cpi->common.Y2dequant[Q][1] * zbin_boost[i]) >> 7;

      cpi->UVquant_fast[Q][i] = cpi->UVquant_fast[Q][1];
      cpi->UVquant[Q][i] = cpi->UVquant[Q][1];
      cpi->UVquant_shift[Q][i] = cpi->UVquant_shift[Q][1];
      cpi->UVzbin[Q][i] = cpi->UVzbin[Q][1];
      cpi->UVround[Q][i] = cpi->UVround[Q][1];
      cpi->zrun_zbin_boost_uv[Q][i] =
          (cpi->common.UVdequant[Q][1] * zbin_boost[i]) >> 7;
    }
  }
}

#define ZBIN_EXTRA_Y                                                \
  ((cpi->common.Y1dequant[QIndex][1] *                              \
    (x->zbin_over_quant + x->zbin_mode_boost + x->act_zbin_adj)) >> \
   7)

#define ZBIN_EXTRA_UV                                               \
  ((cpi->common.UVdequant[QIndex][1] *                              \
    (x->zbin_over_quant + x->zbin_mode_boost + x->act_zbin_adj)) >> \
   7)

#define ZBIN_EXTRA_Y2                                                     \
  ((cpi->common.Y2dequant[QIndex][1] *                                    \
    ((x->zbin_over_quant / 2) + x->zbin_mode_boost + x->act_zbin_adj)) >> \
   7)

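/* The ZBIN_EXTRA_* macros fold the mode-, rate- and activity-dependent
 * adjustments (zbin_mode_boost, zbin_over_quant, act_zbin_adj) into one
 * extra dead-zone offset, scaled by the plane's AC dequant value and
 * divided by 128 (>> 7); the Y2 variant halves zbin_over_quant. They are
 * evaluated below, where cpi, x and QIndex are in scope.
 */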
void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip) {
  int i;
  int QIndex;
  MACROBLOCKD *xd = &x->e_mbd;
  int zbin_extra;

  /* Select the baseline MB Q index. */
  if (xd->segmentation_enabled) {
    /* Abs Value */
    if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA) {
      QIndex = xd->segment_feature_data[MB_LVL_ALT_Q]
                                       [xd->mode_info_context->mbmi.segment_id];
      /* Delta Value */
    } else {
      QIndex = cpi->common.base_qindex +
               xd->segment_feature_data[MB_LVL_ALT_Q]
                                       [xd->mode_info_context->mbmi.segment_id];
      /* Clamp to valid range */
      QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;
    }
  } else {
    QIndex = cpi->common.base_qindex;
  }

  /* This initialization should be called at least once. Use ok_to_skip to
   * decide whether it is ok to skip.
   * Before encoding a frame, this function is always called with
   * ok_to_skip = 0, which means no skipping of calculations. The "last"
   * values are initialized at that time.
   */
  if (!ok_to_skip || QIndex != x->q_index) {
    xd->dequant_y1_dc[0] = 1;
    xd->dequant_y1[0] = cpi->common.Y1dequant[QIndex][0];
    xd->dequant_y2[0] = cpi->common.Y2dequant[QIndex][0];
    xd->dequant_uv[0] = cpi->common.UVdequant[QIndex][0];

    for (i = 1; i < 16; ++i) {
      xd->dequant_y1_dc[i] = xd->dequant_y1[i] =
          cpi->common.Y1dequant[QIndex][1];
      xd->dequant_y2[i] = cpi->common.Y2dequant[QIndex][1];
      xd->dequant_uv[i] = cpi->common.UVdequant[QIndex][1];
    }
#if 1
    /* TODO: Remove dequant from BLOCKD. This is a temporary solution until
     * the quantizer code uses a passed in pointer to the dequant constants.
     * This will also require modifications to the x86 and neon assembly.
     */
    for (i = 0; i < 16; ++i) x->e_mbd.block[i].dequant = xd->dequant_y1;
    for (i = 16; i < 24; ++i) x->e_mbd.block[i].dequant = xd->dequant_uv;
    x->e_mbd.block[24].dequant = xd->dequant_y2;
#endif

    /* Y */
    zbin_extra = ZBIN_EXTRA_Y;

    for (i = 0; i < 16; ++i) {
      x->block[i].quant = cpi->Y1quant[QIndex];
      x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
      x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
      x->block[i].zbin = cpi->Y1zbin[QIndex];
      x->block[i].round = cpi->Y1round[QIndex];
      x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
      x->block[i].zbin_extra = (short)zbin_extra;
    }

    /* UV */
    zbin_extra = ZBIN_EXTRA_UV;

    for (i = 16; i < 24; ++i) {
      x->block[i].quant = cpi->UVquant[QIndex];
      x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
      x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
      x->block[i].zbin = cpi->UVzbin[QIndex];
      x->block[i].round = cpi->UVround[QIndex];
      x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
      x->block[i].zbin_extra = (short)zbin_extra;
    }

    /* Y2 */
    zbin_extra = ZBIN_EXTRA_Y2;

    x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
    x->block[24].quant = cpi->Y2quant[QIndex];
    x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
    x->block[24].zbin = cpi->Y2zbin[QIndex];
    x->block[24].round = cpi->Y2round[QIndex];
    x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
    x->block[24].zbin_extra = (short)zbin_extra;

    /* save this macroblock QIndex for vp8_update_zbin_extra() */
    x->q_index = QIndex;

    x->last_zbin_over_quant = x->zbin_over_quant;
    x->last_zbin_mode_boost = x->zbin_mode_boost;
    x->last_act_zbin_adj = x->act_zbin_adj;

  } else if (x->last_zbin_over_quant != x->zbin_over_quant ||
             x->last_zbin_mode_boost != x->zbin_mode_boost ||
             x->last_act_zbin_adj != x->act_zbin_adj) {
    /* Y */
    zbin_extra = ZBIN_EXTRA_Y;

    for (i = 0; i < 16; ++i) x->block[i].zbin_extra = (short)zbin_extra;

    /* UV */
    zbin_extra = ZBIN_EXTRA_UV;

    for (i = 16; i < 24; ++i) x->block[i].zbin_extra = (short)zbin_extra;

    /* Y2 */
    zbin_extra = ZBIN_EXTRA_Y2;
    x->block[24].zbin_extra = (short)zbin_extra;

    x->last_zbin_over_quant = x->zbin_over_quant;
    x->last_zbin_mode_boost = x->zbin_mode_boost;
    x->last_act_zbin_adj = x->act_zbin_adj;
  }
}

void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) {
  int i;
  int QIndex = x->q_index;
  int zbin_extra;

  /* Y */
  zbin_extra = ZBIN_EXTRA_Y;

  for (i = 0; i < 16; ++i) x->block[i].zbin_extra = (short)zbin_extra;

  /* UV */
  zbin_extra = ZBIN_EXTRA_UV;

  for (i = 16; i < 24; ++i) x->block[i].zbin_extra = (short)zbin_extra;

  /* Y2 */
  zbin_extra = ZBIN_EXTRA_Y2;
  x->block[24].zbin_extra = (short)zbin_extra;
}
#undef ZBIN_EXTRA_Y
#undef ZBIN_EXTRA_UV
#undef ZBIN_EXTRA_Y2

void vp8cx_frame_init_quantizer(VP8_COMP *cpi) {
  /* Clear Zbin mode boost for default case */
  cpi->mb.zbin_mode_boost = 0;

  /* MB level quantizer setup */
  vp8cx_mb_init_quantizer(cpi, &cpi->mb, 0);
}

void vp8_set_quantizer(struct VP8_COMP *cpi, int Q) {
  VP8_COMMON *cm = &cpi->common;
  MACROBLOCKD *mbd = &cpi->mb.e_mbd;
  int update = 0;
  int new_delta_q;
  int new_uv_delta_q;
  cm->base_qindex = Q;

  /* If any of the delta_q values change, the update flag has to be set. */
  /* Currently y2dc_delta_q and the uv delta_q values may change. */

  cm->y1dc_delta_q = 0;
  cm->y2ac_delta_q = 0;

  if (Q < 4) {
    new_delta_q = 4 - Q;
  } else {
    new_delta_q = 0;
  }

  update |= cm->y2dc_delta_q != new_delta_q;
  cm->y2dc_delta_q = new_delta_q;

  new_uv_delta_q = 0;
  // For screen content, lower the q value for the UV channels. For now,
  // select a conservative delta: the same delta for dc and ac, decreasing
  // in magnitude with lower Q, and set to 0 below some threshold. This may
  // be conditioned in the future on the variance/energy in the UV channels.
  if (cpi->oxcf.screen_content_mode && Q > 40) {
    new_uv_delta_q = -(int)(0.15 * Q);
    // Check range: magnitude of delta is 4 bits.
    if (new_uv_delta_q < -15) {
      new_uv_delta_q = -15;
    }
  }
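  // Illustrative outcomes of the formula above: Q = 50 gives
  // new_uv_delta_q = -7 and Q = 100 gives -15; the clamp only matters for
  // the largest Q values.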
  update |= cm->uvdc_delta_q != new_uv_delta_q;
  cm->uvdc_delta_q = new_uv_delta_q;
  cm->uvac_delta_q = new_uv_delta_q;

  /* Set segment specific quantizers */
  mbd->segment_feature_data[MB_LVL_ALT_Q][0] =
      cpi->segment_feature_data[MB_LVL_ALT_Q][0];
  mbd->segment_feature_data[MB_LVL_ALT_Q][1] =
      cpi->segment_feature_data[MB_LVL_ALT_Q][1];
  mbd->segment_feature_data[MB_LVL_ALT_Q][2] =
      cpi->segment_feature_data[MB_LVL_ALT_Q][2];
  mbd->segment_feature_data[MB_LVL_ALT_Q][3] =
      cpi->segment_feature_data[MB_LVL_ALT_Q][3];

  /* quantizer has to be reinitialized for any delta_q changes */
  if (update) vp8cx_init_quantizer(cpi);
}