1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "vpx_config.h"
12 #include "./vpx_scale_rtcd.h"
13 #include "./vpx_dsp_rtcd.h"
14 #include "./vp8_rtcd.h"
15 #include "bitstream.h"
16 #include "vp8/common/onyxc_int.h"
17 #include "vp8/common/blockd.h"
18 #include "onyx_int.h"
19 #include "vp8/common/systemdependent.h"
20 #include "vp8/common/vp8_skin_detection.h"
21 #include "vp8/encoder/quantize.h"
22 #include "vp8/common/alloccommon.h"
23 #include "mcomp.h"
24 #include "firstpass.h"
25 #include "vpx_dsp/psnr.h"
26 #include "vpx_scale/vpx_scale.h"
27 #include "vp8/common/extend.h"
28 #include "ratectrl.h"
29 #include "vp8/common/quant_common.h"
30 #include "segmentation.h"
31 #if CONFIG_POSTPROC
32 #include "vp8/common/postproc.h"
33 #endif
34 #include "vpx_mem/vpx_mem.h"
35 #include "vp8/common/reconintra.h"
36 #include "vp8/common/swapyv12buffer.h"
37 #include "vp8/common/threading.h"
38 #include "vpx_ports/system_state.h"
39 #include "vpx_ports/vpx_timer.h"
40 #include "vpx_util/vpx_write_yuv_frame.h"
41 #if VPX_ARCH_ARM
42 #include "vpx_ports/arm.h"
43 #endif
44 #if CONFIG_MULTI_RES_ENCODING
45 #include "mr_dissim.h"
46 #endif
47 #include "encodeframe.h"
48 #if CONFIG_MULTITHREAD
49 #include "ethreading.h"
50 #endif
51 #include "picklpf.h"
52 #if !CONFIG_REALTIME_ONLY
53 #include "temporal_filter.h"
54 #endif
55
56 #include <assert.h>
57 #include <math.h>
58 #include <stdio.h>
59 #include <limits.h>
60
61 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
62 extern int vp8_update_coef_context(VP8_COMP *cpi);
63 #endif
64
65 extern unsigned int vp8_get_processor_freq();
66
67 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
68
69 static void set_default_lf_deltas(VP8_COMP *cpi);
70
71 extern const int vp8_gf_interval_table[101];
72
73 #if CONFIG_INTERNAL_STATS
74 #include "math.h"
75 #include "vpx_dsp/ssim.h"
76 #endif
77
78 #ifdef OUTPUT_YUV_SRC
79 FILE *yuv_file;
80 #endif
81 #ifdef OUTPUT_YUV_DENOISED
82 FILE *yuv_denoised_file;
83 #endif
84 #ifdef OUTPUT_YUV_SKINMAP
85 static FILE *yuv_skinmap_file = NULL;
86 #endif
87
88 #if 0
89 FILE *framepsnr;
90 FILE *kf_list;
91 FILE *keyfile;
92 #endif
93
94 #if 0
95 extern int skip_true_count;
96 extern int skip_false_count;
97 #endif
98
99 #ifdef SPEEDSTATS
100 unsigned int frames_at_speed[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0 };
102 unsigned int tot_pm = 0;
103 unsigned int cnt_pm = 0;
104 unsigned int tot_ef = 0;
105 unsigned int cnt_ef = 0;
106 #endif
107
108 #ifdef MODE_STATS
109 extern unsigned __int64 Sectionbits[50];
110 extern int y_modes[5];
111 extern int uv_modes[4];
112 extern int b_modes[10];
113
114 extern int inter_y_modes[10];
115 extern int inter_uv_modes[4];
116 extern unsigned int inter_b_modes[15];
117 #endif
118
119 extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
120
121 extern const int qrounding_factors[129];
122 extern const int qzbin_factors[129];
123 extern void vp8cx_init_quantizer(VP8_COMP *cpi);
124 extern const int vp8cx_base_skip_false_prob[128];
125
126 /* Tables relating active max Q to active min Q */
127 static const unsigned char kf_low_motion_minq[QINDEX_RANGE] = {
128 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
129 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
130 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
131 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
132 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 10, 10, 11,
133 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
134 17, 17, 18, 18, 18, 18, 19, 20, 20, 21, 21, 22, 23, 23
135 };
136 static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = {
137 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
138 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
139 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5,
140 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10,
141 10, 10, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16,
142 16, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
143 22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30
144 };
145 static const unsigned char gf_low_motion_minq[QINDEX_RANGE] = {
146 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3,
147 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8,
148 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
149 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
150 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34,
151 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 44,
152 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58
153 };
154 static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] = {
155 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5,
156 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11,
157 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18,
158 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
159 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37,
160 37, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 45, 46, 47, 48, 49, 50,
161 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
162 };
163 static const unsigned char gf_high_motion_minq[QINDEX_RANGE] = {
164 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5,
165 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11,
166 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21,
167 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30,
168 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40,
169 40, 41, 41, 42, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
170 57, 58, 59, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80
171 };
172 static const unsigned char inter_minq[QINDEX_RANGE] = {
173 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
174 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24,
175 24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38,
176 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53,
177 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69,
178 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86,
179 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
180 };
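/* Illustrative example of how the minq tables above are used elsewhere in
 * the encoder: the active worst (max) Q index selects an entry, which is
 * then used to bound the active best (min) Q. For instance, with an active
 * worst quality of 60, a low-motion key frame maps to
 * kf_low_motion_minq[60] (= 2), while an ordinary inter frame maps to
 * inter_minq[60] (about 41), so key frames are allowed a much finer
 * quantizer.
 */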
181
182 #ifdef PACKET_TESTING
183 extern FILE *vpxlogc;
184 #endif
185
static void save_layer_context(VP8_COMP *cpi) {
187 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
188
189 /* Save layer dependent coding state */
190 lc->target_bandwidth = cpi->target_bandwidth;
191 lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
192 lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
193 lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
194 lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
195 lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
196 lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
197 lc->buffer_level = cpi->buffer_level;
198 lc->bits_off_target = cpi->bits_off_target;
199 lc->total_actual_bits = cpi->total_actual_bits;
200 lc->worst_quality = cpi->worst_quality;
201 lc->active_worst_quality = cpi->active_worst_quality;
202 lc->best_quality = cpi->best_quality;
203 lc->active_best_quality = cpi->active_best_quality;
204 lc->ni_av_qi = cpi->ni_av_qi;
205 lc->ni_tot_qi = cpi->ni_tot_qi;
206 lc->ni_frames = cpi->ni_frames;
207 lc->avg_frame_qindex = cpi->avg_frame_qindex;
208 lc->rate_correction_factor = cpi->rate_correction_factor;
209 lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
210 lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
211 lc->zbin_over_quant = cpi->mb.zbin_over_quant;
212 lc->inter_frame_target = cpi->inter_frame_target;
213 lc->total_byte_count = cpi->total_byte_count;
214 lc->filter_level = cpi->common.filter_level;
215 lc->frames_since_last_drop_overshoot = cpi->frames_since_last_drop_overshoot;
216 lc->force_maxqp = cpi->force_maxqp;
217 lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
218 lc->last_q[0] = cpi->last_q[0];
219 lc->last_q[1] = cpi->last_q[1];
220
221 memcpy(lc->count_mb_ref_frame_usage, cpi->mb.count_mb_ref_frame_usage,
222 sizeof(cpi->mb.count_mb_ref_frame_usage));
223 }
224
static void restore_layer_context(VP8_COMP *cpi, const int layer) {
226 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
227
228 /* Restore layer dependent coding state */
229 cpi->current_layer = layer;
230 cpi->target_bandwidth = lc->target_bandwidth;
231 cpi->oxcf.target_bandwidth = lc->target_bandwidth;
232 cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
233 cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
234 cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
235 cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
236 cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
237 cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
238 cpi->buffer_level = lc->buffer_level;
239 cpi->bits_off_target = lc->bits_off_target;
240 cpi->total_actual_bits = lc->total_actual_bits;
241 cpi->active_worst_quality = lc->active_worst_quality;
242 cpi->active_best_quality = lc->active_best_quality;
243 cpi->ni_av_qi = lc->ni_av_qi;
244 cpi->ni_tot_qi = lc->ni_tot_qi;
245 cpi->ni_frames = lc->ni_frames;
246 cpi->avg_frame_qindex = lc->avg_frame_qindex;
247 cpi->rate_correction_factor = lc->rate_correction_factor;
248 cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
249 cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
250 cpi->mb.zbin_over_quant = lc->zbin_over_quant;
251 cpi->inter_frame_target = lc->inter_frame_target;
252 cpi->total_byte_count = lc->total_byte_count;
253 cpi->common.filter_level = lc->filter_level;
254 cpi->frames_since_last_drop_overshoot = lc->frames_since_last_drop_overshoot;
255 cpi->force_maxqp = lc->force_maxqp;
256 cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
257 cpi->last_q[0] = lc->last_q[0];
258 cpi->last_q[1] = lc->last_q[1];
259
260 memcpy(cpi->mb.count_mb_ref_frame_usage, lc->count_mb_ref_frame_usage,
261 sizeof(cpi->mb.count_mb_ref_frame_usage));
262 }
263
static int rescale(int val, int num, int denom) {
265 int64_t llnum = num;
266 int64_t llden = denom;
267 int64_t llval = val;
268
269 return (int)(llval * llnum / llden);
270 }
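/* A minimal worked example of rescale(): the product is formed in 64 bits
 * to avoid overflow before the final divide. For example, converting a
 * 4000 ms buffer level at a target bandwidth of 800000 bits/s:
 *   rescale(4000, 800000, 1000) == (int)(4000LL * 800000 / 1000) == 3200000
 * i.e. 3.2 million bits, which is how millisecond buffer settings are
 * turned into bit values in this file.
 */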
271
static void init_temporal_layer_context(VP8_COMP *cpi, VP8_CONFIG *oxcf,
273 const int layer,
274 double prev_layer_framerate) {
275 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
276
277 lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
278 lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
279
280 lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
281 lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
282 lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
283
284 lc->starting_buffer_level =
285 rescale((int)(oxcf->starting_buffer_level), lc->target_bandwidth, 1000);
286
287 if (oxcf->optimal_buffer_level == 0) {
288 lc->optimal_buffer_level = lc->target_bandwidth / 8;
289 } else {
290 lc->optimal_buffer_level =
291 rescale((int)(oxcf->optimal_buffer_level), lc->target_bandwidth, 1000);
292 }
293
294 if (oxcf->maximum_buffer_size == 0) {
295 lc->maximum_buffer_size = lc->target_bandwidth / 8;
296 } else {
297 lc->maximum_buffer_size =
298 rescale((int)(oxcf->maximum_buffer_size), lc->target_bandwidth, 1000);
299 }
300
301 /* Work out the average size of a frame within this layer */
302 if (layer > 0) {
303 lc->avg_frame_size_for_layer =
304 (int)((cpi->oxcf.target_bitrate[layer] -
305 cpi->oxcf.target_bitrate[layer - 1]) *
306 1000 / (lc->framerate - prev_layer_framerate));
307 }
308
309 lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
310 lc->active_best_quality = cpi->oxcf.best_allowed_q;
311 lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
312
313 lc->buffer_level = lc->starting_buffer_level;
314 lc->bits_off_target = lc->starting_buffer_level;
315
316 lc->total_actual_bits = 0;
317 lc->ni_av_qi = 0;
318 lc->ni_tot_qi = 0;
319 lc->ni_frames = 0;
320 lc->rate_correction_factor = 1.0;
321 lc->key_frame_rate_correction_factor = 1.0;
322 lc->gf_rate_correction_factor = 1.0;
323 lc->inter_frame_target = 0;
324 }
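/* Illustrative numbers for the layer setup above (hypothetical config):
 * with target_bitrate[] = { 200, 400 } kbits/s, rate_decimator[] = { 2, 1 }
 * and an output framerate of 30 fps, layer 1 gets
 *   avg_frame_size_for_layer = (400 - 200) * 1000 / (30 - 15) ~= 13333 bits
 * per frame, i.e. the enhancement-layer bit budget spread over the frames
 * that only the enhancement layer encodes.
 */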
325
326 // Upon a run-time change in temporal layers, reset the layer context parameters
327 // for any "new" layers. For "existing" layers, let them inherit the parameters
328 // from the previous layer state (at the same layer #). In future we may want
329 // to better map the previous layer state(s) to the "new" ones.
static void reset_temporal_layer_change(VP8_COMP *cpi, VP8_CONFIG *oxcf,
331 const int prev_num_layers) {
332 int i;
333 double prev_layer_framerate = 0;
334 const int curr_num_layers = cpi->oxcf.number_of_layers;
335 // If the previous state was 1 layer, get current layer context from cpi.
336 // We need this to set the layer context for the new layers below.
337 if (prev_num_layers == 1) {
338 cpi->current_layer = 0;
339 save_layer_context(cpi);
340 }
341 for (i = 0; i < curr_num_layers; ++i) {
342 LAYER_CONTEXT *lc = &cpi->layer_context[i];
343 if (i >= prev_num_layers) {
344 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
345 }
346 // The initial buffer levels are set based on their starting levels.
347 // We could set the buffer levels based on the previous state (normalized
348 // properly by the layer bandwidths) but we would need to keep track of
349 // the previous set of layer bandwidths (i.e., target_bitrate[i])
350 // before the layer change. For now, reset to the starting levels.
351 lc->buffer_level =
352 cpi->oxcf.starting_buffer_level_in_ms * cpi->oxcf.target_bitrate[i];
353 lc->bits_off_target = lc->buffer_level;
    // TODO(marpan): Should we set the rate_correction_factor and
355 // active_worst/best_quality to values derived from the previous layer
356 // state (to smooth-out quality dips/rate fluctuation at transition)?
357
358 // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
    // is not set for 1 layer, and restore_layer_context()/save_layer_context()
360 // are not called in the encoding loop, so we need to call it here to
361 // pass the layer context state to |cpi|.
362 if (curr_num_layers == 1) {
363 lc->target_bandwidth = cpi->oxcf.target_bandwidth;
364 lc->buffer_level =
365 cpi->oxcf.starting_buffer_level_in_ms * lc->target_bandwidth / 1000;
366 lc->bits_off_target = lc->buffer_level;
367 restore_layer_context(cpi, 0);
368 }
369 prev_layer_framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[i];
370 }
371 }
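/* Note on units in the reset above: starting_buffer_level_in_ms
 * (milliseconds) multiplied by target_bitrate[i] (kbits/s) yields bits,
 * e.g. 600 ms * 200 kbits/s = 120000 bits of starting buffer for a layer.
 */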
372
static void setup_features(VP8_COMP *cpi) {
374 // If segmentation enabled set the update flags
375 if (cpi->mb.e_mbd.segmentation_enabled) {
376 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
377 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
378 } else {
379 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
380 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
381 }
382
383 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
384 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
385 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
386 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
387 memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0,
388 sizeof(cpi->mb.e_mbd.ref_lf_deltas));
389 memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0,
390 sizeof(cpi->mb.e_mbd.mode_lf_deltas));
391
392 set_default_lf_deltas(cpi);
393 }
394
395 static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
396
void vp8_initialize_enc(void) {
398 static volatile int init_done = 0;
399
400 if (!init_done) {
401 vpx_dsp_rtcd();
402 vp8_init_intra_predictors();
403 init_done = 1;
404 }
405 }
406
static void dealloc_compressor_data(VP8_COMP *cpi) {
408 vpx_free(cpi->tplist);
409 cpi->tplist = NULL;
410
411 /* Delete last frame MV storage buffers */
412 vpx_free(cpi->lfmv);
413 cpi->lfmv = 0;
414
415 vpx_free(cpi->lf_ref_frame_sign_bias);
416 cpi->lf_ref_frame_sign_bias = 0;
417
418 vpx_free(cpi->lf_ref_frame);
419 cpi->lf_ref_frame = 0;
420
  /* Delete segmentation map */
422 vpx_free(cpi->segmentation_map);
423 cpi->segmentation_map = 0;
424
425 vpx_free(cpi->active_map);
426 cpi->active_map = 0;
427
428 vp8_de_alloc_frame_buffers(&cpi->common);
429
430 vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
431 vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
432 dealloc_raw_frame_buffers(cpi);
433
434 vpx_free(cpi->tok);
435 cpi->tok = 0;
436
437 /* Structure used to monitor GF usage */
438 vpx_free(cpi->gf_active_flags);
439 cpi->gf_active_flags = 0;
440
441 /* Activity mask based per mb zbin adjustments */
442 vpx_free(cpi->mb_activity_map);
443 cpi->mb_activity_map = 0;
444
445 vpx_free(cpi->mb.pip);
446 cpi->mb.pip = 0;
447
448 #if CONFIG_MULTITHREAD
449 vpx_free(cpi->mt_current_mb_col);
450 cpi->mt_current_mb_col = NULL;
451 #endif
452 }
453
static void enable_segmentation(VP8_COMP *cpi) {
455 /* Set the appropriate feature bit */
456 cpi->mb.e_mbd.segmentation_enabled = 1;
457 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
458 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
459 }
static void disable_segmentation(VP8_COMP *cpi) {
461 /* Clear the appropriate feature bit */
462 cpi->mb.e_mbd.segmentation_enabled = 0;
463 }
464
465 /* Valid values for a segment are 0 to 3
 * Segmentation map is arranged as [Rows][Columns]
467 */
static void set_segmentation_map(VP8_COMP *cpi,
469 unsigned char *segmentation_map) {
470 /* Copy in the new segmentation map */
471 memcpy(cpi->segmentation_map, segmentation_map,
472 (cpi->common.mb_rows * cpi->common.mb_cols));
473
474 /* Signal that the map should be updated. */
475 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
476 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
477 }
478
479 /* The values given for each segment can be either deltas (from the default
480 * value chosen for the frame) or absolute values.
481 *
482 * Valid range for abs values is:
483 * (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
 * Valid range for delta values is:
485 * (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
486 *
487 * abs_delta = SEGMENT_DELTADATA (deltas)
488 * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
489 *
490 */
static void set_segment_data(VP8_COMP *cpi, signed char *feature_data,
492 unsigned char abs_delta) {
493 cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
494 memcpy(cpi->segment_feature_data, feature_data,
495 sizeof(cpi->segment_feature_data));
496 }
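/* Hypothetical usage sketch for set_segment_data(): to quantize segment 1
 * ten index steps finer than the frame default while leaving its loop
 * filter untouched, a caller could do
 *   feature_data[MB_LVL_ALT_Q][1] = -10;
 *   feature_data[MB_LVL_ALT_LF][1] = 0;
 *   set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
 * With SEGMENT_ABSDATA the same slot would instead hold the absolute Q
 * index (0-127) to use for that segment.
 */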
497
498 /* A simple function to cyclically refresh the background at a lower Q */
static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) {
500 unsigned char *seg_map = cpi->segmentation_map;
501 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
502 int i;
503 int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
504 int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
505
506 cpi->cyclic_refresh_q = Q / 2;
507
508 if (cpi->oxcf.screen_content_mode) {
    // Modify quality ramp-up based on Q. Above the Q threshold, increase the
    // number of blocks to be refreshed; below it, reduce the number. Turn the
    // refresh off under certain conditions: away from a key frame, when we
    // are at good quality (low Q) and most of the blocks were skip-encoded
    // in the previous frame.
515 int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100;
516 if (Q >= qp_thresh) {
517 cpi->cyclic_refresh_mode_max_mbs_perframe =
518 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
519 } else if (cpi->frames_since_key > 250 && Q < 20 &&
520 cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
521 cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
522 } else {
523 cpi->cyclic_refresh_mode_max_mbs_perframe =
524 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
525 }
526 block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
527 }
528
529 // Set every macroblock to be eligible for update.
530 // For key frame this will reset seg map to 0.
531 memset(cpi->segmentation_map, 0, mbs_in_frame);
532
533 if (cpi->common.frame_type != KEY_FRAME && block_count > 0) {
534 /* Cycle through the macro_block rows */
535 /* MB loop to set local segmentation map */
536 i = cpi->cyclic_refresh_mode_index;
537 assert(i < mbs_in_frame);
538 do {
      /* If the MB is a candidate for clean-up then mark it for a
       * possible boost/refresh (segment 1). The segment id may get
       * reset to 0 later if the MB gets coded as anything other than
       * last frame (0,0), as only last frame (0,0) MBs are eligible
       * for refresh: that is to say, MBs likely to be background
       * blocks.
       */
545 if (cpi->cyclic_refresh_map[i] == 0) {
546 seg_map[i] = 1;
547 block_count--;
548 } else if (cpi->cyclic_refresh_map[i] < 0) {
549 cpi->cyclic_refresh_map[i]++;
550 }
551
552 i++;
553 if (i == mbs_in_frame) i = 0;
554
555 } while (block_count && i != cpi->cyclic_refresh_mode_index);
556
557 cpi->cyclic_refresh_mode_index = i;
558
559 #if CONFIG_TEMPORAL_DENOISING
560 if (cpi->oxcf.noise_sensitivity > 0) {
561 if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
562 Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
563 (cpi->frames_since_key >
564 2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
565 // Under aggressive denoising, use segmentation to turn off loop
566 // filter below some qp thresh. The filter is reduced for all
567 // blocks that have been encoded as ZEROMV LAST x frames in a row,
568 // where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
569 // This is to avoid "dot" artifacts that can occur from repeated
570 // loop filtering on noisy input source.
571 cpi->cyclic_refresh_q = Q;
572 // lf_adjustment = -MAX_LOOP_FILTER;
573 lf_adjustment = -40;
574 for (i = 0; i < mbs_in_frame; ++i) {
575 seg_map[i] = (cpi->consec_zero_last[i] >
576 cpi->denoiser.denoise_pars.consec_zerolast)
577 ? 1
578 : 0;
579 }
580 }
581 }
582 #endif
583 }
584
585 /* Activate segmentation. */
586 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
587 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
588 enable_segmentation(cpi);
589
590 /* Set up the quant segment data */
591 feature_data[MB_LVL_ALT_Q][0] = 0;
592 feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
593 feature_data[MB_LVL_ALT_Q][2] = 0;
594 feature_data[MB_LVL_ALT_Q][3] = 0;
595
596 /* Set up the loop segment data */
597 feature_data[MB_LVL_ALT_LF][0] = 0;
598 feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
599 feature_data[MB_LVL_ALT_LF][2] = 0;
600 feature_data[MB_LVL_ALT_LF][3] = 0;
601
602 /* Initialise the feature data structure */
603 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
604 }
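/* Worked example for the refresh set up above (illustrative): if the frame
 * is coded at Q = 40, then cyclic_refresh_q = 20 and
 * feature_data[MB_LVL_ALT_Q][1] = 20 - 40 = -20, so macroblocks placed in
 * segment 1 this frame are quantized roughly 20 index steps finer than the
 * rest of the frame, gradually refreshing stale background blocks.
 */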
605
static void compute_skin_map(VP8_COMP *cpi) {
607 int mb_row, mb_col, num_bl;
608 VP8_COMMON *cm = &cpi->common;
609 const uint8_t *src_y = cpi->Source->y_buffer;
610 const uint8_t *src_u = cpi->Source->u_buffer;
611 const uint8_t *src_v = cpi->Source->v_buffer;
612 const int src_ystride = cpi->Source->y_stride;
613 const int src_uvstride = cpi->Source->uv_stride;
614
615 const SKIN_DETECTION_BLOCK_SIZE bsize =
616 (cm->Width * cm->Height <= 352 * 288) ? SKIN_8X8 : SKIN_16X16;
617
618 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
619 num_bl = 0;
620 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
621 const int bl_index = mb_row * cm->mb_cols + mb_col;
622 cpi->skin_map[bl_index] =
623 vp8_compute_skin_block(src_y, src_u, src_v, src_ystride, src_uvstride,
624 bsize, cpi->consec_zero_last[bl_index], 0);
625 num_bl++;
626 src_y += 16;
627 src_u += 8;
628 src_v += 8;
629 }
630 src_y += (src_ystride << 4) - (num_bl << 4);
631 src_u += (src_uvstride << 3) - (num_bl << 3);
632 src_v += (src_uvstride << 3) - (num_bl << 3);
633 }
634
635 // Remove isolated skin blocks (none of its neighbors are skin) and isolated
636 // non-skin blocks (all of its neighbors are skin). Skip the boundary.
637 for (mb_row = 1; mb_row < cm->mb_rows - 1; mb_row++) {
638 for (mb_col = 1; mb_col < cm->mb_cols - 1; mb_col++) {
639 const int bl_index = mb_row * cm->mb_cols + mb_col;
640 int num_neighbor = 0;
641 int mi, mj;
642 int non_skin_threshold = 8;
643
644 for (mi = -1; mi <= 1; mi += 1) {
645 for (mj = -1; mj <= 1; mj += 1) {
646 int bl_neighbor_index = (mb_row + mi) * cm->mb_cols + mb_col + mj;
647 if (cpi->skin_map[bl_neighbor_index]) num_neighbor++;
648 }
649 }
650
651 if (cpi->skin_map[bl_index] && num_neighbor < 2)
652 cpi->skin_map[bl_index] = 0;
653 if (!cpi->skin_map[bl_index] && num_neighbor == non_skin_threshold)
654 cpi->skin_map[bl_index] = 1;
655 }
656 }
657 }
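/* Note on the clean-up pass above: the 3x3 neighborhood scan includes the
 * center block itself, so a skin block with num_neighbor < 2 has no skin
 * neighbors at all, and a non-skin block that reaches the threshold of 8
 * is completely surrounded by skin; both are flipped to match their
 * surroundings.
 */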
658
static void set_default_lf_deltas(VP8_COMP *cpi) {
660 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
661 cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
662
663 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
664 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
665
666 /* Test of ref frame deltas */
667 cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
668 cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
669 cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
670 cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
671
672 cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
673
674 if (cpi->oxcf.Mode == MODE_REALTIME) {
675 cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
676 } else {
677 cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
678 }
679
680 cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
681 cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
682 }
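/* Rough sketch of the effect of the deltas above: they are added to the
 * frame's base loop filter level per macroblock, so intra blocks get a
 * slightly stronger filter (+2), golden/altref-predicted blocks a slightly
 * weaker one (-2), and ZEROMV blocks in realtime mode are filtered much
 * less (-12) since they are typically unchanged background.
 */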
683
684 /* Convenience macros for mapping speed and mode into a continuous
685 * range
686 */
687 #define GOOD(x) ((x) + 1)
688 #define RT(x) ((x) + 7)
689
static int speed_map(int speed, const int *map) {
691 int res;
692
693 do {
694 res = *map++;
695 } while (speed >= *map++);
696 return res;
697 }
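/* How the interleaved map tables below are read (illustrative): each table
 * is { value0, limit0, value1, limit1, ... } on the continuous GOOD()/RT()
 * speed scale, and speed_map() returns the value paired with the first
 * limit that exceeds the given speed. For example, with
 * thresh_mult_map_new1 = { 1000, GOOD(2), 2000, RT(0), 2000, INT_MAX },
 * good-quality speed 0 maps to 1000 while good-quality speed 4 maps to
 * 2000.
 */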
698
699 static const int thresh_mult_map_znn[] = {
700 /* map common to zero, nearest, and near */
701 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
702 };
703
704 static const int thresh_mult_map_vhpred[] = { 1000, GOOD(2), 1500, GOOD(3),
705 2000, RT(0), 1000, RT(1),
706 2000, RT(7), INT_MAX, INT_MAX };
707
708 static const int thresh_mult_map_bpred[] = { 2000, GOOD(0), 2500, GOOD(2),
709 5000, GOOD(3), 7500, RT(0),
710 2500, RT(1), 5000, RT(6),
711 INT_MAX, INT_MAX };
712
713 static const int thresh_mult_map_tm[] = { 1000, GOOD(2), 1500, GOOD(3),
714 2000, RT(0), 0, RT(1),
715 1000, RT(2), 2000, RT(7),
716 INT_MAX, INT_MAX };
717
718 static const int thresh_mult_map_new1[] = { 1000, GOOD(2), 2000,
719 RT(0), 2000, INT_MAX };
720
721 static const int thresh_mult_map_new2[] = { 1000, GOOD(2), 2000, GOOD(3),
722 2500, GOOD(5), 4000, RT(0),
723 2000, RT(2), 2500, RT(5),
724 4000, INT_MAX };
725
726 static const int thresh_mult_map_split1[] = {
727 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
728 RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
729 };
730
731 static const int thresh_mult_map_split2[] = {
732 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
733 RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
734 };
735
736 static const int mode_check_freq_map_zn2[] = {
737 /* {zero,nearest}{2,3} */
738 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
739 };
740
741 static const int mode_check_freq_map_vhbpred[] = { 0, GOOD(5), 2, RT(0),
742 0, RT(3), 2, RT(5),
743 4, INT_MAX };
744
745 static const int mode_check_freq_map_near2[] = {
746 0, GOOD(5), 2, RT(0), 0, RT(3), 2,
747 RT(10), 1 << 2, RT(11), 1 << 3, RT(12), 1 << 4, INT_MAX
748 };
749
750 static const int mode_check_freq_map_new1[] = {
751 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
752 };
753
754 static const int mode_check_freq_map_new2[] = { 0, GOOD(5), 4, RT(0),
755 0, RT(3), 4, RT(10),
756 1 << 3, RT(11), 1 << 4, RT(12),
757 1 << 5, INT_MAX };
758
759 static const int mode_check_freq_map_split1[] = { 0, GOOD(2), 2, GOOD(3),
760 7, RT(1), 2, RT(2),
761 7, INT_MAX };
762
763 static const int mode_check_freq_map_split2[] = { 0, GOOD(1), 2, GOOD(2),
764 4, GOOD(3), 15, RT(1),
765 4, RT(2), 15, INT_MAX };
766
void vp8_set_speed_features(VP8_COMP *cpi) {
768 SPEED_FEATURES *sf = &cpi->sf;
769 int Mode = cpi->compressor_speed;
770 int Speed = cpi->Speed;
771 int Speed2;
772 int i;
773 VP8_COMMON *cm = &cpi->common;
774 int last_improved_quant = sf->improved_quant;
775 int ref_frames;
776
777 /* Initialise default mode frequency sampling variables */
778 for (i = 0; i < MAX_MODES; ++i) {
779 cpi->mode_check_freq[i] = 0;
780 }
781
782 cpi->mb.mbs_tested_so_far = 0;
783 cpi->mb.mbs_zero_last_dot_suppress = 0;
784
785 /* best quality defaults */
786 sf->RD = 1;
787 sf->search_method = NSTEP;
788 sf->improved_quant = 1;
789 sf->improved_dct = 1;
790 sf->auto_filter = 1;
791 sf->recode_loop = 1;
792 sf->quarter_pixel_search = 1;
793 sf->half_pixel_search = 1;
794 sf->iterative_sub_pixel = 1;
795 sf->optimize_coefficients = 1;
796 sf->use_fastquant_for_pick = 0;
797 sf->no_skip_block4x4_search = 1;
798
799 sf->first_step = 0;
800 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
801 sf->improved_mv_pred = 1;
802
803 /* default thresholds to 0 */
804 for (i = 0; i < MAX_MODES; ++i) sf->thresh_mult[i] = 0;
805
806 /* Count enabled references */
807 ref_frames = 1;
808 if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frames++;
809 if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frames++;
810 if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frames++;
811
812 /* Convert speed to continuous range, with clamping */
813 if (Mode == 0) {
814 Speed = 0;
815 } else if (Mode == 2) {
816 Speed = RT(Speed);
817 } else {
818 if (Speed > 5) Speed = 5;
819 Speed = GOOD(Speed);
820 }
821
822 sf->thresh_mult[THR_ZERO1] = sf->thresh_mult[THR_NEAREST1] =
823 sf->thresh_mult[THR_NEAR1] = sf->thresh_mult[THR_DC] = 0; /* always */
824
825 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO3] =
826 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST3] =
827 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR3] =
828 speed_map(Speed, thresh_mult_map_znn);
829
830 sf->thresh_mult[THR_V_PRED] = sf->thresh_mult[THR_H_PRED] =
831 speed_map(Speed, thresh_mult_map_vhpred);
832 sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
833 sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
834 sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
835 sf->thresh_mult[THR_NEW2] = sf->thresh_mult[THR_NEW3] =
836 speed_map(Speed, thresh_mult_map_new2);
837 sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
838 sf->thresh_mult[THR_SPLIT2] = sf->thresh_mult[THR_SPLIT3] =
839 speed_map(Speed, thresh_mult_map_split2);
840
841 // Special case for temporal layers.
842 // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
843 // used as second reference. We don't modify thresholds for ALTREF case
844 // since ALTREF is usually used as long-term reference in temporal layers.
845 if ((cpi->Speed <= 6) && (cpi->oxcf.number_of_layers > 1) &&
846 (cpi->ref_frame_flags & VP8_LAST_FRAME) &&
847 (cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
848 if (cpi->closest_reference_frame == GOLDEN_FRAME) {
849 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
850 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
851 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
852 } else {
853 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
854 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
855 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;
856 }
857 }
858
859 cpi->mode_check_freq[THR_ZERO1] = cpi->mode_check_freq[THR_NEAREST1] =
860 cpi->mode_check_freq[THR_NEAR1] = cpi->mode_check_freq[THR_TM] =
861 cpi->mode_check_freq[THR_DC] = 0; /* always */
862
863 cpi->mode_check_freq[THR_ZERO2] = cpi->mode_check_freq[THR_ZERO3] =
864 cpi->mode_check_freq[THR_NEAREST2] = cpi->mode_check_freq[THR_NEAREST3] =
865 speed_map(Speed, mode_check_freq_map_zn2);
866
867 cpi->mode_check_freq[THR_NEAR2] = cpi->mode_check_freq[THR_NEAR3] =
868 speed_map(Speed, mode_check_freq_map_near2);
869
870 cpi->mode_check_freq[THR_V_PRED] = cpi->mode_check_freq[THR_H_PRED] =
871 cpi->mode_check_freq[THR_B_PRED] =
872 speed_map(Speed, mode_check_freq_map_vhbpred);
873
874 // For real-time mode at speed 10 keep the mode_check_freq threshold
875 // for NEW1 similar to that of speed 9.
876 Speed2 = Speed;
877 if (cpi->Speed == 10 && Mode == 2) Speed2 = RT(9);
878 cpi->mode_check_freq[THR_NEW1] = speed_map(Speed2, mode_check_freq_map_new1);
879
880 cpi->mode_check_freq[THR_NEW2] = cpi->mode_check_freq[THR_NEW3] =
881 speed_map(Speed, mode_check_freq_map_new2);
882
883 cpi->mode_check_freq[THR_SPLIT1] =
884 speed_map(Speed, mode_check_freq_map_split1);
885 cpi->mode_check_freq[THR_SPLIT2] = cpi->mode_check_freq[THR_SPLIT3] =
886 speed_map(Speed, mode_check_freq_map_split2);
887 Speed = cpi->Speed;
888 switch (Mode) {
889 #if !CONFIG_REALTIME_ONLY
890 case 0: /* best quality mode */
891 sf->first_step = 0;
892 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
893 break;
894 case 1:
895 case 3:
896 if (Speed > 0) {
897 /* Disable coefficient optimization above speed 0 */
898 sf->optimize_coefficients = 0;
899 sf->use_fastquant_for_pick = 1;
900 sf->no_skip_block4x4_search = 0;
901
902 sf->first_step = 1;
903 }
904
905 if (Speed > 2) {
906 sf->improved_quant = 0;
907 sf->improved_dct = 0;
908
909 /* Only do recode loop on key frames, golden frames and
910 * alt ref frames
911 */
912 sf->recode_loop = 2;
913 }
914
915 if (Speed > 3) {
916 sf->auto_filter = 1;
917 sf->recode_loop = 0; /* recode loop off */
918 sf->RD = 0; /* Turn rd off */
919 }
920
921 if (Speed > 4) {
922 sf->auto_filter = 0; /* Faster selection of loop filter */
923 }
924
925 break;
926 #endif
927 case 2:
928 sf->optimize_coefficients = 0;
929 sf->recode_loop = 0;
930 sf->auto_filter = 1;
931 sf->iterative_sub_pixel = 1;
932 sf->search_method = NSTEP;
933
934 if (Speed > 0) {
935 sf->improved_quant = 0;
936 sf->improved_dct = 0;
937
938 sf->use_fastquant_for_pick = 1;
939 sf->no_skip_block4x4_search = 0;
940 sf->first_step = 1;
941 }
942
943 if (Speed > 2) sf->auto_filter = 0; /* Faster selection of loop filter */
944
945 if (Speed > 3) {
946 sf->RD = 0;
947 sf->auto_filter = 1;
948 }
949
950 if (Speed > 4) {
951 sf->auto_filter = 0; /* Faster selection of loop filter */
952 sf->search_method = HEX;
953 sf->iterative_sub_pixel = 0;
954 }
955
956 if (Speed > 6) {
957 unsigned int sum = 0;
958 unsigned int total_mbs = cm->MBs;
959 int thresh;
960 unsigned int total_skip;
961
962 int min = 2000;
963
964 if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout;
965
966 min >>= 7;
967
968 for (i = 0; i < min; ++i) {
969 sum += cpi->mb.error_bins[i];
970 }
971
972 total_skip = sum;
973 sum = 0;
974
975 /* i starts from 2 to make sure thresh started from 2048 */
976 for (; i < 1024; ++i) {
977 sum += cpi->mb.error_bins[i];
978
979 if (10 * sum >=
980 (unsigned int)(cpi->Speed - 6) * (total_mbs - total_skip)) {
981 break;
982 }
983 }
984
985 i--;
986 thresh = (i << 7);
987
988 if (thresh < 2000) thresh = 2000;
989
990 if (ref_frames > 1) {
991 sf->thresh_mult[THR_NEW1] = thresh;
992 sf->thresh_mult[THR_NEAREST1] = thresh >> 1;
993 sf->thresh_mult[THR_NEAR1] = thresh >> 1;
994 }
995
996 if (ref_frames > 2) {
997 sf->thresh_mult[THR_NEW2] = thresh << 1;
998 sf->thresh_mult[THR_NEAREST2] = thresh;
999 sf->thresh_mult[THR_NEAR2] = thresh;
1000 }
1001
1002 if (ref_frames > 3) {
1003 sf->thresh_mult[THR_NEW3] = thresh << 1;
1004 sf->thresh_mult[THR_NEAREST3] = thresh;
1005 sf->thresh_mult[THR_NEAR3] = thresh;
1006 }
1007
1008 sf->improved_mv_pred = 0;
1009 }
1010
1011 if (Speed > 8) sf->quarter_pixel_search = 0;
1012
1013 if (cm->version == 0) {
1014 cm->filter_type = NORMAL_LOOPFILTER;
1015
1016 if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER;
1017 } else {
1018 cm->filter_type = SIMPLE_LOOPFILTER;
1019 }
1020
1021 /* This has a big hit on quality. Last resort */
1022 if (Speed >= 15) sf->half_pixel_search = 0;
1023
1024 memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
1025
  } /* switch */
1027
1028 /* Slow quant, dct and trellis not worthwhile for first pass
1029 * so make sure they are always turned off.
1030 */
1031 if (cpi->pass == 1) {
1032 sf->improved_quant = 0;
1033 sf->optimize_coefficients = 0;
1034 sf->improved_dct = 0;
1035 }
1036
1037 if (cpi->sf.search_method == NSTEP) {
1038 vp8_init3smotion_compensation(&cpi->mb,
1039 cm->yv12_fb[cm->lst_fb_idx].y_stride);
1040 } else if (cpi->sf.search_method == DIAMOND) {
1041 vp8_init_dsmotion_compensation(&cpi->mb,
1042 cm->yv12_fb[cm->lst_fb_idx].y_stride);
1043 }
1044
1045 if (cpi->sf.improved_dct) {
1046 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1047 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1048 } else {
1049 /* No fast FDCT defined for any platform at this time. */
1050 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1051 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1052 }
1053
1054 cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
1055
1056 if (cpi->sf.improved_quant) {
1057 cpi->mb.quantize_b = vp8_regular_quantize_b;
1058 } else {
1059 cpi->mb.quantize_b = vp8_fast_quantize_b;
1060 }
1061 if (cpi->sf.improved_quant != last_improved_quant) vp8cx_init_quantizer(cpi);
1062
1063 if (cpi->sf.iterative_sub_pixel == 1) {
1064 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
1065 } else if (cpi->sf.quarter_pixel_search) {
1066 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
1067 } else if (cpi->sf.half_pixel_search) {
1068 cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
1069 } else {
1070 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1071 }
1072
1073 if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1) {
1074 cpi->mb.optimize = 1;
1075 } else {
1076 cpi->mb.optimize = 0;
1077 }
1078
1079 if (cpi->common.full_pixel) {
1080 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1081 }
1082
1083 #ifdef SPEEDSTATS
1084 frames_at_speed[cpi->Speed]++;
1085 #endif
1086 }
1087 #undef GOOD
1088 #undef RT
1089
static void alloc_raw_frame_buffers(VP8_COMP *cpi) {
1091 #if VP8_TEMPORAL_ALT_REF
1092 int width = (cpi->oxcf.Width + 15) & ~15;
1093 int height = (cpi->oxcf.Height + 15) & ~15;
1094 #endif
1095
1096 cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
1097 cpi->oxcf.lag_in_frames);
1098 if (!cpi->lookahead) {
1099 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1100 "Failed to allocate lag buffers");
1101 }
1102
1103 #if VP8_TEMPORAL_ALT_REF
1104
1105 if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, width, height,
1106 VP8BORDERINPIXELS)) {
1107 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1108 "Failed to allocate altref buffer");
1109 }
1110
1111 #endif
1112 }
1113
static void dealloc_raw_frame_buffers(VP8_COMP *cpi) {
1115 #if VP8_TEMPORAL_ALT_REF
1116 vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
1117 #endif
1118 vp8_lookahead_destroy(cpi->lookahead);
1119 }
1120
static int vp8_alloc_partition_data(VP8_COMP *cpi) {
1122 vpx_free(cpi->mb.pip);
1123
1124 cpi->mb.pip =
1125 vpx_calloc((cpi->common.mb_cols + 1) * (cpi->common.mb_rows + 1),
1126 sizeof(PARTITION_INFO));
1127 if (!cpi->mb.pip) return 1;
1128
1129 cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
1130
1131 return 0;
1132 }
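/* The partition info array above is padded by one extra row and column, and
 * cpi->mb.pi is offset past that padding (mode_info_stride == mb_cols + 1),
 * presumably so that above/left neighbor lookups during mode selection never
 * index outside the allocation.
 */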
1133
void vp8_alloc_compressor_data(VP8_COMP *cpi) {
1135 VP8_COMMON *cm = &cpi->common;
1136
1137 int width = cm->Width;
1138 int height = cm->Height;
1139
1140 if (vp8_alloc_frame_buffers(cm, width, height)) {
1141 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1142 "Failed to allocate frame buffers");
1143 }
1144
1145 if (vp8_alloc_partition_data(cpi)) {
1146 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1147 "Failed to allocate partition data");
1148 }
1149
1150 if ((width & 0xf) != 0) width += 16 - (width & 0xf);
1151
1152 if ((height & 0xf) != 0) height += 16 - (height & 0xf);
1153
1154 if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame, width, height,
1155 VP8BORDERINPIXELS)) {
1156 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1157 "Failed to allocate last frame buffer");
1158 }
1159
1160 if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height,
1161 VP8BORDERINPIXELS)) {
1162 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1163 "Failed to allocate scaled source buffer");
1164 }
1165
1166 vpx_free(cpi->tok);
1167
1168 {
1169 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
1170 unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
1171 #else
1172 unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
1173 #endif
1174 CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
1175 }
1176
  /* Data used for real-time (video conferencing) mode to see if the golden
   * frame needs refreshing. */
1178 cpi->zeromv_count = 0;
1179
1180 /* Structures used to monitor GF usage */
1181 vpx_free(cpi->gf_active_flags);
1182 CHECK_MEM_ERROR(
1183 cpi->gf_active_flags,
1184 vpx_calloc(sizeof(*cpi->gf_active_flags), cm->mb_rows * cm->mb_cols));
1185 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
1186
1187 vpx_free(cpi->mb_activity_map);
1188 CHECK_MEM_ERROR(
1189 cpi->mb_activity_map,
1190 vpx_calloc(sizeof(*cpi->mb_activity_map), cm->mb_rows * cm->mb_cols));
1191
1192 /* allocate memory for storing last frame's MVs for MV prediction. */
1193 vpx_free(cpi->lfmv);
1194 CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1195 sizeof(*cpi->lfmv)));
1196 vpx_free(cpi->lf_ref_frame_sign_bias);
1197 CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias,
1198 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1199 sizeof(*cpi->lf_ref_frame_sign_bias)));
1200 vpx_free(cpi->lf_ref_frame);
1201 CHECK_MEM_ERROR(cpi->lf_ref_frame,
1202 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1203 sizeof(*cpi->lf_ref_frame)));
1204
1205 /* Create the encoder segmentation map and set all entries to 0 */
1206 vpx_free(cpi->segmentation_map);
1207 CHECK_MEM_ERROR(
1208 cpi->segmentation_map,
1209 vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->segmentation_map)));
1210 cpi->cyclic_refresh_mode_index = 0;
1211 vpx_free(cpi->active_map);
1212 CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cm->mb_rows * cm->mb_cols,
1213 sizeof(*cpi->active_map)));
1214 memset(cpi->active_map, 1, (cm->mb_rows * cm->mb_cols));
1215
1216 #if CONFIG_MULTITHREAD
1217 if (width < 640) {
1218 cpi->mt_sync_range = 1;
1219 } else if (width <= 1280) {
1220 cpi->mt_sync_range = 4;
1221 } else if (width <= 2560) {
1222 cpi->mt_sync_range = 8;
1223 } else {
1224 cpi->mt_sync_range = 16;
1225 }
1226
1227 if (cpi->oxcf.multi_threaded > 1) {
1228 int i;
1229
1230 vpx_free(cpi->mt_current_mb_col);
1231 CHECK_MEM_ERROR(cpi->mt_current_mb_col,
1232 vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));
1233 for (i = 0; i < cm->mb_rows; ++i)
1234 vpx_atomic_init(&cpi->mt_current_mb_col[i], 0);
1235 }
1236
1237 #endif
1238
1239 vpx_free(cpi->tplist);
1240 CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));
1241
1242 #if CONFIG_TEMPORAL_DENOISING
1243 if (cpi->oxcf.noise_sensitivity > 0) {
1244 vp8_denoiser_free(&cpi->denoiser);
1245 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1246 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1247 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1248 "Failed to allocate denoiser");
1249 }
1250 }
1251 #endif
1252 }
1253
1254 /* Quant MOD */
1255 static const int q_trans[] = {
1256 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 17, 18, 19,
1257 20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41,
1258 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79,
1259 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127,
1260 };
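/* Example of the quantizer mapping (illustrative): the external API exposes
 * quantizer levels 0-63 while the bitstream uses Q indices 0-127.
 * q_trans[10] == 12 converts level 10 to Q index 12, and
 * vp8_reverse_trans() below recovers the level as the smallest level whose
 * Q index is >= the given value, so vp8_reverse_trans(12) == 10.
 */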
1261
int vp8_reverse_trans(int x) {
1263 int i;
1264
1265 for (i = 0; i < 64; ++i) {
1266 if (q_trans[i] >= x) return i;
1267 }
1268
1269 return 63;
1270 }
void vp8_new_framerate(VP8_COMP *cpi, double framerate) {
1272 if (framerate < .1) framerate = 30;
1273
1274 cpi->framerate = framerate;
1275 cpi->output_framerate = framerate;
1276 cpi->per_frame_bandwidth =
1277 (int)(cpi->oxcf.target_bandwidth / cpi->output_framerate);
1278 cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
1279 cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
1280 cpi->oxcf.two_pass_vbrmin_section / 100);
1281
1282 /* Set Maximum gf/arf interval */
1283 cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
1284
1285 if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12;
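  /* Rough numbers (illustrative): at 30 fps with a 600000 bit/s target,
   * per_frame_bandwidth = 600000 / 30 = 20000 bits and max_gf_interval
   * starts at 30 / 2 + 2 = 17 frames before the lag and static-scene
   * clamps below are applied.
   */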
1286
1287 /* Extended interval for genuinely static scenes */
1288 cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
1289
  /* Special conditions when alt ref is enabled in lagged compress mode */
1291 if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
1292 if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) {
1293 cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1294 }
1295
1296 if (cpi->twopass.static_scene_max_gf_interval >
1297 cpi->oxcf.lag_in_frames - 1) {
1298 cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1299 }
1300 }
1301
1302 if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval) {
1303 cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
1304 }
1305 }
1306
static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1308 VP8_COMMON *cm = &cpi->common;
1309
1310 cpi->oxcf = *oxcf;
1311
1312 cpi->auto_gold = 1;
1313 cpi->auto_adjust_gold_quantizer = 1;
1314
1315 cm->version = oxcf->Version;
1316 vp8_setup_version(cm);
1317
1318 /* Frame rate is not available on the first frame, as it's derived from
1319 * the observed timestamps. The actual value used here doesn't matter
1320 * too much, as it will adapt quickly.
1321 */
1322 if (oxcf->timebase.num > 0) {
1323 cpi->framerate =
1324 (double)(oxcf->timebase.den) / (double)(oxcf->timebase.num);
1325 } else {
1326 cpi->framerate = 30;
1327 }
1328
1329 /* If the reciprocal of the timebase seems like a reasonable framerate,
1330 * then use that as a guess, otherwise use 30.
1331 */
1332 if (cpi->framerate > 180) cpi->framerate = 30;
1333
1334 cpi->ref_framerate = cpi->framerate;
1335
1336 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
1337
1338 cm->refresh_golden_frame = 0;
1339 cm->refresh_last_frame = 1;
1340 cm->refresh_entropy_probs = 1;
1341
1342 /* change includes all joint functionality */
1343 vp8_change_config(cpi, oxcf);
1344
1345 /* Initialize active best and worst q and average q values. */
1346 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1347 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1348 cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
1349
1350 /* Initialise the starting buffer levels */
1351 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
1352 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
1353
1354 cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
1355 cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
1356 cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
1357 cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
1358
1359 cpi->total_actual_bits = 0;
1360 cpi->total_target_vs_actual = 0;
1361
  /* Temporal scalability */
1363 if (cpi->oxcf.number_of_layers > 1) {
1364 unsigned int i;
1365 double prev_layer_framerate = 0;
1366
1367 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
1368 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
1369 prev_layer_framerate =
1370 cpi->output_framerate / cpi->oxcf.rate_decimator[i];
1371 }
1372 }
1373
1374 #if VP8_TEMPORAL_ALT_REF
1375 {
1376 int i;
1377
1378 cpi->fixed_divide[0] = 0;
1379
1380 for (i = 1; i < 512; ++i) cpi->fixed_divide[i] = 0x80000 / i;
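    /* fixed_divide[] caches 2^19 / i so that, presumably, the temporal
     * filter can replace a divide by i with a multiply and a >> 19 shift,
     * e.g. (x * fixed_divide[3]) >> 19 is approximately x / 3.
     */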
1381 }
1382 #endif
1383 }
1384
static void update_layer_contexts(VP8_COMP *cpi) {
1386 VP8_CONFIG *oxcf = &cpi->oxcf;
1387
1388 /* Update snapshots of the layer contexts to reflect new parameters */
1389 if (oxcf->number_of_layers > 1) {
1390 unsigned int i;
1391 double prev_layer_framerate = 0;
1392
1393 assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
1394 for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i) {
1395 LAYER_CONTEXT *lc = &cpi->layer_context[i];
1396
1397 lc->framerate = cpi->ref_framerate / oxcf->rate_decimator[i];
1398 lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
1399
1400 lc->starting_buffer_level = rescale(
1401 (int)oxcf->starting_buffer_level_in_ms, lc->target_bandwidth, 1000);
1402
1403 if (oxcf->optimal_buffer_level == 0) {
1404 lc->optimal_buffer_level = lc->target_bandwidth / 8;
1405 } else {
1406 lc->optimal_buffer_level = rescale(
1407 (int)oxcf->optimal_buffer_level_in_ms, lc->target_bandwidth, 1000);
1408 }
1409
1410 if (oxcf->maximum_buffer_size == 0) {
1411 lc->maximum_buffer_size = lc->target_bandwidth / 8;
1412 } else {
1413 lc->maximum_buffer_size = rescale((int)oxcf->maximum_buffer_size_in_ms,
1414 lc->target_bandwidth, 1000);
1415 }
1416
1417 /* Work out the average size of a frame within this layer */
1418 if (i > 0) {
1419 lc->avg_frame_size_for_layer =
1420 (int)((oxcf->target_bitrate[i] - oxcf->target_bitrate[i - 1]) *
1421 1000 / (lc->framerate - prev_layer_framerate));
1422 }
1423
1424 prev_layer_framerate = lc->framerate;
1425 }
1426 }
1427 }
1428
void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1430 VP8_COMMON *cm = &cpi->common;
1431 int last_w, last_h;
1432 unsigned int prev_number_of_layers;
1433 unsigned int raw_target_rate;
1434
1435 if (!cpi) return;
1436
1437 if (!oxcf) return;
1438
1439 if (cm->version != oxcf->Version) {
1440 cm->version = oxcf->Version;
1441 vp8_setup_version(cm);
1442 }
1443
1444 last_w = cpi->oxcf.Width;
1445 last_h = cpi->oxcf.Height;
1446 prev_number_of_layers = cpi->oxcf.number_of_layers;
1447
1448 cpi->oxcf = *oxcf;
1449
1450 switch (cpi->oxcf.Mode) {
1451 case MODE_REALTIME:
1452 cpi->pass = 0;
1453 cpi->compressor_speed = 2;
1454
1455 if (cpi->oxcf.cpu_used < -16) {
1456 cpi->oxcf.cpu_used = -16;
1457 }
1458
1459 if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16;
1460
1461 break;
1462
1463 case MODE_GOODQUALITY:
1464 cpi->pass = 0;
1465 cpi->compressor_speed = 1;
1466
1467 if (cpi->oxcf.cpu_used < -5) {
1468 cpi->oxcf.cpu_used = -5;
1469 }
1470
1471 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1472
1473 break;
1474
1475 case MODE_BESTQUALITY:
1476 cpi->pass = 0;
1477 cpi->compressor_speed = 0;
1478 break;
1479
1480 case MODE_FIRSTPASS:
1481 cpi->pass = 1;
1482 cpi->compressor_speed = 1;
1483 break;
1484 case MODE_SECONDPASS:
1485 cpi->pass = 2;
1486 cpi->compressor_speed = 1;
1487
1488 if (cpi->oxcf.cpu_used < -5) {
1489 cpi->oxcf.cpu_used = -5;
1490 }
1491
1492 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1493
1494 break;
1495 case MODE_SECONDPASS_BEST:
1496 cpi->pass = 2;
1497 cpi->compressor_speed = 0;
1498 break;
1499 }
1500
1501 if (cpi->pass == 0) cpi->auto_worst_q = 1;
1502
1503 cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
1504 cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
1505 cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
1506
1507 if (oxcf->fixed_q >= 0) {
1508 if (oxcf->worst_allowed_q < 0) {
1509 cpi->oxcf.fixed_q = q_trans[0];
1510 } else {
1511 cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];
1512 }
1513
1514 if (oxcf->alt_q < 0) {
1515 cpi->oxcf.alt_q = q_trans[0];
1516 } else {
1517 cpi->oxcf.alt_q = q_trans[oxcf->alt_q];
1518 }
1519
1520 if (oxcf->key_q < 0) {
1521 cpi->oxcf.key_q = q_trans[0];
1522 } else {
1523 cpi->oxcf.key_q = q_trans[oxcf->key_q];
1524 }
1525
1526 if (oxcf->gold_q < 0) {
1527 cpi->oxcf.gold_q = q_trans[0];
1528 } else {
1529 cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
1530 }
1531 }
1532
1533 cpi->ext_refresh_frame_flags_pending = 0;
1534
1535 cpi->baseline_gf_interval =
1536 cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
1537
1538 // GF behavior for 1 pass CBR, used when error_resilience is off.
1539 if (!cpi->oxcf.error_resilient_mode &&
1540 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1541 cpi->oxcf.Mode == MODE_REALTIME)
1542 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1543
1544 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
1545 cpi->oxcf.token_partitions = 3;
1546 #endif
1547
1548 if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) {
1549 cm->multi_token_partition = (TOKEN_PARTITION)cpi->oxcf.token_partitions;
1550 }
1551
1552 setup_features(cpi);
1553
1554 if (!cpi->use_roi_static_threshold) {
1555 int i;
1556 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
1557 cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
1558 }
1559 }
1560
1561 /* At the moment the first order values may not be > MAXQ */
1562 if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ;
1563
1564 /* local file playback mode == really big buffer */
1565 if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
1566 cpi->oxcf.starting_buffer_level = 60000;
1567 cpi->oxcf.optimal_buffer_level = 60000;
1568 cpi->oxcf.maximum_buffer_size = 240000;
1569 cpi->oxcf.starting_buffer_level_in_ms = 60000;
1570 cpi->oxcf.optimal_buffer_level_in_ms = 60000;
1571 cpi->oxcf.maximum_buffer_size_in_ms = 240000;
1572 }
1573
1574 raw_target_rate = (unsigned int)((int64_t)cpi->oxcf.Width * cpi->oxcf.Height *
1575 8 * 3 * cpi->framerate / 1000);
1576 if (cpi->oxcf.target_bandwidth > raw_target_rate)
1577 cpi->oxcf.target_bandwidth = raw_target_rate;
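  /* Illustrative magnitude check: for 640x480 at 30 fps the cap above is
   * 640 * 480 * 8 * 3 * 30 / 1000 ~= 221184 kbits/s, i.e. the raw
   * 24-bit-per-pixel rate, so only absurdly high targets are clipped here
   * (target_bandwidth is still in kbits/s at this point).
   */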
1578 /* Convert target bandwidth from Kbit/s to Bit/s */
1579 cpi->oxcf.target_bandwidth *= 1000;
1580
1581 cpi->oxcf.starting_buffer_level = rescale(
1582 (int)cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1583
1584 /* Set or reset optimal and maximum buffer levels. */
1585 if (cpi->oxcf.optimal_buffer_level == 0) {
1586 cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
1587 } else {
1588 cpi->oxcf.optimal_buffer_level = rescale(
1589 (int)cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1590 }
1591
1592 if (cpi->oxcf.maximum_buffer_size == 0) {
1593 cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
1594 } else {
1595 cpi->oxcf.maximum_buffer_size = rescale((int)cpi->oxcf.maximum_buffer_size,
1596 cpi->oxcf.target_bandwidth, 1000);
1597 }
1598 // Under a configuration change, where maximum_buffer_size may change,
1599 // keep buffer level clipped to the maximum allowed buffer size.
1600 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
1601 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
1602 cpi->buffer_level = cpi->bits_off_target;
1603 }
1604
  /* Set up frame rate and related rate control parameter values. */
1606 vp8_new_framerate(cpi, cpi->framerate);
1607
1608 /* Set absolute upper and lower quality limits */
1609 cpi->worst_quality = cpi->oxcf.worst_allowed_q;
1610 cpi->best_quality = cpi->oxcf.best_allowed_q;
1611
1612 /* active values should only be modified if out of new range */
1613 if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) {
1614 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1615 }
1616 /* less likely */
1617 else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) {
1618 cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
1619 }
1620 if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) {
1621 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1622 }
1623 /* less likely */
1624 else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) {
1625 cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
1626 }
1627
1628 cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
1629
1630 cpi->cq_target_quality = cpi->oxcf.cq_level;
1631
1632 /* Only allow dropped frames in buffered mode */
1633 cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
1634
1635 cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
1636
1637 // Check if the number of temporal layers has changed, and if so reset the
1638 // pattern counter and set/initialize the temporal layer context for the
1639 // new layer configuration.
1640 if (cpi->oxcf.number_of_layers != prev_number_of_layers) {
1641 // If the number of temporal layers are changed we must start at the
1642 // base of the pattern cycle, so set the layer id to 0 and reset
1643 // the temporal pattern counter.
1644 if (cpi->temporal_layer_id > 0) {
1645 cpi->temporal_layer_id = 0;
1646 }
1647 cpi->temporal_pattern_counter = 0;
1648 reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
1649 }
1650
1651 if (!cpi->initial_width) {
1652 cpi->initial_width = cpi->oxcf.Width;
1653 cpi->initial_height = cpi->oxcf.Height;
1654 }
1655
1656 cm->Width = cpi->oxcf.Width;
1657 cm->Height = cpi->oxcf.Height;
1658 assert(cm->Width <= cpi->initial_width);
1659 assert(cm->Height <= cpi->initial_height);
1660
1661 /* TODO(jkoleszar): if an internal spatial resampling is active,
1662 * and we downsize the input image, maybe we should clear the
1663 * internal scale immediately rather than waiting for it to
1664 * correct.
1665 */
1666
1667 /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
1668 if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7;
1669
1670 cm->sharpness_level = cpi->oxcf.Sharpness;
1671
1672 if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL) {
1673 int hr, hs, vr, vs;
1674
1675 Scale2Ratio(cm->horiz_scale, &hr, &hs);
1676 Scale2Ratio(cm->vert_scale, &vr, &vs);
1677
1678 /* always go to the next whole number */
1679 cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
1680 cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
1681 }
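  /* The scaled dimensions above are ceiling divisions, i.e.
   * ceil(Width * hr / hs) and ceil(Height * vr / vs). For instance, a
   * hypothetical 1-in-2 horizontal ratio applied to a 641-pixel-wide source
   * gives (2 - 1 + 641 * 1) / 2 = 321.
   */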
1682
1683 if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height) {
1684 cpi->force_next_frame_intra = 1;
1685 }
1686
1687 if (((cm->Width + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
1688 ((cm->Height + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
1689 cm->yv12_fb[cm->lst_fb_idx].y_width == 0) {
1690 dealloc_raw_frame_buffers(cpi);
1691 alloc_raw_frame_buffers(cpi);
1692 vp8_alloc_compressor_data(cpi);
1693 }
1694
1695 if (cpi->oxcf.fixed_q >= 0) {
1696 cpi->last_q[0] = cpi->oxcf.fixed_q;
1697 cpi->last_q[1] = cpi->oxcf.fixed_q;
1698 }
1699
1700 cpi->Speed = cpi->oxcf.cpu_used;
1701
1702 /* force allow_lag to 0 if lag_in_frames is 0 */
1703 if (cpi->oxcf.lag_in_frames == 0) {
1704 cpi->oxcf.allow_lag = 0;
1705 }
1706 /* Limit on lag buffers as these are not currently dynamically allocated */
1707 else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
1708 cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
1709 }
1710
1711 /* YX Temp */
1712 cpi->alt_ref_source = NULL;
1713 cpi->is_src_frame_alt_ref = 0;
1714
1715 #if CONFIG_TEMPORAL_DENOISING
1716 if (cpi->oxcf.noise_sensitivity) {
1717 if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc) {
1718 int width = (cpi->oxcf.Width + 15) & ~15;
1719 int height = (cpi->oxcf.Height + 15) & ~15;
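      /* (x + 15) & ~15 rounds each dimension up to the next multiple of 16
       * (one macroblock); e.g. a hypothetical 636-pixel width becomes 640.
       */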
1720 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1721 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1722 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1723 "Failed to allocate denoiser");
1724 }
1725 }
1726 }
1727 #endif
1728
1729 #if 0
1730 /* Experimental RD Code */
1731 cpi->frame_distortion = 0;
1732 cpi->last_frame_distortion = 0;
1733 #endif
1734 }
1735
1736 #ifndef M_LOG2_E
1737 #define M_LOG2_E 0.693147180559945309417
1738 #endif
1739 #define log2f(x) (log(x) / (float)M_LOG2_E)
1740
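/* The SAD-based motion vector costs filled in below follow a simple
 * logarithmic bit-count model: a component of magnitude i costs roughly
 * 256 * 2 * (log2(8 * i) + 0.6), symmetric for +i and -i, with a fixed cost
 * of 300 for the zero component. The factor of 256 appears to keep these
 * values in the same fixed-point scale as the other rate estimates.
 */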
1741 static void cal_mvsadcosts(int *mvsadcost[2]) {
1742 int i = 1;
1743
1744 mvsadcost[0][0] = 300;
1745 mvsadcost[1][0] = 300;
1746
1747 do {
1748 double z = 256 * (2 * (log2f(8 * i) + .6));
1749 mvsadcost[0][i] = (int)z;
1750 mvsadcost[1][i] = (int)z;
1751 mvsadcost[0][-i] = (int)z;
1752 mvsadcost[1][-i] = (int)z;
1753 } while (++i <= mvfp_max);
1754 }
1755
1756 struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
1757 int i;
1758
1759 VP8_COMP *cpi;
1760 VP8_COMMON *cm;
1761
1762 cpi = vpx_memalign(32, sizeof(VP8_COMP));
1763 /* Check that the CPI instance is valid */
1764 if (!cpi) return 0;
1765
1766 cm = &cpi->common;
1767
1768 memset(cpi, 0, sizeof(VP8_COMP));
1769
1770 if (setjmp(cm->error.jmp)) {
1771 cpi->common.error.setjmp = 0;
1772 vp8_remove_compressor(&cpi);
1773 return 0;
1774 }
1775
1776 cpi->common.error.setjmp = 1;
1777
1778 CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site),
1779 (MAX_MVSEARCH_STEPS * 8) + 1));
1780
1781 vp8_create_common(&cpi->common);
1782
1783 init_config(cpi, oxcf);
1784
1785 memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob,
1786 sizeof(vp8cx_base_skip_false_prob));
1787 cpi->common.current_video_frame = 0;
1788 cpi->temporal_pattern_counter = 0;
1789 cpi->temporal_layer_id = -1;
1790 cpi->kf_overspend_bits = 0;
1791 cpi->kf_bitrate_adjustment = 0;
1792 cpi->frames_till_gf_update_due = 0;
1793 cpi->gf_overspend_bits = 0;
1794 cpi->non_gf_bitrate_adjustment = 0;
1795 cpi->prob_last_coded = 128;
1796 cpi->prob_gf_coded = 128;
1797 cpi->prob_intra_coded = 63;
1798
1799 /* Prime the recent reference frame usage counters.
1800 * Hereafter they will be maintained as a sort of moving average
1801 */
1802 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
1803 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
1804 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
1805 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
1806
1807 /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
1808 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
1809
1810 cpi->twopass.gf_decay_rate = 0;
1811 cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
1812
1813 cpi->gold_is_last = 0;
1814 cpi->alt_is_last = 0;
1815 cpi->gold_is_alt = 0;
1816
1817 cpi->active_map_enabled = 0;
1818
1819 cpi->use_roi_static_threshold = 0;
1820
1821 #if 0
1822 /* Experimental code for lagged and one pass */
1823 /* Initialise one_pass GF frames stats */
1824 /* Update stats used for GF selection */
1825 if (cpi->pass == 0)
1826 {
1827 cpi->one_pass_frame_index = 0;
1828
1829 for (i = 0; i < MAX_LAG_BUFFERS; ++i)
1830 {
1831 cpi->one_pass_frame_stats[i].frames_so_far = 0;
1832 cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
1833 cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
1834 cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
1835 cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
1836 cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
1837 cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
1838 cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
1839 cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
1840 }
1841 }
1842 #endif
1843
1844 cpi->mse_source_denoised = 0;
1845
1846 /* Should we use the cyclic refresh method?
1847 * Currently there is no external control for this.
1848 * Enable it for error_resilient_mode, or for 1 pass CBR mode.
1849 */
1850 cpi->cyclic_refresh_mode_enabled =
1851 (cpi->oxcf.error_resilient_mode ||
1852 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1853 cpi->oxcf.Mode <= 2));
1854 cpi->cyclic_refresh_mode_max_mbs_perframe =
1855 (cpi->common.mb_rows * cpi->common.mb_cols) / 7;
1856 if (cpi->oxcf.number_of_layers == 1) {
1857 cpi->cyclic_refresh_mode_max_mbs_perframe =
1858 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
1859 } else if (cpi->oxcf.number_of_layers == 2) {
1860 cpi->cyclic_refresh_mode_max_mbs_perframe =
1861 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
1862 }
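  /* With the single-layer divisor of 20 above, a hypothetical 640x480 clip
   * (40 x 30 = 1200 macroblocks) would refresh at most 60 macroblocks per
   * frame, so a full refresh cycle spans roughly 20 frames.
   */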
1863 cpi->cyclic_refresh_mode_index = 0;
1864 cpi->cyclic_refresh_q = 32;
1865
1866 // GF behavior for 1 pass CBR, used when error_resilience is off.
1867 cpi->gf_update_onepass_cbr = 0;
1868 cpi->gf_noboost_onepass_cbr = 0;
1869 if (!cpi->oxcf.error_resilient_mode &&
1870 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER && cpi->oxcf.Mode <= 2) {
1871 cpi->gf_update_onepass_cbr = 1;
1872 cpi->gf_noboost_onepass_cbr = 1;
1873 cpi->gf_interval_onepass_cbr =
1874 cpi->cyclic_refresh_mode_max_mbs_perframe > 0
1875 ? (2 * (cpi->common.mb_rows * cpi->common.mb_cols) /
1876 cpi->cyclic_refresh_mode_max_mbs_perframe)
1877 : 10;
1878 cpi->gf_interval_onepass_cbr =
1879 VPXMIN(40, VPXMAX(6, cpi->gf_interval_onepass_cbr));
1880 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1881 }
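  /* The one-pass CBR GF interval above is roughly two full cyclic refresh
   * cycles (2 * total MBs / MBs refreshed per frame), clamped to the range
   * [6, 40] frames.
   */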
1882
1883 if (cpi->cyclic_refresh_mode_enabled) {
1884 CHECK_MEM_ERROR(cpi->cyclic_refresh_map,
1885 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1886 } else {
1887 cpi->cyclic_refresh_map = (signed char *)NULL;
1888 }
1889
1890 CHECK_MEM_ERROR(cpi->skin_map, vpx_calloc(cm->mb_rows * cm->mb_cols,
1891 sizeof(cpi->skin_map[0])));
1892
1893 CHECK_MEM_ERROR(cpi->consec_zero_last,
1894 vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
1895 CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias,
1896 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1897
1898 /* Initialize the feed-forward activity masking. */
1899 cpi->activity_avg = 90 << 12;
1900
1901 /* Give a sensible default for the first frame. */
1902 cpi->frames_since_key = 8;
1903 cpi->key_frame_frequency = cpi->oxcf.key_freq;
1904 cpi->this_key_frame_forced = 0;
1905 cpi->next_key_frame_forced = 0;
1906
1907 cpi->source_alt_ref_pending = 0;
1908 cpi->source_alt_ref_active = 0;
1909 cpi->common.refresh_alt_ref_frame = 0;
1910
1911 cpi->force_maxqp = 0;
1912 cpi->frames_since_last_drop_overshoot = 0;
1913
1914 cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
1915 #if CONFIG_INTERNAL_STATS
1916 cpi->b_calculate_ssimg = 0;
1917
1918 cpi->count = 0;
1919 cpi->bytes = 0;
1920
1921 if (cpi->b_calculate_psnr) {
1922 cpi->total_sq_error = 0.0;
1923 cpi->total_sq_error2 = 0.0;
1924 cpi->total_y = 0.0;
1925 cpi->total_u = 0.0;
1926 cpi->total_v = 0.0;
1927 cpi->total = 0.0;
1928 cpi->totalp_y = 0.0;
1929 cpi->totalp_u = 0.0;
1930 cpi->totalp_v = 0.0;
1931 cpi->totalp = 0.0;
1932 cpi->tot_recode_hits = 0;
1933 cpi->summed_quality = 0;
1934 cpi->summed_weights = 0;
1935 }
1936
1937 #endif
1938
1939 cpi->first_time_stamp_ever = 0x7FFFFFFF;
1940
1941 cpi->frames_till_gf_update_due = 0;
1942 cpi->key_frame_count = 1;
1943
1944 cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
1945 cpi->ni_tot_qi = 0;
1946 cpi->ni_frames = 0;
1947 cpi->total_byte_count = 0;
1948
1949 cpi->drop_frame = 0;
1950
1951 cpi->rate_correction_factor = 1.0;
1952 cpi->key_frame_rate_correction_factor = 1.0;
1953 cpi->gf_rate_correction_factor = 1.0;
1954 cpi->twopass.est_max_qcorrection_factor = 1.0;
1955
1956 for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
1957 cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
1958 }
1959
1960 #ifdef OUTPUT_YUV_SRC
1961 yuv_file = fopen("bd.yuv", "ab");
1962 #endif
1963 #ifdef OUTPUT_YUV_DENOISED
1964 yuv_denoised_file = fopen("denoised.yuv", "ab");
1965 #endif
1966 #ifdef OUTPUT_YUV_SKINMAP
1967 yuv_skinmap_file = fopen("skinmap.yuv", "wb");
1968 #endif
1969
1970 #if 0
1971 framepsnr = fopen("framepsnr.stt", "a");
1972 kf_list = fopen("kf_list.stt", "w");
1973 #endif
1974
1975 cpi->output_pkt_list = oxcf->output_pkt_list;
1976
1977 #if !CONFIG_REALTIME_ONLY
1978
1979 if (cpi->pass == 1) {
1980 vp8_init_first_pass(cpi);
1981 } else if (cpi->pass == 2) {
1982 size_t packet_sz = sizeof(FIRSTPASS_STATS);
1983 int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
1984
1985 cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
1986 cpi->twopass.stats_in = cpi->twopass.stats_in_start;
1987 cpi->twopass.stats_in_end =
1988 (void *)((char *)cpi->twopass.stats_in + (packets - 1) * packet_sz);
1989 vp8_init_second_pass(cpi);
1990 }
1991
1992 #endif
1993
1994 if (cpi->compressor_speed == 2) {
1995 cpi->avg_encode_time = 0;
1996 cpi->avg_pick_mode_time = 0;
1997 }
1998
1999 vp8_set_speed_features(cpi);
2000
2001 /* Set starting values of RD threshold multipliers (128 = *1) */
2002 for (i = 0; i < MAX_MODES; ++i) {
2003 cpi->mb.rd_thresh_mult[i] = 128;
2004 }
2005
2006 #if CONFIG_MULTITHREAD
2007 if (vp8cx_create_encoder_threads(cpi)) {
2008 vp8_remove_compressor(&cpi);
2009 return 0;
2010 }
2011 #endif
2012
2013 cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
2014 cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
2015 cpi->fn_ptr[BLOCK_16X16].svf = vpx_sub_pixel_variance16x16;
2016 cpi->fn_ptr[BLOCK_16X16].sdx3f = vpx_sad16x16x3;
2017 cpi->fn_ptr[BLOCK_16X16].sdx8f = vpx_sad16x16x8;
2018 cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;
2019
2020 cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
2021 cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
2022 cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8;
2023 cpi->fn_ptr[BLOCK_16X8].sdx3f = vpx_sad16x8x3;
2024 cpi->fn_ptr[BLOCK_16X8].sdx8f = vpx_sad16x8x8;
2025 cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;
2026
2027 cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
2028 cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
2029 cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16;
2030 cpi->fn_ptr[BLOCK_8X16].sdx3f = vpx_sad8x16x3;
2031 cpi->fn_ptr[BLOCK_8X16].sdx8f = vpx_sad8x16x8;
2032 cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;
2033
2034 cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
2035 cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
2036 cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8;
2037 cpi->fn_ptr[BLOCK_8X8].sdx3f = vpx_sad8x8x3;
2038 cpi->fn_ptr[BLOCK_8X8].sdx8f = vpx_sad8x8x8;
2039 cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;
2040
2041 cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
2042 cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
2043 cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4;
2044 cpi->fn_ptr[BLOCK_4X4].sdx3f = vpx_sad4x4x3;
2045 cpi->fn_ptr[BLOCK_4X4].sdx8f = vpx_sad4x4x8;
2046 cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;
2047
2048 #if VPX_ARCH_X86 || VPX_ARCH_X86_64
2049 cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
2050 cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
2051 cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
2052 cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
2053 cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
2054 #endif
2055
2056 cpi->full_search_sad = vp8_full_search_sad;
2057 cpi->diamond_search_sad = vp8_diamond_search_sad;
2058 cpi->refining_search_sad = vp8_refining_search_sad;
2059
2060 /* make sure frame 1 is okay */
2061 cpi->mb.error_bins[0] = cpi->common.MBs;
2062
2063 /* vp8cx_init_quantizer() is first called here. Add check in
2064 * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
2065 * called later when needed. This will avoid unnecessary calls of
2066 * vp8cx_init_quantizer() for every frame.
2067 */
2068 vp8cx_init_quantizer(cpi);
2069
2070 vp8_loop_filter_init(cm);
2071
2072 cpi->common.error.setjmp = 0;
2073
2074 #if CONFIG_MULTI_RES_ENCODING
2075
2076 /* Calculate # of MBs in a row in lower-resolution level image. */
2077 if (cpi->oxcf.mr_encoder_id > 0) vp8_cal_low_res_mb_cols(cpi);
2078
2079 #endif
2080
2081 /* setup RD costs to MACROBLOCK struct */
2082
2083 cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max + 1];
2084 cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max + 1];
2085 cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max + 1];
2086 cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max + 1];
2087
2088 cal_mvsadcosts(cpi->mb.mvsadcost);
2089
2090 cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
2091 cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
2092 cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
2093 cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
2094 cpi->mb.token_costs = cpi->rd_costs.token_costs;
2095
2096 /* setup block ptrs & offsets */
2097 vp8_setup_block_ptrs(&cpi->mb);
2098 vp8_setup_block_dptrs(&cpi->mb.e_mbd);
2099
2100 return cpi;
2101 }
2102
2103 void vp8_remove_compressor(VP8_COMP **comp) {
2104 VP8_COMP *cpi = *comp;
2105
2106 if (!cpi) return;
2107
2108 if (cpi && (cpi->common.current_video_frame > 0)) {
2109 #if !CONFIG_REALTIME_ONLY
2110
2111 if (cpi->pass == 2) {
2112 vp8_end_second_pass(cpi);
2113 }
2114
2115 #endif
2116
2117 #if CONFIG_INTERNAL_STATS
2118
2119 if (cpi->pass != 1) {
2120 FILE *f = fopen("opsnr.stt", "a");
2121 double time_encoded =
2122 (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
2123 10000000.000;
2124 double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
2125
2126 if (cpi->b_calculate_psnr) {
2127 if (cpi->oxcf.number_of_layers > 1) {
2128 int i;
2129
2130 fprintf(f,
2131 "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2132 "GLPsnrP\tVPXSSIM\n");
2133 for (i = 0; i < (int)cpi->oxcf.number_of_layers; ++i) {
2134 double dr =
2135 (double)cpi->bytes_in_layer[i] * 8.0 / 1000.0 / time_encoded;
2136 double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
2137 cpi->common.Width * cpi->common.Height;
2138 double total_psnr =
2139 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2[i]);
2140 double total_psnr2 =
2141 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2_p[i]);
2142 double total_ssim =
2143 100 * pow(cpi->sum_ssim[i] / cpi->sum_weights[i], 8.0);
2144
2145 fprintf(f,
2146 "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2147 "%7.3f\t%7.3f\n",
2148 i, dr, cpi->sum_psnr[i] / cpi->frames_in_layer[i],
2149 total_psnr, cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
2150 total_psnr2, total_ssim);
2151 }
2152 } else {
2153 double samples =
2154 3.0 / 2 * cpi->count * cpi->common.Width * cpi->common.Height;
2155 double total_psnr =
2156 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error);
2157 double total_psnr2 =
2158 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error2);
2159 double total_ssim =
2160 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
2161
2162 fprintf(f,
2163 "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2164 "GLPsnrP\tVPXSSIM\n");
2165 fprintf(f,
2166 "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2167 "%7.3f\n",
2168 dr, cpi->total / cpi->count, total_psnr,
2169 cpi->totalp / cpi->count, total_psnr2, total_ssim);
2170 }
2171 }
2172 fclose(f);
2173 #if 0
2174 f = fopen("qskip.stt", "a");
2175 fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skip_true_count, skip_false_count);
2176 fclose(f);
2177 #endif
2178 }
2179
2180 #endif
2181
2182 #ifdef SPEEDSTATS
2183
2184 if (cpi->compressor_speed == 2) {
2185 int i;
2186 FILE *f = fopen("cxspeed.stt", "a");
2187 cnt_pm /= cpi->common.MBs;
2188
2189 for (i = 0; i < 16; ++i) fprintf(f, "%5d", frames_at_speed[i]);
2190
2191 fprintf(f, "\n");
2192 fclose(f);
2193 }
2194
2195 #endif
2196
2197 #ifdef MODE_STATS
2198 {
2199 extern int count_mb_seg[4];
2200 FILE *f = fopen("modes.stt", "a");
2201 double dr = (double)cpi->framerate * (double)bytes * (double)8 /
2202 (double)count / (double)1000;
2203 fprintf(f, "intra_mode in Intra Frames:\n");
2204 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1],
2205 y_modes[2], y_modes[3], y_modes[4]);
2206 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1],
2207 uv_modes[2], uv_modes[3]);
2208 fprintf(f, "B: ");
2209 {
2210 int i;
2211
2212 for (i = 0; i < 10; ++i) fprintf(f, "%8d, ", b_modes[i]);
2213
2214 fprintf(f, "\n");
2215 }
2216
2217 fprintf(f, "Modes in Inter Frames:\n");
2218 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
2219 inter_y_modes[0], inter_y_modes[1], inter_y_modes[2],
2220 inter_y_modes[3], inter_y_modes[4], inter_y_modes[5],
2221 inter_y_modes[6], inter_y_modes[7], inter_y_modes[8],
2222 inter_y_modes[9]);
2223 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0],
2224 inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
2225 fprintf(f, "B: ");
2226 {
2227 int i;
2228
2229 for (i = 0; i < 15; ++i) fprintf(f, "%8d, ", inter_b_modes[i]);
2230
2231 fprintf(f, "\n");
2232 }
2233 fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1],
2234 count_mb_seg[2], count_mb_seg[3]);
2235 fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4],
2236 inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4],
2237 inter_b_modes[NEW4X4]);
2238
2239 fclose(f);
2240 }
2241 #endif
2242
2243 #if defined(SECTIONBITS_OUTPUT)
2244
2245 if (0) {
2246 int i;
2247 FILE *f = fopen("tokenbits.stt", "a");
2248
2249 for (i = 0; i < 28; ++i) fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
2250
2251 fprintf(f, "\n");
2252 fclose(f);
2253 }
2254
2255 #endif
2256
2257 #if 0
2258 {
2259 printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
2260 printf("\n_frames receive_data encode_mb_row compress_frame Total\n");
2261 printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
2262 }
2263 #endif
2264 }
2265
2266 #if CONFIG_MULTITHREAD
2267 vp8cx_remove_encoder_threads(cpi);
2268 #endif
2269
2270 #if CONFIG_TEMPORAL_DENOISING
2271 vp8_denoiser_free(&cpi->denoiser);
2272 #endif
2273 dealloc_compressor_data(cpi);
2274 vpx_free(cpi->mb.ss);
2275 vpx_free(cpi->tok);
2276 vpx_free(cpi->skin_map);
2277 vpx_free(cpi->cyclic_refresh_map);
2278 vpx_free(cpi->consec_zero_last);
2279 vpx_free(cpi->consec_zero_last_mvbias);
2280
2281 vp8_remove_common(&cpi->common);
2282 vpx_free(cpi);
2283 *comp = 0;
2284
2285 #ifdef OUTPUT_YUV_SRC
2286 fclose(yuv_file);
2287 #endif
2288 #ifdef OUTPUT_YUV_DENOISED
2289 fclose(yuv_denoised_file);
2290 #endif
2291 #ifdef OUTPUT_YUV_SKINMAP
2292 fclose(yuv_skinmap_file);
2293 #endif
2294
2295 #if 0
2296
2297 if (keyfile)
2298 fclose(keyfile);
2299
2300 if (framepsnr)
2301 fclose(framepsnr);
2302
2303 if (kf_list)
2304 fclose(kf_list);
2305
2306 #endif
2307 }
2308
2309 static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
2310 unsigned char *recon, int recon_stride,
2311 unsigned int cols, unsigned int rows) {
2312 unsigned int row, col;
2313 uint64_t total_sse = 0;
2314 int diff;
2315
2316 for (row = 0; row + 16 <= rows; row += 16) {
2317 for (col = 0; col + 16 <= cols; col += 16) {
2318 unsigned int sse;
2319
2320 vpx_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
2321 total_sse += sse;
2322 }
2323
2324 /* Handle odd-sized width */
2325 if (col < cols) {
2326 unsigned int border_row, border_col;
2327 unsigned char *border_orig = orig;
2328 unsigned char *border_recon = recon;
2329
2330 for (border_row = 0; border_row < 16; ++border_row) {
2331 for (border_col = col; border_col < cols; ++border_col) {
2332 diff = border_orig[border_col] - border_recon[border_col];
2333 total_sse += diff * diff;
2334 }
2335
2336 border_orig += orig_stride;
2337 border_recon += recon_stride;
2338 }
2339 }
2340
2341 orig += orig_stride * 16;
2342 recon += recon_stride * 16;
2343 }
2344
2345 /* Handle odd-sized height */
2346 for (; row < rows; ++row) {
2347 for (col = 0; col < cols; ++col) {
2348 diff = orig[col] - recon[col];
2349 total_sse += diff * diff;
2350 }
2351
2352 orig += orig_stride;
2353 recon += recon_stride;
2354 }
2355
2356 vpx_clear_system_state();
2357 return total_sse;
2358 }
2359
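/* generate_psnr_packet() below fills a VPX_CODEC_PSNR_PKT with per-plane and
 * combined figures: sse[0]/samples[0] accumulate Y, U and V together while
 * indices 1..3 hold the individual planes, and vpx_sse_to_psnr() converts
 * each pair to dB, essentially 10 * log10(255^2 * samples / sse).
 */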
2360 static void generate_psnr_packet(VP8_COMP *cpi) {
2361 YV12_BUFFER_CONFIG *orig = cpi->Source;
2362 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
2363 struct vpx_codec_cx_pkt pkt;
2364 uint64_t sse;
2365 int i;
2366 unsigned int width = cpi->common.Width;
2367 unsigned int height = cpi->common.Height;
2368
2369 pkt.kind = VPX_CODEC_PSNR_PKT;
2370 sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
2371 recon->y_stride, width, height);
2372 pkt.data.psnr.sse[0] = sse;
2373 pkt.data.psnr.sse[1] = sse;
2374 pkt.data.psnr.samples[0] = width * height;
2375 pkt.data.psnr.samples[1] = width * height;
2376
2377 width = (width + 1) / 2;
2378 height = (height + 1) / 2;
2379
2380 sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
2381 recon->uv_stride, width, height);
2382 pkt.data.psnr.sse[0] += sse;
2383 pkt.data.psnr.sse[2] = sse;
2384 pkt.data.psnr.samples[0] += width * height;
2385 pkt.data.psnr.samples[2] = width * height;
2386
2387 sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
2388 recon->uv_stride, width, height);
2389 pkt.data.psnr.sse[0] += sse;
2390 pkt.data.psnr.sse[3] = sse;
2391 pkt.data.psnr.samples[0] += width * height;
2392 pkt.data.psnr.samples[3] = width * height;
2393
2394 for (i = 0; i < 4; ++i) {
2395 pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
2396 (double)(pkt.data.psnr.sse[i]));
2397 }
2398
2399 vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
2400 }
2401
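/* The reference flags handled below form a 3-bit mask built from
 * VP8_LAST_FRAME, VP8_GOLD_FRAME and VP8_ALTR_FRAME, hence the rejection of
 * values greater than 7. An application wanting prediction from the last and
 * golden frames only would presumably pass (VP8_LAST_FRAME | VP8_GOLD_FRAME).
 */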
2402 int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags) {
2403 if (ref_frame_flags > 7) return -1;
2404
2405 cpi->ref_frame_flags = ref_frame_flags;
2406 return 0;
2407 }
2408 int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) {
2409 if (ref_frame_flags > 7) return -1;
2410
2411 cpi->common.refresh_golden_frame = 0;
2412 cpi->common.refresh_alt_ref_frame = 0;
2413 cpi->common.refresh_last_frame = 0;
2414
2415 if (ref_frame_flags & VP8_LAST_FRAME) cpi->common.refresh_last_frame = 1;
2416
2417 if (ref_frame_flags & VP8_GOLD_FRAME) cpi->common.refresh_golden_frame = 1;
2418
2419 if (ref_frame_flags & VP8_ALTR_FRAME) cpi->common.refresh_alt_ref_frame = 1;
2420
2421 cpi->ext_refresh_frame_flags_pending = 1;
2422 return 0;
2423 }
2424
2425 int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2426 YV12_BUFFER_CONFIG *sd) {
2427 VP8_COMMON *cm = &cpi->common;
2428 int ref_fb_idx;
2429
2430 if (ref_frame_flag == VP8_LAST_FRAME) {
2431 ref_fb_idx = cm->lst_fb_idx;
2432 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2433 ref_fb_idx = cm->gld_fb_idx;
2434 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2435 ref_fb_idx = cm->alt_fb_idx;
2436 } else {
2437 return -1;
2438 }
2439
2440 vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
2441
2442 return 0;
2443 }
2444 int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2445 YV12_BUFFER_CONFIG *sd) {
2446 VP8_COMMON *cm = &cpi->common;
2447
2448 int ref_fb_idx;
2449
2450 if (ref_frame_flag == VP8_LAST_FRAME) {
2451 ref_fb_idx = cm->lst_fb_idx;
2452 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2453 ref_fb_idx = cm->gld_fb_idx;
2454 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2455 ref_fb_idx = cm->alt_fb_idx;
2456 } else {
2457 return -1;
2458 }
2459
2460 vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
2461
2462 return 0;
2463 }
2464 int vp8_update_entropy(VP8_COMP *cpi, int update) {
2465 VP8_COMMON *cm = &cpi->common;
2466 cm->refresh_entropy_probs = update;
2467
2468 return 0;
2469 }
2470
2471 static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
2472 VP8_COMMON *cm = &cpi->common;
2473
2474 /* are we resizing the image */
2475 if (cm->horiz_scale != 0 || cm->vert_scale != 0) {
2476 #if CONFIG_SPATIAL_RESAMPLING
2477 int hr, hs, vr, vs;
2478 int tmp_height;
2479
2480 if (cm->vert_scale == 3) {
2481 tmp_height = 9;
2482 } else {
2483 tmp_height = 11;
2484 }
2485
2486 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2487 Scale2Ratio(cm->vert_scale, &vr, &vs);
2488
2489 vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
2490 tmp_height, hs, hr, vs, vr, 0);
2491
2492 vp8_yv12_extend_frame_borders(&cpi->scaled_source);
2493 cpi->Source = &cpi->scaled_source;
2494 #endif
2495 } else {
2496 cpi->Source = sd;
2497 }
2498 }
2499
2500 static int resize_key_frame(VP8_COMP *cpi) {
2501 #if CONFIG_SPATIAL_RESAMPLING
2502 VP8_COMMON *cm = &cpi->common;
2503
2504 /* Do we need to apply resampling for one pass CBR?
2505 * In one pass this is more limited than in two pass CBR.
2506 * The test and any change is only made once per key frame sequence.
2507 */
2508 if (cpi->oxcf.allow_spatial_resampling &&
2509 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) {
2510 int hr, hs, vr, vs;
2511 int new_width, new_height;
2512
2513 /* If we are below the resample DOWN watermark then scale down a
2514 * notch.
2515 */
2516 if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark *
2517 cpi->oxcf.optimal_buffer_level / 100)) {
2518 cm->horiz_scale =
2519 (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
2520 cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
2521 }
2522 /* Should we now start scaling back up */
2523 else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark *
2524 cpi->oxcf.optimal_buffer_level / 100)) {
2525 cm->horiz_scale =
2526 (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
2527 cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
2528 }
2529
2530 /* Get the new height and width */
2531 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2532 Scale2Ratio(cm->vert_scale, &vr, &vs);
2533 new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
2534 new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
2535
2536 /* If the image size has changed we need to reallocate the buffers
2537 * and resample the source image
2538 */
2539 if ((cm->Width != new_width) || (cm->Height != new_height)) {
2540 cm->Width = new_width;
2541 cm->Height = new_height;
2542 vp8_alloc_compressor_data(cpi);
2543 scale_and_extend_source(cpi->un_scaled_source, cpi);
2544 return 1;
2545 }
2546 }
2547
2548 #endif
2549 return 0;
2550 }
2551
2552 static void update_alt_ref_frame_stats(VP8_COMP *cpi) {
2553 VP8_COMMON *cm = &cpi->common;
2554
2555 /* Select an interval before next GF or altref */
2556 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2557
2558 if ((cpi->pass != 2) && cpi->frames_till_gf_update_due) {
2559 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2560
2561 /* Set the bits per frame that we should try and recover in
2562 * subsequent inter frames to account for the extra GF spend...
2563 * note that this does not apply for GF updates that occur
2564 * coincident with a key frame as the extra cost of key frames is
2565 * dealt with elsewhere.
2566 */
2567 cpi->gf_overspend_bits += cpi->projected_frame_size;
2568 cpi->non_gf_bitrate_adjustment =
2569 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2570 }
2571
2572 /* Update data structure that monitors level of reference to last GF */
2573 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2574 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2575
2576 /* this frame's refresh means subsequent frames don't refresh unless specified by the user */
2577 cpi->frames_since_golden = 0;
2578
2579 /* Clear the alternate reference update pending flag. */
2580 cpi->source_alt_ref_pending = 0;
2581
2582 /* Set the alternate reference frame active flag */
2583 cpi->source_alt_ref_active = 1;
2584 }
2585 static void update_golden_frame_stats(VP8_COMP *cpi) {
2586 VP8_COMMON *cm = &cpi->common;
2587
2588 /* Update the Golden frame usage counts. */
2589 if (cm->refresh_golden_frame) {
2590 /* Select an interval before next GF */
2591 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2592
2593 if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0)) {
2594 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2595
2596 /* Set the bits per frame that we should try and recover in
2597 * subsequent inter frames to account for the extra GF spend...
2598 * note that this does not apply for GF updates that occur
2599 * coincident with a key frame as the extra cost of key frames
2600 * is dealt with elsewhere.
2601 */
2602 if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active) {
2603 /* Calculate GF bits to be recovered:
2604 * projected size minus the average frame bits available for inter
2605 * frames for clip as a whole
2606 */
2607 cpi->gf_overspend_bits +=
2608 (cpi->projected_frame_size - cpi->inter_frame_target);
2609 }
2610
2611 cpi->non_gf_bitrate_adjustment =
2612 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2613 }
2614
2615 /* Update data structure that monitors level of reference to last GF */
2616 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2617 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2618
2619 /* this frame's refresh means subsequent frames don't refresh unless
2620 * specified by the user
2621 */
2622 cm->refresh_golden_frame = 0;
2623 cpi->frames_since_golden = 0;
2624
2625 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
2626 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
2627 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
2628 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
2629
2630 /* ******** Fixed Q test code only ************ */
2631 /* If we are going to use the ALT reference for the next group of
2632 * frames set a flag to say so.
2633 */
2634 if (cpi->oxcf.fixed_q >= 0 && cpi->oxcf.play_alternate &&
2635 !cpi->common.refresh_alt_ref_frame) {
2636 cpi->source_alt_ref_pending = 1;
2637 cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
2638 }
2639
2640 if (!cpi->source_alt_ref_pending) cpi->source_alt_ref_active = 0;
2641
2642 /* Decrement count down till next gf */
2643 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2644
2645 } else if (!cpi->common.refresh_alt_ref_frame) {
2646 /* Decrement count down till next gf */
2647 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2648
2649 if (cpi->frames_till_alt_ref_frame) cpi->frames_till_alt_ref_frame--;
2650
2651 cpi->frames_since_golden++;
2652
2653 if (cpi->frames_since_golden > 1) {
2654 cpi->recent_ref_frame_usage[INTRA_FRAME] +=
2655 cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
2656 cpi->recent_ref_frame_usage[LAST_FRAME] +=
2657 cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
2658 cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
2659 cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
2660 cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
2661 cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
2662 }
2663 }
2664 }
2665
2666 /* This function updates the reference frame probability estimates that
2667 * will be used during mode selection
2668 */
2669 static void update_rd_ref_frame_probs(VP8_COMP *cpi) {
2670 VP8_COMMON *cm = &cpi->common;
2671
2672 const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
2673 const int rf_intra = rfct[INTRA_FRAME];
2674 const int rf_inter =
2675 rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
2676
2677 if (cm->frame_type == KEY_FRAME) {
2678 cpi->prob_intra_coded = 255;
2679 cpi->prob_last_coded = 128;
2680 cpi->prob_gf_coded = 128;
2681 } else if (!(rf_intra + rf_inter)) {
2682 cpi->prob_intra_coded = 63;
2683 cpi->prob_last_coded = 128;
2684 cpi->prob_gf_coded = 128;
2685 }
2686
2687 /* update reference frame costs since we can do better than what we got
2688 * last frame.
2689 */
2690 if (cpi->oxcf.number_of_layers == 1) {
2691 if (cpi->common.refresh_alt_ref_frame) {
2692 cpi->prob_intra_coded += 40;
2693 if (cpi->prob_intra_coded > 255) cpi->prob_intra_coded = 255;
2694 cpi->prob_last_coded = 200;
2695 cpi->prob_gf_coded = 1;
2696 } else if (cpi->frames_since_golden == 0) {
2697 cpi->prob_last_coded = 214;
2698 } else if (cpi->frames_since_golden == 1) {
2699 cpi->prob_last_coded = 192;
2700 cpi->prob_gf_coded = 220;
2701 } else if (cpi->source_alt_ref_active) {
2702 cpi->prob_gf_coded -= 20;
2703
2704 if (cpi->prob_gf_coded < 10) cpi->prob_gf_coded = 10;
2705 }
2706 if (!cpi->source_alt_ref_active) cpi->prob_gf_coded = 255;
2707 }
2708 }
2709
2710 #if !CONFIG_REALTIME_ONLY
2711 /* 1 = key, 0 = inter */
2712 static int decide_key_frame(VP8_COMP *cpi) {
2713 VP8_COMMON *cm = &cpi->common;
2714
2715 int code_key_frame = 0;
2716
2717 cpi->kf_boost = 0;
2718
2719 if (cpi->Speed > 11) return 0;
2720
2721 /* Clear down mmx registers */
2722 vpx_clear_system_state();
2723
2724 if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0)) {
2725 double change = 1.0 *
2726 abs((int)(cpi->mb.intra_error - cpi->last_intra_error)) /
2727 (1 + cpi->last_intra_error);
2728 double change2 =
2729 1.0 *
2730 abs((int)(cpi->mb.prediction_error - cpi->last_prediction_error)) /
2731 (1 + cpi->last_prediction_error);
2732 double minerror = cm->MBs * 256;
2733
2734 cpi->last_intra_error = cpi->mb.intra_error;
2735 cpi->last_prediction_error = cpi->mb.prediction_error;
2736
2737 if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15 &&
2738 cpi->mb.prediction_error > minerror &&
2739 (change > .25 || change2 > .25)) {
2740 /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra >
2741 * cpi->last_frame_percent_intra + 3*/
2742 return 1;
2743 }
2744
2745 return 0;
2746 }
2747
2748 /* If the following are true we might as well code a key frame */
2749 if (((cpi->this_frame_percent_intra == 100) &&
2750 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
2751 ((cpi->this_frame_percent_intra > 95) &&
2752 (cpi->this_frame_percent_intra >=
2753 (cpi->last_frame_percent_intra + 5)))) {
2754 code_key_frame = 1;
2755 }
2756 /* In addition, if the following are true and this is not a golden frame,
2757 * then code a key frame. Note that on golden frames there often seems
2758 * to be a pop in intra usage anyway, hence this restriction is
2759 * designed to prevent spurious key frames. The intra pop needs to be
2760 * investigated.
2761 */
2762 else if (((cpi->this_frame_percent_intra > 60) &&
2763 (cpi->this_frame_percent_intra >
2764 (cpi->last_frame_percent_intra * 2))) ||
2765 ((cpi->this_frame_percent_intra > 75) &&
2766 (cpi->this_frame_percent_intra >
2767 (cpi->last_frame_percent_intra * 3 / 2))) ||
2768 ((cpi->this_frame_percent_intra > 90) &&
2769 (cpi->this_frame_percent_intra >
2770 (cpi->last_frame_percent_intra + 10)))) {
2771 if (!cm->refresh_golden_frame) code_key_frame = 1;
2772 }
2773
2774 return code_key_frame;
2775 }
2776
2777 static void Pass1Encode(VP8_COMP *cpi) {
2778 vp8_set_quantizer(cpi, 26);
2779 vp8_first_pass(cpi);
2780 }
2781 #endif
2782
2783 #if 0
2784 void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
2785 {
2786
2787 /* write the frame */
2788 FILE *yframe;
2789 int i;
2790 char filename[255];
2791
2792 sprintf(filename, "cx\\y%04d.raw", this_frame);
2793 yframe = fopen(filename, "wb");
2794
2795 for (i = 0; i < frame->y_height; ++i)
2796 fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
2797
2798 fclose(yframe);
2799 sprintf(filename, "cx\\u%04d.raw", this_frame);
2800 yframe = fopen(filename, "wb");
2801
2802 for (i = 0; i < frame->uv_height; ++i)
2803 fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2804
2805 fclose(yframe);
2806 sprintf(filename, "cx\\v%04d.raw", this_frame);
2807 yframe = fopen(filename, "wb");
2808
2809 for (i = 0; i < frame->uv_height; ++i)
2810 fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2811
2812 fclose(yframe);
2813 }
2814 #endif
2815
2816 #if !CONFIG_REALTIME_ONLY
2817 /* Function to test for conditions that indicate we should loop
2818 * back and recode a frame.
2819 */
2820 static int recode_loop_test(VP8_COMP *cpi, int high_limit, int low_limit, int q,
2821 int maxq, int minq) {
2822 int force_recode = 0;
2823 VP8_COMMON *cm = &cpi->common;
2824
2825 /* Is frame recode allowed at all?
2826 * Yes if either recode mode 1 is selected, or mode 2 is selected
2827 * and the frame is a key frame, golden frame or alt_ref_frame.
2828 */
2829 if ((cpi->sf.recode_loop == 1) ||
2830 ((cpi->sf.recode_loop == 2) &&
2831 ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
2832 cm->refresh_alt_ref_frame))) {
2833 /* General over and under shoot tests */
2834 if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
2835 ((cpi->projected_frame_size < low_limit) && (q > minq))) {
2836 force_recode = 1;
2837 }
2838 /* Special Constrained quality tests */
2839 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
2840 /* Undershoot and below auto cq level */
2841 if ((q > cpi->cq_target_quality) &&
2842 (cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3))) {
2843 force_recode = 1;
2844 }
2845 /* Severe undershoot and between auto and user cq level */
2846 else if ((q > cpi->oxcf.cq_level) &&
2847 (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
2848 (cpi->active_best_quality > cpi->oxcf.cq_level)) {
2849 force_recode = 1;
2850 cpi->active_best_quality = cpi->oxcf.cq_level;
2851 }
2852 }
2853 }
2854
2855 return force_recode;
2856 }
2857 #endif // !CONFIG_REALTIME_ONLY
2858
2859 static void update_reference_frames(VP8_COMP *cpi) {
2860 VP8_COMMON *cm = &cpi->common;
2861 YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
2862
2863 /* At this point the new frame has been encoded.
2864 * If any buffer copy / swapping is signaled it should be done here.
2865 */
2866
2867 if (cm->frame_type == KEY_FRAME) {
2868 yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME;
2869
2870 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2871 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2872
2873 cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
2874
2875 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2876 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2877 } else {
2878 if (cm->refresh_alt_ref_frame) {
2879 assert(!cm->copy_buffer_to_arf);
2880
2881 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
2882 cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2883 cm->alt_fb_idx = cm->new_fb_idx;
2884
2885 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2886 } else if (cm->copy_buffer_to_arf) {
2887 assert(!(cm->copy_buffer_to_arf & ~0x3));
2888
2889 if (cm->copy_buffer_to_arf == 1) {
2890 if (cm->alt_fb_idx != cm->lst_fb_idx) {
2891 yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
2892 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2893 cm->alt_fb_idx = cm->lst_fb_idx;
2894
2895 cpi->current_ref_frames[ALTREF_FRAME] =
2896 cpi->current_ref_frames[LAST_FRAME];
2897 }
2898 } else {
2899 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2900 yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
2901 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2902 cm->alt_fb_idx = cm->gld_fb_idx;
2903
2904 cpi->current_ref_frames[ALTREF_FRAME] =
2905 cpi->current_ref_frames[GOLDEN_FRAME];
2906 }
2907 }
2908 }
2909
2910 if (cm->refresh_golden_frame) {
2911 assert(!cm->copy_buffer_to_gf);
2912
2913 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
2914 cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2915 cm->gld_fb_idx = cm->new_fb_idx;
2916
2917 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2918 } else if (cm->copy_buffer_to_gf) {
2919 assert(!(cm->copy_buffer_to_gf & ~0x3));
2920
2921 if (cm->copy_buffer_to_gf == 1) {
2922 if (cm->gld_fb_idx != cm->lst_fb_idx) {
2923 yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
2924 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2925 cm->gld_fb_idx = cm->lst_fb_idx;
2926
2927 cpi->current_ref_frames[GOLDEN_FRAME] =
2928 cpi->current_ref_frames[LAST_FRAME];
2929 }
2930 } else {
2931 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2932 yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
2933 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2934 cm->gld_fb_idx = cm->alt_fb_idx;
2935
2936 cpi->current_ref_frames[GOLDEN_FRAME] =
2937 cpi->current_ref_frames[ALTREF_FRAME];
2938 }
2939 }
2940 }
2941 }
2942
2943 if (cm->refresh_last_frame) {
2944 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
2945 cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
2946 cm->lst_fb_idx = cm->new_fb_idx;
2947
2948 cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
2949 }
2950
2951 #if CONFIG_TEMPORAL_DENOISING
2952 if (cpi->oxcf.noise_sensitivity) {
2953 /* we shouldn't have to keep multiple copies as we know in advance which
2954 * buffer we should start from - for now, to get something up and running,
2955 * I've chosen to copy the buffers
2956 */
2957 if (cm->frame_type == KEY_FRAME) {
2958 int i;
2959 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
2960 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_running_avg[i]);
2961 } else {
2962 vp8_yv12_extend_frame_borders(
2963 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
2964
2965 if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf) {
2966 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
2967 &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
2968 }
2969 if (cm->refresh_golden_frame || cm->copy_buffer_to_gf) {
2970 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
2971 &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
2972 }
2973 if (cm->refresh_last_frame) {
2974 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
2975 &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
2976 }
2977 }
2978 if (cpi->oxcf.noise_sensitivity == 4)
2979 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source);
2980 }
2981 #endif
2982 }
2983
2984 static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source,
2985 YV12_BUFFER_CONFIG *dest,
2986 VP8_COMP *cpi) {
2987 int i, j;
2988 int Total = 0;
2989 int num_blocks = 0;
2990 int skip = 2;
2991 int min_consec_zero_last = 10;
2992 int tot_num_blocks = (source->y_height * source->y_width) >> 8;
2993 unsigned char *src = source->y_buffer;
2994 unsigned char *dst = dest->y_buffer;
2995
2996 /* Loop through the Y plane, every |skip| blocks along rows and columns,
2997 * summing the square differences, and only for blocks that have been
2998 * in zero_last mode for at least |min_consec_zero_last| frames in a row.
2999 */
3000 for (i = 0; i < source->y_height; i += 16 * skip) {
3001 int block_index_row = (i >> 4) * cpi->common.mb_cols;
3002 for (j = 0; j < source->y_width; j += 16 * skip) {
3003 int index = block_index_row + (j >> 4);
3004 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3005 unsigned int sse;
3006 Total += vpx_mse16x16(src + j, source->y_stride, dst + j,
3007 dest->y_stride, &sse);
3008 num_blocks++;
3009 }
3010 }
3011 src += 16 * skip * source->y_stride;
3012 dst += 16 * skip * dest->y_stride;
3013 }
3014 // Only return non-zero if we have at least ~1/16 samples for estimate.
3015 if (num_blocks > (tot_num_blocks >> 4)) {
3016 assert(num_blocks != 0);
3017 return (Total / num_blocks);
3018 } else {
3019 return 0;
3020 }
3021 }
3022
3023 #if CONFIG_TEMPORAL_DENOISING
3024 static void process_denoiser_mode_change(VP8_COMP *cpi) {
3025 const VP8_COMMON *const cm = &cpi->common;
3026 int i, j;
3027 int total = 0;
3028 int num_blocks = 0;
3029 // Number of blocks skipped along row/column in computing the
3030 // nmse (normalized mean square error) of source.
3031 int skip = 2;
3032 // Only select blocks for computing nmse that have been encoded
3033 // as ZERO LAST min_consec_zero_last frames in a row.
3034 // Scale with number of temporal layers.
3035 int min_consec_zero_last = 12 / cpi->oxcf.number_of_layers;
3036 // Decision is tested for changing the denoising mode every
3037 // num_mode_change times this function is called. Note that this
3038 // function is called every 8 frames, so (8 * num_mode_change) is the number
3039 // of frames where denoising mode change is tested for switch.
3040 int num_mode_change = 20;
3041 // Framerate factor, to compensate for larger mse at lower framerates.
3042 // Use ref_framerate, which is full source framerate for temporal layers.
3043 // TODO(marpan): Adjust this factor.
3044 int fac_framerate = cpi->ref_framerate < 25.0f ? 80 : 100;
3045 int tot_num_blocks = cm->mb_rows * cm->mb_cols;
3046 int ystride = cpi->Source->y_stride;
3047 unsigned char *src = cpi->Source->y_buffer;
3048 unsigned char *dst = cpi->denoiser.yv12_last_source.y_buffer;
3049 static const unsigned char const_source[16] = { 128, 128, 128, 128, 128, 128,
3050 128, 128, 128, 128, 128, 128,
3051 128, 128, 128, 128 };
3052 int bandwidth = (int)(cpi->target_bandwidth);
3053 // For temporal layers, use full bandwidth (top layer).
3054 if (cpi->oxcf.number_of_layers > 1) {
3055 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->oxcf.number_of_layers - 1];
3056 bandwidth = (int)(lc->target_bandwidth);
3057 }
3058 // Loop through the Y plane, every skip blocks along rows and columns,
3059 // summing the normalized mean square error, only for blocks that have
3060 // been encoded as ZEROMV LAST for at least min_consec_zero_last frames in
3061 // a row and have small sum difference between current and previous frame.
3062 // Normalization here is by the contrast of the current frame block.
3063 for (i = 0; i < cm->Height; i += 16 * skip) {
3064 int block_index_row = (i >> 4) * cm->mb_cols;
3065 for (j = 0; j < cm->Width; j += 16 * skip) {
3066 int index = block_index_row + (j >> 4);
3067 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3068 unsigned int sse;
3069 const unsigned int var =
3070 vpx_variance16x16(src + j, ystride, dst + j, ystride, &sse);
3071 // Only consider this block as valid for noise measurement
3072 // if the sum_diff average of the current and previous frame
3073 // is small (to avoid effects from lighting change).
3074 if ((sse - var) < 128) {
3075 unsigned int sse2;
3076 const unsigned int act =
3077 vpx_variance16x16(src + j, ystride, const_source, 0, &sse2);
3078 if (act > 0) total += sse / act;
3079 num_blocks++;
3080 }
3081 }
3082 }
3083 src += 16 * skip * ystride;
3084 dst += 16 * skip * ystride;
3085 }
3086 total = total * fac_framerate / 100;
3087
3088 // Only consider this frame as valid sample if we have computed nmse over
3089 // at least ~1/16 blocks, and Total > 0 (Total == 0 can happen if the
3090 // application inputs duplicate frames, or contrast is all zero).
3091 if (total > 0 && (num_blocks > (tot_num_blocks >> 4))) {
3092 // Update the recursive mean square source_diff.
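    // The << 8 keeps the per-block average in a x256 fixed-point scale,
    // presumably so the recursive average below retains some fractional
    // precision in integer arithmetic.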
3093 total = (total << 8) / num_blocks;
3094 if (cpi->denoiser.nmse_source_diff_count == 0) {
3095 // First sample in new interval.
3096 cpi->denoiser.nmse_source_diff = total;
3097 cpi->denoiser.qp_avg = cm->base_qindex;
3098 } else {
3099 // For subsequent samples, use average with weight ~1/4 for new sample.
3100 cpi->denoiser.nmse_source_diff =
3101 (int)((total + 3 * cpi->denoiser.nmse_source_diff) >> 2);
3102 cpi->denoiser.qp_avg =
3103 (int)((cm->base_qindex + 3 * cpi->denoiser.qp_avg) >> 2);
3104 }
3105 cpi->denoiser.nmse_source_diff_count++;
3106 }
3107 // Check for changing the denoiser mode, when we have obtained #samples =
3108 // num_mode_change. Condition the change also on the bitrate and QP.
3109 if (cpi->denoiser.nmse_source_diff_count == num_mode_change) {
3110 // Check for going up: from normal to aggressive mode.
3111 if ((cpi->denoiser.denoiser_mode == kDenoiserOnYUV) &&
3112 (cpi->denoiser.nmse_source_diff >
3113 cpi->denoiser.threshold_aggressive_mode) &&
3114 (cpi->denoiser.qp_avg < cpi->denoiser.qp_threshold_up &&
3115 bandwidth > cpi->denoiser.bitrate_threshold)) {
3116 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUVAggressive);
3117 } else {
3118 // Check for going down: from aggressive to normal mode.
3119 if (((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3120 (cpi->denoiser.nmse_source_diff <
3121 cpi->denoiser.threshold_aggressive_mode)) ||
3122 ((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3123 (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down ||
3124 bandwidth < cpi->denoiser.bitrate_threshold))) {
3125 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3126 }
3127 }
3128 // Reset metric and counter for next interval.
3129 cpi->denoiser.nmse_source_diff = 0;
3130 cpi->denoiser.qp_avg = 0;
3131 cpi->denoiser.nmse_source_diff_count = 0;
3132 }
3133 }
3134 #endif
3135
3136 void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) {
3137 const FRAME_TYPE frame_type = cm->frame_type;
3138
3139 int update_any_ref_buffers = 1;
3140 if (cpi->common.refresh_last_frame == 0 &&
3141 cpi->common.refresh_golden_frame == 0 &&
3142 cpi->common.refresh_alt_ref_frame == 0) {
3143 update_any_ref_buffers = 0;
3144 }
3145
3146 if (cm->no_lpf) {
3147 cm->filter_level = 0;
3148 } else {
3149 struct vpx_usec_timer timer;
3150
3151 vpx_clear_system_state();
3152
3153 vpx_usec_timer_start(&timer);
3154 if (cpi->sf.auto_filter == 0) {
3155 #if CONFIG_TEMPORAL_DENOISING
3156 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3157 // Use the denoised buffer for selecting base loop filter level.
3158 // Denoised signal for current frame is stored in INTRA_FRAME.
3159 // No denoising on key frames.
3160 vp8cx_pick_filter_level_fast(
3161 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
3162 } else {
3163 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3164 }
3165 #else
3166 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3167 #endif
3168 } else {
3169 #if CONFIG_TEMPORAL_DENOISING
3170 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3171 // Use the denoised buffer for selecting base loop filter level.
3172 // Denoised signal for current frame is stored in INTRA_FRAME.
3173 // No denoising on key frames.
3174 vp8cx_pick_filter_level(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3175 cpi);
3176 } else {
3177 vp8cx_pick_filter_level(cpi->Source, cpi);
3178 }
3179 #else
3180 vp8cx_pick_filter_level(cpi->Source, cpi);
3181 #endif
3182 }
3183
3184 if (cm->filter_level > 0) {
3185 vp8cx_set_alt_lf_level(cpi, cm->filter_level);
3186 }
3187
3188 vpx_usec_timer_mark(&timer);
3189 cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
3190 }
3191
3192 #if CONFIG_MULTITHREAD
3193 if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
3194 sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
3195 }
3196 #endif
3197
3198 // No need to apply loop-filter if the encoded frame does not update
3199 // any reference buffers.
3200 if (cm->filter_level > 0 && update_any_ref_buffers) {
3201 vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
3202 }
3203
3204 vp8_yv12_extend_frame_borders(cm->frame_to_show);
3205 }
3206
3207 static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
3208 unsigned char *dest,
3209 unsigned char *dest_end,
3210 unsigned int *frame_flags) {
3211 int Q;
3212 int frame_over_shoot_limit;
3213 int frame_under_shoot_limit;
3214
3215 int Loop = 0;
3216 int loop_count;
3217
3218 VP8_COMMON *cm = &cpi->common;
3219 int active_worst_qchanged = 0;
3220
3221 #if !CONFIG_REALTIME_ONLY
3222 int q_low;
3223 int q_high;
3224 int zbin_oq_high;
3225 int zbin_oq_low = 0;
3226 int top_index;
3227 int bottom_index;
3228 int overshoot_seen = 0;
3229 int undershoot_seen = 0;
3230 #endif
3231
3232 int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
3233 cpi->oxcf.optimal_buffer_level / 100);
3234 int drop_mark75 = drop_mark * 2 / 3;
3235 int drop_mark50 = drop_mark / 4;
3236 int drop_mark25 = drop_mark / 8;
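  /* The drop thresholds above are fixed fractions of the watermark-scaled
   * optimal buffer level: drop_mark75 is 2/3 of drop_mark, drop_mark50 is 1/4
   * and drop_mark25 is 1/8; note the suffixes do not match the divisors.
   */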
3237
3238 /* Clear down mmx registers to allow floating point in what follows */
3239 vpx_clear_system_state();
3240
3241 if (cpi->force_next_frame_intra) {
3242 cm->frame_type = KEY_FRAME; /* delayed intra frame */
3243 cpi->force_next_frame_intra = 0;
3244 }
3245
3246 /* For an alt ref frame in 2 pass we skip the call to the second pass
3247 * function that sets the target bandwidth
3248 */
3249 switch (cpi->pass) {
3250 #if !CONFIG_REALTIME_ONLY
3251 case 2:
3252 if (cpi->common.refresh_alt_ref_frame) {
3253 /* Per frame bit target for the alt ref frame */
3254 cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
3255 /* per second target bitrate */
3256 cpi->target_bandwidth =
3257 (int)(cpi->twopass.gf_bits * cpi->output_framerate);
3258 }
3259 break;
3260 #endif // !CONFIG_REALTIME_ONLY
3261 default:
3262 cpi->per_frame_bandwidth =
3263 (int)(cpi->target_bandwidth / cpi->output_framerate);
3264 break;
3265 }
3266
3267 /* Default turn off buffer to buffer copying */
3268 cm->copy_buffer_to_gf = 0;
3269 cm->copy_buffer_to_arf = 0;
3270
3271 /* Clear zbin over-quant value and mode boost values. */
3272 cpi->mb.zbin_over_quant = 0;
3273 cpi->mb.zbin_mode_boost = 0;
3274
3275 /* Enable or disable mode-based tweaking of the zbin.
3276 * For two-pass encoding this is only used where GF/ARF prediction
3277 * quality is above a threshold.
3278 */
3279 cpi->mb.zbin_mode_boost_enabled = 1;
3280 if (cpi->pass == 2) {
3281 if (cpi->gfu_boost <= 400) {
3282 cpi->mb.zbin_mode_boost_enabled = 0;
3283 }
3284 }
3285
3286 /* Current default encoder behaviour for the altref sign bias */
3287 if (cpi->source_alt_ref_active) {
3288 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
3289 } else {
3290 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
3291 }
3292
3293 /* Check to see if a key frame is signaled
3294 * For two pass with auto key frame enabled cm->frame_type may already
3295 * be set, but not for one pass.
3296 */
3297 if ((cm->current_video_frame == 0) || (cm->frame_flags & FRAMEFLAGS_KEY) ||
3298 (cpi->oxcf.auto_key &&
3299 (cpi->frames_since_key % cpi->key_frame_frequency == 0))) {
3300 /* Key frame from VFW/auto-keyframe/first frame */
3301 cm->frame_type = KEY_FRAME;
3302 #if CONFIG_TEMPORAL_DENOISING
3303 if (cpi->oxcf.noise_sensitivity == 4) {
3304 // For adaptive mode, reset denoiser to normal mode on key frame.
3305 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3306 }
3307 #endif
3308 }
3309
3310 #if CONFIG_MULTI_RES_ENCODING
3311 if (cpi->oxcf.mr_total_resolutions > 1) {
3312 LOWER_RES_FRAME_INFO *low_res_frame_info =
3313 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
3314
3315 if (cpi->oxcf.mr_encoder_id) {
3316 // Check if lower resolution is available for motion vector reuse.
3317 if (cm->frame_type != KEY_FRAME) {
3318 cpi->mr_low_res_mv_avail = 1;
3319 cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
3320
3321 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
3322 cpi->mr_low_res_mv_avail &=
3323 (cpi->current_ref_frames[LAST_FRAME] ==
3324 low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
3325
3326 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
3327 cpi->mr_low_res_mv_avail &=
3328 (cpi->current_ref_frames[GOLDEN_FRAME] ==
3329 low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
3330
3331 // Don't use altref to determine whether low res is available.
3332 // TODO (marpan): Should we make this type of condition on a
3333 // per-reference frame basis?
3334 /*
3335 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
3336 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
3337 == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
3338 */
3339 }
3340 // Disable motion vector reuse (i.e., disable any usage of the low_res)
3341 // if the previous lower stream is skipped/disabled.
3342 if (low_res_frame_info->skip_encoding_prev_stream) {
3343 cpi->mr_low_res_mv_avail = 0;
3344 }
3345 }
3346 // This stream is not skipped (i.e., it's being encoded), so set this skip
3347 // flag to 0. This is needed for the next stream (i.e., the next
3348 // frame to be encoded).
3349 low_res_frame_info->skip_encoding_prev_stream = 0;
3350
3351 // On a key frame: For the lowest resolution, keep track of the key frame
3352 // counter value. For the higher resolutions, reset the current video
3353 // frame counter to that of the lowest resolution.
3354 // This is done to handle the case where we may stop/start encoding
3355 // higher layer(s). The restart-encoding of a higher layer is only signaled
3356 // by a key frame for now.
3357 // TODO (marpan): Add flag to indicate restart-encoding of higher layer.
3358 if (cm->frame_type == KEY_FRAME) {
3359 if (cpi->oxcf.mr_encoder_id) {
3360 // If the initial starting value of the buffer level is zero (this can
3361 // happen because we may not have started encoding this higher stream),
3362 // then reset it to a non-zero value based on |starting_buffer_level|.
3363 if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) {
3364 unsigned int i;
3365 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
3366 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
3367 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
3368 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3369 lc->bits_off_target = lc->starting_buffer_level;
3370 lc->buffer_level = lc->starting_buffer_level;
3371 }
3372 }
3373 cpi->common.current_video_frame =
3374 low_res_frame_info->key_frame_counter_value;
3375 } else {
3376 low_res_frame_info->key_frame_counter_value =
3377 cpi->common.current_video_frame;
3378 }
3379 }
3380 }
3381 #endif
3382
3383 // Find the reference frame closest to the current frame.
3384 cpi->closest_reference_frame = LAST_FRAME;
3385 if (cm->frame_type != KEY_FRAME) {
3386 int i;
3387 MV_REFERENCE_FRAME closest_ref = INTRA_FRAME;
3388 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
3389 closest_ref = LAST_FRAME;
3390 } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
3391 closest_ref = GOLDEN_FRAME;
3392 } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
3393 closest_ref = ALTREF_FRAME;
3394 }
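/* The reference-frame flags are a bit mask (LAST = 1, GOLDEN = 2, ALTREF = 4),
 * so map loop index 3 (ALTREF_FRAME) to bit 4 before testing the flag. */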
3395 for (i = 1; i <= 3; ++i) {
3396 vpx_ref_frame_type_t ref_frame_type =
3397 (vpx_ref_frame_type_t)((i == 3) ? 4 : i);
3398 if (cpi->ref_frame_flags & ref_frame_type) {
3399 if ((cm->current_video_frame - cpi->current_ref_frames[i]) <
3400 (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
3401 closest_ref = i;
3402 }
3403 }
3404 }
3405 cpi->closest_reference_frame = closest_ref;
3406 }
3407
3408 /* Set various flags etc to special state if it is a key frame */
3409 if (cm->frame_type == KEY_FRAME) {
3410 int i;
3411
3412 // Set the loop filter deltas and segmentation map update
3413 setup_features(cpi);
3414
3415 /* The alternate reference frame cannot be active for a key frame */
3416 cpi->source_alt_ref_active = 0;
3417
3418 /* Reset the RD threshold multipliers to the default of 1x (128) */
3419 for (i = 0; i < MAX_MODES; ++i) {
3420 cpi->mb.rd_thresh_mult[i] = 128;
3421 }
3422
3423 // Reset the zero_last counter to 0 on key frame.
3424 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3425 memset(cpi->consec_zero_last_mvbias, 0,
3426 (cpi->common.mb_rows * cpi->common.mb_cols));
3427 }
3428
3429 #if 0
3430 /* Experimental code for lagged compress and one pass
3431 * Initialise one_pass GF frames stats
3432 * Update stats used for GF selection
3433 */
3434 {
3435 cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
3436
3437 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
3438 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
3439 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
3440 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
3441 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
3442 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
3443 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
3444 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
3445 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
3446 }
3447 #endif
3448
3449 update_rd_ref_frame_probs(cpi);
3450
3451 if (cpi->drop_frames_allowed) {
3452 /* The reset to decimation 0 is only done here for one pass.
3453 * Once it is set two pass leaves decimation on till the next kf.
3454 */
3455 if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0)) {
3456 cpi->decimation_factor--;
3457 }
3458
3459 if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0) {
3460 cpi->decimation_factor = 1;
3461
3462 } else if (cpi->buffer_level < drop_mark25 &&
3463 (cpi->decimation_factor == 2 || cpi->decimation_factor == 3)) {
3464 cpi->decimation_factor = 3;
3465 } else if (cpi->buffer_level < drop_mark50 &&
3466 (cpi->decimation_factor == 1 || cpi->decimation_factor == 2)) {
3467 cpi->decimation_factor = 2;
3468 } else if (cpi->buffer_level < drop_mark75 &&
3469 (cpi->decimation_factor == 0 || cpi->decimation_factor == 1)) {
3470 cpi->decimation_factor = 1;
3471 }
3472 }
3473
3474 /* The following decimates the frame rate according to a regular
3475 * pattern (i.e. to 1/2 or 2/3 frame rate). This can be used to help
3476 * prevent buffer under-run in CBR mode. Alternatively it might be
3477 * desirable in some situations to drop frame rate but throw more bits
3478 * at each frame.
3479 *
3480 * Note that dropping a key frame can be problematic if spatial
3481 * resampling is also active
3482 */
3483 if (cpi->decimation_factor > 0) {
3484 switch (cpi->decimation_factor) {
3485 case 1:
3486 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
3487 break;
3488 case 2:
3489 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3490 break;
3491 case 3:
3492 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3493 break;
3494 }
3495
3496 /* Note that we should not throw out a key frame (especially when
3497 * spatial resampling is enabled).
3498 */
3499 if (cm->frame_type == KEY_FRAME) {
3500 cpi->decimation_count = cpi->decimation_factor;
3501 } else if (cpi->decimation_count > 0) {
3502 cpi->decimation_count--;
3503
3504 cpi->bits_off_target += cpi->av_per_frame_bandwidth;
3505 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
3506 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
3507 }
3508
3509 #if CONFIG_MULTI_RES_ENCODING
3510 vp8_store_drop_frame_info(cpi);
3511 #endif
3512
3513 cm->current_video_frame++;
3514 cpi->frames_since_key++;
3515 cpi->ext_refresh_frame_flags_pending = 0;
3516 // We advance the temporal pattern for dropped frames.
3517 cpi->temporal_pattern_counter++;
3518
3519 #if CONFIG_INTERNAL_STATS
3520 cpi->count++;
3521 #endif
3522
3523 cpi->buffer_level = cpi->bits_off_target;
3524
3525 if (cpi->oxcf.number_of_layers > 1) {
3526 unsigned int i;
3527
3528 /* Propagate bits saved by dropping the frame to higher
3529 * layers
3530 */
3531 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
3532 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3533 lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
3534 if (lc->bits_off_target > lc->maximum_buffer_size) {
3535 lc->bits_off_target = lc->maximum_buffer_size;
3536 }
3537 lc->buffer_level = lc->bits_off_target;
3538 }
3539 }
3540
3541 return;
3542 } else {
3543 cpi->decimation_count = cpi->decimation_factor;
3544 }
3545 } else {
3546 cpi->decimation_count = 0;
3547 }
3548
3549 /* Decide how big to make the frame */
3550 if (!vp8_pick_frame_size(cpi)) {
3551 /*TODO: 2 drop_frame and return code could be put together. */
3552 #if CONFIG_MULTI_RES_ENCODING
3553 vp8_store_drop_frame_info(cpi);
3554 #endif
3555 cm->current_video_frame++;
3556 cpi->frames_since_key++;
3557 cpi->ext_refresh_frame_flags_pending = 0;
3558 // We advance the temporal pattern for dropped frames.
3559 cpi->temporal_pattern_counter++;
3560 return;
3561 }
3562
3563 /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
3564 * This has a knock-on effect on active best quality as well.
3565 * For CBR if the buffer reaches its maximum level then we can no longer
3566 * save up bits for later frames so we might as well use them up
3567 * on the current frame.
3568 */
3569 if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
3570 (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) &&
3571 cpi->buffered_mode) {
3572 /* Max adjustment is 1/4 */
3573 int Adjustment = cpi->active_worst_quality / 4;
3574
3575 if (Adjustment) {
3576 int buff_lvl_step;
3577
3578 if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size) {
3579 buff_lvl_step = (int)((cpi->oxcf.maximum_buffer_size -
3580 cpi->oxcf.optimal_buffer_level) /
3581 Adjustment);
3582
3583 if (buff_lvl_step) {
3584 Adjustment =
3585 (int)((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) /
3586 buff_lvl_step);
3587 } else {
3588 Adjustment = 0;
3589 }
3590 }
3591
3592 cpi->active_worst_quality -= Adjustment;
3593
3594 if (cpi->active_worst_quality < cpi->active_best_quality) {
3595 cpi->active_worst_quality = cpi->active_best_quality;
3596 }
3597 }
3598 }
3599
3600 /* Set an active best quality and if necessary active worst quality
3601 * There is some odd behavior for one pass here that needs attention.
3602 */
3603 if ((cpi->pass == 2) || (cpi->ni_frames > 150)) {
3604 vpx_clear_system_state();
3605
3606 Q = cpi->active_worst_quality;
3607
3608 if (cm->frame_type == KEY_FRAME) {
3609 if (cpi->pass == 2) {
3610 if (cpi->gfu_boost > 600) {
3611 cpi->active_best_quality = kf_low_motion_minq[Q];
3612 } else {
3613 cpi->active_best_quality = kf_high_motion_minq[Q];
3614 }
3615
3616 /* Special case for key frames forced because we have reached
3617 * the maximum key frame interval. Here force the Q to a range
3618 * based on the ambient Q to reduce the risk of popping
3619 */
3620 if (cpi->this_key_frame_forced) {
3621 if (cpi->active_best_quality > cpi->avg_frame_qindex * 7 / 8) {
3622 cpi->active_best_quality = cpi->avg_frame_qindex * 7 / 8;
3623 } else if (cpi->active_best_quality < (cpi->avg_frame_qindex >> 2)) {
3624 cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
3625 }
3626 }
3627 }
3628 /* One pass more conservative */
3629 else {
3630 cpi->active_best_quality = kf_high_motion_minq[Q];
3631 }
3632 }
3633
3634 else if (cpi->oxcf.number_of_layers == 1 &&
3635 (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)) {
3636 /* Use the lower of cpi->active_worst_quality and recent
3637 * average Q as basis for GF/ARF Q limit unless last frame was
3638 * a key frame.
3639 */
3640 if ((cpi->frames_since_key > 1) &&
3641 (cpi->avg_frame_qindex < cpi->active_worst_quality)) {
3642 Q = cpi->avg_frame_qindex;
3643 }
3644
3645 /* For constrained quality don't allow Q less than the cq level */
3646 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3647 (Q < cpi->cq_target_quality)) {
3648 Q = cpi->cq_target_quality;
3649 }
3650
3651 if (cpi->pass == 2) {
3652 if (cpi->gfu_boost > 1000) {
3653 cpi->active_best_quality = gf_low_motion_minq[Q];
3654 } else if (cpi->gfu_boost < 400) {
3655 cpi->active_best_quality = gf_high_motion_minq[Q];
3656 } else {
3657 cpi->active_best_quality = gf_mid_motion_minq[Q];
3658 }
3659
3660 /* Constrained quality uses a slightly lower active best. */
3661 if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3662 cpi->active_best_quality = cpi->active_best_quality * 15 / 16;
3663 }
3664 }
3665 /* One pass more conservative */
3666 else {
3667 cpi->active_best_quality = gf_high_motion_minq[Q];
3668 }
3669 } else {
3670 cpi->active_best_quality = inter_minq[Q];
3671
3672 /* For the constant/constrained quality mode we don't want
3673 * q to fall below the cq level.
3674 */
3675 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3676 (cpi->active_best_quality < cpi->cq_target_quality)) {
3677 /* If we are strongly undershooting the target rate in the last
3678 * frames then use the user passed in cq value not the auto
3679 * cq value.
3680 */
3681 if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth) {
3682 cpi->active_best_quality = cpi->oxcf.cq_level;
3683 } else {
3684 cpi->active_best_quality = cpi->cq_target_quality;
3685 }
3686 }
3687 }
3688
3689 /* If CBR and the buffer is full then it is reasonable to allow
3690 * higher quality on the frames to prevent bits just going to waste.
3691 */
3692 if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
3693 /* Note that the use of >= here eliminates the risk of a divide
3694 * by 0 error in the else if clause
3695 */
3696 if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) {
3697 cpi->active_best_quality = cpi->best_quality;
3698
3699 } else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level) {
3700 int Fraction =
3701 (int)(((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) /
3702 (cpi->oxcf.maximum_buffer_size -
3703 cpi->oxcf.optimal_buffer_level));
3704 int min_qadjustment =
3705 ((cpi->active_best_quality - cpi->best_quality) * Fraction) / 128;
3706
3707 cpi->active_best_quality -= min_qadjustment;
3708 }
3709 }
3710 }
3711 /* Make sure constrained quality mode limits are adhered to for the first
3712 * few frames of one pass encodes
3713 */
3714 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3715 if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
3716 cpi->common.refresh_alt_ref_frame) {
3717 cpi->active_best_quality = cpi->best_quality;
3718 } else if (cpi->active_best_quality < cpi->cq_target_quality) {
3719 cpi->active_best_quality = cpi->cq_target_quality;
3720 }
3721 }
3722
3723 /* Clip the active best and worst quality values to limits */
3724 if (cpi->active_worst_quality > cpi->worst_quality) {
3725 cpi->active_worst_quality = cpi->worst_quality;
3726 }
3727
3728 if (cpi->active_best_quality < cpi->best_quality) {
3729 cpi->active_best_quality = cpi->best_quality;
3730 }
3731
3732 if (cpi->active_worst_quality < cpi->active_best_quality) {
3733 cpi->active_worst_quality = cpi->active_best_quality;
3734 }
3735
3736 /* Determine initial Q to try */
3737 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3738
3739 #if !CONFIG_REALTIME_ONLY
3740
3741 /* Set highest allowed value for Zbin over quant */
3742 if (cm->frame_type == KEY_FRAME) {
3743 zbin_oq_high = 0;
3744 } else if ((cpi->oxcf.number_of_layers == 1) &&
3745 ((cm->refresh_alt_ref_frame ||
3746 (cm->refresh_golden_frame && !cpi->source_alt_ref_active)))) {
3747 zbin_oq_high = 16;
3748 } else {
3749 zbin_oq_high = ZBIN_OQ_MAX;
3750 }
3751 #endif
3752
3753 compute_skin_map(cpi);
3754
3755 /* Setup background Q adjustment for error resilient mode.
3756 * For multi-layer encodes only enable this for the base layer.
3757 */
3758 if (cpi->cyclic_refresh_mode_enabled) {
3759 // Special case for screen_content_mode with golden frame updates.
3760 int disable_cr_gf =
3761 (cpi->oxcf.screen_content_mode == 2 && cm->refresh_golden_frame);
3762 if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf) {
3763 cyclic_background_refresh(cpi, Q, 0);
3764 } else {
3765 disable_segmentation(cpi);
3766 }
3767 }
3768
3769 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3770 &frame_over_shoot_limit);
3771
3772 #if !CONFIG_REALTIME_ONLY
3773 /* Limit Q range for the adaptive loop. */
3774 bottom_index = cpi->active_best_quality;
3775 top_index = cpi->active_worst_quality;
3776 q_low = cpi->active_best_quality;
3777 q_high = cpi->active_worst_quality;
3778 #endif
3779
3780 vp8_save_coding_context(cpi);
3781
3782 loop_count = 0;
3783
3784 scale_and_extend_source(cpi->un_scaled_source, cpi);
3785
3786 #if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC
3787 // Option to apply spatial blur under the aggressive or adaptive
3788 // (temporal denoising) mode.
3789 if (cpi->oxcf.noise_sensitivity >= 3) {
3790 if (cpi->denoiser.denoise_pars.spatial_blur != 0) {
3791 vp8_de_noise(cm, cpi->Source, cpi->denoiser.denoise_pars.spatial_blur, 1);
3792 }
3793 }
3794 #endif
3795
3796 #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)
3797
3798 if (cpi->oxcf.noise_sensitivity > 0) {
3799 unsigned char *src;
3800 int l = 0;
3801
3802 switch (cpi->oxcf.noise_sensitivity) {
3803 case 1: l = 20; break;
3804 case 2: l = 40; break;
3805 case 3: l = 60; break;
3806 case 4: l = 80; break;
3807 case 5: l = 100; break;
3808 case 6: l = 150; break;
3809 }
3810
3811 if (cm->frame_type == KEY_FRAME) {
3812 vp8_de_noise(cm, cpi->Source, l, 1);
3813 } else {
3814 vp8_de_noise(cm, cpi->Source, l, 1);
3815
3816 src = cpi->Source->y_buffer;
3817
3818 if (cpi->Source->y_stride < 0) {
3819 src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
3820 }
3821 }
3822 }
3823
3824 #endif
3825
3826 #ifdef OUTPUT_YUV_SRC
3827 vpx_write_yuv_frame(yuv_file, cpi->Source);
3828 #endif
3829
3830 do {
3831 vpx_clear_system_state();
3832
3833 vp8_set_quantizer(cpi, Q);
3834
3835 /* setup skip prob for costing in mode/mv decision */
3836 if (cpi->common.mb_no_coeff_skip) {
3837 cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
3838
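/* last_skip_false_probs[] caches the skip probability by reference update
 * type: [0] normal inter frames, [1] golden updates, [2] altref updates
 * (the values are stored after the frame is coded, further below). */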
3839 if (cm->frame_type != KEY_FRAME) {
3840 if (cpi->common.refresh_alt_ref_frame) {
3841 if (cpi->last_skip_false_probs[2] != 0) {
3842 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3843 }
3844
3845 /*
3846 if(cpi->last_skip_false_probs[2]!=0 && abs(Q-
3847 cpi->last_skip_probs_q[2])<=16 )
3848 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3849 else if (cpi->last_skip_false_probs[2]!=0)
3850 cpi->prob_skip_false = (cpi->last_skip_false_probs[2] +
3851 cpi->prob_skip_false ) / 2;
3852 */
3853 } else if (cpi->common.refresh_golden_frame) {
3854 if (cpi->last_skip_false_probs[1] != 0) {
3855 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3856 }
3857
3858 /*
3859 if(cpi->last_skip_false_probs[1]!=0 && abs(Q-
3860 cpi->last_skip_probs_q[1])<=16 )
3861 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3862 else if (cpi->last_skip_false_probs[1]!=0)
3863 cpi->prob_skip_false = (cpi->last_skip_false_probs[1] +
3864 cpi->prob_skip_false ) / 2;
3865 */
3866 } else {
3867 if (cpi->last_skip_false_probs[0] != 0) {
3868 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3869 }
3870
3871 /*
3872 if(cpi->last_skip_false_probs[0]!=0 && abs(Q-
3873 cpi->last_skip_probs_q[0])<=16 )
3874 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3875 else if(cpi->last_skip_false_probs[0]!=0)
3876 cpi->prob_skip_false = (cpi->last_skip_false_probs[0] +
3877 cpi->prob_skip_false ) / 2;
3878 */
3879 }
3880
3881 /* as this is for a cost estimate, let's make sure it does not
3882 * go to an extreme either way
3883 */
3884 if (cpi->prob_skip_false < 5) cpi->prob_skip_false = 5;
3885
3886 if (cpi->prob_skip_false > 250) cpi->prob_skip_false = 250;
3887
3888 if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref) {
3889 cpi->prob_skip_false = 1;
3890 }
3891 }
3892
3893 #if 0
3894
3895 if (cpi->pass != 1)
3896 {
3897 FILE *f = fopen("skip.stt", "a");
3898 fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
3899 fclose(f);
3900 }
3901
3902 #endif
3903 }
3904
3905 if (cm->frame_type == KEY_FRAME) {
3906 if (resize_key_frame(cpi)) {
3907 /* If the frame size has changed, need to reset Q, quantizer,
3908 * and background refresh.
3909 */
3910 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3911 if (cpi->cyclic_refresh_mode_enabled) {
3912 if (cpi->current_layer == 0) {
3913 cyclic_background_refresh(cpi, Q, 0);
3914 } else {
3915 disable_segmentation(cpi);
3916 }
3917 }
3918 // Reset the zero_last counter to 0 on key frame.
3919 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3920 memset(cpi->consec_zero_last_mvbias, 0,
3921 (cpi->common.mb_rows * cpi->common.mb_cols));
3922 vp8_set_quantizer(cpi, Q);
3923 }
3924
3925 vp8_setup_key_frame(cpi);
3926 }
3927
3928 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
3929 {
3930 if (cpi->oxcf.error_resilient_mode) cm->refresh_entropy_probs = 0;
3931
3932 if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
3933 if (cm->frame_type == KEY_FRAME) cm->refresh_entropy_probs = 1;
3934 }
3935
3936 if (cm->refresh_entropy_probs == 0) {
3937 /* save a copy for later refresh */
3938 memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
3939 }
3940
3941 vp8_update_coef_context(cpi);
3942
3943 vp8_update_coef_probs(cpi);
3944
3945 /* transform / motion compensation build reconstruction frame
3946 * +pack coef partitions
3947 */
3948 vp8_encode_frame(cpi);
3949
3950 /* cpi->projected_frame_size is not needed for RT mode */
3951 }
3952 #else
3953 /* transform / motion compensation build reconstruction frame */
3954 vp8_encode_frame(cpi);
3955
3956 if (cpi->pass == 0 && cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
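/* If the rate-control helper flags excessive overshoot for this 1-pass CBR
 * frame, the frame is abandoned here before any bitstream is packed. */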
3957 if (vp8_drop_encodedframe_overshoot(cpi, Q)) {
3958 vpx_clear_system_state();
3959 return;
3960 }
3961 if (cm->frame_type != KEY_FRAME)
3962 cpi->last_pred_err_mb =
3963 (int)(cpi->mb.prediction_error / cpi->common.MBs);
3964 }
3965
3966 cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
3967 cpi->projected_frame_size =
3968 (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
3969 #endif
3970 vpx_clear_system_state();
3971
3972 /* Test to see if the stats generated for this frame indicate that
3973 * we should have coded a key frame (assuming that we didn't)!
3974 */
3975
3976 if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME &&
3977 cpi->compressor_speed != 2) {
3978 #if !CONFIG_REALTIME_ONLY
3979 if (decide_key_frame(cpi)) {
3980 /* Reset all our sizing numbers and recode */
3981 cm->frame_type = KEY_FRAME;
3982
3983 vp8_pick_frame_size(cpi);
3984
3985 /* Clear the Alt reference frame active flag when we have
3986 * a key frame
3987 */
3988 cpi->source_alt_ref_active = 0;
3989
3990 // Set the loop filter deltas and segmentation map update
3991 setup_features(cpi);
3992
3993 vp8_restore_coding_context(cpi);
3994
3995 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3996
3997 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3998 &frame_over_shoot_limit);
3999
4000 /* Limit Q range for the adaptive loop. */
4001 bottom_index = cpi->active_best_quality;
4002 top_index = cpi->active_worst_quality;
4003 q_low = cpi->active_best_quality;
4004 q_high = cpi->active_worst_quality;
4005
4006 loop_count++;
4007 Loop = 1;
4008
4009 continue;
4010 }
4011 #endif
4012 }
4013
4014 vpx_clear_system_state();
4015
4016 if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
4017
4018 /* Are we overshooting and up against the limit of active max Q? */
4019 if (((cpi->pass != 2) ||
4020 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
4021 (Q == cpi->active_worst_quality) &&
4022 (cpi->active_worst_quality < cpi->worst_quality) &&
4023 (cpi->projected_frame_size > frame_over_shoot_limit)) {
4024 int over_size_percent =
4025 ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) /
4026 frame_over_shoot_limit;
4027
4028 /* If so, is there any scope for relaxing it? */
4029 while ((cpi->active_worst_quality < cpi->worst_quality) &&
4030 (over_size_percent > 0)) {
4031 cpi->active_worst_quality++;
4032 /* Assume 1 qstep = about 4% on frame size. */
4033 over_size_percent = (int)(over_size_percent * 0.96);
4034 }
4035 #if !CONFIG_REALTIME_ONLY
4036 top_index = cpi->active_worst_quality;
4037 #endif // !CONFIG_REALTIME_ONLY
4038 /* If we have updated the active max Q do not call
4039 * vp8_update_rate_correction_factors() this loop.
4040 */
4041 active_worst_qchanged = 1;
4042 } else {
4043 active_worst_qchanged = 0;
4044 }
4045
4046 #if CONFIG_REALTIME_ONLY
4047 Loop = 0;
4048 #else
4049 /* Special case handling for forced key frames */
4050 if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
4051 int last_q = Q;
4052 int kf_err = vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
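/* Bisect Q between q_low and q_high so the forced key frame's reconstruction
 * error lands close to the ambient error of the surrounding frames, which
 * reduces visible pulsing. */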
4053
4054 /* The key frame is not good enough */
4055 if (kf_err > ((cpi->ambient_err * 7) >> 3)) {
4056 /* Lower q_high */
4057 q_high = (Q > q_low) ? (Q - 1) : q_low;
4058
4059 /* Adjust Q */
4060 Q = (q_high + q_low) >> 1;
4061 }
4062 /* The key frame is much better than the previous frame */
4063 else if (kf_err < (cpi->ambient_err >> 1)) {
4064 /* Raise q_low */
4065 q_low = (Q < q_high) ? (Q + 1) : q_high;
4066
4067 /* Adjust Q */
4068 Q = (q_high + q_low + 1) >> 1;
4069 }
4070
4071 /* Clamp Q to upper and lower limits: */
4072 if (Q > q_high) {
4073 Q = q_high;
4074 } else if (Q < q_low) {
4075 Q = q_low;
4076 }
4077
4078 Loop = Q != last_q;
4079 }
4080
4081 /* Is the projected frame size out of range and are we allowed
4082 * to attempt to recode?
4083 */
4084 else if (recode_loop_test(cpi, frame_over_shoot_limit,
4085 frame_under_shoot_limit, Q, top_index,
4086 bottom_index)) {
4087 int last_q = Q;
4088 int Retries = 0;
4089
4090 /* Frame size out of permitted range. Update correction factor
4091 * & compute new Q to try...
4092 */
4093
4094 /* Frame is too large */
4095 if (cpi->projected_frame_size > cpi->this_frame_target) {
4096 /* Raise q_low to at least the current value */
4097 q_low = (Q < q_high) ? (Q + 1) : q_high;
4098
4099 /* If we are using over quant do the same for zbin_oq_low */
4100 if (cpi->mb.zbin_over_quant > 0) {
4101 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4102 ? (cpi->mb.zbin_over_quant + 1)
4103 : zbin_oq_high;
4104 }
4105
4106 if (undershoot_seen) {
4107 /* Update rate_correction_factor unless
4108 * cpi->active_worst_quality has changed.
4109 */
4110 if (!active_worst_qchanged) {
4111 vp8_update_rate_correction_factors(cpi, 1);
4112 }
4113
4114 Q = (q_high + q_low + 1) / 2;
4115
4116 /* Adjust cpi->zbin_over_quant (only allowed when Q
4117 * is max)
4118 */
4119 if (Q < MAXQ) {
4120 cpi->mb.zbin_over_quant = 0;
4121 } else {
4122 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4123 ? (cpi->mb.zbin_over_quant + 1)
4124 : zbin_oq_high;
4125 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4126 }
4127 } else {
4128 /* Update rate_correction_factor unless
4129 * cpi->active_worst_quality has changed.
4130 */
4131 if (!active_worst_qchanged) {
4132 vp8_update_rate_correction_factors(cpi, 0);
4133 }
4134
4135 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4136
4137 while (((Q < q_low) || (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
4138 (Retries < 10)) {
4139 vp8_update_rate_correction_factors(cpi, 0);
4140 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4141 Retries++;
4142 }
4143 }
4144
4145 overshoot_seen = 1;
4146 }
4147 /* Frame is too small */
4148 else {
4149 if (cpi->mb.zbin_over_quant == 0) {
4150 /* Lower q_high if not using over quant */
4151 q_high = (Q > q_low) ? (Q - 1) : q_low;
4152 } else {
4153 /* else lower zbin_oq_high */
4154 zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low)
4155 ? (cpi->mb.zbin_over_quant - 1)
4156 : zbin_oq_low;
4157 }
4158
4159 if (overshoot_seen) {
4160 /* Update rate_correction_factor unless
4161 * cpi->active_worst_quality has changed.
4162 */
4163 if (!active_worst_qchanged) {
4164 vp8_update_rate_correction_factors(cpi, 1);
4165 }
4166
4167 Q = (q_high + q_low) / 2;
4168
4169 /* Adjust cpi->zbin_over_quant (only allowed when Q
4170 * is max)
4171 */
4172 if (Q < MAXQ) {
4173 cpi->mb.zbin_over_quant = 0;
4174 } else {
4175 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4176 }
4177 } else {
4178 /* Update rate_correction_factor unless
4179 * cpi->active_worst_quality has changed.
4180 */
4181 if (!active_worst_qchanged) {
4182 vp8_update_rate_correction_factors(cpi, 0);
4183 }
4184
4185 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4186
4187 /* Special case reset for qlow for constrained quality.
4188 * This should only trigger where there is very substantial
4189 * undershoot on a frame and the auto cq level is above
4190 * the value passed in by the user.
4191 */
4192 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4193 (Q < q_low)) {
4194 q_low = Q;
4195 }
4196
4197 while (((Q > q_high) || (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
4198 (Retries < 10)) {
4199 vp8_update_rate_correction_factors(cpi, 0);
4200 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4201 Retries++;
4202 }
4203 }
4204
4205 undershoot_seen = 1;
4206 }
4207
4208 /* Clamp Q to upper and lower limits: */
4209 if (Q > q_high) {
4210 Q = q_high;
4211 } else if (Q < q_low) {
4212 Q = q_low;
4213 }
4214
4215 /* Clamp cpi->zbin_over_quant */
4216 cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low)
4217 ? zbin_oq_low
4218 : (cpi->mb.zbin_over_quant > zbin_oq_high)
4219 ? zbin_oq_high
4220 : cpi->mb.zbin_over_quant;
4221
4222 Loop = Q != last_q;
4223 } else {
4224 Loop = 0;
4225 }
4226 #endif // CONFIG_REALTIME_ONLY
4227
4228 if (cpi->is_src_frame_alt_ref) Loop = 0;
4229
4230 if (Loop == 1) {
4231 vp8_restore_coding_context(cpi);
4232 loop_count++;
4233 #if CONFIG_INTERNAL_STATS
4234 cpi->tot_recode_hits++;
4235 #endif
4236 }
4237 } while (Loop == 1);
4238
4239 #if defined(DROP_UNCODED_FRAMES)
4240 /* if there are no coded macroblocks at all drop this frame */
4241 if (cpi->common.MBs == cpi->mb.skip_true_count &&
4242 (cpi->drop_frame_count & 7) != 7 && cm->frame_type != KEY_FRAME) {
4243 cpi->common.current_video_frame++;
4244 cpi->frames_since_key++;
4245 cpi->drop_frame_count++;
4246 cpi->ext_refresh_frame_flags_pending = 0;
4247 // We advance the temporal pattern for dropped frames.
4248 cpi->temporal_pattern_counter++;
4249 return;
4250 }
4251 cpi->drop_frame_count = 0;
4252 #endif
4253
4254 #if 0
4255 /* Experimental code for lagged and one pass
4256 * Update stats used for one pass GF selection
4257 */
4258 {
4259 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
4260 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
4261 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
4262 }
4263 #endif
4264
4265 /* Special case code to reduce pulsing when key frames are forced at a
4266 * fixed interval. Note the reconstruction error if it is the frame before
4267 * the forced key frame
4268 */
4269 if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
4270 cpi->ambient_err =
4271 vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4272 }
4273
4274 /* This frame's MVs are saved and will be used in next frame's MV predictor.
4275 * Last frame has one more line(add to bottom) and one more column(add to
4276 * right) than cm->mip. The edge elements are initialized to 0.
4277 */
4278 #if CONFIG_MULTI_RES_ENCODING
4279 if (!cpi->oxcf.mr_encoder_id && cm->show_frame)
4280 #else
4281 if (cm->show_frame) /* do not save for altref frame */
4282 #endif
4283 {
4284 int mb_row;
4285 int mb_col;
4286 /* Point to beginning of allocated MODE_INFO arrays. */
4287 MODE_INFO *tmp = cm->mip;
4288
4289 if (cm->frame_type != KEY_FRAME) {
4290 for (mb_row = 0; mb_row < cm->mb_rows + 1; ++mb_row) {
4291 for (mb_col = 0; mb_col < cm->mb_cols + 1; ++mb_col) {
4292 if (tmp->mbmi.ref_frame != INTRA_FRAME) {
4293 cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int =
4294 tmp->mbmi.mv.as_int;
4295 }
4296
4297 cpi->lf_ref_frame_sign_bias[mb_col +
4298 mb_row * (cm->mode_info_stride + 1)] =
4299 cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
4300 cpi->lf_ref_frame[mb_col + mb_row * (cm->mode_info_stride + 1)] =
4301 tmp->mbmi.ref_frame;
4302 tmp++;
4303 }
4304 }
4305 }
4306 }
4307
4308 /* Count last ref frame 0,0 usage on current encoded frame. */
4309 {
4310 int mb_row;
4311 int mb_col;
4312 /* Point to beginning of MODE_INFO arrays. */
4313 MODE_INFO *tmp = cm->mi;
4314
4315 cpi->zeromv_count = 0;
4316
4317 if (cm->frame_type != KEY_FRAME) {
4318 for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
4319 for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
4320 if (tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME) {
4321 cpi->zeromv_count++;
4322 }
4323 tmp++;
4324 }
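/* Skip the extra border column at the end of each mode_info row
 * (cm->mode_info_stride is mb_cols + 1). */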
4325 tmp++;
4326 }
4327 }
4328 }
4329
4330 #if CONFIG_MULTI_RES_ENCODING
4331 vp8_cal_dissimilarity(cpi);
4332 #endif
4333
4334 /* Update the GF usage maps.
4335 * This is done after completing the compression of a frame when all
4336 * modes etc. are finalized but before loop filter
4337 */
4338 if (cpi->oxcf.number_of_layers == 1) {
4339 vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
4340 }
4341
4342 if (cm->frame_type == KEY_FRAME) cm->refresh_last_frame = 1;
4343
4344 #if 0
4345 {
4346 FILE *f = fopen("gfactive.stt", "a");
4347 fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
4348 fclose(f);
4349 }
4350 #endif
4351
4352 /* For inter frames the current default behavior is that when
4353 * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
4354 * This is purely an encoder decision at present.
4355 * Avoid this behavior when refresh flags are set by the user.
4356 */
4357 if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame &&
4358 !cpi->ext_refresh_frame_flags_pending) {
4359 cm->copy_buffer_to_arf = 2;
4360 } else {
4361 cm->copy_buffer_to_arf = 0;
4362 }
4363
4364 cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
4365
4366 #if CONFIG_TEMPORAL_DENOISING
4367 // Get some measure of the amount of noise, by measuring the (partial) mse
4368 // between source and denoised buffer, for y channel. Partial refers to
4369 // computing the sse for a sub-sample of the frame (i.e., skip x blocks along
4370 // row/column),
4371 // and only for blocks in that set that are consecutive ZEROMV_LAST mode.
4372 // Do this every ~8 frames, to further reduce complexity.
4373 // TODO(marpan): Keep this for now for the case cpi->oxcf.noise_sensitivity <
4374 // 4,
4375 // should be removed in favor of the process_denoiser_mode_change() function
4376 // below.
4377 if (cpi->oxcf.noise_sensitivity > 0 && cpi->oxcf.noise_sensitivity < 4 &&
4378 !cpi->oxcf.screen_content_mode && cpi->frames_since_key % 8 == 0 &&
4379 cm->frame_type != KEY_FRAME) {
4380 cpi->mse_source_denoised = measure_square_diff_partial(
4381 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi);
4382 }
4383
4384 // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse
4385 // of source diff (between current and previous frame), and determine if we
4386 // should switch the denoiser mode. Sampling refers to computing the mse for
4387 // a sub-sample of the frame (i.e., skip x blocks along row/column), and
4388 // only for blocks in that set that have used ZEROMV LAST, along with some
4389 // constraint on the sum diff between blocks. This process is called every
4390 // ~8 frames, to further reduce complexity.
4391 if (cpi->oxcf.noise_sensitivity == 4 && !cpi->oxcf.screen_content_mode &&
4392 cpi->frames_since_key % 8 == 0 && cm->frame_type != KEY_FRAME) {
4393 process_denoiser_mode_change(cpi);
4394 }
4395 #endif
4396
4397 #ifdef OUTPUT_YUV_SKINMAP
4398 if (cpi->common.current_video_frame > 1) {
4399 vp8_compute_skin_map(cpi, yuv_skinmap_file);
4400 }
4401 #endif
4402
4403 #if CONFIG_MULTITHREAD
4404 if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
4405 /* start loopfilter in separate thread */
4406 sem_post(&cpi->h_event_start_lpf);
4407 cpi->b_lpf_running = 1;
4408 /* wait for the filter_level to be picked so that we can continue with
4409 * stream packing */
4410 sem_wait(&cpi->h_event_end_lpf);
4411 } else
4412 #endif
4413 {
4414 vp8_loopfilter_frame(cpi, cm);
4415 }
4416
4417 update_reference_frames(cpi);
4418
4419 #ifdef OUTPUT_YUV_DENOISED
4420 vpx_write_yuv_frame(yuv_denoised_file,
4421 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
4422 #endif
4423
4424 #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
4425 if (cpi->oxcf.error_resilient_mode) {
4426 cm->refresh_entropy_probs = 0;
4427 }
4428 #endif
4429
4430 /* build the bitstream */
4431 vp8_pack_bitstream(cpi, dest, dest_end, size);
4432
4433 /* Move storing frame_type out of the above loop since it is also
4434 * needed in motion search besides loopfilter */
4435 cm->last_frame_type = cm->frame_type;
4436
4437 /* Update rate control heuristics */
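/* (*size) is the packed frame size in bytes; projected_frame_size is tracked
 * in bits, hence the shift by 3. */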
4438 cpi->total_byte_count += (*size);
4439 cpi->projected_frame_size = (int)(*size) << 3;
4440
4441 if (cpi->oxcf.number_of_layers > 1) {
4442 unsigned int i;
4443 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4444 cpi->layer_context[i].total_byte_count += (*size);
4445 }
4446 }
4447
4448 if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 2);
4449
4450 cpi->last_q[cm->frame_type] = cm->base_qindex;
4451
4452 if (cm->frame_type == KEY_FRAME) {
4453 vp8_adjust_key_frame_context(cpi);
4454 }
4455
4456 /* Keep a record of ambient average Q. */
4457 if (cm->frame_type != KEY_FRAME) {
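/* Exponential moving average: 3/4 previous average + 1/4 current base Q,
 * with the +2 providing rounding before the shift. */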
4458 cpi->avg_frame_qindex =
4459 (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
4460 }
4461
4462 /* Keep a record from which we can calculate the average Q excluding
4463 * GF updates and key frames
4464 */
4465 if ((cm->frame_type != KEY_FRAME) &&
4466 ((cpi->oxcf.number_of_layers > 1) ||
4467 (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame))) {
4468 cpi->ni_frames++;
4469
4470 /* Calculate the average Q for normal inter frames (not key or GFU
4471 * frames).
4472 */
4473 if (cpi->pass == 2) {
4474 cpi->ni_tot_qi += Q;
4475 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4476 } else {
4477 /* Damp value for first few frames */
4478 if (cpi->ni_frames > 150) {
4479 cpi->ni_tot_qi += Q;
4480 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4481 }
4482 /* For one pass, early in the clip ... average the current frame Q
4483 * value with the worstq entered by the user as a dampening measure
4484 */
4485 else {
4486 cpi->ni_tot_qi += Q;
4487 cpi->ni_av_qi =
4488 ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
4489 }
4490
4491 /* If the average Q is higher than what was used in the last
4492 * frame (after going through the recode loop to keep the frame
4493 * size within range) then use the last frame value - 1. The -1
4494 * is designed to stop Q, and hence the data rate, from
4495 * progressively falling away during difficult sections, but at
4496 * the same time reduce the number of iterations around the
4497 * recode loop.
4498 */
4499 if (Q > cpi->ni_av_qi) cpi->ni_av_qi = Q - 1;
4500 }
4501 }
4502
4503 /* Update the buffer level variable. */
4504 /* Non-viewable frames are a special case and are treated as pure overhead. */
4505 if (!cm->show_frame) {
4506 cpi->bits_off_target -= cpi->projected_frame_size;
4507 } else {
4508 cpi->bits_off_target +=
4509 cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
4510 }
4511
4512 /* Clip the buffer level to the maximum specified buffer size */
4513 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
4514 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
4515 }
4516
4517 // If the frame dropper is not enabled, don't let the buffer level go below
4518 // some threshold, given here by -|maximum_buffer_size|. For now we only do
4519 // this for screen content input.
4520 if (cpi->drop_frames_allowed == 0 && cpi->oxcf.screen_content_mode &&
4521 cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size) {
4522 cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size;
4523 }
4524
4525 /* Rolling monitors of whether we are over or underspending used to
4526 * help regulate min and Max Q in two pass.
4527 */
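/* The short rolling monitors blend 3/4 previous + 1/4 current; the long
 * rolling monitors blend 31/32 previous + 1/32 current. */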
4528 cpi->rolling_target_bits = (int)ROUND64_POWER_OF_TWO(
4529 (int64_t)cpi->rolling_target_bits * 3 + cpi->this_frame_target, 2);
4530 cpi->rolling_actual_bits = (int)ROUND64_POWER_OF_TWO(
4531 (int64_t)cpi->rolling_actual_bits * 3 + cpi->projected_frame_size, 2);
4532 cpi->long_rolling_target_bits = (int)ROUND64_POWER_OF_TWO(
4533 (int64_t)cpi->long_rolling_target_bits * 31 + cpi->this_frame_target, 5);
4534 cpi->long_rolling_actual_bits = (int)ROUND64_POWER_OF_TWO(
4535 (int64_t)cpi->long_rolling_actual_bits * 31 + cpi->projected_frame_size,
4536 5);
4537
4538 /* Actual bits spent */
4539 cpi->total_actual_bits += cpi->projected_frame_size;
4540
4541 #if 0 && CONFIG_INTERNAL_STATS
4542 /* Debug stats */
4543 cpi->total_target_vs_actual +=
4544 (cpi->this_frame_target - cpi->projected_frame_size);
4545 #endif
4546
4547 cpi->buffer_level = cpi->bits_off_target;
4548
4549 /* Propagate values to higher temporal layers */
4550 if (cpi->oxcf.number_of_layers > 1) {
4551 unsigned int i;
4552
4553 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4554 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4555 int bits_off_for_this_layer = (int)(lc->target_bandwidth / lc->framerate -
4556 cpi->projected_frame_size);
4557
4558 lc->bits_off_target += bits_off_for_this_layer;
4559
4560 /* Clip buffer level to maximum buffer size for the layer */
4561 if (lc->bits_off_target > lc->maximum_buffer_size) {
4562 lc->bits_off_target = lc->maximum_buffer_size;
4563 }
4564
4565 lc->total_actual_bits += cpi->projected_frame_size;
4566 lc->total_target_vs_actual += bits_off_for_this_layer;
4567 lc->buffer_level = lc->bits_off_target;
4568 }
4569 }
4570
4571 /* Update bits left to the kf and gf groups to account for overshoot
4572 * or undershoot on these frames
4573 */
4574 if (cm->frame_type == KEY_FRAME) {
4575 cpi->twopass.kf_group_bits +=
4576 cpi->this_frame_target - cpi->projected_frame_size;
4577
4578 if (cpi->twopass.kf_group_bits < 0) cpi->twopass.kf_group_bits = 0;
4579 } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) {
4580 cpi->twopass.gf_group_bits +=
4581 cpi->this_frame_target - cpi->projected_frame_size;
4582
4583 if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0;
4584 }
4585
4586 if (cm->frame_type != KEY_FRAME) {
4587 if (cpi->common.refresh_alt_ref_frame) {
4588 cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
4589 cpi->last_skip_probs_q[2] = cm->base_qindex;
4590 } else if (cpi->common.refresh_golden_frame) {
4591 cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
4592 cpi->last_skip_probs_q[1] = cm->base_qindex;
4593 } else {
4594 cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
4595 cpi->last_skip_probs_q[0] = cm->base_qindex;
4596
4597 /* update the baseline */
4598 cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
4599 }
4600 }
4601
4602 #if 0 && CONFIG_INTERNAL_STATS
4603 {
4604 FILE *f = fopen("tmp.stt", "a");
4605
4606 vpx_clear_system_state();
4607
4608 if (cpi->twopass.total_left_stats.coded_error != 0.0)
4609 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4610 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4611 "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
4612 cpi->common.current_video_frame, cpi->this_frame_target,
4613 cpi->projected_frame_size,
4614 (cpi->projected_frame_size - cpi->this_frame_target),
4615 cpi->total_target_vs_actual,
4616 cpi->buffer_level,
4617 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4618 cpi->total_actual_bits, cm->base_qindex,
4619 cpi->active_best_quality, cpi->active_worst_quality,
4620 cpi->ni_av_qi, cpi->cq_target_quality,
4621 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4622 cm->frame_type, cpi->gfu_boost,
4623 cpi->twopass.est_max_qcorrection_factor,
4624 cpi->twopass.bits_left,
4625 cpi->twopass.total_left_stats.coded_error,
4626 (double)cpi->twopass.bits_left /
4627 cpi->twopass.total_left_stats.coded_error,
4628 cpi->tot_recode_hits);
4629 else
4630 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4631 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4632 "%8.2lf %"PRId64" %10.3lf %8d\n",
4633 cpi->common.current_video_frame, cpi->this_frame_target,
4634 cpi->projected_frame_size,
4635 (cpi->projected_frame_size - cpi->this_frame_target),
4636 cpi->total_target_vs_actual,
4637 cpi->buffer_level,
4638 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4639 cpi->total_actual_bits, cm->base_qindex,
4640 cpi->active_best_quality, cpi->active_worst_quality,
4641 cpi->ni_av_qi, cpi->cq_target_quality,
4642 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4643 cm->frame_type, cpi->gfu_boost,
4644 cpi->twopass.est_max_qcorrection_factor,
4645 cpi->twopass.bits_left,
4646 cpi->twopass.total_left_stats.coded_error,
4647 cpi->tot_recode_hits);
4648
4649 fclose(f);
4650
4651 {
4652 FILE *fmodes = fopen("Modes.stt", "a");
4653
4654 fprintf(fmodes, "%6d:%1d:%1d:%1d ",
4655 cpi->common.current_video_frame,
4656 cm->frame_type, cm->refresh_golden_frame,
4657 cm->refresh_alt_ref_frame);
4658
4659 fprintf(fmodes, "\n");
4660
4661 fclose(fmodes);
4662 }
4663 }
4664
4665 #endif
4666
4667 cpi->ext_refresh_frame_flags_pending = 0;
4668
4669 if (cm->refresh_golden_frame == 1) {
4670 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
4671 } else {
4672 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_GOLDEN;
4673 }
4674
4675 if (cm->refresh_alt_ref_frame == 1) {
4676 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
4677 } else {
4678 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_ALTREF;
4679 }
4680
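/* Track which reference buffers now hold identical content so duplicate
 * references can be masked out of ref_frame_flags below. */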
4681 if (cm->refresh_last_frame & cm->refresh_golden_frame) { /* both refreshed */
4682 cpi->gold_is_last = 1;
4683 } else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) {
4684 /* 1 refreshed but not the other */
4685 cpi->gold_is_last = 0;
4686 }
4687
4688 if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) { /* both refreshed */
4689 cpi->alt_is_last = 1;
4690 } else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) {
4691 /* 1 refreshed but not the other */
4692 cpi->alt_is_last = 0;
4693 }
4694
4695 if (cm->refresh_alt_ref_frame &
4696 cm->refresh_golden_frame) { /* both refreshed */
4697 cpi->gold_is_alt = 1;
4698 } else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) {
4699 /* 1 refreshed but not the other */
4700 cpi->gold_is_alt = 0;
4701 }
4702
4703 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
4704
4705 if (cpi->gold_is_last) cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
4706
4707 if (cpi->alt_is_last) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4708
4709 if (cpi->gold_is_alt) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4710
4711 if (!cpi->oxcf.error_resilient_mode) {
4712 if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame &&
4713 (cm->frame_type != KEY_FRAME)) {
4714 /* Update the alternate reference frame stats as appropriate. */
4715 update_alt_ref_frame_stats(cpi);
4716 } else {
4717 /* Update the Golden frame stats as appropriate. */
4718 update_golden_frame_stats(cpi);
4719 }
4720 }
4721
4722 if (cm->frame_type == KEY_FRAME) {
4723 /* Tell the caller that the frame was coded as a key frame */
4724 *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
4725
4726 /* As this frame is a key frame the next defaults to an inter frame. */
4727 cm->frame_type = INTER_FRAME;
4728
4729 cpi->last_frame_percent_intra = 100;
4730 } else {
4731 *frame_flags = cm->frame_flags & ~FRAMEFLAGS_KEY;
4732
4733 cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
4734 }
4735
4736 /* Clear the one shot update flags for segmentation map and mode/ref
4737 * loop filter deltas.
4738 */
4739 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
4740 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
4741 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
4742
4743 /* Don't increment frame counters if this was an altref buffer update,
4744 * not a real frame
4745 */
4746 if (cm->show_frame) {
4747 cm->current_video_frame++;
4748 cpi->frames_since_key++;
4749 cpi->temporal_pattern_counter++;
4750 }
4751
4752 #if 0
4753 {
4754 char filename[512];
4755 FILE *recon_file;
4756 sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
4757 recon_file = fopen(filename, "wb");
4758 fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
4759 cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
4760 fclose(recon_file);
4761 }
4762 #endif
4763
4764 /* DEBUG */
4765 /* vpx_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
4766 }
4767 #if !CONFIG_REALTIME_ONLY
4768 static void Pass2Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
4769 unsigned char *dest_end, unsigned int *frame_flags) {
4770 if (!cpi->common.refresh_alt_ref_frame) vp8_second_pass(cpi);
4771
4772 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
4773 cpi->twopass.bits_left -= 8 * (int)(*size);
4774
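/* For non-altref frames, credit back one frame's worth of the two-pass
 * minimum section rate, so bits_left effectively tracks spending above that
 * guaranteed minimum. */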
4775 if (!cpi->common.refresh_alt_ref_frame) {
4776 double two_pass_min_rate =
4777 (double)(cpi->oxcf.target_bandwidth *
4778 cpi->oxcf.two_pass_vbrmin_section / 100);
4779 cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
4780 }
4781 }
4782 #endif
4783
4784 int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags,
4785 YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
4786 int64_t end_time) {
4787 struct vpx_usec_timer timer;
4788 int res = 0;
4789
4790 vpx_usec_timer_start(&timer);
4791
4792 /* Reinit the lookahead buffer if the frame size changes */
4793 if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height) {
4794 assert(cpi->oxcf.lag_in_frames < 2);
4795 dealloc_raw_frame_buffers(cpi);
4796 alloc_raw_frame_buffers(cpi);
4797 }
4798
4799 if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags,
4800 cpi->active_map_enabled ? cpi->active_map : NULL)) {
4801 res = -1;
4802 }
4803 vpx_usec_timer_mark(&timer);
4804 cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
4805
4806 return res;
4807 }
4808
4809 static int frame_is_reference(const VP8_COMP *cpi) {
4810 const VP8_COMMON *cm = &cpi->common;
4811 const MACROBLOCKD *xd = &cpi->mb.e_mbd;
4812
4813 return cm->frame_type == KEY_FRAME || cm->refresh_last_frame ||
4814 cm->refresh_golden_frame || cm->refresh_alt_ref_frame ||
4815 cm->copy_buffer_to_gf || cm->copy_buffer_to_arf ||
4816 cm->refresh_entropy_probs || xd->mode_ref_lf_delta_update ||
4817 xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
4818 }
4819
4820 int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
4821 size_t *size, unsigned char *dest,
4822 unsigned char *dest_end, int64_t *time_stamp,
4823 int64_t *time_end, int flush) {
4824 VP8_COMMON *cm;
4825 struct vpx_usec_timer tsctimer;
4826 struct vpx_usec_timer ticktimer;
4827 struct vpx_usec_timer cmptimer;
4828 YV12_BUFFER_CONFIG *force_src_buffer = NULL;
4829
4830 if (!cpi) return -1;
4831
4832 cm = &cpi->common;
4833
4834 vpx_usec_timer_start(&cmptimer);
4835
4836 cpi->source = NULL;
4837
4838 #if !CONFIG_REALTIME_ONLY
4839 /* Should we code an alternate reference frame */
4840 if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.play_alternate &&
4841 cpi->source_alt_ref_pending) {
4842 if ((cpi->source = vp8_lookahead_peek(
4843 cpi->lookahead, cpi->frames_till_gf_update_due, PEEK_FORWARD))) {
4844 cpi->alt_ref_source = cpi->source;
4845 if (cpi->oxcf.arnr_max_frames > 0) {
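/* Temporally filter the lookahead frames around the GF update point; the
 * filtered result (cpi->alt_ref_buffer) then becomes the altref source. */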
4846 vp8_temporal_filter_prepare_c(cpi, cpi->frames_till_gf_update_due);
4847 force_src_buffer = &cpi->alt_ref_buffer;
4848 }
4849 cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
4850 cm->refresh_alt_ref_frame = 1;
4851 cm->refresh_golden_frame = 0;
4852 cm->refresh_last_frame = 0;
4853 cm->show_frame = 0;
4854 /* Clear Pending alt Ref flag. */
4855 cpi->source_alt_ref_pending = 0;
4856 cpi->is_src_frame_alt_ref = 0;
4857 }
4858 }
4859 #endif

  if (!cpi->source) {
    /* Read the last frame's source if we are encoding the first pass. */
    if (cpi->pass == 1 && cm->current_video_frame > 0) {
      if ((cpi->last_source =
               vp8_lookahead_peek(cpi->lookahead, 1, PEEK_BACKWARD)) == NULL) {
        return -1;
      }
    }

    if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) {
      cm->show_frame = 1;

      cpi->is_src_frame_alt_ref =
          cpi->alt_ref_source && (cpi->source == cpi->alt_ref_source);

      if (cpi->is_src_frame_alt_ref) cpi->alt_ref_source = NULL;
    }
  }

  if (cpi->source) {
    cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
    cpi->un_scaled_source = cpi->Source;
    *time_stamp = cpi->source->ts_start;
    *time_end = cpi->source->ts_end;
    *frame_flags = cpi->source->flags;

    if (cpi->pass == 1 && cm->current_video_frame > 0) {
      cpi->last_frame_unscaled_source = &cpi->last_source->img;
    }
  } else {
    *size = 0;
#if !CONFIG_REALTIME_ONLY

    if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
      vp8_end_first_pass(cpi); /* get the last stats packet */
      cpi->twopass.first_pass_done = 1;
    }

#endif

    return -1;
  }

  if (cpi->source->ts_start < cpi->first_time_stamp_ever) {
    cpi->first_time_stamp_ever = cpi->source->ts_start;
    cpi->last_end_time_stamp_seen = cpi->source->ts_start;
  }

  /* Adjust the frame rate based on the timestamps given. */
  if (cm->show_frame) {
    int64_t this_duration;
    int step = 0;

    if (cpi->source->ts_start == cpi->first_time_stamp_ever) {
      this_duration = cpi->source->ts_end - cpi->source->ts_start;
      step = 1;
    } else {
      int64_t last_duration;

      this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
      last_duration = cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
      /* Do a step update if the duration changes by at least 10%. */
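      /* With the integer math below, step is nonzero only when
       * |this_duration - last_duration| is at least 10% of last_duration;
       * smaller changes truncate to zero and fall through to the rolling
       * average instead.
       */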
      if (last_duration) {
        step = (int)(((this_duration - last_duration) * 10 / last_duration));
      }
    }

    if (this_duration) {
      if (step) {
        cpi->ref_framerate = 10000000.0 / this_duration;
      } else {
        double avg_duration, interval;

        /* Average this frame's rate into the last second's average
         * frame rate. If we haven't seen 1 second yet, then average
         * over the whole interval seen.
         */
        interval = (double)(cpi->source->ts_end - cpi->first_time_stamp_ever);
        if (interval > 10000000.0) interval = 10000000;

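        /* Rolling update: avg' = avg * (interval - avg + this_duration) /
         * interval, i.e. the average is nudged toward this frame's duration
         * with a weight of avg / interval (interval is capped at one second
         * above).
         */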
        avg_duration = 10000000.0 / cpi->ref_framerate;
        avg_duration *= (interval - avg_duration + this_duration);
        avg_duration /= interval;

        cpi->ref_framerate = 10000000.0 / avg_duration;
      }
#if CONFIG_MULTI_RES_ENCODING
      if (cpi->oxcf.mr_total_resolutions > 1) {
        LOWER_RES_FRAME_INFO *low_res_frame_info =
            (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
        // The frame rate should be the same for all spatial layers in
        // multi-res encoding (simulcast), so constrain the frame rate of the
        // higher layers to that of the lowest resolution. This is needed
        // because the application may decide to skip encoding a high layer
        // and then start again, in which case a big jump in time-stamps will
        // be received for that high layer, which would yield an incorrect
        // frame rate (from the time-stamp adjustment in the calculation
        // above).
        if (cpi->oxcf.mr_encoder_id) {
          if (!low_res_frame_info->skip_encoding_base_stream)
            cpi->ref_framerate = low_res_frame_info->low_res_framerate;
        } else {
          // Keep track of the frame rate for the lowest resolution.
          low_res_frame_info->low_res_framerate = cpi->ref_framerate;
          // The base stream is being encoded, so clear the skip flag.
          low_res_frame_info->skip_encoding_base_stream = 0;
        }
      }
#endif
      if (cpi->oxcf.number_of_layers > 1) {
        unsigned int i;

        /* Update the frame rate for each layer. */
        assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
        for (i = 0; i < cpi->oxcf.number_of_layers && i < VPX_TS_MAX_LAYERS;
             ++i) {
          LAYER_CONTEXT *lc = &cpi->layer_context[i];
          lc->framerate = cpi->ref_framerate / cpi->oxcf.rate_decimator[i];
        }
      } else {
        vp8_new_framerate(cpi, cpi->ref_framerate);
      }
    }

    cpi->last_time_stamp_seen = cpi->source->ts_start;
    cpi->last_end_time_stamp_seen = cpi->source->ts_end;
  }

  if (cpi->oxcf.number_of_layers > 1) {
    int layer;

    update_layer_contexts(cpi);

    /* Restore the layer-specific context and set the frame rate. */
    if (cpi->temporal_layer_id >= 0) {
      layer = cpi->temporal_layer_id;
    } else {
      layer =
          cpi->oxcf
              .layer_id[cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
    }
    restore_layer_context(cpi, layer);
    vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
  }

  if (cpi->compressor_speed == 2) {
    vpx_usec_timer_start(&tsctimer);
    vpx_usec_timer_start(&ticktimer);
  }

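  /* Fraction (in percent) of macroblocks in the previously encoded frame that
   * were coded with a zero motion vector.
   */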
  cpi->lf_zeromv_pct = (cpi->zeromv_count * 100) / cm->MBs;

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  {
    int i;
    const int num_part = (1 << cm->multi_token_partition);
    /* the available bytes in dest */
    const unsigned long dest_size = dest_end - dest;
    const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);

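    /* Partition the output buffer for on-the-fly packing: the first 1/10 of
     * dest is reserved for the control (mode/mv) partition, and the remaining
     * ~9/10 is split evenly among the num_part token partitions.
     */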
    unsigned char *dp = dest;

    cpi->partition_d[0] = dp;
    dp += dest_size / 10; /* reserve 1/10 for control partition */
    cpi->partition_d_end[0] = dp;

    for (i = 0; i < num_part; ++i) {
      cpi->partition_d[i + 1] = dp;
      dp += tok_part_buff_size;
      cpi->partition_d_end[i + 1] = dp;
    }
  }
#endif

  /* start with a 0 size frame */
  *size = 0;

  /* Clear down mmx registers */
  vpx_clear_system_state();

  cm->frame_type = INTER_FRAME;
  cm->frame_flags = *frame_flags;

#if 0

    if (cm->refresh_alt_ref_frame)
    {
        cm->refresh_golden_frame = 0;
        cm->refresh_last_frame = 0;
    }
    else
    {
        cm->refresh_golden_frame = 0;
        cm->refresh_last_frame = 1;
    }

#endif
  /* find a free buffer for the new frame */
  {
    int i = 0;
    for (; i < NUM_YV12_BUFFERS; ++i) {
      if (!cm->yv12_fb[i].flags) {
        cm->new_fb_idx = i;
        break;
      }
    }

    assert(i < NUM_YV12_BUFFERS);
  }
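  /* Pass 1 only gathers first-pass stats; pass 2 encodes using those stats.
   * Everything else (one-pass / realtime) encodes directly to the target
   * data rate.
   */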
  switch (cpi->pass) {
#if !CONFIG_REALTIME_ONLY
    case 1: Pass1Encode(cpi); break;
    case 2: Pass2Encode(cpi, size, dest, dest_end, frame_flags); break;
#endif  // !CONFIG_REALTIME_ONLY
    default:
      encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
      break;
  }

  if (cpi->compressor_speed == 2) {
    unsigned int duration, duration2;
    vpx_usec_timer_mark(&tsctimer);
    vpx_usec_timer_mark(&ticktimer);

    duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
    duration2 = (unsigned int)((double)duration / 2);

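    /* Smooth the timing statistics with an exponential moving average:
     * new_avg = (7 * old_avg + sample) / 8. duration2 (half the measured tick
     * time) feeds the pick-mode average below.
     */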
    if (cm->frame_type != KEY_FRAME) {
      if (cpi->avg_encode_time == 0) {
        cpi->avg_encode_time = duration;
      } else {
        cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
      }
    }

    if (duration2) {
      if (cpi->avg_pick_mode_time == 0) {
        cpi->avg_pick_mode_time = duration2;
      } else {
        cpi->avg_pick_mode_time =
            (7 * cpi->avg_pick_mode_time + duration2) >> 3;
      }
    }
  }

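  /* If this frame did not refresh the entropy probabilities, restore the
   * frame context from the saved copy so the adaptations made while coding it
   * do not carry over to subsequent frames.
   */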
  if (cm->refresh_entropy_probs == 0) {
    memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
  }

  /* Save the contexts separately for alt ref, gold and last. */
  /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
  if (cm->refresh_alt_ref_frame) memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));

  if (cm->refresh_golden_frame) memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));

  if (cm->refresh_last_frame) memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));

  /* If the frame was dropped (size == 0), keep the refresh requests pending
   * for subsequent frames; otherwise return to the normal state.
   */
  if (*size > 0) {
    cpi->droppable = !frame_is_reference(cpi);

    /* return to normal state */
    cm->refresh_entropy_probs = 1;
    cm->refresh_alt_ref_frame = 0;
    cm->refresh_golden_frame = 0;
    cm->refresh_last_frame = 1;
    cm->frame_type = INTER_FRAME;
  }

  /* Save the layer-specific state. */
  if (cpi->oxcf.number_of_layers > 1) save_layer_context(cpi);

  vpx_usec_timer_mark(&cmptimer);
  cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);

  if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) {
    generate_psnr_packet(cpi);
  }

#if CONFIG_INTERNAL_STATS

  if (cpi->pass != 1) {
    cpi->bytes += *size;

    if (cm->show_frame) {
      cpi->common.show_frame_mi = cpi->common.mi;
      cpi->count++;

      if (cpi->b_calculate_psnr) {
        uint64_t ye, ue, ve;
        double frame_psnr;
        YV12_BUFFER_CONFIG *orig = cpi->Source;
        YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
        unsigned int y_width = cpi->common.Width;
        unsigned int y_height = cpi->common.Height;
        unsigned int uv_width = (y_width + 1) / 2;
        unsigned int uv_height = (y_height + 1) / 2;
        int y_samples = y_height * y_width;
        int uv_samples = uv_height * uv_width;
        int t_samples = y_samples + 2 * uv_samples;
        double sq_error;

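        /* Per-plane SSE is converted to PSNR as
         * 10 * log10(255^2 * samples / sse); the combined frame PSNR weights
         * chroma by its quarter-size sample count (t_samples).
         */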
        ye = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
                              recon->y_stride, y_width, y_height);

        ue = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
                              recon->uv_stride, uv_width, uv_height);

        ve = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
                              recon->uv_stride, uv_width, uv_height);

        sq_error = (double)(ye + ue + ve);

        frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);

        cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
        cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
        cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
        cpi->total_sq_error += sq_error;
        cpi->total += frame_psnr;
#if CONFIG_POSTPROC
        {
          YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
          double sq_error2;
          double frame_psnr2, frame_ssim2 = 0;
          double weight = 0;

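          /* The "P" metrics are computed against a deblocked copy of the
           * reconstruction; the postproc strength below is derived from the
           * loop filter level (filter_level * 10 / 6).
           */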
          vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer,
                      cm->filter_level * 10 / 6);
          vpx_clear_system_state();

          ye = calc_plane_error(orig->y_buffer, orig->y_stride, pp->y_buffer,
                                pp->y_stride, y_width, y_height);

          ue = calc_plane_error(orig->u_buffer, orig->uv_stride, pp->u_buffer,
                                pp->uv_stride, uv_width, uv_height);

          ve = calc_plane_error(orig->v_buffer, orig->uv_stride, pp->v_buffer,
                                pp->uv_stride, uv_width, uv_height);

          sq_error2 = (double)(ye + ue + ve);

          frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);

          cpi->totalp_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
          cpi->totalp_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
          cpi->totalp_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
          cpi->total_sq_error2 += sq_error2;
          cpi->totalp += frame_psnr2;

          frame_ssim2 =
              vpx_calc_ssim(cpi->Source, &cm->post_proc_buffer, &weight);

          cpi->summed_quality += frame_ssim2 * weight;
          cpi->summed_weights += weight;

          if (cpi->oxcf.number_of_layers > 1) {
            unsigned int i;

            for (i = cpi->current_layer; i < cpi->oxcf.number_of_layers; ++i) {
              cpi->frames_in_layer[i]++;

              cpi->bytes_in_layer[i] += *size;
              cpi->sum_psnr[i] += frame_psnr;
              cpi->sum_psnr_p[i] += frame_psnr2;
              cpi->total_error2[i] += sq_error;
              cpi->total_error2_p[i] += sq_error2;
              cpi->sum_ssim[i] += frame_ssim2 * weight;
              cpi->sum_weights[i] += weight;
            }
          }
        }
#endif
      }
    }
  }

#if 0

    if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
    {
        skiptruecount += cpi->skip_true_count;
        skipfalsecount += cpi->skip_false_count;
    }

#endif
#if 0

    if (cpi->pass != 1)
    {
        FILE *f = fopen("skip.stt", "a");
        fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);

        if (cpi->is_src_frame_alt_ref == 1)
            fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size);

        fclose(f);
    }

#endif
#endif

  cpi->common.error.setjmp = 0;

#if CONFIG_MULTITHREAD
  /* Wait for the loop filter thread to finish. */
  if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) && cpi->b_lpf_running) {
    sem_wait(&cpi->h_event_end_lpf);
    cpi->b_lpf_running = 0;
  }
#endif

  return 0;
}

int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest,
                              vp8_ppflags_t *flags) {
  if (cpi->common.refresh_alt_ref_frame) {
    return -1;
  } else {
    int ret;

#if CONFIG_POSTPROC
    cpi->common.show_frame_mi = cpi->common.mi;
    ret = vp8_post_proc_frame(&cpi->common, dest, flags);
#else
    (void)flags;

    if (cpi->common.frame_to_show) {
      *dest = *cpi->common.frame_to_show;
      dest->y_width = cpi->common.Width;
      dest->y_height = cpi->common.Height;
      dest->uv_height = cpi->common.Height / 2;
      ret = 0;
    } else {
      ret = -1;
    }

#endif
    vpx_clear_system_state();
    return ret;
  }
}

int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
                   unsigned int cols, int delta_q[4], int delta_lf[4],
                   unsigned int threshold[4]) {
  signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
  int internal_delta_q[MAX_MB_SEGMENTS];
  const int range = 63;
  int i;

  // Check that the number of rows and columns matches the frame.
  if (cpi->common.mb_rows != (int)rows || cpi->common.mb_cols != (int)cols) {
    return -1;
  }

  // Range check the delta Q values.
  if ((abs(delta_q[0]) > range) || (abs(delta_q[1]) > range) ||
      (abs(delta_q[2]) > range) || (abs(delta_q[3]) > range)) {
    return -1;
  }

  // Range check the delta loop filter values.
  if ((abs(delta_lf[0]) > range) || (abs(delta_lf[1]) > range) ||
      (abs(delta_lf[2]) > range) || (abs(delta_lf[3]) > range)) {
    return -1;
  }

  // Disable segmentation if no map is given or if all deltas and thresholds
  // are zero.
  if (!map || (delta_q[0] == 0 && delta_q[1] == 0 && delta_q[2] == 0 &&
               delta_q[3] == 0 && delta_lf[0] == 0 && delta_lf[1] == 0 &&
               delta_lf[2] == 0 && delta_lf[3] == 0 && threshold[0] == 0 &&
               threshold[1] == 0 && threshold[2] == 0 && threshold[3] == 0)) {
    disable_segmentation(cpi);
    return 0;
  }

  // Translate the external delta q values to internal values.
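  // q_trans[] maps the external 0-63 quantizer scale onto the internal
  // 0-127 qindex range; negative deltas are mapped symmetrically.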
  for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
    internal_delta_q[i] =
        (delta_q[i] >= 0) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
  }

  /* Set the segmentation map */
  set_segmentation_map(cpi, map);

  /* Activate segmentation. */
  enable_segmentation(cpi);

  /* Set up the quant segment data */
  feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
  feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
  feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
  feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];

  /* Set up the loop filter segment data */
  feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
  feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
  feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
  feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];

  cpi->segment_encode_breakout[0] = threshold[0];
  cpi->segment_encode_breakout[1] = threshold[1];
  cpi->segment_encode_breakout[2] = threshold[2];
  cpi->segment_encode_breakout[3] = threshold[3];

  /* Initialise the feature data structure */
  set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);

  if (threshold[0] != 0 || threshold[1] != 0 || threshold[2] != 0 ||
      threshold[3] != 0)
    cpi->use_roi_static_threshold = 1;
  cpi->cyclic_refresh_mode_enabled = 0;

  return 0;
}

int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
                       unsigned int cols) {
  if ((int)rows == cpi->common.mb_rows && (int)cols == cpi->common.mb_cols) {
    if (map) {
      memcpy(cpi->active_map, map, rows * cols);
      cpi->active_map_enabled = 1;
    } else {
      cpi->active_map_enabled = 0;
    }

    return 0;
  } else {
    return -1;
  }
}

int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode,
                          VPX_SCALING vert_mode) {
  if (horiz_mode <= ONETWO) {
    cpi->common.horiz_scale = horiz_mode;
  } else {
    return -1;
  }

  if (vert_mode <= ONETWO) {
    cpi->common.vert_scale = vert_mode;
  } else {
    return -1;
  }

  return 0;
}

int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
  int i, j;
  int Total = 0;

  unsigned char *src = source->y_buffer;
  unsigned char *dst = dest->y_buffer;

  /* Loop through the Y plane, summing the squared differences between the
   * raw and reconstructed data over 16x16 blocks.
   */
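  /* vpx_mse16x16() returns the raw sum of squared differences for each 16x16
   * block (it is not divided by the block size), so Total accumulates the
   * Y-plane SSE.
   */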
  for (i = 0; i < source->y_height; i += 16) {
    for (j = 0; j < source->y_width; j += 16) {
      unsigned int sse;
      Total += vpx_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
                            &sse);
    }

    src += 16 * source->y_stride;
    dst += 16 * dest->y_stride;
  }

  return Total;
}

int vp8_get_quantizer(VP8_COMP *cpi) { return cpi->common.base_qindex; }
