1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11
12 #include "vpx_config.h"
13 #include "./vpx_scale_rtcd.h"
14 #include "./vpx_dsp_rtcd.h"
15 #include "./vp8_rtcd.h"
16 #include "vp8/common/onyxc_int.h"
17 #include "vp8/common/blockd.h"
18 #include "onyx_int.h"
19 #include "vp8/common/systemdependent.h"
20 #include "vp8/encoder/quantize.h"
21 #include "vp8/common/alloccommon.h"
22 #include "mcomp.h"
23 #include "firstpass.h"
24 #include "vpx/internal/vpx_psnr.h"
25 #include "vpx_scale/vpx_scale.h"
26 #include "vp8/common/extend.h"
27 #include "ratectrl.h"
28 #include "vp8/common/quant_common.h"
29 #include "segmentation.h"
30 #if CONFIG_POSTPROC
31 #include "vp8/common/postproc.h"
32 #endif
33 #include "vpx_mem/vpx_mem.h"
34 #include "vp8/common/reconintra.h"
35 #include "vp8/common/swapyv12buffer.h"
36 #include "vp8/common/threading.h"
37 #include "vpx_ports/vpx_timer.h"
38 #if ARCH_ARM
39 #include "vpx_ports/arm.h"
40 #endif
41 #if CONFIG_MULTI_RES_ENCODING
42 #include "mr_dissim.h"
43 #endif
44 #include "encodeframe.h"
45
46 #include <math.h>
47 #include <stdio.h>
48 #include <limits.h>
49
50 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
51 extern int vp8_update_coef_context(VP8_COMP *cpi);
52 extern void vp8_update_coef_probs(VP8_COMP *cpi);
53 #endif
54
55 extern void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
56 extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val);
57 extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
58
59 extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *post, int filt_lvl, int low_var_thresh, int flag);
60 extern void print_parms(VP8_CONFIG *ocf, char *filenam);
61 extern unsigned int vp8_get_processor_freq();
62 extern void print_tree_update_probs();
63 extern int vp8cx_create_encoder_threads(VP8_COMP *cpi);
64 extern void vp8cx_remove_encoder_threads(VP8_COMP *cpi);
65
66 int vp8_estimate_entropy_savings(VP8_COMP *cpi);
67
68 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
69
70 extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
71
72 static void set_default_lf_deltas(VP8_COMP *cpi);
73
74 extern const int vp8_gf_interval_table[101];
75
76 #if CONFIG_INTERNAL_STATS
77 #include "math.h"
78 #include "vpx_dsp/ssim.h"
79 #endif
80
81
82 #ifdef OUTPUT_YUV_SRC
83 FILE *yuv_file;
84 #endif
85 #ifdef OUTPUT_YUV_DENOISED
86 FILE *yuv_denoised_file;
87 #endif
88
89 #if 0
90 FILE *framepsnr;
91 FILE *kf_list;
92 FILE *keyfile;
93 #endif
94
95 #if 0
96 extern int skip_true_count;
97 extern int skip_false_count;
98 #endif
99
100
101 #ifdef VP8_ENTROPY_STATS
102 extern int intra_mode_stats[10][10][10];
103 #endif
104
105 #ifdef SPEEDSTATS
106 unsigned int frames_at_speed[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
107 unsigned int tot_pm = 0;
108 unsigned int cnt_pm = 0;
109 unsigned int tot_ef = 0;
110 unsigned int cnt_ef = 0;
111 #endif
112
113 #ifdef MODE_STATS
114 extern unsigned __int64 Sectionbits[50];
115 extern int y_modes[5] ;
116 extern int uv_modes[4] ;
117 extern int b_modes[10] ;
118
119 extern int inter_y_modes[10] ;
120 extern int inter_uv_modes[4] ;
121 extern unsigned int inter_b_modes[15];
122 #endif
123
124 extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
125
126 extern const int qrounding_factors[129];
127 extern const int qzbin_factors[129];
128 extern void vp8cx_init_quantizer(VP8_COMP *cpi);
129 extern const int vp8cx_base_skip_false_prob[128];
130
131 /* Tables relating active max Q to active min Q */
132 static const unsigned char kf_low_motion_minq[QINDEX_RANGE] =
133 {
134 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
135 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
136 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
137 0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,
138 3,3,3,3,3,3,4,4,4,5,5,5,5,5,6,6,
139 6,6,7,7,8,8,8,8,9,9,10,10,10,10,11,11,
140 11,11,12,12,13,13,13,13,14,14,15,15,15,15,16,16,
141 16,16,17,17,18,18,18,18,19,20,20,21,21,22,23,23
142 };
143 static const unsigned char kf_high_motion_minq[QINDEX_RANGE] =
144 {
145 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
146 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
147 1,1,1,1,1,1,1,1,2,2,2,2,3,3,3,3,
148 3,3,3,3,4,4,4,4,5,5,5,5,5,5,6,6,
149 6,6,7,7,8,8,8,8,9,9,10,10,10,10,11,11,
150 11,11,12,12,13,13,13,13,14,14,15,15,15,15,16,16,
151 16,16,17,17,18,18,18,18,19,19,20,20,20,20,21,21,
152 21,21,22,22,23,23,24,25,25,26,26,27,28,28,29,30
153 };
154 static const unsigned char gf_low_motion_minq[QINDEX_RANGE] =
155 {
156 0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,
157 3,3,3,3,4,4,4,4,5,5,5,5,6,6,6,6,
158 7,7,7,7,8,8,8,8,9,9,9,9,10,10,10,10,
159 11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18,
160 19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,
161 27,27,28,28,29,29,30,30,31,31,32,32,33,33,34,34,
162 35,35,36,36,37,37,38,38,39,39,40,40,41,41,42,42,
163 43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58
164 };
165 static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] =
166 {
167 0,0,0,0,1,1,1,1,1,1,2,2,3,3,3,4,
168 4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9,
169 9,10,10,10,10,11,11,11,12,12,12,12,13,13,13,14,
170 14,14,15,15,16,16,17,17,18,18,19,19,20,20,21,21,
171 22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,
172 30,30,31,31,32,32,33,33,34,34,35,35,36,36,37,37,
173 38,39,39,40,40,41,41,42,42,43,43,44,45,46,47,48,
174 49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64
175 };
176 static const unsigned char gf_high_motion_minq[QINDEX_RANGE] =
177 {
178 0,0,0,0,1,1,1,1,1,2,2,2,3,3,3,4,
179 4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9,
180 9,10,10,10,11,11,12,12,13,13,14,14,15,15,16,16,
181 17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,
182 25,25,26,26,27,27,28,28,29,29,30,30,31,31,32,32,
183 33,33,34,34,35,35,36,36,37,37,38,38,39,39,40,40,
184 41,41,42,42,43,44,45,46,47,48,49,50,51,52,53,54,
185 55,56,57,58,59,60,62,64,66,68,70,72,74,76,78,80
186 };
187 static const unsigned char inter_minq[QINDEX_RANGE] =
188 {
189 0,0,1,1,2,3,3,4,4,5,6,6,7,8,8,9,
190 9,10,11,11,12,13,13,14,15,15,16,17,17,18,19,20,
191 20,21,22,22,23,24,24,25,26,27,27,28,29,30,30,31,
192 32,33,33,34,35,36,36,37,38,39,39,40,41,42,42,43,
193 44,45,46,46,47,48,49,50,50,51,52,53,54,55,55,56,
194 57,58,59,60,60,61,62,63,64,65,66,67,67,68,69,70,
195 71,72,73,74,75,75,76,77,78,79,80,81,82,83,84,85,
196 86,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100
197 };
198
199 #ifdef PACKET_TESTING
200 extern FILE *vpxlogc;
201 #endif
202
203 static void save_layer_context(VP8_COMP *cpi)
204 {
205 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
206
207 /* Save layer dependent coding state */
208 lc->target_bandwidth = cpi->target_bandwidth;
209 lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
210 lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
211 lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
212 lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
213 lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
214 lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
215 lc->buffer_level = cpi->buffer_level;
216 lc->bits_off_target = cpi->bits_off_target;
217 lc->total_actual_bits = cpi->total_actual_bits;
218 lc->worst_quality = cpi->worst_quality;
219 lc->active_worst_quality = cpi->active_worst_quality;
220 lc->best_quality = cpi->best_quality;
221 lc->active_best_quality = cpi->active_best_quality;
222 lc->ni_av_qi = cpi->ni_av_qi;
223 lc->ni_tot_qi = cpi->ni_tot_qi;
224 lc->ni_frames = cpi->ni_frames;
225 lc->avg_frame_qindex = cpi->avg_frame_qindex;
226 lc->rate_correction_factor = cpi->rate_correction_factor;
227 lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
228 lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
229 lc->zbin_over_quant = cpi->mb.zbin_over_quant;
230 lc->inter_frame_target = cpi->inter_frame_target;
231 lc->total_byte_count = cpi->total_byte_count;
232 lc->filter_level = cpi->common.filter_level;
233
234 lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
235
236 memcpy (lc->count_mb_ref_frame_usage,
237 cpi->mb.count_mb_ref_frame_usage,
238 sizeof(cpi->mb.count_mb_ref_frame_usage));
239 }
240
241 static void restore_layer_context(VP8_COMP *cpi, const int layer)
242 {
243 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
244
245 /* Restore layer dependent coding state */
246 cpi->current_layer = layer;
247 cpi->target_bandwidth = lc->target_bandwidth;
248 cpi->oxcf.target_bandwidth = lc->target_bandwidth;
249 cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
250 cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
251 cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
252 cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
253 cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
254 cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
255 cpi->buffer_level = lc->buffer_level;
256 cpi->bits_off_target = lc->bits_off_target;
257 cpi->total_actual_bits = lc->total_actual_bits;
258 cpi->active_worst_quality = lc->active_worst_quality;
259 cpi->active_best_quality = lc->active_best_quality;
260 cpi->ni_av_qi = lc->ni_av_qi;
261 cpi->ni_tot_qi = lc->ni_tot_qi;
262 cpi->ni_frames = lc->ni_frames;
263 cpi->avg_frame_qindex = lc->avg_frame_qindex;
264 cpi->rate_correction_factor = lc->rate_correction_factor;
265 cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
266 cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
267 cpi->mb.zbin_over_quant = lc->zbin_over_quant;
268 cpi->inter_frame_target = lc->inter_frame_target;
269 cpi->total_byte_count = lc->total_byte_count;
270 cpi->common.filter_level = lc->filter_level;
271
272 cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
273
274 memcpy (cpi->mb.count_mb_ref_frame_usage,
275 lc->count_mb_ref_frame_usage,
276 sizeof(cpi->mb.count_mb_ref_frame_usage));
277 }
278
279 static int rescale(int val, int num, int denom)
280 {
281 int64_t llnum = num;
282 int64_t llden = denom;
283 int64_t llval = val;
284
285 return (int)(llval * llnum / llden);
286 }
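/* Usage sketch for rescale() (illustrative, hypothetical figures): the callers
 * below pass a buffer level expressed in milliseconds together with a layer
 * bandwidth in bits per second, so the result is val * num / denom in bits.
 */
#if 0
/* Hypothetical helper showing the intended call pattern. */
static int example_buffer_level_bits(void)
{
    /* a 500 ms starting buffer at 800000 bits/s -> 400000 bits */
    return rescale(500, 800000, 1000);
}
#endif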
287
288 static void init_temporal_layer_context(VP8_COMP *cpi,
289 VP8_CONFIG *oxcf,
290 const int layer,
291 double prev_layer_framerate)
292 {
293 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
294
295 lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
296 lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
297
298 lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
299 lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
300 lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
301
302 lc->starting_buffer_level =
303 rescale((int)(oxcf->starting_buffer_level),
304 lc->target_bandwidth, 1000);
305
306 if (oxcf->optimal_buffer_level == 0)
307 lc->optimal_buffer_level = lc->target_bandwidth / 8;
308 else
309 lc->optimal_buffer_level =
310 rescale((int)(oxcf->optimal_buffer_level),
311 lc->target_bandwidth, 1000);
312
313 if (oxcf->maximum_buffer_size == 0)
314 lc->maximum_buffer_size = lc->target_bandwidth / 8;
315 else
316 lc->maximum_buffer_size =
317 rescale((int)(oxcf->maximum_buffer_size),
318 lc->target_bandwidth, 1000);
319
320 /* Work out the average size of a frame within this layer */
321 if (layer > 0)
322 lc->avg_frame_size_for_layer =
323 (int)((cpi->oxcf.target_bitrate[layer] -
324 cpi->oxcf.target_bitrate[layer-1]) * 1000 /
325 (lc->framerate - prev_layer_framerate));
326
327 lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
328 lc->active_best_quality = cpi->oxcf.best_allowed_q;
329 lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
330
331 lc->buffer_level = lc->starting_buffer_level;
332 lc->bits_off_target = lc->starting_buffer_level;
333
334 lc->total_actual_bits = 0;
335 lc->ni_av_qi = 0;
336 lc->ni_tot_qi = 0;
337 lc->ni_frames = 0;
338 lc->rate_correction_factor = 1.0;
339 lc->key_frame_rate_correction_factor = 1.0;
340 lc->gf_rate_correction_factor = 1.0;
341 lc->inter_frame_target = 0;
342 }
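/* Worked example (hypothetical two-layer configuration): with
 * output_framerate = 30, rate_decimator = {2, 1} and target_bitrate =
 * {200, 400} kbps, layer 0 runs at 15 fps / 200 kbps and layer 1 at
 * 30 fps / 400 kbps, so the frames added by layer 1 average
 * (400 - 200) * 1000 / (30 - 15) ~= 13333 bits each.
 */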
343
344 // Upon a run-time change in temporal layers, reset the layer context parameters
345 // for any "new" layers. For "existing" layers, let them inherit the parameters
346 // from the previous layer state (at the same layer #). In future we may want
347 // to better map the previous layer state(s) to the "new" ones.
348 static void reset_temporal_layer_change(VP8_COMP *cpi,
349 VP8_CONFIG *oxcf,
350 const int prev_num_layers)
351 {
352 int i;
353 double prev_layer_framerate = 0;
354 const int curr_num_layers = cpi->oxcf.number_of_layers;
355 // If the previous state was 1 layer, get current layer context from cpi.
356 // We need this to set the layer context for the new layers below.
357 if (prev_num_layers == 1)
358 {
359 cpi->current_layer = 0;
360 save_layer_context(cpi);
361 }
362 for (i = 0; i < curr_num_layers; i++)
363 {
364 LAYER_CONTEXT *lc = &cpi->layer_context[i];
365 if (i >= prev_num_layers)
366 {
367 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
368 }
369 // The initial buffer levels are set based on their starting levels.
370 // We could set the buffer levels based on the previous state (normalized
371 // properly by the layer bandwidths) but we would need to keep track of
372 // the previous set of layer bandwidths (i.e., target_bitrate[i])
373 // before the layer change. For now, reset to the starting levels.
374 lc->buffer_level = cpi->oxcf.starting_buffer_level_in_ms *
375 cpi->oxcf.target_bitrate[i];
376 lc->bits_off_target = lc->buffer_level;
377       // TODO(marpan): Should we set the rate_correction_factor and
378 // active_worst/best_quality to values derived from the previous layer
379 // state (to smooth-out quality dips/rate fluctuation at transition)?
380
381 // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
382       // is not set for 1 layer, and restore_layer_context()/save_layer_context()
383       // are not called in the encoding loop, so we call restore_layer_context() here to
384 // pass the layer context state to |cpi|.
385 if (curr_num_layers == 1)
386 {
387 lc->target_bandwidth = cpi->oxcf.target_bandwidth;
388 lc->buffer_level = cpi->oxcf.starting_buffer_level_in_ms *
389 lc->target_bandwidth / 1000;
390 lc->bits_off_target = lc->buffer_level;
391 restore_layer_context(cpi, 0);
392 }
393 prev_layer_framerate = cpi->output_framerate /
394 cpi->oxcf.rate_decimator[i];
395 }
396 }
397
398 static void setup_features(VP8_COMP *cpi)
399 {
400 // If segmentation enabled set the update flags
401 if ( cpi->mb.e_mbd.segmentation_enabled )
402 {
403 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
404 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
405 }
406 else
407 {
408 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
409 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
410 }
411
412 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
413 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
414 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
415 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
416 memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
417 memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
418
419 set_default_lf_deltas(cpi);
420
421 }
422
423
424 static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
425
426 void vp8_initialize_enc(void)
427 {
428 static volatile int init_done = 0;
429
430 if (!init_done) {
431 vpx_dsp_rtcd();
432 vp8_init_intra_predictors();
433 init_done = 1;
434 }
435 }
436
437 static void dealloc_compressor_data(VP8_COMP *cpi)
438 {
439 vpx_free(cpi->tplist);
440 cpi->tplist = NULL;
441
442 /* Delete last frame MV storage buffers */
443 vpx_free(cpi->lfmv);
444 cpi->lfmv = 0;
445
446 vpx_free(cpi->lf_ref_frame_sign_bias);
447 cpi->lf_ref_frame_sign_bias = 0;
448
449 vpx_free(cpi->lf_ref_frame);
450 cpi->lf_ref_frame = 0;
451
452     /* Delete segmentation map */
453 vpx_free(cpi->segmentation_map);
454 cpi->segmentation_map = 0;
455
456 vpx_free(cpi->active_map);
457 cpi->active_map = 0;
458
459 vp8_de_alloc_frame_buffers(&cpi->common);
460
461 vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
462 vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
463 dealloc_raw_frame_buffers(cpi);
464
465 vpx_free(cpi->tok);
466 cpi->tok = 0;
467
468 /* Structure used to monitor GF usage */
469 vpx_free(cpi->gf_active_flags);
470 cpi->gf_active_flags = 0;
471
472 /* Activity mask based per mb zbin adjustments */
473 vpx_free(cpi->mb_activity_map);
474 cpi->mb_activity_map = 0;
475
476 vpx_free(cpi->mb.pip);
477 cpi->mb.pip = 0;
478
479 #if CONFIG_MULTITHREAD
480 vpx_free(cpi->mt_current_mb_col);
481 cpi->mt_current_mb_col = NULL;
482 #endif
483 }
484
485 static void enable_segmentation(VP8_COMP *cpi)
486 {
487 /* Set the appropriate feature bit */
488 cpi->mb.e_mbd.segmentation_enabled = 1;
489 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
490 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
491 }
492 static void disable_segmentation(VP8_COMP *cpi)
493 {
494 /* Clear the appropriate feature bit */
495 cpi->mb.e_mbd.segmentation_enabled = 0;
496 }
497
498 /* Valid values for a segment are 0 to 3
499  * Segmentation map is arranged as [Rows][Columns]
500 */
501 static void set_segmentation_map(VP8_COMP *cpi, unsigned char *segmentation_map)
502 {
503 /* Copy in the new segmentation map */
504 memcpy(cpi->segmentation_map, segmentation_map, (cpi->common.mb_rows * cpi->common.mb_cols));
505
506 /* Signal that the map should be updated. */
507 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
508 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
509 }
510
511 /* The values given for each segment can be either deltas (from the default
512 * value chosen for the frame) or absolute values.
513 *
514 * Valid range for abs values is:
515 * (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
516 * Valid range for delta values are:
517 * (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
518 *
519 * abs_delta = SEGMENT_DELTADATA (deltas)
520 * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
521 *
522 */
523 static void set_segment_data(VP8_COMP *cpi, signed char *feature_data, unsigned char abs_delta)
524 {
525 cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
526 memcpy(cpi->segment_feature_data, feature_data, sizeof(cpi->segment_feature_data));
527 }
528
529
530 /* A simple function to cyclically refresh the background at a lower Q */
531 static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
532 {
533 unsigned char *seg_map = cpi->segmentation_map;
534 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
535 int i;
536 int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
537 int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
538
539 cpi->cyclic_refresh_q = Q / 2;
540
541 if (cpi->oxcf.screen_content_mode) {
542 // Modify quality ramp-up based on Q. Above some Q level, increase the
543     // number of blocks to be refreshed, and reduce it below the threshold.
544     // Turn it off under certain conditions, i.e., away from a key frame, and if
545     // we are at good quality (low Q) and most of the blocks were skip-encoded
546     // in the previous frame.
547 int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100;
548 if (Q >= qp_thresh) {
549 cpi->cyclic_refresh_mode_max_mbs_perframe =
550 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
551 } else if (cpi->frames_since_key > 250 &&
552 Q < 20 &&
553 cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
554 cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
555 } else {
556 cpi->cyclic_refresh_mode_max_mbs_perframe =
557 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
558 }
559 block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
560 }
561
562 // Set every macroblock to be eligible for update.
563 // For key frame this will reset seg map to 0.
564 memset(cpi->segmentation_map, 0, mbs_in_frame);
565
566 if (cpi->common.frame_type != KEY_FRAME && block_count > 0)
567 {
568 /* Cycle through the macro_block rows */
569 /* MB loop to set local segmentation map */
570 i = cpi->cyclic_refresh_mode_index;
571 assert(i < mbs_in_frame);
572 do
573 {
574             /* If the MB is a candidate for clean up then mark it for
575              * possible boost/refresh (segment 1). The segment id may get
576              * reset to 0 later if the MB gets coded as anything other than
577              * last frame (0,0), as only (last frame 0,0) MBs are eligible for
578              * refresh: that is to say, MBs likely to be background blocks.
579 */
580 if (cpi->cyclic_refresh_map[i] == 0)
581 {
582 seg_map[i] = 1;
583 block_count --;
584 }
585 else if (cpi->cyclic_refresh_map[i] < 0)
586 cpi->cyclic_refresh_map[i]++;
587
588 i++;
589 if (i == mbs_in_frame)
590 i = 0;
591
592 }
593 while(block_count && i != cpi->cyclic_refresh_mode_index);
594
595 cpi->cyclic_refresh_mode_index = i;
596
597 #if CONFIG_TEMPORAL_DENOISING
598 if (cpi->oxcf.noise_sensitivity > 0) {
599 if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
600 Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
601 (cpi->frames_since_key >
602 2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
603 // Under aggressive denoising, use segmentation to turn off loop
604 // filter below some qp thresh. The filter is reduced for all
605 // blocks that have been encoded as ZEROMV LAST x frames in a row,
606 // where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
607 // This is to avoid "dot" artifacts that can occur from repeated
608 // loop filtering on noisy input source.
609 cpi->cyclic_refresh_q = Q;
610 // lf_adjustment = -MAX_LOOP_FILTER;
611 lf_adjustment = -40;
612 for (i = 0; i < mbs_in_frame; ++i) {
613 seg_map[i] = (cpi->consec_zero_last[i] >
614 cpi->denoiser.denoise_pars.consec_zerolast) ? 1 : 0;
615 }
616 }
617 }
618 #endif
619 }
620
621 /* Activate segmentation. */
622 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
623 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
624 enable_segmentation(cpi);
625
626 /* Set up the quant segment data */
627 feature_data[MB_LVL_ALT_Q][0] = 0;
628 feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
629 feature_data[MB_LVL_ALT_Q][2] = 0;
630 feature_data[MB_LVL_ALT_Q][3] = 0;
631
632 /* Set up the loop segment data */
633 feature_data[MB_LVL_ALT_LF][0] = 0;
634 feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
635 feature_data[MB_LVL_ALT_LF][2] = 0;
636 feature_data[MB_LVL_ALT_LF][3] = 0;
637
638 /* Initialise the feature data structure */
639 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
640
641 }
642
643 static void set_default_lf_deltas(VP8_COMP *cpi)
644 {
645 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
646 cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
647
648 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
649 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
650
651 /* Test of ref frame deltas */
652 cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
653 cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
654 cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
655 cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
656
657 cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
658
659 if(cpi->oxcf.Mode == MODE_REALTIME)
660 cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
661 else
662 cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
663
664 cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
665 cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
666 }
667
668 /* Convenience macros for mapping speed and mode into a continuous
669 * range
670 */
671 #define GOOD(x) (x+1)
672 #define RT(x) (x+7)
673
674 static int speed_map(int speed, const int *map)
675 {
676 int res;
677
678 do
679 {
680 res = *map++;
681 } while(speed >= *map++);
682 return res;
683 }
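/* The tables below interleave (value, threshold) pairs: speed_map() returns
 * the first value and keeps advancing while the continuous speed is >= the
 * next threshold, so each value applies up to (but not including) the
 * threshold that follows it.  Worked example against thresh_mult_map_znn
 * below: GOOD(2) is continuous speed 3 and yields 1500, while RT(0) is
 * continuous speed 7 and yields 1000.
 */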
684
685 static const int thresh_mult_map_znn[] = {
686 /* map common to zero, nearest, and near */
687 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
688 };
689
690 static const int thresh_mult_map_vhpred[] = {
691 1000, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(1), 2000,
692 RT(7), INT_MAX, INT_MAX
693 };
694
695 static const int thresh_mult_map_bpred[] = {
696 2000, GOOD(0), 2500, GOOD(2), 5000, GOOD(3), 7500, RT(0), 2500, RT(1), 5000,
697 RT(6), INT_MAX, INT_MAX
698 };
699
700 static const int thresh_mult_map_tm[] = {
701 1000, GOOD(2), 1500, GOOD(3), 2000, RT(0), 0, RT(1), 1000, RT(2), 2000,
702 RT(7), INT_MAX, INT_MAX
703 };
704
705 static const int thresh_mult_map_new1[] = {
706 1000, GOOD(2), 2000, RT(0), 2000, INT_MAX
707 };
708
709 static const int thresh_mult_map_new2[] = {
710 1000, GOOD(2), 2000, GOOD(3), 2500, GOOD(5), 4000, RT(0), 2000, RT(2), 2500,
711 RT(5), 4000, INT_MAX
712 };
713
714 static const int thresh_mult_map_split1[] = {
715 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
716 RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
717 };
718
719 static const int thresh_mult_map_split2[] = {
720 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
721 RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
722 };
723
724 static const int mode_check_freq_map_zn2[] = {
725 /* {zero,nearest}{2,3} */
726 0, RT(10), 1<<1, RT(11), 1<<2, RT(12), 1<<3, INT_MAX
727 };
728
729 static const int mode_check_freq_map_vhbpred[] = {
730 0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(5), 4, INT_MAX
731 };
732
733 static const int mode_check_freq_map_near2[] = {
734 0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(10), 1<<2, RT(11), 1<<3, RT(12), 1<<4,
735 INT_MAX
736 };
737
738 static const int mode_check_freq_map_new1[] = {
739 0, RT(10), 1<<1, RT(11), 1<<2, RT(12), 1<<3, INT_MAX
740 };
741
742 static const int mode_check_freq_map_new2[] = {
743 0, GOOD(5), 4, RT(0), 0, RT(3), 4, RT(10), 1<<3, RT(11), 1<<4, RT(12), 1<<5,
744 INT_MAX
745 };
746
747 static const int mode_check_freq_map_split1[] = {
748 0, GOOD(2), 2, GOOD(3), 7, RT(1), 2, RT(2), 7, INT_MAX
749 };
750
751 static const int mode_check_freq_map_split2[] = {
752 0, GOOD(1), 2, GOOD(2), 4, GOOD(3), 15, RT(1), 4, RT(2), 15, INT_MAX
753 };
754
755 void vp8_set_speed_features(VP8_COMP *cpi)
756 {
757 SPEED_FEATURES *sf = &cpi->sf;
758 int Mode = cpi->compressor_speed;
759 int Speed = cpi->Speed;
760 int i;
761 VP8_COMMON *cm = &cpi->common;
762 int last_improved_quant = sf->improved_quant;
763 int ref_frames;
764
765 /* Initialise default mode frequency sampling variables */
766 for (i = 0; i < MAX_MODES; i ++)
767 {
768 cpi->mode_check_freq[i] = 0;
769 }
770
771 cpi->mb.mbs_tested_so_far = 0;
772 cpi->mb.mbs_zero_last_dot_suppress = 0;
773
774 /* best quality defaults */
775 sf->RD = 1;
776 sf->search_method = NSTEP;
777 sf->improved_quant = 1;
778 sf->improved_dct = 1;
779 sf->auto_filter = 1;
780 sf->recode_loop = 1;
781 sf->quarter_pixel_search = 1;
782 sf->half_pixel_search = 1;
783 sf->iterative_sub_pixel = 1;
784 sf->optimize_coefficients = 1;
785 sf->use_fastquant_for_pick = 0;
786 sf->no_skip_block4x4_search = 1;
787
788 sf->first_step = 0;
789 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
790 sf->improved_mv_pred = 1;
791
792 /* default thresholds to 0 */
793 for (i = 0; i < MAX_MODES; i++)
794 sf->thresh_mult[i] = 0;
795
796 /* Count enabled references */
797 ref_frames = 1;
798 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
799 ref_frames++;
800 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
801 ref_frames++;
802 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
803 ref_frames++;
804
805 /* Convert speed to continuous range, with clamping */
806 if (Mode == 0)
807 Speed = 0;
808 else if (Mode == 2)
809 Speed = RT(Speed);
810 else
811 {
812 if (Speed > 5)
813 Speed = 5;
814 Speed = GOOD(Speed);
815 }
816
817 sf->thresh_mult[THR_ZERO1] =
818 sf->thresh_mult[THR_NEAREST1] =
819 sf->thresh_mult[THR_NEAR1] =
820 sf->thresh_mult[THR_DC] = 0; /* always */
821
822 sf->thresh_mult[THR_ZERO2] =
823 sf->thresh_mult[THR_ZERO3] =
824 sf->thresh_mult[THR_NEAREST2] =
825 sf->thresh_mult[THR_NEAREST3] =
826 sf->thresh_mult[THR_NEAR2] =
827 sf->thresh_mult[THR_NEAR3] = speed_map(Speed, thresh_mult_map_znn);
828
829 sf->thresh_mult[THR_V_PRED] =
830 sf->thresh_mult[THR_H_PRED] = speed_map(Speed, thresh_mult_map_vhpred);
831 sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
832 sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
833 sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
834 sf->thresh_mult[THR_NEW2] =
835 sf->thresh_mult[THR_NEW3] = speed_map(Speed, thresh_mult_map_new2);
836 sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
837 sf->thresh_mult[THR_SPLIT2] =
838 sf->thresh_mult[THR_SPLIT3] = speed_map(Speed, thresh_mult_map_split2);
839
840 // Special case for temporal layers.
841 // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
842 // used as second reference. We don't modify thresholds for ALTREF case
843 // since ALTREF is usually used as long-term reference in temporal layers.
844 if ((cpi->Speed <= 6) &&
845 (cpi->oxcf.number_of_layers > 1) &&
846 (cpi->ref_frame_flags & VP8_LAST_FRAME) &&
847 (cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
848 if (cpi->closest_reference_frame == GOLDEN_FRAME) {
849 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
850 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
851 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
852 } else {
853 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
854 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
855 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;
856 }
857 }
858
859 cpi->mode_check_freq[THR_ZERO1] =
860 cpi->mode_check_freq[THR_NEAREST1] =
861 cpi->mode_check_freq[THR_NEAR1] =
862 cpi->mode_check_freq[THR_TM] =
863 cpi->mode_check_freq[THR_DC] = 0; /* always */
864
865 cpi->mode_check_freq[THR_ZERO2] =
866 cpi->mode_check_freq[THR_ZERO3] =
867 cpi->mode_check_freq[THR_NEAREST2] =
868 cpi->mode_check_freq[THR_NEAREST3] = speed_map(Speed,
869 mode_check_freq_map_zn2);
870
871 cpi->mode_check_freq[THR_NEAR2] =
872 cpi->mode_check_freq[THR_NEAR3] = speed_map(Speed,
873 mode_check_freq_map_near2);
874
875 cpi->mode_check_freq[THR_V_PRED] =
876 cpi->mode_check_freq[THR_H_PRED] =
877 cpi->mode_check_freq[THR_B_PRED] = speed_map(Speed,
878 mode_check_freq_map_vhbpred);
879 cpi->mode_check_freq[THR_NEW1] = speed_map(Speed,
880 mode_check_freq_map_new1);
881 cpi->mode_check_freq[THR_NEW2] =
882 cpi->mode_check_freq[THR_NEW3] = speed_map(Speed,
883 mode_check_freq_map_new2);
884 cpi->mode_check_freq[THR_SPLIT1] = speed_map(Speed,
885 mode_check_freq_map_split1);
886 cpi->mode_check_freq[THR_SPLIT2] =
887 cpi->mode_check_freq[THR_SPLIT3] = speed_map(Speed,
888 mode_check_freq_map_split2);
889 Speed = cpi->Speed;
890 switch (Mode)
891 {
892 #if !CONFIG_REALTIME_ONLY
893 case 0: /* best quality mode */
894 sf->first_step = 0;
895 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
896 break;
897 case 1:
898 case 3:
899 if (Speed > 0)
900 {
901 /* Disable coefficient optimization above speed 0 */
902 sf->optimize_coefficients = 0;
903 sf->use_fastquant_for_pick = 1;
904 sf->no_skip_block4x4_search = 0;
905
906 sf->first_step = 1;
907 }
908
909 if (Speed > 2)
910 {
911 sf->improved_quant = 0;
912 sf->improved_dct = 0;
913
914 /* Only do recode loop on key frames, golden frames and
915 * alt ref frames
916 */
917 sf->recode_loop = 2;
918
919 }
920
921 if (Speed > 3)
922 {
923 sf->auto_filter = 1;
924 sf->recode_loop = 0; /* recode loop off */
925 sf->RD = 0; /* Turn rd off */
926
927 }
928
929 if (Speed > 4)
930 {
931 sf->auto_filter = 0; /* Faster selection of loop filter */
932 }
933
934 break;
935 #endif
936 case 2:
937 sf->optimize_coefficients = 0;
938 sf->recode_loop = 0;
939 sf->auto_filter = 1;
940 sf->iterative_sub_pixel = 1;
941 sf->search_method = NSTEP;
942
943 if (Speed > 0)
944 {
945 sf->improved_quant = 0;
946 sf->improved_dct = 0;
947
948 sf->use_fastquant_for_pick = 1;
949 sf->no_skip_block4x4_search = 0;
950 sf->first_step = 1;
951 }
952
953 if (Speed > 2)
954 sf->auto_filter = 0; /* Faster selection of loop filter */
955
956 if (Speed > 3)
957 {
958 sf->RD = 0;
959 sf->auto_filter = 1;
960 }
961
962 if (Speed > 4)
963 {
964 sf->auto_filter = 0; /* Faster selection of loop filter */
965 sf->search_method = HEX;
966 sf->iterative_sub_pixel = 0;
967 }
968
969 if (Speed > 6)
970 {
971 unsigned int sum = 0;
972 unsigned int total_mbs = cm->MBs;
973 int thresh;
974 unsigned int total_skip;
975
976 int min = 2000;
977
978 if (cpi->oxcf.encode_breakout > 2000)
979 min = cpi->oxcf.encode_breakout;
980
981 min >>= 7;
982
983 for (i = 0; i < min; i++)
984 {
985 sum += cpi->mb.error_bins[i];
986 }
987
988 total_skip = sum;
989 sum = 0;
990
991 /* i starts from 2 to make sure thresh started from 2048 */
992 for (; i < 1024; i++)
993 {
994 sum += cpi->mb.error_bins[i];
995
996 if (10 * sum >= (unsigned int)(cpi->Speed - 6)*(total_mbs - total_skip))
997 break;
998 }
999
1000 i--;
1001 thresh = (i << 7);
1002
1003 if (thresh < 2000)
1004 thresh = 2000;
1005
1006 if (ref_frames > 1)
1007 {
1008 sf->thresh_mult[THR_NEW1 ] = thresh;
1009 sf->thresh_mult[THR_NEAREST1 ] = thresh >> 1;
1010 sf->thresh_mult[THR_NEAR1 ] = thresh >> 1;
1011 }
1012
1013 if (ref_frames > 2)
1014 {
1015 sf->thresh_mult[THR_NEW2] = thresh << 1;
1016 sf->thresh_mult[THR_NEAREST2 ] = thresh;
1017 sf->thresh_mult[THR_NEAR2 ] = thresh;
1018 }
1019
1020 if (ref_frames > 3)
1021 {
1022 sf->thresh_mult[THR_NEW3] = thresh << 1;
1023 sf->thresh_mult[THR_NEAREST3 ] = thresh;
1024 sf->thresh_mult[THR_NEAR3 ] = thresh;
1025 }
1026
1027 sf->improved_mv_pred = 0;
1028 }
1029
1030 if (Speed > 8)
1031 sf->quarter_pixel_search = 0;
1032
1033 if(cm->version == 0)
1034 {
1035 cm->filter_type = NORMAL_LOOPFILTER;
1036
1037 if (Speed >= 14)
1038 cm->filter_type = SIMPLE_LOOPFILTER;
1039 }
1040 else
1041 {
1042 cm->filter_type = SIMPLE_LOOPFILTER;
1043 }
1044
1045 /* This has a big hit on quality. Last resort */
1046 if (Speed >= 15)
1047 sf->half_pixel_search = 0;
1048
1049 memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
1050
1051 }; /* switch */
1052
1053 /* Slow quant, dct and trellis not worthwhile for first pass
1054 * so make sure they are always turned off.
1055 */
1056 if ( cpi->pass == 1 )
1057 {
1058 sf->improved_quant = 0;
1059 sf->optimize_coefficients = 0;
1060 sf->improved_dct = 0;
1061 }
1062
1063 if (cpi->sf.search_method == NSTEP)
1064 {
1065 vp8_init3smotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
1066 }
1067 else if (cpi->sf.search_method == DIAMOND)
1068 {
1069 vp8_init_dsmotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
1070 }
1071
1072 if (cpi->sf.improved_dct)
1073 {
1074 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1075 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1076 }
1077 else
1078 {
1079 /* No fast FDCT defined for any platform at this time. */
1080 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1081 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1082 }
1083
1084 cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
1085
1086 if (cpi->sf.improved_quant)
1087 {
1088 cpi->mb.quantize_b = vp8_regular_quantize_b;
1089 }
1090 else
1091 {
1092 cpi->mb.quantize_b = vp8_fast_quantize_b;
1093 }
1094 if (cpi->sf.improved_quant != last_improved_quant)
1095 vp8cx_init_quantizer(cpi);
1096
1097 if (cpi->sf.iterative_sub_pixel == 1)
1098 {
1099 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
1100 }
1101 else if (cpi->sf.quarter_pixel_search)
1102 {
1103 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
1104 }
1105 else if (cpi->sf.half_pixel_search)
1106 {
1107 cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
1108 }
1109 else
1110 {
1111 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1112 }
1113
1114 if (cpi->sf.optimize_coefficients == 1 && cpi->pass!=1)
1115 cpi->mb.optimize = 1;
1116 else
1117 cpi->mb.optimize = 0;
1118
1119 if (cpi->common.full_pixel)
1120 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1121
1122 #ifdef SPEEDSTATS
1123 frames_at_speed[cpi->Speed]++;
1124 #endif
1125 }
1126 #undef GOOD
1127 #undef RT
1128
1129 static void alloc_raw_frame_buffers(VP8_COMP *cpi)
1130 {
1131 #if VP8_TEMPORAL_ALT_REF
1132 int width = (cpi->oxcf.Width + 15) & ~15;
1133 int height = (cpi->oxcf.Height + 15) & ~15;
1134 #endif
1135
1136 cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
1137 cpi->oxcf.lag_in_frames);
1138 if(!cpi->lookahead)
1139 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1140 "Failed to allocate lag buffers");
1141
1142 #if VP8_TEMPORAL_ALT_REF
1143
1144 if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer,
1145 width, height, VP8BORDERINPIXELS))
1146 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1147 "Failed to allocate altref buffer");
1148
1149 #endif
1150 }
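/* (x + 15) & ~15 above rounds the dimensions up to whole macroblocks; e.g. a
 * hypothetical 100x80 input is padded to a 112x80 altref buffer.
 */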
1151
1152
1153 static void dealloc_raw_frame_buffers(VP8_COMP *cpi)
1154 {
1155 #if VP8_TEMPORAL_ALT_REF
1156 vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
1157 #endif
1158 vp8_lookahead_destroy(cpi->lookahead);
1159 }
1160
1161
1162 static int vp8_alloc_partition_data(VP8_COMP *cpi)
1163 {
1164 vpx_free(cpi->mb.pip);
1165
1166 cpi->mb.pip = vpx_calloc((cpi->common.mb_cols + 1) *
1167 (cpi->common.mb_rows + 1),
1168 sizeof(PARTITION_INFO));
1169 if(!cpi->mb.pip)
1170 return 1;
1171
1172 cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
1173
1174 return 0;
1175 }
1176
1177 void vp8_alloc_compressor_data(VP8_COMP *cpi)
1178 {
1179 VP8_COMMON *cm = & cpi->common;
1180
1181 int width = cm->Width;
1182 int height = cm->Height;
1183
1184 if (vp8_alloc_frame_buffers(cm, width, height))
1185 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1186 "Failed to allocate frame buffers");
1187
1188 if (vp8_alloc_partition_data(cpi))
1189 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1190 "Failed to allocate partition data");
1191
1192
1193 if ((width & 0xf) != 0)
1194 width += 16 - (width & 0xf);
1195
1196 if ((height & 0xf) != 0)
1197 height += 16 - (height & 0xf);
1198
1199
1200 if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame,
1201 width, height, VP8BORDERINPIXELS))
1202 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1203 "Failed to allocate last frame buffer");
1204
1205 if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source,
1206 width, height, VP8BORDERINPIXELS))
1207 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1208 "Failed to allocate scaled source buffer");
1209
1210 vpx_free(cpi->tok);
1211
1212 {
1213 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
1214 unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
1215 #else
1216 unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
1217 #endif
1218 CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
1219 }
1220
1221 /* Data used for real time vc mode to see if gf needs refreshing */
1222 cpi->zeromv_count = 0;
1223
1224
1225 /* Structures used to monitor GF usage */
1226 vpx_free(cpi->gf_active_flags);
1227 CHECK_MEM_ERROR(cpi->gf_active_flags,
1228 vpx_calloc(sizeof(*cpi->gf_active_flags),
1229 cm->mb_rows * cm->mb_cols));
1230 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
1231
1232 vpx_free(cpi->mb_activity_map);
1233 CHECK_MEM_ERROR(cpi->mb_activity_map,
1234 vpx_calloc(sizeof(*cpi->mb_activity_map),
1235 cm->mb_rows * cm->mb_cols));
1236
1237 /* allocate memory for storing last frame's MVs for MV prediction. */
1238 vpx_free(cpi->lfmv);
1239 CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2),
1240 sizeof(*cpi->lfmv)));
1241 vpx_free(cpi->lf_ref_frame_sign_bias);
1242 CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias,
1243 vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2),
1244 sizeof(*cpi->lf_ref_frame_sign_bias)));
1245 vpx_free(cpi->lf_ref_frame);
1246 CHECK_MEM_ERROR(cpi->lf_ref_frame,
1247 vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2),
1248 sizeof(*cpi->lf_ref_frame)));
1249
1250 /* Create the encoder segmentation map and set all entries to 0 */
1251 vpx_free(cpi->segmentation_map);
1252 CHECK_MEM_ERROR(cpi->segmentation_map,
1253 vpx_calloc(cm->mb_rows * cm->mb_cols,
1254 sizeof(*cpi->segmentation_map)));
1255 cpi->cyclic_refresh_mode_index = 0;
1256 vpx_free(cpi->active_map);
1257 CHECK_MEM_ERROR(cpi->active_map,
1258 vpx_calloc(cm->mb_rows * cm->mb_cols,
1259 sizeof(*cpi->active_map)));
1260 memset(cpi->active_map , 1, (cm->mb_rows * cm->mb_cols));
1261
1262 #if CONFIG_MULTITHREAD
1263 if (width < 640)
1264 cpi->mt_sync_range = 1;
1265 else if (width <= 1280)
1266 cpi->mt_sync_range = 4;
1267 else if (width <= 2560)
1268 cpi->mt_sync_range = 8;
1269 else
1270 cpi->mt_sync_range = 16;
1271
1272 if (cpi->oxcf.multi_threaded > 1)
1273 {
1274 vpx_free(cpi->mt_current_mb_col);
1275 CHECK_MEM_ERROR(cpi->mt_current_mb_col,
1276 vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));
1277 }
1278
1279 #endif
1280
1281 vpx_free(cpi->tplist);
1282 CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));
1283
1284 #if CONFIG_TEMPORAL_DENOISING
1285 if (cpi->oxcf.noise_sensitivity > 0) {
1286 vp8_denoiser_free(&cpi->denoiser);
1287 vp8_denoiser_allocate(&cpi->denoiser, width, height,
1288 cm->mb_rows, cm->mb_cols,
1289 cpi->oxcf.noise_sensitivity);
1290 }
1291 #endif
1292 }
1293
1294
1295 /* Quant MOD */
1296 static const int q_trans[] =
1297 {
1298 0, 1, 2, 3, 4, 5, 7, 8,
1299 9, 10, 12, 13, 15, 17, 18, 19,
1300 20, 21, 23, 24, 25, 26, 27, 28,
1301 29, 30, 31, 33, 35, 37, 39, 41,
1302 43, 45, 47, 49, 51, 53, 55, 57,
1303 59, 61, 64, 67, 70, 73, 76, 79,
1304 82, 85, 88, 91, 94, 97, 100, 103,
1305 106, 109, 112, 115, 118, 121, 124, 127,
1306 };
1307
1308 int vp8_reverse_trans(int x)
1309 {
1310 int i;
1311
1312 for (i = 0; i < 64; i++)
1313 if (q_trans[i] >= x)
1314 return i;
1315
1316 return 63;
1317 }
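/* q_trans[] maps the 0-63 quantizer range used in VP8_CONFIG onto the
 * encoder's internal 0-127 qindex scale; vp8_reverse_trans() inverts the
 * mapping by returning the smallest user-level quantizer whose qindex is at
 * least x.  Worked example: vp8_reverse_trans(64) == 42, since q_trans[42] == 64.
 */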
1318 void vp8_new_framerate(VP8_COMP *cpi, double framerate)
1319 {
1320 if(framerate < .1)
1321 framerate = 30;
1322
1323 cpi->framerate = framerate;
1324 cpi->output_framerate = framerate;
1325 cpi->per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth /
1326 cpi->output_framerate);
1327 cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
1328 cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
1329 cpi->oxcf.two_pass_vbrmin_section / 100);
1330
1331 /* Set Maximum gf/arf interval */
1332 cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
1333
1334 if(cpi->max_gf_interval < 12)
1335 cpi->max_gf_interval = 12;
1336
1337 /* Extended interval for genuinely static scenes */
1338 cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
1339
1340     /* Special conditions when the alt ref frame is enabled in lagged compress mode */
1341 if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames)
1342 {
1343 if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1)
1344 cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1345
1346 if (cpi->twopass.static_scene_max_gf_interval > cpi->oxcf.lag_in_frames - 1)
1347 cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1348 }
1349
1350 if ( cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval )
1351 cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
1352 }
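/* Example (hypothetical figures): at 30 fps with an 800000 bit/s target,
 * per_frame_bandwidth works out to about 26666 bits and max_gf_interval
 * starts at (30 / 2) + 2 = 17 frames, subject to the lag and static-scene
 * caps applied above.
 */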
1353
1354
1355 static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
1356 {
1357 VP8_COMMON *cm = &cpi->common;
1358
1359 cpi->oxcf = *oxcf;
1360
1361 cpi->auto_gold = 1;
1362 cpi->auto_adjust_gold_quantizer = 1;
1363
1364 cm->version = oxcf->Version;
1365 vp8_setup_version(cm);
1366
1367 /* Frame rate is not available on the first frame, as it's derived from
1368 * the observed timestamps. The actual value used here doesn't matter
1369 * too much, as it will adapt quickly.
1370 */
1371 if (oxcf->timebase.num > 0) {
1372 cpi->framerate = (double)(oxcf->timebase.den) /
1373 (double)(oxcf->timebase.num);
1374 } else {
1375 cpi->framerate = 30;
1376 }
1377
1378 /* If the reciprocal of the timebase seems like a reasonable framerate,
1379 * then use that as a guess, otherwise use 30.
1380 */
1381 if (cpi->framerate > 180)
1382 cpi->framerate = 30;
1383
1384 cpi->ref_framerate = cpi->framerate;
1385
1386 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
1387
1388 cm->refresh_golden_frame = 0;
1389 cm->refresh_last_frame = 1;
1390 cm->refresh_entropy_probs = 1;
1391
1392 /* change includes all joint functionality */
1393 vp8_change_config(cpi, oxcf);
1394
1395 /* Initialize active best and worst q and average q values. */
1396 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1397 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1398 cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
1399
1400 /* Initialise the starting buffer levels */
1401 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
1402 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
1403
1404 cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
1405 cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
1406 cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
1407 cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
1408
1409 cpi->total_actual_bits = 0;
1410 cpi->total_target_vs_actual = 0;
1411
1412     /* Temporal scalability */
1413 if (cpi->oxcf.number_of_layers > 1)
1414 {
1415 unsigned int i;
1416 double prev_layer_framerate=0;
1417
1418 for (i=0; i<cpi->oxcf.number_of_layers; i++)
1419 {
1420 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
1421 prev_layer_framerate = cpi->output_framerate /
1422 cpi->oxcf.rate_decimator[i];
1423 }
1424 }
1425
1426 #if VP8_TEMPORAL_ALT_REF
1427 {
1428 int i;
1429
1430 cpi->fixed_divide[0] = 0;
1431
1432 for (i = 1; i < 512; i++)
1433 cpi->fixed_divide[i] = 0x80000 / i;
1434 }
1435 #endif
1436 }
1437
1438 static void update_layer_contexts(VP8_COMP *cpi)
1439 {
1440 VP8_CONFIG *oxcf = &cpi->oxcf;
1441
1442 /* Update snapshots of the layer contexts to reflect new parameters */
1443 if (oxcf->number_of_layers > 1)
1444 {
1445 unsigned int i;
1446 double prev_layer_framerate=0;
1447
1448 assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
1449 for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i)
1450 {
1451 LAYER_CONTEXT *lc = &cpi->layer_context[i];
1452
1453 lc->framerate =
1454 cpi->ref_framerate / oxcf->rate_decimator[i];
1455 lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
1456
1457 lc->starting_buffer_level = rescale(
1458 (int)oxcf->starting_buffer_level_in_ms,
1459 lc->target_bandwidth, 1000);
1460
1461 if (oxcf->optimal_buffer_level == 0)
1462 lc->optimal_buffer_level = lc->target_bandwidth / 8;
1463 else
1464 lc->optimal_buffer_level = rescale(
1465 (int)oxcf->optimal_buffer_level_in_ms,
1466 lc->target_bandwidth, 1000);
1467
1468 if (oxcf->maximum_buffer_size == 0)
1469 lc->maximum_buffer_size = lc->target_bandwidth / 8;
1470 else
1471 lc->maximum_buffer_size = rescale(
1472 (int)oxcf->maximum_buffer_size_in_ms,
1473 lc->target_bandwidth, 1000);
1474
1475 /* Work out the average size of a frame within this layer */
1476 if (i > 0)
1477 lc->avg_frame_size_for_layer =
1478 (int)((oxcf->target_bitrate[i] -
1479 oxcf->target_bitrate[i-1]) * 1000 /
1480 (lc->framerate - prev_layer_framerate));
1481
1482 prev_layer_framerate = lc->framerate;
1483 }
1484 }
1485 }
1486
1487 void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
1488 {
1489 VP8_COMMON *cm = &cpi->common;
1490 int last_w, last_h, prev_number_of_layers;
1491
1492 if (!cpi)
1493 return;
1494
1495 if (!oxcf)
1496 return;
1497
1498 #if CONFIG_MULTITHREAD
1499     /* wait for the last picture's loopfilter thread to finish */
1500 if (cpi->b_lpf_running)
1501 {
1502 sem_wait(&cpi->h_event_end_lpf);
1503 cpi->b_lpf_running = 0;
1504 }
1505 #endif
1506
1507 if (cm->version != oxcf->Version)
1508 {
1509 cm->version = oxcf->Version;
1510 vp8_setup_version(cm);
1511 }
1512
1513 last_w = cpi->oxcf.Width;
1514 last_h = cpi->oxcf.Height;
1515 prev_number_of_layers = cpi->oxcf.number_of_layers;
1516
1517 cpi->oxcf = *oxcf;
1518
1519 switch (cpi->oxcf.Mode)
1520 {
1521
1522 case MODE_REALTIME:
1523 cpi->pass = 0;
1524 cpi->compressor_speed = 2;
1525
1526 if (cpi->oxcf.cpu_used < -16)
1527 {
1528 cpi->oxcf.cpu_used = -16;
1529 }
1530
1531 if (cpi->oxcf.cpu_used > 16)
1532 cpi->oxcf.cpu_used = 16;
1533
1534 break;
1535
1536 case MODE_GOODQUALITY:
1537 cpi->pass = 0;
1538 cpi->compressor_speed = 1;
1539
1540 if (cpi->oxcf.cpu_used < -5)
1541 {
1542 cpi->oxcf.cpu_used = -5;
1543 }
1544
1545 if (cpi->oxcf.cpu_used > 5)
1546 cpi->oxcf.cpu_used = 5;
1547
1548 break;
1549
1550 case MODE_BESTQUALITY:
1551 cpi->pass = 0;
1552 cpi->compressor_speed = 0;
1553 break;
1554
1555 case MODE_FIRSTPASS:
1556 cpi->pass = 1;
1557 cpi->compressor_speed = 1;
1558 break;
1559 case MODE_SECONDPASS:
1560 cpi->pass = 2;
1561 cpi->compressor_speed = 1;
1562
1563 if (cpi->oxcf.cpu_used < -5)
1564 {
1565 cpi->oxcf.cpu_used = -5;
1566 }
1567
1568 if (cpi->oxcf.cpu_used > 5)
1569 cpi->oxcf.cpu_used = 5;
1570
1571 break;
1572 case MODE_SECONDPASS_BEST:
1573 cpi->pass = 2;
1574 cpi->compressor_speed = 0;
1575 break;
1576 }
1577
1578 if (cpi->pass == 0)
1579 cpi->auto_worst_q = 1;
1580
1581 cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
1582 cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
1583 cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
1584
1585 if (oxcf->fixed_q >= 0)
1586 {
1587 if (oxcf->worst_allowed_q < 0)
1588 cpi->oxcf.fixed_q = q_trans[0];
1589 else
1590 cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];
1591
1592 if (oxcf->alt_q < 0)
1593 cpi->oxcf.alt_q = q_trans[0];
1594 else
1595 cpi->oxcf.alt_q = q_trans[oxcf->alt_q];
1596
1597 if (oxcf->key_q < 0)
1598 cpi->oxcf.key_q = q_trans[0];
1599 else
1600 cpi->oxcf.key_q = q_trans[oxcf->key_q];
1601
1602 if (oxcf->gold_q < 0)
1603 cpi->oxcf.gold_q = q_trans[0];
1604 else
1605 cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
1606
1607 }
1608
1609 cpi->baseline_gf_interval =
1610 cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
1611
1612 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
1613 cpi->oxcf.token_partitions = 3;
1614 #endif
1615
1616 if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3)
1617 cm->multi_token_partition =
1618 (TOKEN_PARTITION) cpi->oxcf.token_partitions;
1619
1620 setup_features(cpi);
1621
1622 {
1623 int i;
1624
1625 for (i = 0; i < MAX_MB_SEGMENTS; i++)
1626 cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
1627 }
1628
1629 /* At the moment the first order values may not be > MAXQ */
1630 if (cpi->oxcf.fixed_q > MAXQ)
1631 cpi->oxcf.fixed_q = MAXQ;
1632
1633 /* local file playback mode == really big buffer */
1634 if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK)
1635 {
1636 cpi->oxcf.starting_buffer_level = 60000;
1637 cpi->oxcf.optimal_buffer_level = 60000;
1638 cpi->oxcf.maximum_buffer_size = 240000;
1639 cpi->oxcf.starting_buffer_level_in_ms = 60000;
1640 cpi->oxcf.optimal_buffer_level_in_ms = 60000;
1641 cpi->oxcf.maximum_buffer_size_in_ms = 240000;
1642 }
1643
1644 /* Convert target bandwidth from Kbit/s to Bit/s */
1645 cpi->oxcf.target_bandwidth *= 1000;
1646
1647 cpi->oxcf.starting_buffer_level =
1648 rescale((int)cpi->oxcf.starting_buffer_level,
1649 cpi->oxcf.target_bandwidth, 1000);
1650
1651 /* Set or reset optimal and maximum buffer levels. */
1652 if (cpi->oxcf.optimal_buffer_level == 0)
1653 cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
1654 else
1655 cpi->oxcf.optimal_buffer_level =
1656 rescale((int)cpi->oxcf.optimal_buffer_level,
1657 cpi->oxcf.target_bandwidth, 1000);
1658
1659 if (cpi->oxcf.maximum_buffer_size == 0)
1660 cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
1661 else
1662 cpi->oxcf.maximum_buffer_size =
1663 rescale((int)cpi->oxcf.maximum_buffer_size,
1664 cpi->oxcf.target_bandwidth, 1000);
1665 // Under a configuration change, where maximum_buffer_size may change,
1666 // keep buffer level clipped to the maximum allowed buffer size.
1667 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
1668 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
1669 cpi->buffer_level = cpi->bits_off_target;
1670 }
1671
1672 /* Set up frame rate and related parameters rate control values. */
1673 vp8_new_framerate(cpi, cpi->framerate);
1674
1675 /* Set absolute upper and lower quality limits */
1676 cpi->worst_quality = cpi->oxcf.worst_allowed_q;
1677 cpi->best_quality = cpi->oxcf.best_allowed_q;
1678
1679 /* active values should only be modified if out of new range */
1680 if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q)
1681 {
1682 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1683 }
1684 /* less likely */
1685 else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q)
1686 {
1687 cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
1688 }
1689 if (cpi->active_best_quality < cpi->oxcf.best_allowed_q)
1690 {
1691 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1692 }
1693 /* less likely */
1694 else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q)
1695 {
1696 cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
1697 }
1698
1699 cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
1700
1701 cpi->cq_target_quality = cpi->oxcf.cq_level;
1702
1703 /* Only allow dropped frames in buffered mode */
1704 cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
1705
1706 cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
1707
1708 // Check if the number of temporal layers has changed, and if so reset the
1709 // pattern counter and set/initialize the temporal layer context for the
1710 // new layer configuration.
1711 if (cpi->oxcf.number_of_layers != prev_number_of_layers)
1712 {
1713 // If the number of temporal layers are changed we must start at the
1714 // base of the pattern cycle, so set the layer id to 0 and reset
1715 // the temporal pattern counter.
1716 if (cpi->temporal_layer_id > 0) {
1717 cpi->temporal_layer_id = 0;
1718 }
1719 cpi->temporal_pattern_counter = 0;
1720 reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
1721 }
1722
1723 if (!cpi->initial_width)
1724 {
1725 cpi->initial_width = cpi->oxcf.Width;
1726 cpi->initial_height = cpi->oxcf.Height;
1727 }
1728
1729 cm->Width = cpi->oxcf.Width;
1730 cm->Height = cpi->oxcf.Height;
1731 assert(cm->Width <= cpi->initial_width);
1732 assert(cm->Height <= cpi->initial_height);
1733
1734 /* TODO(jkoleszar): if an internal spatial resampling is active,
1735 * and we downsize the input image, maybe we should clear the
1736 * internal scale immediately rather than waiting for it to
1737 * correct.
1738 */
1739
1740 /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
1741 if (cpi->oxcf.Sharpness > 7)
1742 cpi->oxcf.Sharpness = 7;
1743
1744 cm->sharpness_level = cpi->oxcf.Sharpness;
1745
1746 if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL)
1747 {
1748 int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
1749 int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
1750
1751 Scale2Ratio(cm->horiz_scale, &hr, &hs);
1752 Scale2Ratio(cm->vert_scale, &vr, &vs);
1753
1754 /* always go to the next whole number */
1755 cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
1756 cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
1757 }
1758
1759 if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height)
1760 cpi->force_next_frame_intra = 1;
1761
1762 if (((cm->Width + 15) & 0xfffffff0) !=
1763 cm->yv12_fb[cm->lst_fb_idx].y_width ||
1764 ((cm->Height + 15) & 0xfffffff0) !=
1765 cm->yv12_fb[cm->lst_fb_idx].y_height ||
1766 cm->yv12_fb[cm->lst_fb_idx].y_width == 0)
1767 {
1768 dealloc_raw_frame_buffers(cpi);
1769 alloc_raw_frame_buffers(cpi);
1770 vp8_alloc_compressor_data(cpi);
1771 }
1772
1773 if (cpi->oxcf.fixed_q >= 0)
1774 {
1775 cpi->last_q[0] = cpi->oxcf.fixed_q;
1776 cpi->last_q[1] = cpi->oxcf.fixed_q;
1777 }
1778
1779 cpi->Speed = cpi->oxcf.cpu_used;
1780
1781  /* Force allow_lag to 0 if lag_in_frames is 0. */
1782 if (cpi->oxcf.lag_in_frames == 0)
1783 {
1784 cpi->oxcf.allow_lag = 0;
1785 }
1786 /* Limit on lag buffers as these are not currently dynamically allocated */
1787 else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS)
1788 cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
1789
1790 /* YX Temp */
1791 cpi->alt_ref_source = NULL;
1792 cpi->is_src_frame_alt_ref = 0;
1793
1794 #if CONFIG_TEMPORAL_DENOISING
1795 if (cpi->oxcf.noise_sensitivity)
1796 {
1797 if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc)
1798 {
1799 int width = (cpi->oxcf.Width + 15) & ~15;
1800 int height = (cpi->oxcf.Height + 15) & ~15;
1801 vp8_denoiser_allocate(&cpi->denoiser, width, height,
1802 cm->mb_rows, cm->mb_cols,
1803 cpi->oxcf.noise_sensitivity);
1804 }
1805 }
1806 #endif
1807
1808 #if 0
1809 /* Experimental RD Code */
1810 cpi->frame_distortion = 0;
1811 cpi->last_frame_distortion = 0;
1812 #endif
1813
1814 }
1815
1816 #ifndef M_LOG2_E
1817 #define M_LOG2_E 0.693147180559945309417
1818 #endif
1819 #define log2f(x) (log (x) / (float) M_LOG2_E)
1820
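/* Fill the motion-vector SAD cost tables: index 0 (zero motion) gets a flat
 * cost of 300, and larger magnitudes get an approximate cost (scaled by 256)
 * that grows logarithmically with the component magnitude.
 */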
1821 static void cal_mvsadcosts(int *mvsadcost[2])
1822 {
1823 int i = 1;
1824
1825 mvsadcost [0] [0] = 300;
1826 mvsadcost [1] [0] = 300;
1827
1828 do
1829 {
1830 double z = 256 * (2 * (log2f(8 * i) + .6));
1831 mvsadcost [0][i] = (int) z;
1832 mvsadcost [1][i] = (int) z;
1833 mvsadcost [0][-i] = (int) z;
1834 mvsadcost [1][-i] = (int) z;
1835 }
1836 while (++i <= mvfp_max);
1837 }
1838
1839 struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
1840 {
1841 int i;
1842
1843 VP8_COMP *cpi;
1844 VP8_COMMON *cm;
1845
1846 cpi = vpx_memalign(32, sizeof(VP8_COMP));
1847 /* Check that the CPI instance is valid */
1848 if (!cpi)
1849 return 0;
1850
1851 cm = &cpi->common;
1852
1853 memset(cpi, 0, sizeof(VP8_COMP));
1854
1855 if (setjmp(cm->error.jmp))
1856 {
1857 cpi->common.error.setjmp = 0;
1858 vp8_remove_compressor(&cpi);
1859 return 0;
1860 }
1861
1862 cpi->common.error.setjmp = 1;
1863
1864 CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site), (MAX_MVSEARCH_STEPS * 8) + 1));
1865
1866 vp8_create_common(&cpi->common);
1867
1868 init_config(cpi, oxcf);
1869
1870 memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob));
1871 cpi->common.current_video_frame = 0;
1872 cpi->temporal_pattern_counter = 0;
1873 cpi->temporal_layer_id = -1;
1874 cpi->kf_overspend_bits = 0;
1875 cpi->kf_bitrate_adjustment = 0;
1876 cpi->frames_till_gf_update_due = 0;
1877 cpi->gf_overspend_bits = 0;
1878 cpi->non_gf_bitrate_adjustment = 0;
1879 cpi->prob_last_coded = 128;
1880 cpi->prob_gf_coded = 128;
1881 cpi->prob_intra_coded = 63;
1882
1883 /* Prime the recent reference frame usage counters.
1884 * Hereafter they will be maintained as a sort of moving average
1885 */
1886 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
1887 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
1888 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
1889 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
1890
1891 /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
1892 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
1893
1894 cpi->twopass.gf_decay_rate = 0;
1895 cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
1896
1897 cpi->gold_is_last = 0 ;
1898 cpi->alt_is_last = 0 ;
1899 cpi->gold_is_alt = 0 ;
1900
1901 cpi->active_map_enabled = 0;
1902
1903 #if 0
1904 /* Experimental code for lagged and one pass */
1905 /* Initialise one_pass GF frames stats */
1906 /* Update stats used for GF selection */
1907 if (cpi->pass == 0)
1908 {
1909 cpi->one_pass_frame_index = 0;
1910
1911 for (i = 0; i < MAX_LAG_BUFFERS; i++)
1912 {
1913 cpi->one_pass_frame_stats[i].frames_so_far = 0;
1914 cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
1915 cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
1916 cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
1917 cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
1918 cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
1919 cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
1920 cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
1921 cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
1922 }
1923 }
1924 #endif
1925
1926 cpi->mse_source_denoised = 0;
1927
1928  /* Should we use the cyclic refresh method?
1929   * Currently this is tied to error resilient mode.
1930   */
1931 cpi->cyclic_refresh_mode_enabled = cpi->oxcf.error_resilient_mode;
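    /* Cap the number of macroblocks refreshed per frame: roughly 1/7 of the
     * frame by default, tightened to 1/20 for a single layer and 1/10 for
     * two temporal layers.
     */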
1932 cpi->cyclic_refresh_mode_max_mbs_perframe = (cpi->common.mb_rows * cpi->common.mb_cols) / 7;
1933 if (cpi->oxcf.number_of_layers == 1) {
1934 cpi->cyclic_refresh_mode_max_mbs_perframe =
1935 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
1936 } else if (cpi->oxcf.number_of_layers == 2) {
1937 cpi->cyclic_refresh_mode_max_mbs_perframe =
1938 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
1939 }
1940 cpi->cyclic_refresh_mode_index = 0;
1941 cpi->cyclic_refresh_q = 32;
1942
1943 if (cpi->cyclic_refresh_mode_enabled)
1944 {
1945 CHECK_MEM_ERROR(cpi->cyclic_refresh_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1946 }
1947 else
1948 cpi->cyclic_refresh_map = (signed char *) NULL;
1949
1950 CHECK_MEM_ERROR(cpi->consec_zero_last,
1951 vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
1952 CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias,
1953 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1954
1955 #ifdef VP8_ENTROPY_STATS
1956 init_context_counters();
1957 #endif
1958
1959      /* Initialize the feed-forward activity masking. */
1960 cpi->activity_avg = 90<<12;
1961
1962 /* Give a sensible default for the first frame. */
1963 cpi->frames_since_key = 8;
1964 cpi->key_frame_frequency = cpi->oxcf.key_freq;
1965 cpi->this_key_frame_forced = 0;
1966 cpi->next_key_frame_forced = 0;
1967
1968 cpi->source_alt_ref_pending = 0;
1969 cpi->source_alt_ref_active = 0;
1970 cpi->common.refresh_alt_ref_frame = 0;
1971
1972 cpi->force_maxqp = 0;
1973
1974 cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
1975 #if CONFIG_INTERNAL_STATS
1976 cpi->b_calculate_ssimg = 0;
1977
1978 cpi->count = 0;
1979 cpi->bytes = 0;
1980
1981 if (cpi->b_calculate_psnr)
1982 {
1983 cpi->total_sq_error = 0.0;
1984 cpi->total_sq_error2 = 0.0;
1985 cpi->total_y = 0.0;
1986 cpi->total_u = 0.0;
1987 cpi->total_v = 0.0;
1988 cpi->total = 0.0;
1989 cpi->totalp_y = 0.0;
1990 cpi->totalp_u = 0.0;
1991 cpi->totalp_v = 0.0;
1992 cpi->totalp = 0.0;
1993 cpi->tot_recode_hits = 0;
1994 cpi->summed_quality = 0;
1995 cpi->summed_weights = 0;
1996 }
1997
1998 if (cpi->b_calculate_ssimg)
1999 {
2000 cpi->total_ssimg_y = 0;
2001 cpi->total_ssimg_u = 0;
2002 cpi->total_ssimg_v = 0;
2003 cpi->total_ssimg_all = 0;
2004 }
2005
2006 #endif
2007
2008 cpi->first_time_stamp_ever = 0x7FFFFFFF;
2009
2010 cpi->frames_till_gf_update_due = 0;
2011 cpi->key_frame_count = 1;
2012
2013 cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
2014 cpi->ni_tot_qi = 0;
2015 cpi->ni_frames = 0;
2016 cpi->total_byte_count = 0;
2017
2018 cpi->drop_frame = 0;
2019
2020 cpi->rate_correction_factor = 1.0;
2021 cpi->key_frame_rate_correction_factor = 1.0;
2022 cpi->gf_rate_correction_factor = 1.0;
2023 cpi->twopass.est_max_qcorrection_factor = 1.0;
2024
2025 for (i = 0; i < KEY_FRAME_CONTEXT; i++)
2026 {
2027 cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
2028 }
2029
2030 #ifdef OUTPUT_YUV_SRC
2031 yuv_file = fopen("bd.yuv", "ab");
2032 #endif
2033 #ifdef OUTPUT_YUV_DENOISED
2034 yuv_denoised_file = fopen("denoised.yuv", "ab");
2035 #endif
2036
2037 #if 0
2038 framepsnr = fopen("framepsnr.stt", "a");
2039 kf_list = fopen("kf_list.stt", "w");
2040 #endif
2041
2042 cpi->output_pkt_list = oxcf->output_pkt_list;
2043
2044 #if !CONFIG_REALTIME_ONLY
2045
2046 if (cpi->pass == 1)
2047 {
2048 vp8_init_first_pass(cpi);
2049 }
2050 else if (cpi->pass == 2)
2051 {
2052 size_t packet_sz = sizeof(FIRSTPASS_STATS);
2053 int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
2054
2055 cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
2056 cpi->twopass.stats_in = cpi->twopass.stats_in_start;
2057 cpi->twopass.stats_in_end = (void*)((char *)cpi->twopass.stats_in
2058 + (packets - 1) * packet_sz);
2059 vp8_init_second_pass(cpi);
2060 }
2061
2062 #endif
2063
2064 if (cpi->compressor_speed == 2)
2065 {
2066 cpi->avg_encode_time = 0;
2067 cpi->avg_pick_mode_time = 0;
2068 }
2069
2070 vp8_set_speed_features(cpi);
2071
2072 /* Set starting values of RD threshold multipliers (128 = *1) */
2073 for (i = 0; i < MAX_MODES; i++)
2074 {
2075 cpi->mb.rd_thresh_mult[i] = 128;
2076 }
2077
2078 #ifdef VP8_ENTROPY_STATS
2079 init_mv_ref_counts();
2080 #endif
2081
2082 #if CONFIG_MULTITHREAD
2083 if(vp8cx_create_encoder_threads(cpi))
2084 {
2085 vp8_remove_compressor(&cpi);
2086 return 0;
2087 }
2088 #endif
2089
2090 cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
2091 cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
2092 cpi->fn_ptr[BLOCK_16X16].svf = vpx_sub_pixel_variance16x16;
2093 cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = vpx_variance_halfpixvar16x16_h;
2094 cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = vpx_variance_halfpixvar16x16_v;
2095 cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = vpx_variance_halfpixvar16x16_hv;
2096 cpi->fn_ptr[BLOCK_16X16].sdx3f = vpx_sad16x16x3;
2097 cpi->fn_ptr[BLOCK_16X16].sdx8f = vpx_sad16x16x8;
2098 cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;
2099
2100 cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
2101 cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
2102 cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8;
2103 cpi->fn_ptr[BLOCK_16X8].svf_halfpix_h = NULL;
2104 cpi->fn_ptr[BLOCK_16X8].svf_halfpix_v = NULL;
2105 cpi->fn_ptr[BLOCK_16X8].svf_halfpix_hv = NULL;
2106 cpi->fn_ptr[BLOCK_16X8].sdx3f = vpx_sad16x8x3;
2107 cpi->fn_ptr[BLOCK_16X8].sdx8f = vpx_sad16x8x8;
2108 cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;
2109
2110 cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
2111 cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
2112 cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16;
2113 cpi->fn_ptr[BLOCK_8X16].svf_halfpix_h = NULL;
2114 cpi->fn_ptr[BLOCK_8X16].svf_halfpix_v = NULL;
2115 cpi->fn_ptr[BLOCK_8X16].svf_halfpix_hv = NULL;
2116 cpi->fn_ptr[BLOCK_8X16].sdx3f = vpx_sad8x16x3;
2117 cpi->fn_ptr[BLOCK_8X16].sdx8f = vpx_sad8x16x8;
2118 cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;
2119
2120 cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
2121 cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
2122 cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8;
2123 cpi->fn_ptr[BLOCK_8X8].svf_halfpix_h = NULL;
2124 cpi->fn_ptr[BLOCK_8X8].svf_halfpix_v = NULL;
2125 cpi->fn_ptr[BLOCK_8X8].svf_halfpix_hv = NULL;
2126 cpi->fn_ptr[BLOCK_8X8].sdx3f = vpx_sad8x8x3;
2127 cpi->fn_ptr[BLOCK_8X8].sdx8f = vpx_sad8x8x8;
2128 cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;
2129
2130 cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
2131 cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
2132 cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4;
2133 cpi->fn_ptr[BLOCK_4X4].svf_halfpix_h = NULL;
2134 cpi->fn_ptr[BLOCK_4X4].svf_halfpix_v = NULL;
2135 cpi->fn_ptr[BLOCK_4X4].svf_halfpix_hv = NULL;
2136 cpi->fn_ptr[BLOCK_4X4].sdx3f = vpx_sad4x4x3;
2137 cpi->fn_ptr[BLOCK_4X4].sdx8f = vpx_sad4x4x8;
2138 cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;
2139
2140 #if ARCH_X86 || ARCH_X86_64
2141 cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
2142 cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
2143 cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
2144 cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
2145 cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
2146 #endif
2147
2148 cpi->full_search_sad = vp8_full_search_sad;
2149 cpi->diamond_search_sad = vp8_diamond_search_sad;
2150 cpi->refining_search_sad = vp8_refining_search_sad;
2151
2152 /* make sure frame 1 is okay */
2153 cpi->mb.error_bins[0] = cpi->common.MBs;
2154
2155 /* vp8cx_init_quantizer() is first called here. Add check in
2156 * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
2157 * called later when needed. This will avoid unnecessary calls of
2158 * vp8cx_init_quantizer() for every frame.
2159 */
2160 vp8cx_init_quantizer(cpi);
2161
2162 vp8_loop_filter_init(cm);
2163
2164 cpi->common.error.setjmp = 0;
2165
2166 #if CONFIG_MULTI_RES_ENCODING
2167
2168 /* Calculate # of MBs in a row in lower-resolution level image. */
2169 if (cpi->oxcf.mr_encoder_id > 0)
2170 vp8_cal_low_res_mb_cols(cpi);
2171
2172 #endif
2173
2174 /* setup RD costs to MACROBLOCK struct */
2175
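    /* The MV cost tables are indexed by signed vector components, so point
     * at the centre of each array to allow negative indices.
     */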
2176 cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max+1];
2177 cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max+1];
2178 cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max+1];
2179 cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max+1];
2180
2181 cal_mvsadcosts(cpi->mb.mvsadcost);
2182
2183 cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
2184 cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
2185 cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
2186 cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
2187 cpi->mb.token_costs = cpi->rd_costs.token_costs;
2188
2189 /* setup block ptrs & offsets */
2190 vp8_setup_block_ptrs(&cpi->mb);
2191 vp8_setup_block_dptrs(&cpi->mb.e_mbd);
2192
2193 return cpi;
2194 }
2195
2196
2197 void vp8_remove_compressor(VP8_COMP **ptr)
2198 {
2199 VP8_COMP *cpi = *ptr;
2200
2201 if (!cpi)
2202 return;
2203
2204 if (cpi && (cpi->common.current_video_frame > 0))
2205 {
2206 #if !CONFIG_REALTIME_ONLY
2207
2208 if (cpi->pass == 2)
2209 {
2210 vp8_end_second_pass(cpi);
2211 }
2212
2213 #endif
2214
2215 #ifdef VP8_ENTROPY_STATS
2216 print_context_counters();
2217 print_tree_update_probs();
2218 print_mode_context();
2219 #endif
2220
2221 #if CONFIG_INTERNAL_STATS
2222
2223 if (cpi->pass != 1)
2224 {
2225 FILE *f = fopen("opsnr.stt", "a");
2226 double time_encoded = (cpi->last_end_time_stamp_seen
2227 - cpi->first_time_stamp_ever) / 10000000.000;
2228 double total_encode_time = (cpi->time_receive_data +
2229 cpi->time_compress_data) / 1000.000;
2230 double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
2231
2232 if (cpi->b_calculate_psnr)
2233 {
2234 if (cpi->oxcf.number_of_layers > 1)
2235 {
2236 int i;
2237
2238 fprintf(f, "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2239 "GLPsnrP\tVPXSSIM\t\n");
2240 for (i=0; i<(int)cpi->oxcf.number_of_layers; i++)
2241 {
2242 double dr = (double)cpi->bytes_in_layer[i] *
2243 8.0 / 1000.0 / time_encoded;
2244 double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
2245 cpi->common.Width * cpi->common.Height;
2246 double total_psnr =
2247 vpx_sse_to_psnr(samples, 255.0,
2248 cpi->total_error2[i]);
2249 double total_psnr2 =
2250 vpx_sse_to_psnr(samples, 255.0,
2251 cpi->total_error2_p[i]);
2252 double total_ssim = 100 * pow(cpi->sum_ssim[i] /
2253 cpi->sum_weights[i], 8.0);
2254
2255 fprintf(f, "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2256 "%7.3f\t%7.3f\n",
2257 i, dr,
2258 cpi->sum_psnr[i] / cpi->frames_in_layer[i],
2259 total_psnr,
2260 cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
2261 total_psnr2, total_ssim);
2262 }
2263 }
2264 else
2265 {
2266 double samples = 3.0 / 2 * cpi->count *
2267 cpi->common.Width * cpi->common.Height;
2268 double total_psnr = vpx_sse_to_psnr(samples, 255.0,
2269 cpi->total_sq_error);
2270 double total_psnr2 = vpx_sse_to_psnr(samples, 255.0,
2271 cpi->total_sq_error2);
2272 double total_ssim = 100 * pow(cpi->summed_quality /
2273 cpi->summed_weights, 8.0);
2274
2275 fprintf(f, "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2276 "GLPsnrP\tVPXSSIM\t Time(us)\n");
2277 fprintf(f, "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2278 "%7.3f\t%8.0f\n",
2279 dr, cpi->total / cpi->count, total_psnr,
2280 cpi->totalp / cpi->count, total_psnr2,
2281 total_ssim, total_encode_time);
2282 }
2283 }
2284
2285 if (cpi->b_calculate_ssimg)
2286 {
2287 if (cpi->oxcf.number_of_layers > 1)
2288 {
2289 int i;
2290
2291 fprintf(f, "Layer\tBitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t"
2292 "Time(us)\n");
2293 for (i=0; i<(int)cpi->oxcf.number_of_layers; i++)
2294 {
2295 double dr = (double)cpi->bytes_in_layer[i] *
2296 8.0 / 1000.0 / time_encoded;
2297 fprintf(f, "%5d\t%7.3f\t%6.4f\t"
2298 "%6.4f\t%6.4f\t%6.4f\t%8.0f\n",
2299 i, dr,
2300 cpi->total_ssimg_y_in_layer[i] /
2301 cpi->frames_in_layer[i],
2302 cpi->total_ssimg_u_in_layer[i] /
2303 cpi->frames_in_layer[i],
2304 cpi->total_ssimg_v_in_layer[i] /
2305 cpi->frames_in_layer[i],
2306 cpi->total_ssimg_all_in_layer[i] /
2307 cpi->frames_in_layer[i],
2308 total_encode_time);
2309 }
2310 }
2311 else
2312 {
2313 fprintf(f, "BitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t"
2314 "Time(us)\n");
2315 fprintf(f, "%7.3f\t%6.4f\t%6.4f\t%6.4f\t%6.4f\t%8.0f\n", dr,
2316 cpi->total_ssimg_y / cpi->count,
2317 cpi->total_ssimg_u / cpi->count,
2318 cpi->total_ssimg_v / cpi->count,
2319 cpi->total_ssimg_all / cpi->count, total_encode_time);
2320 }
2321 }
2322
2323 fclose(f);
2324 #if 0
2325 f = fopen("qskip.stt", "a");
2326 fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
2327 fclose(f);
2328 #endif
2329
2330 }
2331
2332 #endif
2333
2334
2335 #ifdef SPEEDSTATS
2336
2337 if (cpi->compressor_speed == 2)
2338 {
2339 int i;
2340 FILE *f = fopen("cxspeed.stt", "a");
2341 cnt_pm /= cpi->common.MBs;
2342
2343 for (i = 0; i < 16; i++)
2344 fprintf(f, "%5d", frames_at_speed[i]);
2345
2346 fprintf(f, "\n");
2347 fclose(f);
2348 }
2349
2350 #endif
2351
2352
2353 #ifdef MODE_STATS
2354 {
2355 extern int count_mb_seg[4];
2356 FILE *f = fopen("modes.stt", "a");
2357 double dr = (double)cpi->framerate * (double)bytes * (double)8 / (double)count / (double)1000 ;
2358 fprintf(f, "intra_mode in Intra Frames:\n");
2359 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1], y_modes[2], y_modes[3], y_modes[4]);
2360 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1], uv_modes[2], uv_modes[3]);
2361 fprintf(f, "B: ");
2362 {
2363 int i;
2364
2365 for (i = 0; i < 10; i++)
2366 fprintf(f, "%8d, ", b_modes[i]);
2367
2368 fprintf(f, "\n");
2369
2370 }
2371
2372 fprintf(f, "Modes in Inter Frames:\n");
2373 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
2374 inter_y_modes[0], inter_y_modes[1], inter_y_modes[2], inter_y_modes[3], inter_y_modes[4],
2375 inter_y_modes[5], inter_y_modes[6], inter_y_modes[7], inter_y_modes[8], inter_y_modes[9]);
2376 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0], inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
2377 fprintf(f, "B: ");
2378 {
2379 int i;
2380
2381 for (i = 0; i < 15; i++)
2382 fprintf(f, "%8d, ", inter_b_modes[i]);
2383
2384 fprintf(f, "\n");
2385
2386 }
2387 fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1], count_mb_seg[2], count_mb_seg[3]);
2388 fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4], inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4], inter_b_modes[NEW4X4]);
2389
2390
2391
2392 fclose(f);
2393 }
2394 #endif
2395
2396 #ifdef VP8_ENTROPY_STATS
2397 {
2398 int i, j, k;
2399 FILE *fmode = fopen("modecontext.c", "w");
2400
2401 fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
2402 fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
2403 fprintf(fmode, "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");
2404
2405 for (i = 0; i < 10; i++)
2406 {
2407
2408 fprintf(fmode, " { /* Above Mode : %d */\n", i);
2409
2410 for (j = 0; j < 10; j++)
2411 {
2412
2413 fprintf(fmode, " {");
2414
2415 for (k = 0; k < 10; k++)
2416 {
2417 if (!intra_mode_stats[i][j][k])
2418 fprintf(fmode, " %5d, ", 1);
2419 else
2420 fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
2421 }
2422
2423 fprintf(fmode, "}, /* left_mode %d */\n", j);
2424
2425 }
2426
2427 fprintf(fmode, " },\n");
2428
2429 }
2430
2431 fprintf(fmode, "};\n");
2432 fclose(fmode);
2433 }
2434 #endif
2435
2436
2437 #if defined(SECTIONBITS_OUTPUT)
2438
2439 if (0)
2440 {
2441 int i;
2442 FILE *f = fopen("tokenbits.stt", "a");
2443
2444 for (i = 0; i < 28; i++)
2445 fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
2446
2447 fprintf(f, "\n");
2448 fclose(f);
2449 }
2450
2451 #endif
2452
2453 #if 0
2454 {
2455 printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
2456         printf("\n_frames receive_data encode_mb_row compress_frame Total\n");
2457 printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
2458 }
2459 #endif
2460
2461 }
2462
2463 #if CONFIG_MULTITHREAD
2464 vp8cx_remove_encoder_threads(cpi);
2465 #endif
2466
2467 #if CONFIG_TEMPORAL_DENOISING
2468 vp8_denoiser_free(&cpi->denoiser);
2469 #endif
2470 dealloc_compressor_data(cpi);
2471 vpx_free(cpi->mb.ss);
2472 vpx_free(cpi->tok);
2473 vpx_free(cpi->cyclic_refresh_map);
2474 vpx_free(cpi->consec_zero_last);
2475 vpx_free(cpi->consec_zero_last_mvbias);
2476
2477 vp8_remove_common(&cpi->common);
2478 vpx_free(cpi);
2479 *ptr = 0;
2480
2481 #ifdef OUTPUT_YUV_SRC
2482 fclose(yuv_file);
2483 #endif
2484 #ifdef OUTPUT_YUV_DENOISED
2485 fclose(yuv_denoised_file);
2486 #endif
2487
2488 #if 0
2489
2490 if (keyfile)
2491 fclose(keyfile);
2492
2493 if (framepsnr)
2494 fclose(framepsnr);
2495
2496 if (kf_list)
2497 fclose(kf_list);
2498
2499 #endif
2500
2501 }
2502
2503
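/* Sum of squared differences between two planes: full 16x16 blocks are
 * measured with vpx_mse16x16, and any right/bottom remainder is handled with
 * a scalar loop.
 */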
2504 static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
2505 unsigned char *recon, int recon_stride,
2506 unsigned int cols, unsigned int rows)
2507 {
2508 unsigned int row, col;
2509 uint64_t total_sse = 0;
2510 int diff;
2511
2512 for (row = 0; row + 16 <= rows; row += 16)
2513 {
2514 for (col = 0; col + 16 <= cols; col += 16)
2515 {
2516 unsigned int sse;
2517
2518 vpx_mse16x16(orig + col, orig_stride,
2519 recon + col, recon_stride,
2520 &sse);
2521 total_sse += sse;
2522 }
2523
2524 /* Handle odd-sized width */
2525 if (col < cols)
2526 {
2527 unsigned int border_row, border_col;
2528 unsigned char *border_orig = orig;
2529 unsigned char *border_recon = recon;
2530
2531 for (border_row = 0; border_row < 16; border_row++)
2532 {
2533 for (border_col = col; border_col < cols; border_col++)
2534 {
2535 diff = border_orig[border_col] - border_recon[border_col];
2536 total_sse += diff * diff;
2537 }
2538
2539 border_orig += orig_stride;
2540 border_recon += recon_stride;
2541 }
2542 }
2543
2544 orig += orig_stride * 16;
2545 recon += recon_stride * 16;
2546 }
2547
2548 /* Handle odd-sized height */
2549 for (; row < rows; row++)
2550 {
2551 for (col = 0; col < cols; col++)
2552 {
2553 diff = orig[col] - recon[col];
2554 total_sse += diff * diff;
2555 }
2556
2557 orig += orig_stride;
2558 recon += recon_stride;
2559 }
2560
2561 vp8_clear_system_state();
2562 return total_sse;
2563 }
2564
2565
2566 static void generate_psnr_packet(VP8_COMP *cpi)
2567 {
2568 YV12_BUFFER_CONFIG *orig = cpi->Source;
2569 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
2570 struct vpx_codec_cx_pkt pkt;
2571 uint64_t sse;
2572 int i;
2573 unsigned int width = cpi->common.Width;
2574 unsigned int height = cpi->common.Height;
2575
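    /* sse[0]/samples[0] accumulate whole-frame totals; indices 1..3 hold the
     * Y, U and V plane contributions respectively.
     */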
2576 pkt.kind = VPX_CODEC_PSNR_PKT;
2577 sse = calc_plane_error(orig->y_buffer, orig->y_stride,
2578 recon->y_buffer, recon->y_stride,
2579 width, height);
2580 pkt.data.psnr.sse[0] = sse;
2581 pkt.data.psnr.sse[1] = sse;
2582 pkt.data.psnr.samples[0] = width * height;
2583 pkt.data.psnr.samples[1] = width * height;
2584
2585 width = (width + 1) / 2;
2586 height = (height + 1) / 2;
2587
2588 sse = calc_plane_error(orig->u_buffer, orig->uv_stride,
2589 recon->u_buffer, recon->uv_stride,
2590 width, height);
2591 pkt.data.psnr.sse[0] += sse;
2592 pkt.data.psnr.sse[2] = sse;
2593 pkt.data.psnr.samples[0] += width * height;
2594 pkt.data.psnr.samples[2] = width * height;
2595
2596 sse = calc_plane_error(orig->v_buffer, orig->uv_stride,
2597 recon->v_buffer, recon->uv_stride,
2598 width, height);
2599 pkt.data.psnr.sse[0] += sse;
2600 pkt.data.psnr.sse[3] = sse;
2601 pkt.data.psnr.samples[0] += width * height;
2602 pkt.data.psnr.samples[3] = width * height;
2603
2604 for (i = 0; i < 4; i++)
2605 pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
2606 (double)(pkt.data.psnr.sse[i]));
2607
2608 vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
2609 }
2610
2611
2612 int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags)
2613 {
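    /* ref_frame_flags is a bitmask of VP8_LAST_FRAME, VP8_GOLD_FRAME and
     * VP8_ALTR_FRAME, so any value above 7 is invalid.
     */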
2614 if (ref_frame_flags > 7)
2615 return -1 ;
2616
2617 cpi->ref_frame_flags = ref_frame_flags;
2618 return 0;
2619 }
2620 int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags)
2621 {
2622 if (ref_frame_flags > 7)
2623 return -1 ;
2624
2625 cpi->common.refresh_golden_frame = 0;
2626 cpi->common.refresh_alt_ref_frame = 0;
2627 cpi->common.refresh_last_frame = 0;
2628
2629 if (ref_frame_flags & VP8_LAST_FRAME)
2630 cpi->common.refresh_last_frame = 1;
2631
2632 if (ref_frame_flags & VP8_GOLD_FRAME)
2633 cpi->common.refresh_golden_frame = 1;
2634
2635 if (ref_frame_flags & VP8_ALTR_FRAME)
2636 cpi->common.refresh_alt_ref_frame = 1;
2637
2638 return 0;
2639 }
2640
2641 int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd)
2642 {
2643 VP8_COMMON *cm = &cpi->common;
2644 int ref_fb_idx;
2645
2646 if (ref_frame_flag == VP8_LAST_FRAME)
2647 ref_fb_idx = cm->lst_fb_idx;
2648 else if (ref_frame_flag == VP8_GOLD_FRAME)
2649 ref_fb_idx = cm->gld_fb_idx;
2650 else if (ref_frame_flag == VP8_ALTR_FRAME)
2651 ref_fb_idx = cm->alt_fb_idx;
2652 else
2653 return -1;
2654
2655 vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
2656
2657 return 0;
2658 }
2659 int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd)
2660 {
2661 VP8_COMMON *cm = &cpi->common;
2662
2663 int ref_fb_idx;
2664
2665 if (ref_frame_flag == VP8_LAST_FRAME)
2666 ref_fb_idx = cm->lst_fb_idx;
2667 else if (ref_frame_flag == VP8_GOLD_FRAME)
2668 ref_fb_idx = cm->gld_fb_idx;
2669 else if (ref_frame_flag == VP8_ALTR_FRAME)
2670 ref_fb_idx = cm->alt_fb_idx;
2671 else
2672 return -1;
2673
2674 vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
2675
2676 return 0;
2677 }
2678 int vp8_update_entropy(VP8_COMP *cpi, int update)
2679 {
2680 VP8_COMMON *cm = &cpi->common;
2681 cm->refresh_entropy_probs = update;
2682
2683 return 0;
2684 }
2685
2686
2687 #if defined(OUTPUT_YUV_SRC) || defined(OUTPUT_YUV_DENOISED)
2688 void vp8_write_yuv_frame(FILE *yuv_file, YV12_BUFFER_CONFIG *s)
2689 {
2690 unsigned char *src = s->y_buffer;
2691 int h = s->y_height;
2692
2693 do
2694 {
2695 fwrite(src, s->y_width, 1, yuv_file);
2696 src += s->y_stride;
2697 }
2698 while (--h);
2699
2700 src = s->u_buffer;
2701 h = s->uv_height;
2702
2703 do
2704 {
2705 fwrite(src, s->uv_width, 1, yuv_file);
2706 src += s->uv_stride;
2707 }
2708 while (--h);
2709
2710 src = s->v_buffer;
2711 h = s->uv_height;
2712
2713 do
2714 {
2715 fwrite(src, s->uv_width, 1, yuv_file);
2716 src += s->uv_stride;
2717 }
2718 while (--h);
2719 }
2720 #endif
2721
2722 static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
2723 {
2724 VP8_COMMON *cm = &cpi->common;
2725
2726 /* are we resizing the image */
2727 if (cm->horiz_scale != 0 || cm->vert_scale != 0)
2728 {
2729 #if CONFIG_SPATIAL_RESAMPLING
2730 int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
2731 int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
2732 int tmp_height;
2733
2734 if (cm->vert_scale == 3)
2735 tmp_height = 9;
2736 else
2737 tmp_height = 11;
2738
2739 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2740 Scale2Ratio(cm->vert_scale, &vr, &vs);
2741
2742 vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
2743 tmp_height, hs, hr, vs, vr, 0);
2744
2745 vp8_yv12_extend_frame_borders(&cpi->scaled_source);
2746 cpi->Source = &cpi->scaled_source;
2747 #endif
2748 }
2749 else
2750 cpi->Source = sd;
2751 }
2752
2753
2754 static int resize_key_frame(VP8_COMP *cpi)
2755 {
2756 #if CONFIG_SPATIAL_RESAMPLING
2757 VP8_COMMON *cm = &cpi->common;
2758
2759      /* Do we need to apply resampling for one-pass CBR?
2760       * In one pass this is more limited than in two-pass CBR.
2761       * The test and any change are only made once per key frame sequence.
2762       */
2763 if (cpi->oxcf.allow_spatial_resampling && (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER))
2764 {
2765 int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
2766 int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
2767 int new_width, new_height;
2768
2769 /* If we are below the resample DOWN watermark then scale down a
2770 * notch.
2771 */
2772 if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark * cpi->oxcf.optimal_buffer_level / 100))
2773 {
2774 cm->horiz_scale = (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
2775 cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
2776 }
2777 /* Should we now start scaling back up */
2778 else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark * cpi->oxcf.optimal_buffer_level / 100))
2779 {
2780 cm->horiz_scale = (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
2781 cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
2782 }
2783
2784 /* Get the new height and width */
2785 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2786 Scale2Ratio(cm->vert_scale, &vr, &vs);
2787 new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
2788 new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
2789
2790 /* If the image size has changed we need to reallocate the buffers
2791 * and resample the source image
2792 */
2793 if ((cm->Width != new_width) || (cm->Height != new_height))
2794 {
2795 cm->Width = new_width;
2796 cm->Height = new_height;
2797 vp8_alloc_compressor_data(cpi);
2798 scale_and_extend_source(cpi->un_scaled_source, cpi);
2799 return 1;
2800 }
2801 }
2802
2803 #endif
2804 return 0;
2805 }
2806
2807
2808 static void update_alt_ref_frame_stats(VP8_COMP *cpi)
2809 {
2810 VP8_COMMON *cm = &cpi->common;
2811
2812 /* Select an interval before next GF or altref */
2813 if (!cpi->auto_gold)
2814 cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2815
2816 if ((cpi->pass != 2) && cpi->frames_till_gf_update_due)
2817 {
2818 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2819
2820          /* Set the bits per frame that we should try to recover in
2821           * subsequent inter frames to account for the extra GF spend...
2822           * note that this does not apply to GF updates that occur
2823           * coincident with a key frame, as the extra cost of key frames is
2824           * dealt with elsewhere.
2825           */
2826 cpi->gf_overspend_bits += cpi->projected_frame_size;
2827 cpi->non_gf_bitrate_adjustment = cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2828 }
2829
2830 /* Update data structure that monitors level of reference to last GF */
2831 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2832 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2833
2834      /* This frame refreshes the golden frame, so subsequent frames do not unless the user requests it */
2835 cpi->frames_since_golden = 0;
2836
2837 /* Clear the alternate reference update pending flag. */
2838 cpi->source_alt_ref_pending = 0;
2839
2840 /* Set the alternate reference frame active flag */
2841 cpi->source_alt_ref_active = 1;
2842
2843
2844 }
2845 static void update_golden_frame_stats(VP8_COMP *cpi)
2846 {
2847 VP8_COMMON *cm = &cpi->common;
2848
2849 /* Update the Golden frame usage counts. */
2850 if (cm->refresh_golden_frame)
2851 {
2852 /* Select an interval before next GF */
2853 if (!cpi->auto_gold)
2854 cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2855
2856 if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0))
2857 {
2858 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2859
2860              /* Set the bits per frame that we should try to recover in
2861               * subsequent inter frames to account for the extra GF spend...
2862               * note that this does not apply to GF updates that occur
2863               * coincident with a key frame, as the extra cost of key frames
2864               * is dealt with elsewhere.
2865               */
2866 if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active)
2867 {
2868                  /* Calculate the GF bits to be recovered:
2869                   * projected size minus the average per-frame bits
2870                   * available for inter frames across the clip as a whole.
2871                   */
2872 cpi->gf_overspend_bits += (cpi->projected_frame_size - cpi->inter_frame_target);
2873 }
2874
2875 cpi->non_gf_bitrate_adjustment = cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2876
2877 }
2878
2879 /* Update data structure that monitors level of reference to last GF */
2880 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2881 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2882
2883          /* This frame refreshes the golden frame, so subsequent frames do
2884           * not unless the user requests it.
2885           */
2886 cm->refresh_golden_frame = 0;
2887 cpi->frames_since_golden = 0;
2888
2889 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
2890 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
2891 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
2892 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
2893
2894 /* ******** Fixed Q test code only ************ */
2895 /* If we are going to use the ALT reference for the next group of
2896 * frames set a flag to say so.
2897 */
2898 if (cpi->oxcf.fixed_q >= 0 &&
2899 cpi->oxcf.play_alternate && !cpi->common.refresh_alt_ref_frame)
2900 {
2901 cpi->source_alt_ref_pending = 1;
2902 cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
2903 }
2904
2905 if (!cpi->source_alt_ref_pending)
2906 cpi->source_alt_ref_active = 0;
2907
2908 /* Decrement count down till next gf */
2909 if (cpi->frames_till_gf_update_due > 0)
2910 cpi->frames_till_gf_update_due--;
2911
2912 }
2913 else if (!cpi->common.refresh_alt_ref_frame)
2914 {
2915 /* Decrement count down till next gf */
2916 if (cpi->frames_till_gf_update_due > 0)
2917 cpi->frames_till_gf_update_due--;
2918
2919 if (cpi->frames_till_alt_ref_frame)
2920 cpi->frames_till_alt_ref_frame --;
2921
2922 cpi->frames_since_golden ++;
2923
2924 if (cpi->frames_since_golden > 1)
2925 {
2926 cpi->recent_ref_frame_usage[INTRA_FRAME] +=
2927 cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
2928 cpi->recent_ref_frame_usage[LAST_FRAME] +=
2929 cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
2930 cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
2931 cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
2932 cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
2933 cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
2934 }
2935 }
2936 }
2937
2938 /* This function updates the reference frame probability estimates that
2939 * will be used during mode selection
2940 */
2941 static void update_rd_ref_frame_probs(VP8_COMP *cpi)
2942 {
2943 VP8_COMMON *cm = &cpi->common;
2944
2945 const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
2946 const int rf_intra = rfct[INTRA_FRAME];
2947 const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
2948
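    /* These are 8-bit probability estimates (0..255) used when costing the
     * reference frame choice during mode selection: intra vs inter, last
     * frame vs golden/alt-ref, and golden vs alt-ref.
     */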
2949 if (cm->frame_type == KEY_FRAME)
2950 {
2951 cpi->prob_intra_coded = 255;
2952 cpi->prob_last_coded = 128;
2953 cpi->prob_gf_coded = 128;
2954 }
2955 else if (!(rf_intra + rf_inter))
2956 {
2957 cpi->prob_intra_coded = 63;
2958 cpi->prob_last_coded = 128;
2959 cpi->prob_gf_coded = 128;
2960 }
2961
2962 /* update reference frame costs since we can do better than what we got
2963 * last frame.
2964 */
2965 if (cpi->oxcf.number_of_layers == 1)
2966 {
2967 if (cpi->common.refresh_alt_ref_frame)
2968 {
2969 cpi->prob_intra_coded += 40;
2970 if (cpi->prob_intra_coded > 255)
2971 cpi->prob_intra_coded = 255;
2972 cpi->prob_last_coded = 200;
2973 cpi->prob_gf_coded = 1;
2974 }
2975 else if (cpi->frames_since_golden == 0)
2976 {
2977 cpi->prob_last_coded = 214;
2978 }
2979 else if (cpi->frames_since_golden == 1)
2980 {
2981 cpi->prob_last_coded = 192;
2982 cpi->prob_gf_coded = 220;
2983 }
2984 else if (cpi->source_alt_ref_active)
2985 {
2986 cpi->prob_gf_coded -= 20;
2987
2988 if (cpi->prob_gf_coded < 10)
2989 cpi->prob_gf_coded = 10;
2990 }
2991 if (!cpi->source_alt_ref_active)
2992 cpi->prob_gf_coded = 255;
2993 }
2994 }
2995
2996
2997 #if !CONFIG_REALTIME_ONLY
2998 /* 1 = key, 0 = inter */
2999 static int decide_key_frame(VP8_COMP *cpi)
3000 {
3001 VP8_COMMON *cm = &cpi->common;
3002
3003 int code_key_frame = 0;
3004
3005 cpi->kf_boost = 0;
3006
3007 if (cpi->Speed > 11)
3008 return 0;
3009
3010 /* Clear down mmx registers */
3011 vp8_clear_system_state();
3012
3013 if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0))
3014 {
3015 double change = 1.0 * abs((int)(cpi->mb.intra_error -
3016 cpi->last_intra_error)) / (1 + cpi->last_intra_error);
3017 double change2 = 1.0 * abs((int)(cpi->mb.prediction_error -
3018 cpi->last_prediction_error)) / (1 + cpi->last_prediction_error);
3019 double minerror = cm->MBs * 256;
3020
3021 cpi->last_intra_error = cpi->mb.intra_error;
3022 cpi->last_prediction_error = cpi->mb.prediction_error;
3023
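        /* Force a key frame when the intra error is no more than ~1.5x the
         * inter prediction error, the prediction error is substantial, and
         * either error has changed by more than 25% since the last frame.
         */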
3024 if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15
3025 && cpi->mb.prediction_error > minerror
3026 && (change > .25 || change2 > .25))
3027 {
3028 /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra > cpi->last_frame_percent_intra + 3*/
3029 return 1;
3030 }
3031
3032 return 0;
3033
3034 }
3035
3036 /* If the following are true we might as well code a key frame */
3037 if (((cpi->this_frame_percent_intra == 100) &&
3038 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
3039 ((cpi->this_frame_percent_intra > 95) &&
3040 (cpi->this_frame_percent_intra >= (cpi->last_frame_percent_intra + 5))))
3041 {
3042 code_key_frame = 1;
3043 }
3044      /* In addition, if the following are true and this is not a golden frame,
3045       * then code a key frame. Note that on golden frames there often seems
3046       * to be a pop in intra usage anyway, hence this restriction is
3047       * designed to prevent spurious key frames. The intra pop needs to be
3048       * investigated.
3049       */
3050 else if (((cpi->this_frame_percent_intra > 60) &&
3051 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra * 2))) ||
3052 ((cpi->this_frame_percent_intra > 75) &&
3053 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra * 3 / 2))) ||
3054 ((cpi->this_frame_percent_intra > 90) &&
3055 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 10))))
3056 {
3057 if (!cm->refresh_golden_frame)
3058 code_key_frame = 1;
3059 }
3060
3061 return code_key_frame;
3062
3063 }
3064
3065 static void Pass1Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned int *frame_flags)
3066 {
3067 (void) size;
3068 (void) dest;
3069 (void) frame_flags;
3070 vp8_set_quantizer(cpi, 26);
3071
3072 vp8_first_pass(cpi);
3073 }
3074 #endif
3075
3076 #if 0
3077 void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
3078 {
3079
3080 /* write the frame */
3081 FILE *yframe;
3082 int i;
3083 char filename[255];
3084
3085 sprintf(filename, "cx\\y%04d.raw", this_frame);
3086 yframe = fopen(filename, "wb");
3087
3088 for (i = 0; i < frame->y_height; i++)
3089 fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
3090
3091 fclose(yframe);
3092 sprintf(filename, "cx\\u%04d.raw", this_frame);
3093 yframe = fopen(filename, "wb");
3094
3095 for (i = 0; i < frame->uv_height; i++)
3096 fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
3097
3098 fclose(yframe);
3099 sprintf(filename, "cx\\v%04d.raw", this_frame);
3100 yframe = fopen(filename, "wb");
3101
3102 for (i = 0; i < frame->uv_height; i++)
3103 fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
3104
3105 fclose(yframe);
3106 }
3107 #endif
3108 /* return of 0 means drop frame */
3109
3110 #if !CONFIG_REALTIME_ONLY
3111 /* Function to test for conditions that indicate we should loop
3112  * back and recode a frame.
3113  */
3114 static int recode_loop_test( VP8_COMP *cpi,
3115 int high_limit, int low_limit,
3116 int q, int maxq, int minq )
3117 {
3118 int force_recode = 0;
3119 VP8_COMMON *cm = &cpi->common;
3120
3121      /* Is frame recoding allowed at all?
3122       * Yes if either recode mode 1 is selected, or mode 2 is selected
3123       * and the frame is a key frame, golden frame or alt_ref frame.
3124       */
3125 if ( (cpi->sf.recode_loop == 1) ||
3126 ( (cpi->sf.recode_loop == 2) &&
3127 ( (cm->frame_type == KEY_FRAME) ||
3128 cm->refresh_golden_frame ||
3129 cm->refresh_alt_ref_frame ) ) )
3130 {
3131 /* General over and under shoot tests */
3132 if ( ((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
3133 ((cpi->projected_frame_size < low_limit) && (q > minq)) )
3134 {
3135 force_recode = 1;
3136 }
3137 /* Special Constrained quality tests */
3138 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY)
3139 {
3140 /* Undershoot and below auto cq level */
3141 if ( (q > cpi->cq_target_quality) &&
3142 (cpi->projected_frame_size <
3143 ((cpi->this_frame_target * 7) >> 3)))
3144 {
3145 force_recode = 1;
3146 }
3147 /* Severe undershoot and between auto and user cq level */
3148 else if ( (q > cpi->oxcf.cq_level) &&
3149 (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
3150 (cpi->active_best_quality > cpi->oxcf.cq_level))
3151 {
3152 force_recode = 1;
3153 cpi->active_best_quality = cpi->oxcf.cq_level;
3154 }
3155 }
3156 }
3157
3158 return force_recode;
3159 }
3160 #endif // !CONFIG_REALTIME_ONLY
3161
3162 static void update_reference_frames(VP8_COMP *cpi)
3163 {
3164 VP8_COMMON *cm = &cpi->common;
3165 YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
3166
3167 /* At this point the new frame has been encoded.
3168 * If any buffer copy / swapping is signaled it should be done here.
3169 */
3170
3171 if (cm->frame_type == KEY_FRAME)
3172 {
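        /* On a key frame the newly coded buffer also becomes both the golden
         * and the alt-ref reference.
         */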
3173 yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME ;
3174
3175 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
3176 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
3177
3178 cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
3179
3180 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
3181 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
3182 }
3183 else /* For non key frames */
3184 {
3185 if (cm->refresh_alt_ref_frame)
3186 {
3187 assert(!cm->copy_buffer_to_arf);
3188
3189 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
3190 cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
3191 cm->alt_fb_idx = cm->new_fb_idx;
3192
3193 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
3194 }
3195 else if (cm->copy_buffer_to_arf)
3196 {
3197 assert(!(cm->copy_buffer_to_arf & ~0x3));
3198
3199 if (cm->copy_buffer_to_arf == 1)
3200 {
3201 if(cm->alt_fb_idx != cm->lst_fb_idx)
3202 {
3203 yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
3204 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
3205 cm->alt_fb_idx = cm->lst_fb_idx;
3206
3207 cpi->current_ref_frames[ALTREF_FRAME] =
3208 cpi->current_ref_frames[LAST_FRAME];
3209 }
3210 }
3211 else /* if (cm->copy_buffer_to_arf == 2) */
3212 {
3213 if(cm->alt_fb_idx != cm->gld_fb_idx)
3214 {
3215 yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
3216 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
3217 cm->alt_fb_idx = cm->gld_fb_idx;
3218
3219 cpi->current_ref_frames[ALTREF_FRAME] =
3220 cpi->current_ref_frames[GOLDEN_FRAME];
3221 }
3222 }
3223 }
3224
3225 if (cm->refresh_golden_frame)
3226 {
3227 assert(!cm->copy_buffer_to_gf);
3228
3229 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
3230 cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
3231 cm->gld_fb_idx = cm->new_fb_idx;
3232
3233 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
3234 }
3235 else if (cm->copy_buffer_to_gf)
3236 {
3237 assert(!(cm->copy_buffer_to_arf & ~0x3));
3238
3239 if (cm->copy_buffer_to_gf == 1)
3240 {
3241 if(cm->gld_fb_idx != cm->lst_fb_idx)
3242 {
3243 yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
3244 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
3245 cm->gld_fb_idx = cm->lst_fb_idx;
3246
3247 cpi->current_ref_frames[GOLDEN_FRAME] =
3248 cpi->current_ref_frames[LAST_FRAME];
3249 }
3250 }
3251 else /* if (cm->copy_buffer_to_gf == 2) */
3252 {
3253 if(cm->alt_fb_idx != cm->gld_fb_idx)
3254 {
3255 yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
3256 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
3257 cm->gld_fb_idx = cm->alt_fb_idx;
3258
3259 cpi->current_ref_frames[GOLDEN_FRAME] =
3260 cpi->current_ref_frames[ALTREF_FRAME];
3261 }
3262 }
3263 }
3264 }
3265
3266 if (cm->refresh_last_frame)
3267 {
3268 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
3269 cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
3270 cm->lst_fb_idx = cm->new_fb_idx;
3271
3272 cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
3273 }
3274
3275 #if CONFIG_TEMPORAL_DENOISING
3276 if (cpi->oxcf.noise_sensitivity)
3277 {
3278          /* We shouldn't have to keep multiple copies, as we know in advance
3279           * which buffer we should start from - for now, to get something up
3280           * and running, I've chosen to copy the buffers.
3281           */
3282 if (cm->frame_type == KEY_FRAME)
3283 {
3284 int i;
3285 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
3286 vp8_yv12_copy_frame(cpi->Source,
3287 &cpi->denoiser.yv12_running_avg[i]);
3288 }
3289 else /* For non key frames */
3290 {
3291 vp8_yv12_extend_frame_borders(
3292 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
3293
3294 if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf)
3295 {
3296 vp8_yv12_copy_frame(
3297 &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3298 &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
3299 }
3300 if (cm->refresh_golden_frame || cm->copy_buffer_to_gf)
3301 {
3302 vp8_yv12_copy_frame(
3303 &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3304 &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
3305 }
3306 if(cm->refresh_last_frame)
3307 {
3308 vp8_yv12_copy_frame(
3309 &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3310 &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
3311 }
3312 }
3313 if (cpi->oxcf.noise_sensitivity == 4)
3314 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source);
3315
3316 }
3317 #endif
3318
3319 }
3320
3321 static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source,
3322 YV12_BUFFER_CONFIG *dest,
3323 VP8_COMP *cpi)
3324 {
3325 int i, j;
3326 int Total = 0;
3327 int num_blocks = 0;
3328 int skip = 2;
3329 int min_consec_zero_last = 10;
3330 int tot_num_blocks = (source->y_height * source->y_width) >> 8;
3331 unsigned char *src = source->y_buffer;
3332 unsigned char *dst = dest->y_buffer;
3333
3334      /* Loop through the Y plane, every |skip| blocks along rows and columns,
3335       * summing the square differences, only for blocks that have been coded
3336       * in zero_last mode for at least |min_consec_zero_last| frames in a row.
3337       */
3338 for (i = 0; i < source->y_height; i += 16 * skip)
3339 {
3340 int block_index_row = (i >> 4) * cpi->common.mb_cols;
3341 for (j = 0; j < source->y_width; j += 16 * skip)
3342 {
3343 int index = block_index_row + (j >> 4);
3344 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3345 unsigned int sse;
3346 Total += vpx_mse16x16(src + j,
3347 source->y_stride,
3348 dst + j, dest->y_stride,
3349 &sse);
3350 num_blocks++;
3351 }
3352 }
3353 src += 16 * skip * source->y_stride;
3354 dst += 16 * skip * dest->y_stride;
3355 }
3356 // Only return non-zero if we have at least ~1/16 samples for estimate.
3357 if (num_blocks > (tot_num_blocks >> 4)) {
3358 return (Total / num_blocks);
3359 } else {
3360 return 0;
3361 }
3362 }
3363
3364 #if CONFIG_TEMPORAL_DENOISING
3365 static void process_denoiser_mode_change(VP8_COMP *cpi) {
3366 const VP8_COMMON *const cm = &cpi->common;
3367 int i, j;
3368 int total = 0;
3369 int num_blocks = 0;
3370 // Number of blocks skipped along row/column in computing the
3371 // nmse (normalized mean square error) of source.
3372 int skip = 2;
3373 // Only select blocks for computing nmse that have been encoded
3374 // as ZERO LAST min_consec_zero_last frames in a row.
3375 // Scale with number of temporal layers.
3376 int min_consec_zero_last = 12 / cpi->oxcf.number_of_layers;
3377 // Decision is tested for changing the denoising mode every
3378 // num_mode_change times this function is called. Note that this
3379  // num_mode_change times this function is called. Note that this
3380  // function is called every 8 frames, so (8 * num_mode_change) is the
3381  // number of frames over which a denoising mode change is considered.
3382 // Framerate factor, to compensate for larger mse at lower framerates.
3383 // Use ref_framerate, which is full source framerate for temporal layers.
3384 // TODO(marpan): Adjust this factor.
3385 int fac_framerate = cpi->ref_framerate < 25.0f ? 80 : 100;
3386 int tot_num_blocks = cm->mb_rows * cm->mb_cols;
3387 int ystride = cpi->Source->y_stride;
3388 unsigned char *src = cpi->Source->y_buffer;
3389 unsigned char *dst = cpi->denoiser.yv12_last_source.y_buffer;
3390 static const unsigned char const_source[16] = {
3391 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
3392 128, 128, 128};
3393 int bandwidth = (int)(cpi->target_bandwidth);
3394 // For temporal layers, use full bandwidth (top layer).
3395 if (cpi->oxcf.number_of_layers > 1) {
3396 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->oxcf.number_of_layers - 1];
3397 bandwidth = (int)(lc->target_bandwidth);
3398 }
3399 // Loop through the Y plane, every skip blocks along rows and columns,
3400 // summing the normalized mean square error, only for blocks that have
3401  // been encoded as ZEROMV LAST for at least min_consec_zero_last frames in
3402 // a row and have small sum difference between current and previous frame.
3403 // Normalization here is by the contrast of the current frame block.
3404 for (i = 0; i < cm->Height; i += 16 * skip) {
3405 int block_index_row = (i >> 4) * cm->mb_cols;
3406 for (j = 0; j < cm->Width; j += 16 * skip) {
3407 int index = block_index_row + (j >> 4);
3408 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3409 unsigned int sse;
3410 const unsigned int var = vpx_variance16x16(src + j,
3411 ystride,
3412 dst + j,
3413 ystride,
3414 &sse);
3415 // Only consider this block as valid for noise measurement
3416 // if the sum_diff average of the current and previous frame
3417 // is small (to avoid effects from lighting change).
3418 if ((sse - var) < 128) {
3419 unsigned int sse2;
3420 const unsigned int act = vpx_variance16x16(src + j,
3421 ystride,
3422 const_source,
3423 0,
3424 &sse2);
3425 if (act > 0)
3426 total += sse / act;
3427 num_blocks++;
3428 }
3429 }
3430 }
3431 src += 16 * skip * ystride;
3432 dst += 16 * skip * ystride;
3433 }
3434 total = total * fac_framerate / 100;
3435
3436 // Only consider this frame as valid sample if we have computed nmse over
3437 // at least ~1/16 blocks, and Total > 0 (Total == 0 can happen if the
3438 // application inputs duplicate frames, or contrast is all zero).
3439 if (total > 0 &&
3440 (num_blocks > (tot_num_blocks >> 4))) {
3441 // Update the recursive mean square source_diff.
3442 total = (total << 8) / num_blocks;
3443 if (cpi->denoiser.nmse_source_diff_count == 0) {
3444 // First sample in new interval.
3445 cpi->denoiser.nmse_source_diff = total;
3446 cpi->denoiser.qp_avg = cm->base_qindex;
3447 } else {
3448 // For subsequent samples, use average with weight ~1/4 for new sample.
3449 cpi->denoiser.nmse_source_diff = (int)((total +
3450 3 * cpi->denoiser.nmse_source_diff) >> 2);
3451 cpi->denoiser.qp_avg = (int)((cm->base_qindex +
3452 3 * cpi->denoiser.qp_avg) >> 2);
3453 }
3454 cpi->denoiser.nmse_source_diff_count++;
3455 }
3456 // Check for changing the denoiser mode, when we have obtained #samples =
3457 // num_mode_change. Condition the change also on the bitrate and QP.
3458 if (cpi->denoiser.nmse_source_diff_count == num_mode_change) {
3459 // Check for going up: from normal to aggressive mode.
3460 if ((cpi->denoiser.denoiser_mode == kDenoiserOnYUV) &&
3461 (cpi->denoiser.nmse_source_diff >
3462 cpi->denoiser.threshold_aggressive_mode) &&
3463 (cpi->denoiser.qp_avg < cpi->denoiser.qp_threshold_up &&
3464 bandwidth > cpi->denoiser.bitrate_threshold)) {
3465 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUVAggressive);
3466 } else {
3467 // Check for going down: from aggressive to normal mode.
3468 if (((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3469 (cpi->denoiser.nmse_source_diff <
3470 cpi->denoiser.threshold_aggressive_mode)) ||
3471 ((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3472 (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down ||
3473 bandwidth < cpi->denoiser.bitrate_threshold))) {
3474 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3475 }
3476 }
3477 // Reset metric and counter for next interval.
3478 cpi->denoiser.nmse_source_diff = 0;
3479 cpi->denoiser.qp_avg = 0;
3480 cpi->denoiser.nmse_source_diff_count = 0;
3481 }
3482 }
3483 #endif
3484
3485 void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm)
3486 {
3487 const FRAME_TYPE frame_type = cm->frame_type;
3488
3489 int update_any_ref_buffers = 1;
3490 if (cpi->common.refresh_last_frame == 0 &&
3491 cpi->common.refresh_golden_frame == 0 &&
3492 cpi->common.refresh_alt_ref_frame == 0) {
3493 update_any_ref_buffers = 0;
3494 }
3495
3496 if (cm->no_lpf)
3497 {
3498 cm->filter_level = 0;
3499 }
3500 else
3501 {
3502 struct vpx_usec_timer timer;
3503
3504 vp8_clear_system_state();
3505
3506 vpx_usec_timer_start(&timer);
3507 if (cpi->sf.auto_filter == 0) {
3508 #if CONFIG_TEMPORAL_DENOISING
3509 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3510 // Use the denoised buffer for selecting base loop filter level.
3511 // Denoised signal for current frame is stored in INTRA_FRAME.
3512 // No denoising on key frames.
3513 vp8cx_pick_filter_level_fast(
3514 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
3515 } else {
3516 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3517 }
3518 #else
3519 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3520 #endif
3521 } else {
3522 #if CONFIG_TEMPORAL_DENOISING
3523 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3524 // Use the denoised buffer for selecting base loop filter level.
3525 // Denoised signal for current frame is stored in INTRA_FRAME.
3526 // No denoising on key frames.
3527 vp8cx_pick_filter_level(
3528 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
3529 } else {
3530 vp8cx_pick_filter_level(cpi->Source, cpi);
3531 }
3532 #else
3533 vp8cx_pick_filter_level(cpi->Source, cpi);
3534 #endif
3535 }
3536
3537
3538 if (cm->filter_level > 0)
3539 {
3540 vp8cx_set_alt_lf_level(cpi, cm->filter_level);
3541 }
3542
3543 vpx_usec_timer_mark(&timer);
3544 cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
3545 }
3546
3547 #if CONFIG_MULTITHREAD
3548 if (cpi->b_multi_threaded)
3549 sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
3550 #endif
3551
3552 // No need to apply loop-filter if the encoded frame does not update
3553 // any reference buffers.
3554 if (cm->filter_level > 0 && update_any_ref_buffers)
3555 {
3556 vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
3557 }
3558
3559 vp8_yv12_extend_frame_borders(cm->frame_to_show);
3560
3561 }
3562
3563 static void encode_frame_to_data_rate
3564 (
3565 VP8_COMP *cpi,
3566 unsigned long *size,
3567 unsigned char *dest,
3568 unsigned char* dest_end,
3569 unsigned int *frame_flags
3570 )
3571 {
3572 int Q;
3573 int frame_over_shoot_limit;
3574 int frame_under_shoot_limit;
3575
3576 int Loop = 0;
3577 int loop_count;
3578
3579 VP8_COMMON *cm = &cpi->common;
3580 int active_worst_qchanged = 0;
3581
3582 #if !CONFIG_REALTIME_ONLY
3583 int q_low;
3584 int q_high;
3585 int zbin_oq_high;
3586 int zbin_oq_low = 0;
3587 int top_index;
3588 int bottom_index;
3589 int overshoot_seen = 0;
3590 int undershoot_seen = 0;
3591 #endif
3592
3593 int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
3594 cpi->oxcf.optimal_buffer_level / 100);
3595 int drop_mark75 = drop_mark * 2 / 3;
3596 int drop_mark50 = drop_mark / 4;
3597 int drop_mark25 = drop_mark / 8;
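     /* Illustrative arithmetic, assuming example values: with
      * drop_frames_water_mark = 70 and optimal_buffer_level = 100000 (same
      * units as buffer_level), drop_mark = 70000, drop_mark75 = 46666,
      * drop_mark50 = 17500 and drop_mark25 = 8750. The decimation logic
      * further below steps the frame-drop factor up or down as buffer_level
      * crosses these marks.
      */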
3598
3599
3600 /* Clear down mmx registers to allow floating point in what follows */
3601 vp8_clear_system_state();
3602
3603 #if CONFIG_MULTITHREAD
3604 /* wait for the previous frame's loopfilter thread to finish */
3605 if (cpi->b_lpf_running)
3606 {
3607 sem_wait(&cpi->h_event_end_lpf);
3608 cpi->b_lpf_running = 0;
3609 }
3610 #endif
3611
3612 if(cpi->force_next_frame_intra)
3613 {
3614 cm->frame_type = KEY_FRAME; /* delayed intra frame */
3615 cpi->force_next_frame_intra = 0;
3616 }
3617
3618 /* For an alt ref frame in 2 pass we skip the call to the second pass
3619 * function that sets the target bandwidth
3620 */
3621 #if !CONFIG_REALTIME_ONLY
3622
3623 if (cpi->pass == 2)
3624 {
3625 if (cpi->common.refresh_alt_ref_frame)
3626 {
3627 /* Per frame bit target for the alt ref frame */
3628 cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
3629 /* per second target bitrate */
3630 cpi->target_bandwidth = (int)(cpi->twopass.gf_bits *
3631 cpi->output_framerate);
3632 }
3633 }
3634 else
3635 #endif
3636 cpi->per_frame_bandwidth = (int)(cpi->target_bandwidth / cpi->output_framerate);
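     /* Illustrative arithmetic: assuming target_bandwidth is in bits per
      * second, a 500000 bps target at an output_framerate of 25 fps gives a
      * per_frame_bandwidth of 20000 bits.
      */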
3637
3638 /* Default turn off buffer to buffer copying */
3639 cm->copy_buffer_to_gf = 0;
3640 cm->copy_buffer_to_arf = 0;
3641
3642 /* Clear zbin over-quant value and mode boost values. */
3643 cpi->mb.zbin_over_quant = 0;
3644 cpi->mb.zbin_mode_boost = 0;
3645
3646 /* Enable or disable mode-based tweaking of the zbin.
3647 * For two pass this is only used where GF/ARF prediction quality
3648 * is above a threshold.
3649 */
3650 cpi->mb.zbin_mode_boost_enabled = 1;
3651 if (cpi->pass == 2)
3652 {
3653 if ( cpi->gfu_boost <= 400 )
3654 {
3655 cpi->mb.zbin_mode_boost_enabled = 0;
3656 }
3657 }
3658
3659 /* Current default encoder behaviour for the altref sign bias */
3660 if (cpi->source_alt_ref_active)
3661 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
3662 else
3663 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
3664
3665 /* Check to see if a key frame is signaled.
3666 * For two pass with auto key frame enabled, cm->frame_type may already
3667 * be set, but not for one pass.
3668 */
3669 if ((cm->current_video_frame == 0) ||
3670 (cm->frame_flags & FRAMEFLAGS_KEY) ||
3671 (cpi->oxcf.auto_key && (cpi->frames_since_key % cpi->key_frame_frequency == 0)))
3672 {
3673 /* Key frame from VFW/auto-keyframe/first frame */
3674 cm->frame_type = KEY_FRAME;
3675 #if CONFIG_TEMPORAL_DENOISING
3676 if (cpi->oxcf.noise_sensitivity == 4) {
3677 // For adaptive mode, reset denoiser to normal mode on key frame.
3678 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3679 }
3680 #endif
3681 }
3682
3683 #if CONFIG_MULTI_RES_ENCODING
3684 if (cpi->oxcf.mr_total_resolutions > 1) {
3685 LOWER_RES_FRAME_INFO* low_res_frame_info
3686 = (LOWER_RES_FRAME_INFO*)cpi->oxcf.mr_low_res_mode_info;
3687
3688 if (cpi->oxcf.mr_encoder_id) {
3689
3690 // TODO(marpan): This constraint shouldn't be needed, as we would like
3691 // to allow for key frame setting (forced or periodic) defined per
3692 // spatial layer. For now, keep this in.
3693 cm->frame_type = low_res_frame_info->frame_type;
3694
3695 // Check if lower resolution is available for motion vector reuse.
3696 if(cm->frame_type != KEY_FRAME)
3697 {
3698 cpi->mr_low_res_mv_avail = 1;
3699 cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
3700
3701 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
3702 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[LAST_FRAME]
3703 == low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
3704
3705 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
3706 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[GOLDEN_FRAME]
3707 == low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
3708
3709 // Don't use altref to determine whether low res is available.
3710 // TODO (marpan): Should we make this type of condition on a
3711 // per-reference frame basis?
3712 /*
3713 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
3714 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
3715 == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
3716 */
3717 }
3718 }
3719
3720 // On a key frame: For the lowest resolution, keep track of the key frame
3721 // counter value. For the higher resolutions, reset the current video
3722 // frame counter to that of the lowest resolution.
3723 // This is done to handle the case where we may stop/start encoding
3724 // higher layer(s). The restart-encoding of a higher layer is only signaled
3725 // by a key frame for now.
3726 // TODO (marpan): Add flag to indicate restart-encoding of higher layer.
3727 if (cm->frame_type == KEY_FRAME) {
3728 if (cpi->oxcf.mr_encoder_id) {
3729 // If the initial starting value of the buffer level is zero (this can
3730 // happen because we may have not started encoding this higher stream),
3731 // then reset it to non-zero value based on |starting_buffer_level|.
3732 if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) {
3733 unsigned int i;
3734 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
3735 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
3736 for (i = 0; i < cpi->oxcf.number_of_layers; i++) {
3737 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3738 lc->bits_off_target = lc->starting_buffer_level;
3739 lc->buffer_level = lc->starting_buffer_level;
3740 }
3741 }
3742 cpi->common.current_video_frame =
3743 low_res_frame_info->key_frame_counter_value;
3744 } else {
3745 low_res_frame_info->key_frame_counter_value =
3746 cpi->common.current_video_frame;
3747 }
3748 }
3749
3750 }
3751 #endif
3752
3753 // Find the reference frame closest to the current frame.
3754 cpi->closest_reference_frame = LAST_FRAME;
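     /* Note on the loop below: (i == 3) ? 4 : i appears to convert the
      * reference-frame index (LAST = 1, GOLDEN = 2, ALTREF = 3) into the
      * matching VP8_*_FRAME flag bit, on the assumption that the altref
      * flag is the bit value 4 rather than 3.
      */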
3755 if(cm->frame_type != KEY_FRAME) {
3756 int i;
3757 MV_REFERENCE_FRAME closest_ref = INTRA_FRAME;
3758 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
3759 closest_ref = LAST_FRAME;
3760 } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
3761 closest_ref = GOLDEN_FRAME;
3762 } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
3763 closest_ref = ALTREF_FRAME;
3764 }
3765 for(i = 1; i <= 3; i++) {
3766 vpx_ref_frame_type_t ref_frame_type = (vpx_ref_frame_type_t)
3767 ((i == 3) ? 4 : i);
3768 if (cpi->ref_frame_flags & ref_frame_type) {
3769 if ((cm->current_video_frame - cpi->current_ref_frames[i]) <
3770 (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
3771 closest_ref = i;
3772 }
3773 }
3774 }
3775 cpi->closest_reference_frame = closest_ref;
3776 }
3777
3778 /* Set various flags etc to special state if it is a key frame */
3779 if (cm->frame_type == KEY_FRAME)
3780 {
3781 int i;
3782
3783 // Set the loop filter deltas and segmentation map update
3784 setup_features(cpi);
3785
3786 /* The alternate reference frame cannot be active for a key frame */
3787 cpi->source_alt_ref_active = 0;
3788
3789 /* Reset the RD threshold multipliers to default of * 1 (128) */
3790 for (i = 0; i < MAX_MODES; i++)
3791 {
3792 cpi->mb.rd_thresh_mult[i] = 128;
3793 }
3794
3795 // Reset the zero_last counter to 0 on key frame.
3796 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3797 memset(cpi->consec_zero_last_mvbias, 0,
3798 (cpi->common.mb_rows * cpi->common.mb_cols));
3799 }
3800
3801 #if 0
3802 /* Experimental code for lagged compress and one pass
3803 * Initialise one_pass GF frames stats
3804 * Update stats used for GF selection
3805 */
3806 {
3807 cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
3808
3809 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
3810 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
3811 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
3812 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
3813 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
3814 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
3815 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
3816 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
3817 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
3818 }
3819 #endif
3820
3821 update_rd_ref_frame_probs(cpi);
3822
3823 if (cpi->drop_frames_allowed)
3824 {
3825 /* The reset to decimation 0 is only done here for one pass.
3826 * Once it is set two pass leaves decimation on till the next kf.
3827 */
3828 if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0))
3829 cpi->decimation_factor --;
3830
3831 if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0)
3832 cpi->decimation_factor = 1;
3833
3834 else if (cpi->buffer_level < drop_mark25 && (cpi->decimation_factor == 2 || cpi->decimation_factor == 3))
3835 {
3836 cpi->decimation_factor = 3;
3837 }
3838 else if (cpi->buffer_level < drop_mark50 && (cpi->decimation_factor == 1 || cpi->decimation_factor == 2))
3839 {
3840 cpi->decimation_factor = 2;
3841 }
3842 else if (cpi->buffer_level < drop_mark75 && (cpi->decimation_factor == 0 || cpi->decimation_factor == 1))
3843 {
3844 cpi->decimation_factor = 1;
3845 }
3846 }
3847
3848 /* The following decimates the frame rate according to a regular
3849 * pattern (i.e. to 1/2 or 2/3 frame rate). This can be used to help
3850 * prevent buffer under-run in CBR mode. Alternatively it might be
3851 * desirable in some situations to drop frame rate but throw more bits
3852 * at each frame.
3853 *
3854 * Note that dropping a key frame can be problematic if spatial
3855 * resampling is also active
3856 */
3857 if (cpi->decimation_factor > 0)
3858 {
3859 switch (cpi->decimation_factor)
3860 {
3861 case 1:
3862 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
3863 break;
3864 case 2:
3865 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3866 break;
3867 case 3:
3868 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3869 break;
3870 }
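     /* Note: the multipliers above (3/2 for factor 1, 5/4 for factors 2 and
      * 3) appear to be deliberately smaller than the inverse of the drop
      * ratio, so only part of the dropped frames' budget is handed to the
      * frames that are still encoded and the buffer can recover.
      */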
3871
3872 /* Note that we should not throw out a key frame (especially when
3873 * spatial resampling is enabled).
3874 */
3875 if (cm->frame_type == KEY_FRAME)
3876 {
3877 cpi->decimation_count = cpi->decimation_factor;
3878 }
3879 else if (cpi->decimation_count > 0)
3880 {
3881 cpi->decimation_count --;
3882
3883 cpi->bits_off_target += cpi->av_per_frame_bandwidth;
3884 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
3885 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
3886
3887 #if CONFIG_MULTI_RES_ENCODING
3888 vp8_store_drop_frame_info(cpi);
3889 #endif
3890
3891 cm->current_video_frame++;
3892 cpi->frames_since_key++;
3893 // We advance the temporal pattern for dropped frames.
3894 cpi->temporal_pattern_counter++;
3895
3896 #if CONFIG_INTERNAL_STATS
3897 cpi->count ++;
3898 #endif
3899
3900 cpi->buffer_level = cpi->bits_off_target;
3901
3902 if (cpi->oxcf.number_of_layers > 1)
3903 {
3904 unsigned int i;
3905
3906 /* Propagate bits saved by dropping the frame to higher
3907 * layers
3908 */
3909 for (i=cpi->current_layer+1; i<cpi->oxcf.number_of_layers; i++)
3910 {
3911 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3912 lc->bits_off_target += (int)(lc->target_bandwidth /
3913 lc->framerate);
3914 if (lc->bits_off_target > lc->maximum_buffer_size)
3915 lc->bits_off_target = lc->maximum_buffer_size;
3916 lc->buffer_level = lc->bits_off_target;
3917 }
3918 }
3919
3920 return;
3921 }
3922 else
3923 cpi->decimation_count = cpi->decimation_factor;
3924 }
3925 else
3926 cpi->decimation_count = 0;
3927
3928 /* Decide how big to make the frame */
3929 if (!vp8_pick_frame_size(cpi))
3930 {
3931 /* TODO: the two drop_frame-and-return code paths could be combined. */
3932 #if CONFIG_MULTI_RES_ENCODING
3933 vp8_store_drop_frame_info(cpi);
3934 #endif
3935 cm->current_video_frame++;
3936 cpi->frames_since_key++;
3937 // We advance the temporal pattern for dropped frames.
3938 cpi->temporal_pattern_counter++;
3939 return;
3940 }
3941
3942 /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
3943 * This has a knock on effect on active best quality as well.
3944 * For CBR if the buffer reaches its maximum level then we can no longer
3945 * save up bits for later frames so we might as well use them up
3946 * on the current frame.
3947 */
3948 if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
3949 (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) && cpi->buffered_mode)
3950 {
3951 /* Max adjustment is 1/4 */
3952 int Adjustment = cpi->active_worst_quality / 4;
3953
3954 if (Adjustment)
3955 {
3956 int buff_lvl_step;
3957
3958 if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size)
3959 {
3960 buff_lvl_step = (int)
3961 ((cpi->oxcf.maximum_buffer_size -
3962 cpi->oxcf.optimal_buffer_level) /
3963 Adjustment);
3964
3965 if (buff_lvl_step)
3966 Adjustment = (int)
3967 ((cpi->buffer_level -
3968 cpi->oxcf.optimal_buffer_level) /
3969 buff_lvl_step);
3970 else
3971 Adjustment = 0;
3972 }
3973
3974 cpi->active_worst_quality -= Adjustment;
3975
3976 if(cpi->active_worst_quality < cpi->active_best_quality)
3977 cpi->active_worst_quality = cpi->active_best_quality;
3978 }
3979 }
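     /* Illustrative arithmetic for the adjustment above: with
      * active_worst_quality = 60 the maximum Adjustment is 15; if
      * maximum_buffer_size - optimal_buffer_level = 300000 then
      * buff_lvl_step = 20000, and a buffer sitting 100000 above the optimal
      * level gives Adjustment = 5, lowering active_worst_quality to 55.
      */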
3980
3981 /* Set an active best quality and if necessary active worst quality
3982 * There is some odd behavior for one pass here that needs attention.
3983 */
3984 if ( (cpi->pass == 2) || (cpi->ni_frames > 150))
3985 {
3986 vp8_clear_system_state();
3987
3988 Q = cpi->active_worst_quality;
3989
3990 if ( cm->frame_type == KEY_FRAME )
3991 {
3992 if ( cpi->pass == 2 )
3993 {
3994 if (cpi->gfu_boost > 600)
3995 cpi->active_best_quality = kf_low_motion_minq[Q];
3996 else
3997 cpi->active_best_quality = kf_high_motion_minq[Q];
3998
3999 /* Special case for key frames forced because we have reached
4000 * the maximum key frame interval. Here force the Q to a range
4001 * based on the ambient Q to reduce the risk of popping
4002 */
4003 if ( cpi->this_key_frame_forced )
4004 {
4005 if ( cpi->active_best_quality > cpi->avg_frame_qindex * 7/8)
4006 cpi->active_best_quality = cpi->avg_frame_qindex * 7/8;
4007 else if ( cpi->active_best_quality < cpi->avg_frame_qindex >> 2 )
4008 cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
4009 }
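      /* Illustrative arithmetic: with avg_frame_qindex = 40 the clamp
       * above keeps active_best_quality within
       * [40 >> 2, 40 * 7 / 8] = [10, 35].
       */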
4010 }
4011 /* One pass more conservative */
4012 else
4013 cpi->active_best_quality = kf_high_motion_minq[Q];
4014 }
4015
4016 else if (cpi->oxcf.number_of_layers==1 &&
4017 (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame))
4018 {
4019 /* Use the lower of cpi->active_worst_quality and recent
4020 * average Q as basis for GF/ARF Q limit unless last frame was
4021 * a key frame.
4022 */
4023 if ( (cpi->frames_since_key > 1) &&
4024 (cpi->avg_frame_qindex < cpi->active_worst_quality) )
4025 {
4026 Q = cpi->avg_frame_qindex;
4027 }
4028
4029 /* For constrained quality don't allow Q less than the cq level */
4030 if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4031 (Q < cpi->cq_target_quality) )
4032 {
4033 Q = cpi->cq_target_quality;
4034 }
4035
4036 if ( cpi->pass == 2 )
4037 {
4038 if ( cpi->gfu_boost > 1000 )
4039 cpi->active_best_quality = gf_low_motion_minq[Q];
4040 else if ( cpi->gfu_boost < 400 )
4041 cpi->active_best_quality = gf_high_motion_minq[Q];
4042 else
4043 cpi->active_best_quality = gf_mid_motion_minq[Q];
4044
4045 /* Constrained quality use slightly lower active best. */
4046 if ( cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY )
4047 {
4048 cpi->active_best_quality =
4049 cpi->active_best_quality * 15/16;
4050 }
4051 }
4052 /* One pass more conservative */
4053 else
4054 cpi->active_best_quality = gf_high_motion_minq[Q];
4055 }
4056 else
4057 {
4058 cpi->active_best_quality = inter_minq[Q];
4059
4060 /* For the constant/constrained quality mode we don't want
4061 * q to fall below the cq level.
4062 */
4063 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4064 (cpi->active_best_quality < cpi->cq_target_quality) )
4065 {
4066 /* If we are strongly undershooting the target rate in the last
4067 * few frames then use the user-supplied cq value, not the auto
4068 * cq value.
4069 */
4070 if ( cpi->rolling_actual_bits < cpi->min_frame_bandwidth )
4071 cpi->active_best_quality = cpi->oxcf.cq_level;
4072 else
4073 cpi->active_best_quality = cpi->cq_target_quality;
4074 }
4075 }
4076
4077 /* If CBR and the buffer is full then it is reasonable to allow
4078 * higher quality on the frames to prevent bits just going to waste.
4079 */
4080 if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
4081 {
4082 /* Note that the use of >= here eliminates the risk of a divide
4083 * by 0 error in the else if clause.
4084 */
4085 if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size)
4086 cpi->active_best_quality = cpi->best_quality;
4087
4088 else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level)
4089 {
4090 int Fraction = (int)
4091 (((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128)
4092 / (cpi->oxcf.maximum_buffer_size -
4093 cpi->oxcf.optimal_buffer_level));
4094 int min_qadjustment = ((cpi->active_best_quality -
4095 cpi->best_quality) * Fraction) / 128;
4096
4097 cpi->active_best_quality -= min_qadjustment;
4098 }
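     /* Illustrative arithmetic for the branch above: with the buffer 150000
      * above the optimal level and a 300000 gap between the maximum and
      * optimal levels, Fraction = 150000 * 128 / 300000 = 64; if
      * active_best_quality exceeds best_quality by 20 then
      * min_qadjustment = 20 * 64 / 128 = 10, so active_best_quality is
      * lowered (improved) by 10.
      */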
4099 }
4100 }
4101 /* Make sure constrained quality mode limits are adhered to for the first
4102 * few frames of one pass encodes
4103 */
4104 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY)
4105 {
4106 if ( (cm->frame_type == KEY_FRAME) ||
4107 cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame )
4108 {
4109 cpi->active_best_quality = cpi->best_quality;
4110 }
4111 else if (cpi->active_best_quality < cpi->cq_target_quality)
4112 {
4113 cpi->active_best_quality = cpi->cq_target_quality;
4114 }
4115 }
4116
4117 /* Clip the active best and worst quality values to limits */
4118 if (cpi->active_worst_quality > cpi->worst_quality)
4119 cpi->active_worst_quality = cpi->worst_quality;
4120
4121 if (cpi->active_best_quality < cpi->best_quality)
4122 cpi->active_best_quality = cpi->best_quality;
4123
4124 if ( cpi->active_worst_quality < cpi->active_best_quality )
4125 cpi->active_worst_quality = cpi->active_best_quality;
4126
4127 /* Determine initial Q to try */
4128 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4129
4130 #if !CONFIG_REALTIME_ONLY
4131
4132 /* Set highest allowed value for Zbin over quant */
4133 if (cm->frame_type == KEY_FRAME)
4134 zbin_oq_high = 0;
4135 else if ((cpi->oxcf.number_of_layers == 1) && ((cm->refresh_alt_ref_frame ||
4136 (cm->refresh_golden_frame && !cpi->source_alt_ref_active))))
4137 {
4138 zbin_oq_high = 16;
4139 }
4140 else
4141 zbin_oq_high = ZBIN_OQ_MAX;
4142 #endif
4143
4144 /* Setup background Q adjustment for error resilient mode.
4145 * For multi-layer encodes only enable this for the base layer.
4146 */
4147 if (cpi->cyclic_refresh_mode_enabled)
4148 {
4149 // Special case for screen_content_mode with golden frame updates.
4150 int disable_cr_gf = (cpi->oxcf.screen_content_mode == 2 &&
4151 cm->refresh_golden_frame);
4152 if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf)
4153 cyclic_background_refresh(cpi, Q, 0);
4154 else
4155 disable_segmentation(cpi);
4156 }
4157
4158 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);
4159
4160 #if !CONFIG_REALTIME_ONLY
4161 /* Limit Q range for the adaptive loop. */
4162 bottom_index = cpi->active_best_quality;
4163 top_index = cpi->active_worst_quality;
4164 q_low = cpi->active_best_quality;
4165 q_high = cpi->active_worst_quality;
4166 #endif
4167
4168 vp8_save_coding_context(cpi);
4169
4170 loop_count = 0;
4171
4172 scale_and_extend_source(cpi->un_scaled_source, cpi);
4173
4174 #if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC
4175 // Option to apply spatial blur under the aggressive or adaptive
4176 // (temporal denoising) mode.
4177 if (cpi->oxcf.noise_sensitivity >= 3) {
4178 if (cpi->denoiser.denoise_pars.spatial_blur != 0) {
4179 vp8_de_noise(cm, cpi->Source, cpi->Source,
4180 cpi->denoiser.denoise_pars.spatial_blur, 1, 0, 0);
4181 }
4182 }
4183 #endif
4184
4185 #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)
4186
4187 if (cpi->oxcf.noise_sensitivity > 0)
4188 {
4189 unsigned char *src;
4190 int l = 0;
4191
4192 switch (cpi->oxcf.noise_sensitivity)
4193 {
4194 case 1:
4195 l = 20;
4196 break;
4197 case 2:
4198 l = 40;
4199 break;
4200 case 3:
4201 l = 60;
4202 break;
4203 case 4:
4204 l = 80;
4205 break;
4206 case 5:
4207 l = 100;
4208 break;
4209 case 6:
4210 l = 150;
4211 break;
4212 }
4213
4214
4215 if (cm->frame_type == KEY_FRAME)
4216 {
4217 vp8_de_noise(cm, cpi->Source, cpi->Source, l , 1, 0, 1);
4218 }
4219 else
4220 {
4221 vp8_de_noise(cm, cpi->Source, cpi->Source, l , 1, 0, 1);
4222
4223 src = cpi->Source->y_buffer;
4224
4225 if (cpi->Source->y_stride < 0)
4226 {
4227 src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
4228 }
4229 }
4230 }
4231
4232 #endif
4233
4234
4235 #ifdef OUTPUT_YUV_SRC
4236 vp8_write_yuv_frame(yuv_file, cpi->Source);
4237 #endif
4238
4239 do
4240 {
4241 vp8_clear_system_state();
4242
4243 vp8_set_quantizer(cpi, Q);
4244
4245 /* setup skip prob for costing in mode/mv decision */
4246 if (cpi->common.mb_no_coeff_skip)
4247 {
4248 cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
4249
4250 if (cm->frame_type != KEY_FRAME)
4251 {
4252 if (cpi->common.refresh_alt_ref_frame)
4253 {
4254 if (cpi->last_skip_false_probs[2] != 0)
4255 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
4256
4257 /*
4258 if(cpi->last_skip_false_probs[2]!=0 && abs(Q- cpi->last_skip_probs_q[2])<=16 )
4259 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
4260 else if (cpi->last_skip_false_probs[2]!=0)
4261 cpi->prob_skip_false = (cpi->last_skip_false_probs[2] + cpi->prob_skip_false ) / 2;
4262 */
4263 }
4264 else if (cpi->common.refresh_golden_frame)
4265 {
4266 if (cpi->last_skip_false_probs[1] != 0)
4267 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
4268
4269 /*
4270 if(cpi->last_skip_false_probs[1]!=0 && abs(Q- cpi->last_skip_probs_q[1])<=16 )
4271 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
4272 else if (cpi->last_skip_false_probs[1]!=0)
4273 cpi->prob_skip_false = (cpi->last_skip_false_probs[1] + cpi->prob_skip_false ) / 2;
4274 */
4275 }
4276 else
4277 {
4278 if (cpi->last_skip_false_probs[0] != 0)
4279 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
4280
4281 /*
4282 if(cpi->last_skip_false_probs[0]!=0 && abs(Q- cpi->last_skip_probs_q[0])<=16 )
4283 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
4284 else if(cpi->last_skip_false_probs[0]!=0)
4285 cpi->prob_skip_false = (cpi->last_skip_false_probs[0] + cpi->prob_skip_false ) / 2;
4286 */
4287 }
4288
4289 /* As this is for a cost estimate, make sure it does not
4290 * go to extremes either way.
4291 */
4292 if (cpi->prob_skip_false < 5)
4293 cpi->prob_skip_false = 5;
4294
4295 if (cpi->prob_skip_false > 250)
4296 cpi->prob_skip_false = 250;
4297
4298 if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref)
4299 cpi->prob_skip_false = 1;
4300 }
4301
4302 #if 0
4303
4304 if (cpi->pass != 1)
4305 {
4306 FILE *f = fopen("skip.stt", "a");
4307 fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
4308 fclose(f);
4309 }
4310
4311 #endif
4312
4313 }
4314
4315 if (cm->frame_type == KEY_FRAME)
4316 {
4317 if(resize_key_frame(cpi))
4318 {
4319 /* If the frame size has changed, need to reset Q, quantizer,
4320 * and background refresh.
4321 */
4322 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4323 if (cpi->cyclic_refresh_mode_enabled)
4324 {
4325 if (cpi->current_layer==0)
4326 cyclic_background_refresh(cpi, Q, 0);
4327 else
4328 disable_segmentation(cpi);
4329 }
4330 // Reset the zero_last counter to 0 on key frame.
4331 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
4332 memset(cpi->consec_zero_last_mvbias, 0,
4333 (cpi->common.mb_rows * cpi->common.mb_cols));
4334 vp8_set_quantizer(cpi, Q);
4335 }
4336
4337 vp8_setup_key_frame(cpi);
4338 }
4339
4340
4341
4342 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
4343 {
4344 if(cpi->oxcf.error_resilient_mode)
4345 cm->refresh_entropy_probs = 0;
4346
4347 if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS)
4348 {
4349 if (cm->frame_type == KEY_FRAME)
4350 cm->refresh_entropy_probs = 1;
4351 }
4352
4353 if (cm->refresh_entropy_probs == 0)
4354 {
4355 /* save a copy for later refresh */
4356 memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
4357 }
4358
4359 vp8_update_coef_context(cpi);
4360
4361 vp8_update_coef_probs(cpi);
4362
4363 /* transform / motion compensation build reconstruction frame
4364 * +pack coef partitions
4365 */
4366 vp8_encode_frame(cpi);
4367
4368 /* cpi->projected_frame_size is not needed for RT mode */
4369 }
4370 #else
4371 /* transform / motion compensation build reconstruction frame */
4372 vp8_encode_frame(cpi);
4373
4374 if (cpi->oxcf.screen_content_mode == 2) {
4375 if (vp8_drop_encodedframe_overshoot(cpi, Q))
4376 return;
4377 }
4378
4379 cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
4380 cpi->projected_frame_size = (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
4381 #endif
4382 vp8_clear_system_state();
4383
4384 /* Test to see if the stats generated for this frame indicate that
4385 * we should have coded a key frame (assuming that we didn't)!
4386 */
4387
4388 if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME
4389 && cpi->compressor_speed != 2)
4390 {
4391 #if !CONFIG_REALTIME_ONLY
4392 if (decide_key_frame(cpi))
4393 {
4394 /* Reset all our sizing numbers and recode */
4395 cm->frame_type = KEY_FRAME;
4396
4397 vp8_pick_frame_size(cpi);
4398
4399 /* Clear the Alt reference frame active flag when we have
4400 * a key frame
4401 */
4402 cpi->source_alt_ref_active = 0;
4403
4404 // Set the loop filter deltas and segmentation map update
4405 setup_features(cpi);
4406
4407 vp8_restore_coding_context(cpi);
4408
4409 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4410
4411 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);
4412
4413 /* Limit Q range for the adaptive loop. */
4414 bottom_index = cpi->active_best_quality;
4415 top_index = cpi->active_worst_quality;
4416 q_low = cpi->active_best_quality;
4417 q_high = cpi->active_worst_quality;
4418
4419 loop_count++;
4420 Loop = 1;
4421
4422 continue;
4423 }
4424 #endif
4425 }
4426
4427 vp8_clear_system_state();
4428
4429 if (frame_over_shoot_limit == 0)
4430 frame_over_shoot_limit = 1;
4431
4432 /* Are we overshooting and up against the limit of active max Q? */
4433 if (((cpi->pass != 2) || (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
4434 (Q == cpi->active_worst_quality) &&
4435 (cpi->active_worst_quality < cpi->worst_quality) &&
4436 (cpi->projected_frame_size > frame_over_shoot_limit))
4437 {
4438 int over_size_percent = ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) / frame_over_shoot_limit;
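     /* Illustrative arithmetic: a projected size of 60000 bits against an
      * overshoot limit of 40000 bits gives over_size_percent = 50.
      */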
4439
4440 /* If so, is there any scope for relaxing it? */
4441 while ((cpi->active_worst_quality < cpi->worst_quality) && (over_size_percent > 0))
4442 {
4443 cpi->active_worst_quality++;
4444 /* Assume 1 qstep = about 4% on frame size. */
4445 over_size_percent = (int)(over_size_percent * 0.96);
4446 }
4447 #if !CONFIG_REALTIME_ONLY
4448 top_index = cpi->active_worst_quality;
4449 #endif // !CONFIG_REALTIME_ONLY
4450 /* If we have updated the active max Q do not call
4451 * vp8_update_rate_correction_factors() this loop.
4452 */
4453 active_worst_qchanged = 1;
4454 }
4455 else
4456 active_worst_qchanged = 0;
4457
4458 #if !CONFIG_REALTIME_ONLY
4459 /* Special case handling for forced key frames */
4460 if ( (cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced )
4461 {
4462 int last_q = Q;
4463 int kf_err = vp8_calc_ss_err(cpi->Source,
4464 &cm->yv12_fb[cm->new_fb_idx]);
4465
4466 /* The key frame is not good enough */
4467 if ( kf_err > ((cpi->ambient_err * 7) >> 3) )
4468 {
4469 /* Lower q_high */
4470 q_high = (Q > q_low) ? (Q - 1) : q_low;
4471
4472 /* Adjust Q */
4473 Q = (q_high + q_low) >> 1;
4474 }
4475 /* The key frame is much better than the previous frame */
4476 else if ( kf_err < (cpi->ambient_err >> 1) )
4477 {
4478 /* Raise q_low */
4479 q_low = (Q < q_high) ? (Q + 1) : q_high;
4480
4481 /* Adjust Q */
4482 Q = (q_high + q_low + 1) >> 1;
4483 }
4484
4485 /* Clamp Q to upper and lower limits: */
4486 if (Q > q_high)
4487 Q = q_high;
4488 else if (Q < q_low)
4489 Q = q_low;
4490
4491 Loop = Q != last_q;
4492 }
4493
4494 /* Is the projected frame size out of range and are we allowed
4495 * to attempt to recode?
4496 */
4497 else if ( recode_loop_test( cpi,
4498 frame_over_shoot_limit, frame_under_shoot_limit,
4499 Q, top_index, bottom_index ) )
4500 {
4501 int last_q = Q;
4502 int Retries = 0;
4503
4504 /* Frame size out of permitted range. Update correction factor
4505 * & compute new Q to try...
4506 */
4507
4508 /* Frame is too large */
4509 if (cpi->projected_frame_size > cpi->this_frame_target)
4510 {
4511 /* Raise q_low to at least the current value */
4512 q_low = (Q < q_high) ? (Q + 1) : q_high;
4513
4514 /* If we are using over quant do the same for zbin_oq_low */
4515 if (cpi->mb.zbin_over_quant > 0)
4516 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) ?
4517 (cpi->mb.zbin_over_quant + 1) : zbin_oq_high;
4518
4519 if (undershoot_seen)
4520 {
4521 /* Update rate_correction_factor unless
4522 * cpi->active_worst_quality has changed.
4523 */
4524 if (!active_worst_qchanged)
4525 vp8_update_rate_correction_factors(cpi, 1);
4526
4527 Q = (q_high + q_low + 1) / 2;
4528
4529 /* Adjust cpi->zbin_over_quant (only allowed when Q
4530 * is max)
4531 */
4532 if (Q < MAXQ)
4533 cpi->mb.zbin_over_quant = 0;
4534 else
4535 {
4536 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) ?
4537 (cpi->mb.zbin_over_quant + 1) : zbin_oq_high;
4538 cpi->mb.zbin_over_quant =
4539 (zbin_oq_high + zbin_oq_low) / 2;
4540 }
4541 }
4542 else
4543 {
4544 /* Update rate_correction_factor unless
4545 * cpi->active_worst_quality has changed.
4546 */
4547 if (!active_worst_qchanged)
4548 vp8_update_rate_correction_factors(cpi, 0);
4549
4550 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4551
4552 while (((Q < q_low) ||
4553 (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
4554 (Retries < 10))
4555 {
4556 vp8_update_rate_correction_factors(cpi, 0);
4557 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4558 Retries ++;
4559 }
4560 }
4561
4562 overshoot_seen = 1;
4563 }
4564 /* Frame is too small */
4565 else
4566 {
4567 if (cpi->mb.zbin_over_quant == 0)
4568 /* Lower q_high if not using over quant */
4569 q_high = (Q > q_low) ? (Q - 1) : q_low;
4570 else
4571 /* else lower zbin_oq_high */
4572 zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low) ?
4573 (cpi->mb.zbin_over_quant - 1) : zbin_oq_low;
4574
4575 if (overshoot_seen)
4576 {
4577 /* Update rate_correction_factor unless
4578 * cpi->active_worst_quality has changed.
4579 */
4580 if (!active_worst_qchanged)
4581 vp8_update_rate_correction_factors(cpi, 1);
4582
4583 Q = (q_high + q_low) / 2;
4584
4585 /* Adjust cpi->zbin_over_quant (only allowed when Q
4586 * is max)
4587 */
4588 if (Q < MAXQ)
4589 cpi->mb.zbin_over_quant = 0;
4590 else
4591 cpi->mb.zbin_over_quant =
4592 (zbin_oq_high + zbin_oq_low) / 2;
4593 }
4594 else
4595 {
4596 /* Update rate_correction_factor unless
4597 * cpi->active_worst_quality has changed.
4598 */
4599 if (!active_worst_qchanged)
4600 vp8_update_rate_correction_factors(cpi, 0);
4601
4602 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4603
4604 /* Special case reset for qlow for constrained quality.
4605 * This should only trigger where there is very substantial
4606 * undershoot on a frame and the auto cq level is above
4607 * the user passed in value.
4608 */
4609 if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4610 (Q < q_low) )
4611 {
4612 q_low = Q;
4613 }
4614
4615 while (((Q > q_high) ||
4616 (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
4617 (Retries < 10))
4618 {
4619 vp8_update_rate_correction_factors(cpi, 0);
4620 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4621 Retries ++;
4622 }
4623 }
4624
4625 undershoot_seen = 1;
4626 }
4627
4628 /* Clamp Q to upper and lower limits: */
4629 if (Q > q_high)
4630 Q = q_high;
4631 else if (Q < q_low)
4632 Q = q_low;
4633
4634 /* Clamp cpi->zbin_over_quant */
4635 cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low) ?
4636 zbin_oq_low : (cpi->mb.zbin_over_quant > zbin_oq_high) ?
4637 zbin_oq_high : cpi->mb.zbin_over_quant;
4638
4639 Loop = Q != last_q;
4640 }
4641 else
4642 #endif
4643 Loop = 0;
4644
4645 if (cpi->is_src_frame_alt_ref)
4646 Loop = 0;
4647
4648 if (Loop == 1)
4649 {
4650 vp8_restore_coding_context(cpi);
4651 loop_count++;
4652 #if CONFIG_INTERNAL_STATS
4653 cpi->tot_recode_hits++;
4654 #endif
4655 }
4656 }
4657 while (Loop == 1);
4658
4659 #if 0
4660 /* Experimental code for lagged and one pass
4661 * Update stats used for one pass GF selection
4662 */
4663 {
4664 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
4665 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
4666 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
4667 }
4668 #endif
4669
4670 /* Special case code to reduce pulsing when key frames are forced at a
4671 * fixed interval. Note the reconstruction error if it is the frame before
4672 * the forced key frame.
4673 */
4674 if ( cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0) )
4675 {
4676 cpi->ambient_err = vp8_calc_ss_err(cpi->Source,
4677 &cm->yv12_fb[cm->new_fb_idx]);
4678 }
4679
4680 /* This frame's MVs are saved and will be used in next frame's MV predictor.
4681 * Last frame has one more line (added at the bottom) and one more column
4682 * (added at the right) than cm->mip. The edge elements are initialized to 0.
4683 */
4684 #if CONFIG_MULTI_RES_ENCODING
4685 if(!cpi->oxcf.mr_encoder_id && cm->show_frame)
4686 #else
4687 if(cm->show_frame) /* do not save for altref frame */
4688 #endif
4689 {
4690 int mb_row;
4691 int mb_col;
4692 /* Point to beginning of allocated MODE_INFO arrays. */
4693 MODE_INFO *tmp = cm->mip;
4694
4695 if(cm->frame_type != KEY_FRAME)
4696 {
4697 for (mb_row = 0; mb_row < cm->mb_rows+1; mb_row ++)
4698 {
4699 for (mb_col = 0; mb_col < cm->mb_cols+1; mb_col ++)
4700 {
4701 if(tmp->mbmi.ref_frame != INTRA_FRAME)
4702 cpi->lfmv[mb_col + mb_row*(cm->mode_info_stride+1)].as_int = tmp->mbmi.mv.as_int;
4703
4704 cpi->lf_ref_frame_sign_bias[mb_col + mb_row*(cm->mode_info_stride+1)] = cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
4705 cpi->lf_ref_frame[mb_col + mb_row*(cm->mode_info_stride+1)] = tmp->mbmi.ref_frame;
4706 tmp++;
4707 }
4708 }
4709 }
4710 }
4711
4712 /* Count last ref frame 0,0 usage on current encoded frame. */
4713 {
4714 int mb_row;
4715 int mb_col;
4716 /* Point to beginning of MODE_INFO arrays. */
4717 MODE_INFO *tmp = cm->mi;
4718
4719 cpi->zeromv_count = 0;
4720
4721 if(cm->frame_type != KEY_FRAME)
4722 {
4723 for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++)
4724 {
4725 for (mb_col = 0; mb_col < cm->mb_cols; mb_col ++)
4726 {
4727 if (tmp->mbmi.mode == ZEROMV &&
4728 tmp->mbmi.ref_frame == LAST_FRAME)
4729 cpi->zeromv_count++;
4730 tmp++;
4731 }
4732 tmp++;
4733 }
4734 }
4735 }
4736
4737 #if CONFIG_MULTI_RES_ENCODING
4738 vp8_cal_dissimilarity(cpi);
4739 #endif
4740
4741 /* Update the GF usage maps.
4742 * This is done after completing the compression of a frame when all
4743 * modes etc. are finalized but before loop filter
4744 */
4745 if (cpi->oxcf.number_of_layers == 1)
4746 vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
4747
4748 if (cm->frame_type == KEY_FRAME)
4749 cm->refresh_last_frame = 1;
4750
4751 #if 0
4752 {
4753 FILE *f = fopen("gfactive.stt", "a");
4754 fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
4755 fclose(f);
4756 }
4757 #endif
4758
4759 /* For inter frames the current default behavior is that when
4760 * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
4761 * This is purely an encoder decision at present.
4762 */
4763 if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame)
4764 cm->copy_buffer_to_arf = 2;
4765 else
4766 cm->copy_buffer_to_arf = 0;
4767
4768 cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
4769
4770 #if CONFIG_TEMPORAL_DENOISING
4771 // Get some measure of the amount of noise, by measuring the (partial) mse
4772 // between source and denoised buffer, for y channel. Partial refers to
4773 // computing the sse for a sub-sample of the frame (i.e., skip x blocks along row/column),
4774 // and only for blocks in that set that are consecutive ZEROMV_LAST mode.
4775 // Do this every ~8 frames, to further reduce complexity.
4776 // TODO(marpan): Keep this for now for the case cpi->oxcf.noise_sensitivity < 4,
4777 // should be removed in favor of the process_denoiser_mode_change() function below.
4778 if (cpi->oxcf.noise_sensitivity > 0 &&
4779 cpi->oxcf.noise_sensitivity < 4 &&
4780 !cpi->oxcf.screen_content_mode &&
4781 cpi->frames_since_key%8 == 0 &&
4782 cm->frame_type != KEY_FRAME) {
4783 cpi->mse_source_denoised = measure_square_diff_partial(
4784 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi);
4785 }
4786
4787 // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse
4788 // of source diff (between current and previous frame), and determine if we
4789 // should switch the denoiser mode. Sampling refers to computing the mse for
4790 // a sub-sample of the frame (i.e., skip x blocks along row/column), and
4791 // only for blocks in that set that have used ZEROMV LAST, along with some
4792 // constraint on the sum diff between blocks. This process is called every
4793 // ~8 frames, to further reduce complexity.
4794 if (cpi->oxcf.noise_sensitivity == 4 &&
4795 !cpi->oxcf.screen_content_mode &&
4796 cpi->frames_since_key % 8 == 0 &&
4797 cm->frame_type != KEY_FRAME) {
4798 process_denoiser_mode_change(cpi);
4799 }
4800 #endif
4801
4802 #if CONFIG_MULTITHREAD
4803 if (cpi->b_multi_threaded)
4804 {
4805 /* start loopfilter in separate thread */
4806 sem_post(&cpi->h_event_start_lpf);
4807 cpi->b_lpf_running = 1;
4808 }
4809 else
4810 #endif
4811 {
4812 vp8_loopfilter_frame(cpi, cm);
4813 }
4814
4815 update_reference_frames(cpi);
4816
4817 #ifdef OUTPUT_YUV_DENOISED
4818 vp8_write_yuv_frame(yuv_denoised_file,
4819 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
4820 #endif
4821
4822 #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
4823 if (cpi->oxcf.error_resilient_mode)
4824 {
4825 cm->refresh_entropy_probs = 0;
4826 }
4827 #endif
4828
4829 #if CONFIG_MULTITHREAD
4830 /* wait until filter_level is picked so that we can continue with stream packing */
4831 if (cpi->b_multi_threaded)
4832 sem_wait(&cpi->h_event_end_lpf);
4833 #endif
4834
4835 /* build the bitstream */
4836 vp8_pack_bitstream(cpi, dest, dest_end, size);
4837
4838 #if CONFIG_MULTITHREAD
4839 /* if PSNR packets are generated we have to wait for the lpf */
4840 if (cpi->b_lpf_running && cpi->b_calculate_psnr)
4841 {
4842 sem_wait(&cpi->h_event_end_lpf);
4843 cpi->b_lpf_running = 0;
4844 }
4845 #endif
4846
4847 /* Move storing frame_type out of the above loop since it is also
4848 * needed in motion search besides loopfilter */
4849 cm->last_frame_type = cm->frame_type;
4850
4851 /* Update rate control heuristics */
4852 cpi->total_byte_count += (*size);
4853 cpi->projected_frame_size = (*size) << 3;
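     /* Note: *size is the packed frame size in bytes, so the shift by 3
      * above expresses projected_frame_size in bits for the rate control
      * bookkeeping that follows.
      */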
4854
4855 if (cpi->oxcf.number_of_layers > 1)
4856 {
4857 unsigned int i;
4858 for (i=cpi->current_layer+1; i<cpi->oxcf.number_of_layers; i++)
4859 cpi->layer_context[i].total_byte_count += (*size);
4860 }
4861
4862 if (!active_worst_qchanged)
4863 vp8_update_rate_correction_factors(cpi, 2);
4864
4865 cpi->last_q[cm->frame_type] = cm->base_qindex;
4866
4867 if (cm->frame_type == KEY_FRAME)
4868 {
4869 vp8_adjust_key_frame_context(cpi);
4870 }
4871
4872 /* Keep a record of ambient average Q. */
4873 if (cm->frame_type != KEY_FRAME)
4874 cpi->avg_frame_qindex = (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
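     /* Note: the update above is a rounded exponential moving average,
      * roughly avg_frame_qindex = (3 * previous_average + base_qindex) / 4.
      */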
4875
4876 /* Keep a record from which we can calculate the average Q excluding
4877 * GF updates and key frames
4878 */
4879 if ((cm->frame_type != KEY_FRAME) && ((cpi->oxcf.number_of_layers > 1) ||
4880 (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame)))
4881 {
4882 cpi->ni_frames++;
4883
4884 /* Calculate the average Q for normal inter frames (not key or GFU
4885 * frames).
4886 */
4887 if ( cpi->pass == 2 )
4888 {
4889 cpi->ni_tot_qi += Q;
4890 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4891 }
4892 else
4893 {
4894 /* Damp value for first few frames */
4895 if (cpi->ni_frames > 150 )
4896 {
4897 cpi->ni_tot_qi += Q;
4898 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4899 }
4900 /* For one pass, early in the clip ... average the current frame Q
4901 * value with the worstq entered by the user as a dampening measure
4902 */
4903 else
4904 {
4905 cpi->ni_tot_qi += Q;
4906 cpi->ni_av_qi = ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
4907 }
4908
4909 /* If the average Q is higher than what was used in the last
4910 * frame (after going through the recode loop to keep the frame
4911 * size within range) then use the last frame value - 1. The -1
4912 * is designed to stop Q, and hence the data rate, from
4913 * progressively falling away during difficult sections, but at
4914 * the same time reduce the number of iterations around the
4915 * recode loop.
4916 */
4917 if (Q > cpi->ni_av_qi)
4918 cpi->ni_av_qi = Q - 1;
4919 }
4920 }
4921
4922 /* Update the buffer level variable. */
4923 /* Non-viewable frames are a special case and are treated as pure overhead. */
4924 if ( !cm->show_frame )
4925 cpi->bits_off_target -= cpi->projected_frame_size;
4926 else
4927 cpi->bits_off_target += cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
4928
4929 /* Clip the buffer level to the maximum specified buffer size */
4930 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
4931 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
4932
4933 // If the frame dropper is not enabled, don't let the buffer level go below
4934 // some threshold, given here by -|maximum_buffer_size|. For now we only do
4935 // this for screen content input.
4936 if (cpi->drop_frames_allowed == 0 && cpi->oxcf.screen_content_mode &&
4937 cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size)
4938 cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size;
4939
4940 /* Rolling monitors of whether we are over or underspending used to
4941 * help regulate min and Max Q in two pass.
4942 */
4943 cpi->rolling_target_bits = ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
4944 cpi->rolling_actual_bits = ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
4945 cpi->long_rolling_target_bits = ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
4946 cpi->long_rolling_actual_bits = ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) / 32;
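     /* Note: the first two monitors above are short-window averages (each
      * new frame weighted 1/4), while the long_rolling_* versions weight
      * each new frame 1/32; the +2 and +16 terms round the integer
      * division.
      */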
4947
4948 /* Actual bits spent */
4949 cpi->total_actual_bits += cpi->projected_frame_size;
4950
4951 /* Debug stats */
4952 cpi->total_target_vs_actual += (cpi->this_frame_target - cpi->projected_frame_size);
4953
4954 cpi->buffer_level = cpi->bits_off_target;
4955
4956 /* Propagate values to higher temporal layers */
4957 if (cpi->oxcf.number_of_layers > 1)
4958 {
4959 unsigned int i;
4960
4961 for (i=cpi->current_layer+1; i<cpi->oxcf.number_of_layers; i++)
4962 {
4963 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4964 int bits_off_for_this_layer =
4965 (int)(lc->target_bandwidth / lc->framerate -
4966 cpi->projected_frame_size);
4967
4968 lc->bits_off_target += bits_off_for_this_layer;
4969
4970 /* Clip buffer level to maximum buffer size for the layer */
4971 if (lc->bits_off_target > lc->maximum_buffer_size)
4972 lc->bits_off_target = lc->maximum_buffer_size;
4973
4974 lc->total_actual_bits += cpi->projected_frame_size;
4975 lc->total_target_vs_actual += bits_off_for_this_layer;
4976 lc->buffer_level = lc->bits_off_target;
4977 }
4978 }
4979
4980 /* Update bits left to the kf and gf groups to account for overshoot
4981 * or undershoot on these frames
4982 */
4983 if (cm->frame_type == KEY_FRAME)
4984 {
4985 cpi->twopass.kf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
4986
4987 if (cpi->twopass.kf_group_bits < 0)
4988 cpi->twopass.kf_group_bits = 0 ;
4989 }
4990 else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame)
4991 {
4992 cpi->twopass.gf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
4993
4994 if (cpi->twopass.gf_group_bits < 0)
4995 cpi->twopass.gf_group_bits = 0 ;
4996 }
4997
4998 if (cm->frame_type != KEY_FRAME)
4999 {
5000 if (cpi->common.refresh_alt_ref_frame)
5001 {
5002 cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
5003 cpi->last_skip_probs_q[2] = cm->base_qindex;
5004 }
5005 else if (cpi->common.refresh_golden_frame)
5006 {
5007 cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
5008 cpi->last_skip_probs_q[1] = cm->base_qindex;
5009 }
5010 else
5011 {
5012 cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
5013 cpi->last_skip_probs_q[0] = cm->base_qindex;
5014
5015 /* update the baseline */
5016 cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
5017
5018 }
5019 }
5020
5021 #if 0 && CONFIG_INTERNAL_STATS
5022 {
5023 FILE *f = fopen("tmp.stt", "a");
5024
5025 vp8_clear_system_state();
5026
5027 if (cpi->twopass.total_left_stats.coded_error != 0.0)
5028 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
5029 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
5030 "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
5031 cpi->common.current_video_frame, cpi->this_frame_target,
5032 cpi->projected_frame_size,
5033 (cpi->projected_frame_size - cpi->this_frame_target),
5034 cpi->total_target_vs_actual,
5035 cpi->buffer_level,
5036 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
5037 cpi->total_actual_bits, cm->base_qindex,
5038 cpi->active_best_quality, cpi->active_worst_quality,
5039 cpi->ni_av_qi, cpi->cq_target_quality,
5040 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
5041 cm->frame_type, cpi->gfu_boost,
5042 cpi->twopass.est_max_qcorrection_factor,
5043 cpi->twopass.bits_left,
5044 cpi->twopass.total_left_stats.coded_error,
5045 (double)cpi->twopass.bits_left /
5046 cpi->twopass.total_left_stats.coded_error,
5047 cpi->tot_recode_hits);
5048 else
5049 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
5050 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
5051 "%8.2lf %"PRId64" %10.3lf %8d\n",
5052 cpi->common.current_video_frame, cpi->this_frame_target,
5053 cpi->projected_frame_size,
5054 (cpi->projected_frame_size - cpi->this_frame_target),
5055 cpi->total_target_vs_actual,
5056 cpi->buffer_level,
5057 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
5058 cpi->total_actual_bits, cm->base_qindex,
5059 cpi->active_best_quality, cpi->active_worst_quality,
5060 cpi->ni_av_qi, cpi->cq_target_quality,
5061 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
5062 cm->frame_type, cpi->gfu_boost,
5063 cpi->twopass.est_max_qcorrection_factor,
5064 cpi->twopass.bits_left,
5065 cpi->twopass.total_left_stats.coded_error,
5066 cpi->tot_recode_hits);
5067
5068 fclose(f);
5069
5070 {
5071 FILE *fmodes = fopen("Modes.stt", "a");
5072
5073 fprintf(fmodes, "%6d:%1d:%1d:%1d ",
5074 cpi->common.current_video_frame,
5075 cm->frame_type, cm->refresh_golden_frame,
5076 cm->refresh_alt_ref_frame);
5077
5078 fprintf(fmodes, "\n");
5079
5080 fclose(fmodes);
5081 }
5082 }
5083
5084 #endif
5085
5086 if (cm->refresh_golden_frame == 1)
5087 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
5088 else
5089 cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_GOLDEN;
5090
5091 if (cm->refresh_alt_ref_frame == 1)
5092 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
5093 else
5094 cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_ALTREF;
5095
5096
5097 if (cm->refresh_last_frame & cm->refresh_golden_frame)
5098 /* both refreshed */
5099 cpi->gold_is_last = 1;
5100 else if (cm->refresh_last_frame ^ cm->refresh_golden_frame)
5101 /* 1 refreshed but not the other */
5102 cpi->gold_is_last = 0;
5103
5104 if (cm->refresh_last_frame & cm->refresh_alt_ref_frame)
5105 /* both refreshed */
5106 cpi->alt_is_last = 1;
5107 else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame)
5108 /* 1 refreshed but not the other */
5109 cpi->alt_is_last = 0;
5110
5111 if (cm->refresh_alt_ref_frame & cm->refresh_golden_frame)
5112 /* both refreshed */
5113 cpi->gold_is_alt = 1;
5114 else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame)
5115 /* 1 refreshed but not the other */
5116 cpi->gold_is_alt = 0;
5117
5118 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
5119
5120 if (cpi->gold_is_last)
5121 cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
5122
5123 if (cpi->alt_is_last)
5124 cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
5125
5126 if (cpi->gold_is_alt)
5127 cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
5128
5129
5130 if (!cpi->oxcf.error_resilient_mode)
5131 {
5132 if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame && (cm->frame_type != KEY_FRAME))
5133 /* Update the alternate reference frame stats as appropriate. */
5134 update_alt_ref_frame_stats(cpi);
5135 else
5136 /* Update the Golden frame stats as appropriate. */
5137 update_golden_frame_stats(cpi);
5138 }
5139
5140 if (cm->frame_type == KEY_FRAME)
5141 {
5142 /* Tell the caller that the frame was coded as a key frame */
5143 *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
5144
5145 /* As this frame is a key frame the next defaults to an inter frame. */
5146 cm->frame_type = INTER_FRAME;
5147
5148 cpi->last_frame_percent_intra = 100;
5149 }
5150 else
5151 {
5152 *frame_flags = cm->frame_flags&~FRAMEFLAGS_KEY;
5153
5154 cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
5155 }
5156
5157 /* Clear the one shot update flags for segmentation map and mode/ref
5158 * loop filter deltas.
5159 */
5160 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
5161 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
5162 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
5163
5164
5165 /* Don't increment frame counters if this was an altref buffer update,
5166 * not a real frame.
5167 */
5168 if (cm->show_frame)
5169 {
5170 cm->current_video_frame++;
5171 cpi->frames_since_key++;
5172 cpi->temporal_pattern_counter++;
5173 }
5174
5175 /* reset to normal state now that we are done. */
5176
5177
5178
5179 #if 0
5180 {
5181 char filename[512];
5182 FILE *recon_file;
5183 sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
5184 recon_file = fopen(filename, "wb");
5185 fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
5186 cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
5187 fclose(recon_file);
5188 }
5189 #endif
5190
5191 /* DEBUG */
5192 /* vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
5193
5194
5195 }
5196 #if !CONFIG_REALTIME_ONLY
5197 static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned char *dest_end, unsigned int *frame_flags)
5198 {
5199
5200 if (!cpi->common.refresh_alt_ref_frame)
5201 vp8_second_pass(cpi);
5202
5203 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
5204 cpi->twopass.bits_left -= 8 * *size;
5205
5206 if (!cpi->common.refresh_alt_ref_frame)
5207 {
5208 double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
5209 *cpi->oxcf.two_pass_vbrmin_section / 100);
5210 cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
5211 }
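     /* Note (interpretation of the code above): bits_left is charged with
      * the actual bits spent on this frame and, for non-altref frames,
      * credited back with the guaranteed minimum section rate
      * (two_pass_vbrmin_section percent of target_bandwidth per second),
      * so the remaining two-pass budget only tracks spending above that
      * floor.
      */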
5212 }
5213 #endif
5214
5215 int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time)
5216 {
5217 struct vpx_usec_timer timer;
5218 int res = 0;
5219
5220 vpx_usec_timer_start(&timer);
5221
5222 /* Reinit the lookahead buffer if the frame size changes */
5223 if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height)
5224 {
5225 assert(cpi->oxcf.lag_in_frames < 2);
5226 dealloc_raw_frame_buffers(cpi);
5227 alloc_raw_frame_buffers(cpi);
5228 }
5229
5230 if(vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
5231 frame_flags, cpi->active_map_enabled ? cpi->active_map : NULL))
5232 res = -1;
5233 vpx_usec_timer_mark(&timer);
5234 cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
5235
5236 return res;
5237 }
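 /* Rough usage sketch for the entry points above and below (illustrative
  * only; `raw`, `ts`, `ts_end`, `dest` and `dest_end` are placeholder names,
  * and buffer allocation / error handling are omitted):
  *
  *   if (vp8_receive_raw_frame(cpi, flags, &raw, ts, ts_end) == 0) {
  *       unsigned long size;
  *       int64_t out_ts, out_ts_end;
  *       while (vp8_get_compressed_data(cpi, &flags, &size, dest, dest_end,
  *                                      &out_ts, &out_ts_end, 0) == 0) {
  *           // emit `size` bytes from dest as one compressed frame
  *       }
  *   }
  */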
5238
5239
5240 static int frame_is_reference(const VP8_COMP *cpi)
5241 {
5242 const VP8_COMMON *cm = &cpi->common;
5243 const MACROBLOCKD *xd = &cpi->mb.e_mbd;
5244
5245 return cm->frame_type == KEY_FRAME || cm->refresh_last_frame
5246 || cm->refresh_golden_frame || cm->refresh_alt_ref_frame
5247 || cm->copy_buffer_to_gf || cm->copy_buffer_to_arf
5248 || cm->refresh_entropy_probs
5249 || xd->mode_ref_lf_delta_update
5250 || xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
5251 }
5252
5253
5254 int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, unsigned char *dest_end, int64_t *time_stamp, int64_t *time_end, int flush)
5255 {
5256 VP8_COMMON *cm;
5257 struct vpx_usec_timer tsctimer;
5258 struct vpx_usec_timer ticktimer;
5259 struct vpx_usec_timer cmptimer;
5260 YV12_BUFFER_CONFIG *force_src_buffer = NULL;
5261
5262 if (!cpi)
5263 return -1;
5264
5265 cm = &cpi->common;
5266
5267 if (setjmp(cpi->common.error.jmp))
5268 {
5269 cpi->common.error.setjmp = 0;
5270 vp8_clear_system_state();
5271 return VPX_CODEC_CORRUPT_FRAME;
5272 }
5273
5274 cpi->common.error.setjmp = 1;
5275
5276 vpx_usec_timer_start(&cmptimer);
5277
5278 cpi->source = NULL;
5279
#if !CONFIG_REALTIME_ONLY
    /* Should we code an alternate reference frame */
    if (cpi->oxcf.error_resilient_mode == 0 &&
        cpi->oxcf.play_alternate &&
        cpi->source_alt_ref_pending)
    {
        if ((cpi->source = vp8_lookahead_peek(cpi->lookahead,
                                              cpi->frames_till_gf_update_due,
                                              PEEK_FORWARD)))
        {
            cpi->alt_ref_source = cpi->source;
            if (cpi->oxcf.arnr_max_frames > 0)
            {
                vp8_temporal_filter_prepare_c(cpi,
                                              cpi->frames_till_gf_update_due);
                force_src_buffer = &cpi->alt_ref_buffer;
            }
            cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
            cm->refresh_alt_ref_frame = 1;
            cm->refresh_golden_frame = 0;
            cm->refresh_last_frame = 0;
            cm->show_frame = 0;
            /* Clear Pending alt Ref flag. */
            cpi->source_alt_ref_pending = 0;
            cpi->is_src_frame_alt_ref = 0;
        }
    }
#endif

    if (!cpi->source)
    {
        /* Read last frame source if we are encoding first pass. */
        if (cpi->pass == 1 && cm->current_video_frame > 0)
        {
            if((cpi->last_source = vp8_lookahead_peek(cpi->lookahead, 1,
                                                      PEEK_BACKWARD)) == NULL)
                return -1;
        }


        if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush)))
        {
            cm->show_frame = 1;

            cpi->is_src_frame_alt_ref = cpi->alt_ref_source
                                        && (cpi->source == cpi->alt_ref_source);

            if(cpi->is_src_frame_alt_ref)
                cpi->alt_ref_source = NULL;
        }
    }

    if (cpi->source)
    {
        cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
        cpi->un_scaled_source = cpi->Source;
        *time_stamp = cpi->source->ts_start;
        *time_end = cpi->source->ts_end;
        *frame_flags = cpi->source->flags;

        if (cpi->pass == 1 && cm->current_video_frame > 0)
        {
            cpi->last_frame_unscaled_source = &cpi->last_source->img;
        }
    }
    else
    {
        *size = 0;
#if !CONFIG_REALTIME_ONLY

        if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done)
        {
            vp8_end_first_pass(cpi); /* get last stats packet */
            cpi->twopass.first_pass_done = 1;
        }

#endif

        return -1;
    }

    if (cpi->source->ts_start < cpi->first_time_stamp_ever)
    {
        cpi->first_time_stamp_ever = cpi->source->ts_start;
        cpi->last_end_time_stamp_seen = cpi->source->ts_start;
    }

    /* adjust frame rates based on timestamps given */
    if (cm->show_frame)
    {
        int64_t this_duration;
        int step = 0;

        if (cpi->source->ts_start == cpi->first_time_stamp_ever)
        {
            this_duration = cpi->source->ts_end - cpi->source->ts_start;
            step = 1;
        }
        else
        {
            int64_t last_duration;

            this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
            last_duration = cpi->last_end_time_stamp_seen
                            - cpi->last_time_stamp_seen;
            /* do a step update if the duration changes by 10% or more */
            if (last_duration)
                step = (int)(((this_duration - last_duration) *
                              10 / last_duration));
        }

        if (this_duration)
        {
            if (step)
                cpi->ref_framerate = 10000000.0 / this_duration;
            else
            {
                double avg_duration, interval;

                /* Average this frame's rate into the last second's average
                 * frame rate. If we haven't seen 1 second yet, then average
                 * over the whole interval seen.
                 */
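                /* For illustration (the numbers are hypothetical): with a
                 * current estimate of 25 fps, avg_duration starts at 400000
                 * us.  If a full second has been seen (interval = 10000000
                 * us) and this frame lasted 500000 us, the update below gives
                 * 400000 * (10000000 - 400000 + 500000) / 10000000 = 404000
                 * us, i.e. roughly 24.75 fps -- a small nudge towards the new
                 * observation rather than a jump.
                 */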
                interval = (double)(cpi->source->ts_end -
                                    cpi->first_time_stamp_ever);
                if(interval > 10000000.0)
                    interval = 10000000;

                avg_duration = 10000000.0 / cpi->ref_framerate;
                avg_duration *= (interval - avg_duration + this_duration);
                avg_duration /= interval;

                cpi->ref_framerate = 10000000.0 / avg_duration;
            }
#if CONFIG_MULTI_RES_ENCODING
            if (cpi->oxcf.mr_total_resolutions > 1) {
              LOWER_RES_FRAME_INFO* low_res_frame_info = (LOWER_RES_FRAME_INFO*)
                  cpi->oxcf.mr_low_res_mode_info;
              // The frame rate should be the same for all spatial layers in
              // multi-res-encoding (simulcast), so constrain the frame rate
              // of the higher layers to that of the lowest resolution. This
              // is needed because the application may decide to skip encoding
              // a high layer and then start again, in which case a big jump
              // in time-stamps will be received for that high layer, which
              // would yield an incorrect frame rate (from the time-stamp
              // adjustment in the above calculation).
              if (cpi->oxcf.mr_encoder_id) {
                cpi->ref_framerate = low_res_frame_info->low_res_framerate;
              }
              else {
                // Keep track of the frame rate for the lowest resolution.
                low_res_frame_info->low_res_framerate = cpi->ref_framerate;
              }
            }
#endif
            if (cpi->oxcf.number_of_layers > 1)
            {
                unsigned int i;

                /* Update frame rates for each layer */
                assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
                for (i = 0; i < cpi->oxcf.number_of_layers &&
                     i < VPX_TS_MAX_LAYERS; ++i)
                {
                    LAYER_CONTEXT *lc = &cpi->layer_context[i];
                    lc->framerate = cpi->ref_framerate /
                                    cpi->oxcf.rate_decimator[i];
                }
            }
            else
                vp8_new_framerate(cpi, cpi->ref_framerate);
        }

        cpi->last_time_stamp_seen = cpi->source->ts_start;
        cpi->last_end_time_stamp_seen = cpi->source->ts_end;
    }

    if (cpi->oxcf.number_of_layers > 1)
    {
        int layer;

        update_layer_contexts (cpi);

        /* Restore layer specific context & set frame rate */
        if (cpi->temporal_layer_id >= 0) {
          layer = cpi->temporal_layer_id;
        } else {
          layer = cpi->oxcf.layer_id[
                  cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
        }
        restore_layer_context (cpi, layer);
        vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
    }

    if (cpi->compressor_speed == 2)
    {
        vpx_usec_timer_start(&tsctimer);
        vpx_usec_timer_start(&ticktimer);
    }

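    /* Fraction (in percent) of macroblocks that used a zero motion vector in
     * the previously encoded frame, recorded here for later per-frame
     * decisions.
     */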
    cpi->lf_zeromv_pct = (cpi->zeromv_count * 100)/cm->MBs;

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
    {
        int i;
        const int num_part = (1 << cm->multi_token_partition);
        /* the available bytes in dest */
        const unsigned long dest_size = dest_end - dest;
        const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);
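        /* Intended layout of dest (as set up below): the first 1/10th of the
         * buffer is reserved for the control partition, and the remaining
         * 9/10ths is split evenly across the num_part token partitions, each
         * getting tok_part_buff_size bytes.
         */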

        unsigned char *dp = dest;

        cpi->partition_d[0] = dp;
        dp += dest_size/10; /* reserve 1/10 for control partition */
        cpi->partition_d_end[0] = dp;

        for(i = 0; i < num_part; i++)
        {
            cpi->partition_d[i + 1] = dp;
            dp += tok_part_buff_size;
            cpi->partition_d_end[i + 1] = dp;
        }
    }
#endif

    /* start with a 0 size frame */
    *size = 0;

    /* Clear down mmx registers */
    vp8_clear_system_state();

    cm->frame_type = INTER_FRAME;
    cm->frame_flags = *frame_flags;

#if 0

    if (cm->refresh_alt_ref_frame)
    {
        cm->refresh_golden_frame = 0;
        cm->refresh_last_frame = 0;
    }
    else
    {
        cm->refresh_golden_frame = 0;
        cm->refresh_last_frame = 1;
    }

#endif
    /* find a free buffer for the new frame */
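    /* A buffer is considered free when its flags field is zero, i.e. it is
     * presumed not to be referenced by the last/golden/alt-ref slots.
     */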
    {
        int i = 0;
        for(; i < NUM_YV12_BUFFERS; i++)
        {
            if(!cm->yv12_fb[i].flags)
            {
                cm->new_fb_idx = i;
                break;
            }
        }

        assert(i < NUM_YV12_BUFFERS);
    }
#if !CONFIG_REALTIME_ONLY

    if (cpi->pass == 1)
    {
        Pass1Encode(cpi, size, dest, frame_flags);
    }
    else if (cpi->pass == 2)
    {
        Pass2Encode(cpi, size, dest, dest_end, frame_flags);
    }
    else
#endif
        encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);

    if (cpi->compressor_speed == 2)
    {
        unsigned int duration, duration2;
        vpx_usec_timer_mark(&tsctimer);
        vpx_usec_timer_mark(&ticktimer);

        duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
        duration2 = (unsigned int)((double)duration / 2);

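        /* The averages below are exponential moving averages: the new sample
         * is blended in with weight 1/8, i.e. avg = (7*avg + sample) >> 3.
         * duration2 (half the measured tick time) is used as a rough stand-in
         * for the mode-picking share of the frame.
         */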
        if (cm->frame_type != KEY_FRAME)
        {
            if (cpi->avg_encode_time == 0)
                cpi->avg_encode_time = duration;
            else
                cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
        }

        if (duration2)
        {
            if (cpi->avg_pick_mode_time == 0)
                cpi->avg_pick_mode_time = duration2;
            else
                cpi->avg_pick_mode_time = (7 * cpi->avg_pick_mode_time + duration2) >> 3;
        }

    }

    if (cm->refresh_entropy_probs == 0)
    {
        memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
    }

    /* Save the contexts separately for alt ref, gold and last. */
    /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
    if(cm->refresh_alt_ref_frame)
        memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));

    if(cm->refresh_golden_frame)
        memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));

    if(cm->refresh_last_frame)
        memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));

    /* If the frame was dropped (size == 0), keep the refresh requests so they
     * are honoured on subsequent frames; otherwise return to the normal state.
     */
    if (*size > 0)
    {
        cpi->droppable = !frame_is_reference(cpi);

        /* return to normal state */
        cm->refresh_entropy_probs = 1;
        cm->refresh_alt_ref_frame = 0;
        cm->refresh_golden_frame = 0;
        cm->refresh_last_frame = 1;
        cm->frame_type = INTER_FRAME;

    }

    /* Save layer specific state */
    if (cpi->oxcf.number_of_layers > 1)
        save_layer_context (cpi);

    vpx_usec_timer_mark(&cmptimer);
    cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);

    if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame)
    {
        generate_psnr_packet(cpi);
    }

#if CONFIG_INTERNAL_STATS

    if (cpi->pass != 1)
    {
        cpi->bytes += *size;

        if (cm->show_frame)
        {
            cpi->common.show_frame_mi = cpi->common.mi;
            cpi->count ++;

            if (cpi->b_calculate_psnr)
            {
                uint64_t ye,ue,ve;
                double frame_psnr;
                YV12_BUFFER_CONFIG *orig = cpi->Source;
                YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
                unsigned int y_width = cpi->common.Width;
                unsigned int y_height = cpi->common.Height;
                unsigned int uv_width = (y_width + 1) / 2;
                unsigned int uv_height = (y_height + 1) / 2;
                int y_samples = y_height * y_width;
                int uv_samples = uv_height * uv_width;
                int t_samples = y_samples + 2 * uv_samples;
                double sq_error;
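                /* Per-plane SSE is accumulated below and converted with
                 * vpx_sse_to_psnr(), which (per its standard definition in
                 * libvpx) computes 10 * log10(samples * peak^2 / sse) with
                 * peak = 255 for 8-bit data.
                 */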

                ye = calc_plane_error(orig->y_buffer, orig->y_stride,
                  recon->y_buffer, recon->y_stride, y_width, y_height);

                ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
                  recon->u_buffer, recon->uv_stride, uv_width, uv_height);

                ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
                  recon->v_buffer, recon->uv_stride, uv_width, uv_height);

                sq_error = (double)(ye + ue + ve);

                frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);

                cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
                cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
                cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
                cpi->total_sq_error += sq_error;
                cpi->total += frame_psnr;
#if CONFIG_POSTPROC
                {
                    YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
                    double sq_error2;
                    double frame_psnr2, frame_ssim2 = 0;
                    double weight = 0;

                    vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0);
                    vp8_clear_system_state();

                    ye = calc_plane_error(orig->y_buffer, orig->y_stride,
                      pp->y_buffer, pp->y_stride, y_width, y_height);

                    ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
                      pp->u_buffer, pp->uv_stride, uv_width, uv_height);

                    ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
                      pp->v_buffer, pp->uv_stride, uv_width, uv_height);

                    sq_error2 = (double)(ye + ue + ve);

                    frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);

                    cpi->totalp_y += vpx_sse_to_psnr(y_samples,
                                                     255.0, (double)ye);
                    cpi->totalp_u += vpx_sse_to_psnr(uv_samples,
                                                     255.0, (double)ue);
                    cpi->totalp_v += vpx_sse_to_psnr(uv_samples,
                                                     255.0, (double)ve);
                    cpi->total_sq_error2 += sq_error2;
                    cpi->totalp += frame_psnr2;

                    frame_ssim2 = vpx_calc_ssim(cpi->Source,
                      &cm->post_proc_buffer, &weight);

                    cpi->summed_quality += frame_ssim2 * weight;
                    cpi->summed_weights += weight;

                    if (cpi->oxcf.number_of_layers > 1)
                    {
                        unsigned int i;

                        for (i = cpi->current_layer;
                             i < cpi->oxcf.number_of_layers; i++)
                        {
                            cpi->frames_in_layer[i]++;

                            cpi->bytes_in_layer[i] += *size;
                            cpi->sum_psnr[i] += frame_psnr;
                            cpi->sum_psnr_p[i] += frame_psnr2;
                            cpi->total_error2[i] += sq_error;
                            cpi->total_error2_p[i] += sq_error2;
                            cpi->sum_ssim[i] += frame_ssim2 * weight;
                            cpi->sum_weights[i] += weight;
                        }
                    }
                }
#endif
            }

            if (cpi->b_calculate_ssimg)
            {
                double y, u, v, frame_all;
                frame_all = vpx_calc_ssimg(cpi->Source, cm->frame_to_show,
                                           &y, &u, &v);

                if (cpi->oxcf.number_of_layers > 1)
                {
                    unsigned int i;

                    for (i = cpi->current_layer;
                         i < cpi->oxcf.number_of_layers; i++)
                    {
                        if (!cpi->b_calculate_psnr)
                            cpi->frames_in_layer[i]++;

                        cpi->total_ssimg_y_in_layer[i] += y;
                        cpi->total_ssimg_u_in_layer[i] += u;
                        cpi->total_ssimg_v_in_layer[i] += v;
                        cpi->total_ssimg_all_in_layer[i] += frame_all;
                    }
                }
                else
                {
                    cpi->total_ssimg_y += y;
                    cpi->total_ssimg_u += u;
                    cpi->total_ssimg_v += v;
                    cpi->total_ssimg_all += frame_all;
                }
            }

        }
    }

#if 0

    if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
    {
        skiptruecount += cpi->skip_true_count;
        skipfalsecount += cpi->skip_false_count;
    }

#endif
#if 0

    if (cpi->pass != 1)
    {
        FILE *f = fopen("skip.stt", "a");
        fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);

        if (cpi->is_src_frame_alt_ref == 1)
            fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count, *size);

        fclose(f);
    }

#endif
#endif

    cpi->common.error.setjmp = 0;

    return 0;
}

int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags)
{
    if (cpi->common.refresh_alt_ref_frame)
        return -1;
    else
    {
        int ret;

#if CONFIG_MULTITHREAD
        if(cpi->b_lpf_running)
        {
            sem_wait(&cpi->h_event_end_lpf);
            cpi->b_lpf_running = 0;
        }
#endif

#if CONFIG_POSTPROC
        cpi->common.show_frame_mi = cpi->common.mi;
        ret = vp8_post_proc_frame(&cpi->common, dest, flags);
#else
        (void)flags;

        if (cpi->common.frame_to_show)
        {
            *dest = *cpi->common.frame_to_show;
            dest->y_width = cpi->common.Width;
            dest->y_height = cpi->common.Height;
            dest->uv_height = cpi->common.Height / 2;
            ret = 0;
        }
        else
        {
            ret = -1;
        }

#endif
        vp8_clear_system_state();
        return ret;
    }
}

int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4])
{
    signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
    int internal_delta_q[MAX_MB_SEGMENTS];
    const int range = 63;
    int i;

    // This method is currently incompatible with the cyclic refresh method
    if ( cpi->cyclic_refresh_mode_enabled )
        return -1;

    // Check number of rows and columns match
    if (cpi->common.mb_rows != rows || cpi->common.mb_cols != cols)
        return -1;

    // Range check the delta Q values and convert the external Q range values
    // to internal ones.
    if ( (abs(delta_q[0]) > range) || (abs(delta_q[1]) > range) ||
         (abs(delta_q[2]) > range) || (abs(delta_q[3]) > range) )
        return -1;

    // Range check the delta lf values
    if ( (abs(delta_lf[0]) > range) || (abs(delta_lf[1]) > range) ||
         (abs(delta_lf[2]) > range) || (abs(delta_lf[3]) > range) )
        return -1;

    if (!map)
    {
        disable_segmentation(cpi);
        return 0;
    }

    // Translate the external delta q values to internal values.
    for ( i = 0; i < MAX_MB_SEGMENTS; i++ )
        internal_delta_q[i] =
            ( delta_q[i] >= 0 ) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
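    // Note: q_trans[] is assumed here to map the external quantizer range
    // (0..63) onto the encoder's internal quantizer index range; the sign of
    // each delta is preserved by the expression above.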

    /* Set the segmentation Map */
    set_segmentation_map(cpi, map);

    /* Activate segmentation. */
    enable_segmentation(cpi);

    /* Set up the quant segment data */
    feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
    feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
    feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
    feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];

    /* Set up the loop filter segment data */
    feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
    feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
    feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
    feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];

    cpi->segment_encode_breakout[0] = threshold[0];
    cpi->segment_encode_breakout[1] = threshold[1];
    cpi->segment_encode_breakout[2] = threshold[2];
    cpi->segment_encode_breakout[3] = threshold[3];

    /* Initialise the feature data structure */
    set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);

    return 0;
}

int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows, unsigned int cols)
{
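    /* The map, when provided, is expected to contain one byte per macroblock
     * (rows * cols entries); it is later handed to the lookahead stage (see
     * vp8_lookahead_push() above) so inactive regions can be treated
     * differently.
     */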
    if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols)
    {
        if (map)
        {
            memcpy(cpi->active_map, map, rows * cols);
            cpi->active_map_enabled = 1;
        }
        else
            cpi->active_map_enabled = 0;

        return 0;
    }
    else
    {
        return -1;
    }
}

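/* Sets the internal (pre-encode) down-scaling applied to the source.  The
 * VPX_SCALING values are assumed to follow the usual vp8 convention
 * (NORMAL, FOURFIVE, THREEFIVE, ONETWO for 1, 4/5, 3/5 and 1/2 scaling),
 * so anything beyond ONETWO is rejected.
 */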
int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode, VPX_SCALING vert_mode)
{
    if (horiz_mode <= ONETWO)
        cpi->common.horiz_scale = horiz_mode;
    else
        return -1;

    if (vert_mode <= ONETWO)
        cpi->common.vert_scale = vert_mode;
    else
        return -1;

    return 0;
}



int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest)
{
    int i, j;
    int Total = 0;

    unsigned char *src = source->y_buffer;
    unsigned char *dst = dest->y_buffer;

    /* Loop through the raw and reconstructed Y plane data, summing the
     * squared differences.
     */
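    /* vpx_mse16x16() is assumed to return the sum of squared differences for
     * one 16x16 block (also written to *sse), so Total accumulates the
     * Y-plane SSE over the frame, walked in 16x16 steps.
     */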
    for (i = 0; i < source->y_height; i += 16)
    {
        for (j = 0; j < source->y_width; j += 16)
        {
            unsigned int sse;
            Total += vpx_mse16x16(src + j, source->y_stride,
                                  dst + j, dest->y_stride, &sse);
        }

        src += 16 * source->y_stride;
        dst += 16 * dest->y_stride;
    }

    return Total;
}


int vp8_get_quantizer(VP8_COMP *cpi)
{
    return cpi->common.base_qindex;
}
