1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "vpx_config.h"
12 #include "./vpx_scale_rtcd.h"
13 #include "./vpx_dsp_rtcd.h"
14 #include "./vp8_rtcd.h"
15 #include "bitstream.h"
16 #include "vp8/common/onyxc_int.h"
17 #include "vp8/common/blockd.h"
18 #include "onyx_int.h"
19 #include "vp8/common/systemdependent.h"
20 #include "vp8/common/vp8_skin_detection.h"
21 #include "vp8/encoder/quantize.h"
22 #include "vp8/common/alloccommon.h"
23 #include "mcomp.h"
24 #include "firstpass.h"
25 #include "vpx_dsp/psnr.h"
26 #include "vpx_scale/vpx_scale.h"
27 #include "vp8/common/extend.h"
28 #include "ratectrl.h"
29 #include "vp8/common/quant_common.h"
30 #include "segmentation.h"
31 #if CONFIG_POSTPROC
32 #include "vp8/common/postproc.h"
33 #endif
34 #include "vpx_mem/vpx_mem.h"
35 #include "vp8/common/reconintra.h"
36 #include "vp8/common/swapyv12buffer.h"
37 #include "vp8/common/threading.h"
38 #include "vpx_ports/system_state.h"
39 #include "vpx_ports/vpx_timer.h"
40 #include "vpx_util/vpx_write_yuv_frame.h"
41 #if ARCH_ARM
42 #include "vpx_ports/arm.h"
43 #endif
44 #if CONFIG_MULTI_RES_ENCODING
45 #include "mr_dissim.h"
46 #endif
47 #include "encodeframe.h"
48 #if CONFIG_MULTITHREAD
49 #include "ethreading.h"
50 #endif
51 #include "picklpf.h"
52 #if !CONFIG_REALTIME_ONLY
53 #include "temporal_filter.h"
54 #endif
55
56 #include <assert.h>
57 #include <math.h>
58 #include <stdio.h>
59 #include <limits.h>
60
61 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
62 extern int vp8_update_coef_context(VP8_COMP *cpi);
63 #endif
64
65 extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source,
66 YV12_BUFFER_CONFIG *post, int filt_lvl,
67 int low_var_thresh, int flag);
68 extern void print_parms(VP8_CONFIG *ocf, char *filenam);
69 extern unsigned int vp8_get_processor_freq();
70 extern void print_tree_update_probs();
71
72 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
73
74 static void set_default_lf_deltas(VP8_COMP *cpi);
75
76 extern const int vp8_gf_interval_table[101];
77
78 #if CONFIG_INTERNAL_STATS
#include <math.h>
80 #include "vpx_dsp/ssim.h"
81 #endif
82
83 #ifdef OUTPUT_YUV_SRC
84 FILE *yuv_file;
85 #endif
86 #ifdef OUTPUT_YUV_DENOISED
87 FILE *yuv_denoised_file;
88 #endif
89 #ifdef OUTPUT_YUV_SKINMAP
90 static FILE *yuv_skinmap_file = NULL;
91 #endif
92
93 #if 0
94 FILE *framepsnr;
95 FILE *kf_list;
96 FILE *keyfile;
97 #endif
98
99 #if 0
100 extern int skip_true_count;
101 extern int skip_false_count;
102 #endif
103
104 #ifdef VP8_ENTROPY_STATS
105 extern int intra_mode_stats[10][10][10];
106 #endif
107
108 #ifdef SPEEDSTATS
109 unsigned int frames_at_speed[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0 };
111 unsigned int tot_pm = 0;
112 unsigned int cnt_pm = 0;
113 unsigned int tot_ef = 0;
114 unsigned int cnt_ef = 0;
115 #endif
116
117 #ifdef MODE_STATS
118 extern unsigned __int64 Sectionbits[50];
119 extern int y_modes[5];
120 extern int uv_modes[4];
121 extern int b_modes[10];
122
123 extern int inter_y_modes[10];
124 extern int inter_uv_modes[4];
125 extern unsigned int inter_b_modes[15];
126 #endif
127
128 extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
129
130 extern const int qrounding_factors[129];
131 extern const int qzbin_factors[129];
132 extern void vp8cx_init_quantizer(VP8_COMP *cpi);
133 extern const int vp8cx_base_skip_false_prob[128];
134
135 /* Tables relating active max Q to active min Q */
136 static const unsigned char kf_low_motion_minq[QINDEX_RANGE] = {
137 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
138 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
139 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
140 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
141 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 10, 10, 11,
142 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
143 17, 17, 18, 18, 18, 18, 19, 20, 20, 21, 21, 22, 23, 23
144 };
145 static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = {
146 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
147 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
148 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5,
149 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10,
150 10, 10, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16,
151 16, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
152 22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30
153 };
154 static const unsigned char gf_low_motion_minq[QINDEX_RANGE] = {
155 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3,
156 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8,
157 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
158 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
159 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34,
160 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 44,
161 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58
162 };
163 static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] = {
164 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5,
165 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11,
166 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18,
167 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
168 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37,
169 37, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 45, 46, 47, 48, 49, 50,
170 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
171 };
172 static const unsigned char gf_high_motion_minq[QINDEX_RANGE] = {
173 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5,
174 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11,
175 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21,
176 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30,
177 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40,
178 40, 41, 41, 42, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
179 57, 58, 59, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80
180 };
181 static const unsigned char inter_minq[QINDEX_RANGE] = {
182 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
183 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24,
184 24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38,
185 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53,
186 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69,
187 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86,
188 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
189 };
190
191 #ifdef PACKET_TESTING
192 extern FILE *vpxlogc;
193 #endif
194
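/* Snapshot the rate control and coding state of the current temporal layer
 * so it can be restored the next time this layer is encoded.
 */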
static void save_layer_context(VP8_COMP *cpi) {
196 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
197
198 /* Save layer dependent coding state */
199 lc->target_bandwidth = cpi->target_bandwidth;
200 lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
201 lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
202 lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
203 lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
204 lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
205 lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
206 lc->buffer_level = cpi->buffer_level;
207 lc->bits_off_target = cpi->bits_off_target;
208 lc->total_actual_bits = cpi->total_actual_bits;
209 lc->worst_quality = cpi->worst_quality;
210 lc->active_worst_quality = cpi->active_worst_quality;
211 lc->best_quality = cpi->best_quality;
212 lc->active_best_quality = cpi->active_best_quality;
213 lc->ni_av_qi = cpi->ni_av_qi;
214 lc->ni_tot_qi = cpi->ni_tot_qi;
215 lc->ni_frames = cpi->ni_frames;
216 lc->avg_frame_qindex = cpi->avg_frame_qindex;
217 lc->rate_correction_factor = cpi->rate_correction_factor;
218 lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
219 lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
220 lc->zbin_over_quant = cpi->mb.zbin_over_quant;
221 lc->inter_frame_target = cpi->inter_frame_target;
222 lc->total_byte_count = cpi->total_byte_count;
223 lc->filter_level = cpi->common.filter_level;
224 lc->frames_since_last_drop_overshoot = cpi->frames_since_last_drop_overshoot;
225 lc->force_maxqp = cpi->force_maxqp;
226 lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
227
228 memcpy(lc->count_mb_ref_frame_usage, cpi->mb.count_mb_ref_frame_usage,
229 sizeof(cpi->mb.count_mb_ref_frame_usage));
230 }
231
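/* Make |layer| the current layer and restore its previously saved rate
 * control and coding state.
 */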
static void restore_layer_context(VP8_COMP *cpi, const int layer) {
233 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
234
235 /* Restore layer dependent coding state */
236 cpi->current_layer = layer;
237 cpi->target_bandwidth = lc->target_bandwidth;
238 cpi->oxcf.target_bandwidth = lc->target_bandwidth;
239 cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
240 cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
241 cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
242 cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
243 cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
244 cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
245 cpi->buffer_level = lc->buffer_level;
246 cpi->bits_off_target = lc->bits_off_target;
247 cpi->total_actual_bits = lc->total_actual_bits;
248 cpi->active_worst_quality = lc->active_worst_quality;
249 cpi->active_best_quality = lc->active_best_quality;
250 cpi->ni_av_qi = lc->ni_av_qi;
251 cpi->ni_tot_qi = lc->ni_tot_qi;
252 cpi->ni_frames = lc->ni_frames;
253 cpi->avg_frame_qindex = lc->avg_frame_qindex;
254 cpi->rate_correction_factor = lc->rate_correction_factor;
255 cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
256 cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
257 cpi->mb.zbin_over_quant = lc->zbin_over_quant;
258 cpi->inter_frame_target = lc->inter_frame_target;
259 cpi->total_byte_count = lc->total_byte_count;
260 cpi->common.filter_level = lc->filter_level;
261 cpi->frames_since_last_drop_overshoot = lc->frames_since_last_drop_overshoot;
262 cpi->force_maxqp = lc->force_maxqp;
263 cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
264
265 memcpy(cpi->mb.count_mb_ref_frame_usage, lc->count_mb_ref_frame_usage,
266 sizeof(cpi->mb.count_mb_ref_frame_usage));
267 }
268
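/* Scale |val| by num/denom using 64-bit intermediates to avoid overflow.
 * Used below to convert buffer levels given in milliseconds into bits at a
 * given bandwidth, e.g. rescale(5000, 800000, 1000) == 4000000 bits
 * (5 seconds at 800 kbit/s).
 */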
static int rescale(int val, int num, int denom) {
270 int64_t llnum = num;
271 int64_t llden = denom;
272 int64_t llval = val;
273
274 return (int)(llval * llnum / llden);
275 }
276
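/* Initialize the rate control state (target bandwidth, buffer levels,
 * quality limits and correction factors) for one temporal layer.
 */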
static void init_temporal_layer_context(VP8_COMP *cpi, VP8_CONFIG *oxcf,
                                        const int layer,
                                        double prev_layer_framerate) {
280 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
281
282 lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
283 lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
284
285 lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
286 lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
287 lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
288
289 lc->starting_buffer_level =
290 rescale((int)(oxcf->starting_buffer_level), lc->target_bandwidth, 1000);
291
292 if (oxcf->optimal_buffer_level == 0) {
293 lc->optimal_buffer_level = lc->target_bandwidth / 8;
294 } else {
295 lc->optimal_buffer_level =
296 rescale((int)(oxcf->optimal_buffer_level), lc->target_bandwidth, 1000);
297 }
298
299 if (oxcf->maximum_buffer_size == 0) {
300 lc->maximum_buffer_size = lc->target_bandwidth / 8;
301 } else {
302 lc->maximum_buffer_size =
303 rescale((int)(oxcf->maximum_buffer_size), lc->target_bandwidth, 1000);
304 }
305
306 /* Work out the average size of a frame within this layer */
307 if (layer > 0) {
308 lc->avg_frame_size_for_layer =
309 (int)((cpi->oxcf.target_bitrate[layer] -
310 cpi->oxcf.target_bitrate[layer - 1]) *
311 1000 / (lc->framerate - prev_layer_framerate));
312 }
313
314 lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
315 lc->active_best_quality = cpi->oxcf.best_allowed_q;
316 lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
317
318 lc->buffer_level = lc->starting_buffer_level;
319 lc->bits_off_target = lc->starting_buffer_level;
320
321 lc->total_actual_bits = 0;
322 lc->ni_av_qi = 0;
323 lc->ni_tot_qi = 0;
324 lc->ni_frames = 0;
325 lc->rate_correction_factor = 1.0;
326 lc->key_frame_rate_correction_factor = 1.0;
327 lc->gf_rate_correction_factor = 1.0;
328 lc->inter_frame_target = 0;
329 }
330
331 // Upon a run-time change in temporal layers, reset the layer context parameters
332 // for any "new" layers. For "existing" layers, let them inherit the parameters
333 // from the previous layer state (at the same layer #). In future we may want
334 // to better map the previous layer state(s) to the "new" ones.
static void reset_temporal_layer_change(VP8_COMP *cpi, VP8_CONFIG *oxcf,
                                        const int prev_num_layers) {
337 int i;
338 double prev_layer_framerate = 0;
339 const int curr_num_layers = cpi->oxcf.number_of_layers;
340 // If the previous state was 1 layer, get current layer context from cpi.
341 // We need this to set the layer context for the new layers below.
342 if (prev_num_layers == 1) {
343 cpi->current_layer = 0;
344 save_layer_context(cpi);
345 }
346 for (i = 0; i < curr_num_layers; ++i) {
347 LAYER_CONTEXT *lc = &cpi->layer_context[i];
348 if (i >= prev_num_layers) {
349 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
350 }
351 // The initial buffer levels are set based on their starting levels.
352 // We could set the buffer levels based on the previous state (normalized
353 // properly by the layer bandwidths) but we would need to keep track of
354 // the previous set of layer bandwidths (i.e., target_bitrate[i])
355 // before the layer change. For now, reset to the starting levels.
356 lc->buffer_level =
357 cpi->oxcf.starting_buffer_level_in_ms * cpi->oxcf.target_bitrate[i];
358 lc->bits_off_target = lc->buffer_level;
    // TODO(marpan): Should we set the rate_correction_factor and
    // active_worst/best_quality to values derived from the previous layer
    // state (to smooth out quality dips/rate fluctuations at the transition)?
362
    // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
    // is not set for 1 layer, and restore_layer_context()/save_layer_context()
    // are not called in the encoding loop, so we need to call it here to
    // pass the layer context state to |cpi|.
367 if (curr_num_layers == 1) {
368 lc->target_bandwidth = cpi->oxcf.target_bandwidth;
369 lc->buffer_level =
370 cpi->oxcf.starting_buffer_level_in_ms * lc->target_bandwidth / 1000;
371 lc->bits_off_target = lc->buffer_level;
372 restore_layer_context(cpi, 0);
373 }
374 prev_layer_framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[i];
375 }
376 }
377
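/* Reset the segmentation update flags and clear all loop filter delta state,
 * then install the default loop filter deltas.
 */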
static void setup_features(VP8_COMP *cpi) {
379 // If segmentation enabled set the update flags
380 if (cpi->mb.e_mbd.segmentation_enabled) {
381 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
382 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
383 } else {
384 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
385 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
386 }
387
388 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
389 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
390 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
391 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
392 memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0,
393 sizeof(cpi->mb.e_mbd.ref_lf_deltas));
394 memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0,
395 sizeof(cpi->mb.e_mbd.mode_lf_deltas));
396
397 set_default_lf_deltas(cpi);
398 }
399
400 static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
401
void vp8_initialize_enc(void) {
403 static volatile int init_done = 0;
404
405 if (!init_done) {
406 vpx_dsp_rtcd();
407 vp8_init_intra_predictors();
408 init_done = 1;
409 }
410 }
411
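/* Free the buffers owned by the compressor instance: MV and segmentation
 * maps, token buffer, GF usage maps, frame buffers and lookahead buffers.
 */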
static void dealloc_compressor_data(VP8_COMP *cpi) {
413 vpx_free(cpi->tplist);
414 cpi->tplist = NULL;
415
416 /* Delete last frame MV storage buffers */
417 vpx_free(cpi->lfmv);
418 cpi->lfmv = 0;
419
420 vpx_free(cpi->lf_ref_frame_sign_bias);
421 cpi->lf_ref_frame_sign_bias = 0;
422
423 vpx_free(cpi->lf_ref_frame);
424 cpi->lf_ref_frame = 0;
425
  /* Delete segmentation map */
427 vpx_free(cpi->segmentation_map);
428 cpi->segmentation_map = 0;
429
430 vpx_free(cpi->active_map);
431 cpi->active_map = 0;
432
433 vp8_de_alloc_frame_buffers(&cpi->common);
434
435 vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
436 vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
437 dealloc_raw_frame_buffers(cpi);
438
439 vpx_free(cpi->tok);
440 cpi->tok = 0;
441
442 /* Structure used to monitor GF usage */
443 vpx_free(cpi->gf_active_flags);
444 cpi->gf_active_flags = 0;
445
446 /* Activity mask based per mb zbin adjustments */
447 vpx_free(cpi->mb_activity_map);
448 cpi->mb_activity_map = 0;
449
450 vpx_free(cpi->mb.pip);
451 cpi->mb.pip = 0;
452
453 #if CONFIG_MULTITHREAD
454 vpx_free(cpi->mt_current_mb_col);
455 cpi->mt_current_mb_col = NULL;
456 #endif
457 }
458
static void enable_segmentation(VP8_COMP *cpi) {
460 /* Set the appropriate feature bit */
461 cpi->mb.e_mbd.segmentation_enabled = 1;
462 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
463 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
464 }
static void disable_segmentation(VP8_COMP *cpi) {
466 /* Clear the appropriate feature bit */
467 cpi->mb.e_mbd.segmentation_enabled = 0;
468 }
469
/* Valid values for a segment are 0 to 3
 * Segmentation map is arranged as [Rows][Columns]
 */
static void set_segmentation_map(VP8_COMP *cpi,
                                 unsigned char *segmentation_map) {
475 /* Copy in the new segmentation map */
476 memcpy(cpi->segmentation_map, segmentation_map,
477 (cpi->common.mb_rows * cpi->common.mb_cols));
478
479 /* Signal that the map should be updated. */
480 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
481 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
482 }
483
/* The values given for each segment can be either deltas (from the default
 * value chosen for the frame) or absolute values.
 *
 * Valid range for abs values is:
 *    (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
 * Valid range for delta values is:
 *    (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
 *
 * abs_delta = SEGMENT_DELTADATA (deltas)
 * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
 *
 */
static void set_segment_data(VP8_COMP *cpi, signed char *feature_data,
                             unsigned char abs_delta) {
498 cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
499 memcpy(cpi->segment_feature_data, feature_data,
500 sizeof(cpi->segment_feature_data));
501 }
502
503 /* A simple function to cyclically refresh the background at a lower Q */
static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) {
505 unsigned char *seg_map = cpi->segmentation_map;
506 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
507 int i;
508 int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
509 int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
510
511 cpi->cyclic_refresh_q = Q / 2;
512
513 if (cpi->oxcf.screen_content_mode) {
    // Modify quality ramp-up based on Q. Above some Q level, increase the
    // number of blocks to be refreshed, and reduce it below that threshold.
    // Turn it off under certain conditions (i.e., away from a key frame, and
    // if we are at good quality (low Q) and most of the blocks were
    // skip-encoded in the previous frame).
520 int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100;
521 if (Q >= qp_thresh) {
522 cpi->cyclic_refresh_mode_max_mbs_perframe =
523 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
524 } else if (cpi->frames_since_key > 250 && Q < 20 &&
525 cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
526 cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
527 } else {
528 cpi->cyclic_refresh_mode_max_mbs_perframe =
529 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
530 }
531 block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
532 }
533
534 // Set every macroblock to be eligible for update.
535 // For key frame this will reset seg map to 0.
536 memset(cpi->segmentation_map, 0, mbs_in_frame);
537
538 if (cpi->common.frame_type != KEY_FRAME && block_count > 0) {
539 /* Cycle through the macro_block rows */
540 /* MB loop to set local segmentation map */
541 i = cpi->cyclic_refresh_mode_index;
542 assert(i < mbs_in_frame);
543 do {
      /* If the MB is a candidate for clean up then mark it for
       * possible boost/refresh (segment 1). The segment id may get
       * reset to 0 later if the MB gets coded as anything other than
       * last frame (0,0), as only (last frame 0,0) MBs are eligible for
       * refresh: that is to say, MBs likely to be background blocks.
       */
550 if (cpi->cyclic_refresh_map[i] == 0) {
551 seg_map[i] = 1;
552 block_count--;
553 } else if (cpi->cyclic_refresh_map[i] < 0) {
554 cpi->cyclic_refresh_map[i]++;
555 }
556
557 i++;
558 if (i == mbs_in_frame) i = 0;
559
560 } while (block_count && i != cpi->cyclic_refresh_mode_index);
561
562 cpi->cyclic_refresh_mode_index = i;
563
564 #if CONFIG_TEMPORAL_DENOISING
565 if (cpi->oxcf.noise_sensitivity > 0) {
566 if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
567 Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
568 (cpi->frames_since_key >
569 2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
570 // Under aggressive denoising, use segmentation to turn off loop
571 // filter below some qp thresh. The filter is reduced for all
572 // blocks that have been encoded as ZEROMV LAST x frames in a row,
573 // where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
574 // This is to avoid "dot" artifacts that can occur from repeated
575 // loop filtering on noisy input source.
576 cpi->cyclic_refresh_q = Q;
577 // lf_adjustment = -MAX_LOOP_FILTER;
578 lf_adjustment = -40;
579 for (i = 0; i < mbs_in_frame; ++i) {
580 seg_map[i] = (cpi->consec_zero_last[i] >
581 cpi->denoiser.denoise_pars.consec_zerolast)
582 ? 1
583 : 0;
584 }
585 }
586 }
587 #endif
588 }
589
590 /* Activate segmentation. */
591 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
592 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
593 enable_segmentation(cpi);
594
595 /* Set up the quant segment data */
596 feature_data[MB_LVL_ALT_Q][0] = 0;
597 feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
598 feature_data[MB_LVL_ALT_Q][2] = 0;
599 feature_data[MB_LVL_ALT_Q][3] = 0;
600
601 /* Set up the loop segment data */
602 feature_data[MB_LVL_ALT_LF][0] = 0;
603 feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
604 feature_data[MB_LVL_ALT_LF][2] = 0;
605 feature_data[MB_LVL_ALT_LF][3] = 0;
606
607 /* Initialise the feature data structure */
608 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
609 }
610
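/* Classify each macroblock of the source frame as skin / non-skin and then
 * remove isolated decisions (skipping the frame boundary).
 */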
static void compute_skin_map(VP8_COMP *cpi) {
612 int mb_row, mb_col, num_bl;
613 VP8_COMMON *cm = &cpi->common;
614 const uint8_t *src_y = cpi->Source->y_buffer;
615 const uint8_t *src_u = cpi->Source->u_buffer;
616 const uint8_t *src_v = cpi->Source->v_buffer;
617 const int src_ystride = cpi->Source->y_stride;
618 const int src_uvstride = cpi->Source->uv_stride;
619
620 const SKIN_DETECTION_BLOCK_SIZE bsize =
621 (cm->Width * cm->Height <= 352 * 288) ? SKIN_8X8 : SKIN_16X16;
622
623 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
624 num_bl = 0;
625 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
626 const int bl_index = mb_row * cm->mb_cols + mb_col;
627 cpi->skin_map[bl_index] =
628 vp8_compute_skin_block(src_y, src_u, src_v, src_ystride, src_uvstride,
629 bsize, cpi->consec_zero_last[bl_index], 0);
630 num_bl++;
631 src_y += 16;
632 src_u += 8;
633 src_v += 8;
634 }
635 src_y += (src_ystride << 4) - (num_bl << 4);
636 src_u += (src_uvstride << 3) - (num_bl << 3);
637 src_v += (src_uvstride << 3) - (num_bl << 3);
638 }
639
  // Remove isolated skin blocks (none of their neighbors are skin) and
  // isolated non-skin blocks (all of their neighbors are skin). Skip the
  // boundary.
642 for (mb_row = 1; mb_row < cm->mb_rows - 1; mb_row++) {
643 for (mb_col = 1; mb_col < cm->mb_cols - 1; mb_col++) {
644 const int bl_index = mb_row * cm->mb_cols + mb_col;
645 int num_neighbor = 0;
646 int mi, mj;
647 int non_skin_threshold = 8;
648
649 for (mi = -1; mi <= 1; mi += 1) {
650 for (mj = -1; mj <= 1; mj += 1) {
651 int bl_neighbor_index = (mb_row + mi) * cm->mb_cols + mb_col + mj;
652 if (cpi->skin_map[bl_neighbor_index]) num_neighbor++;
653 }
654 }
655
656 if (cpi->skin_map[bl_index] && num_neighbor < 2)
657 cpi->skin_map[bl_index] = 0;
658 if (!cpi->skin_map[bl_index] && num_neighbor == non_skin_threshold)
659 cpi->skin_map[bl_index] = 1;
660 }
661 }
662 }
663
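/* Install the default loop filter delta values for reference frame and
 * coding mode, and flag them for update in the bitstream.
 */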
static void set_default_lf_deltas(VP8_COMP *cpi) {
665 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
666 cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
667
668 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
669 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
670
671 /* Test of ref frame deltas */
672 cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
673 cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
674 cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
675 cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
676
677 cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
678
679 if (cpi->oxcf.Mode == MODE_REALTIME) {
680 cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
681 } else {
682 cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
683 }
684
685 cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
686 cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
687 }
688
689 /* Convenience macros for mapping speed and mode into a continuous
690 * range
691 */
692 #define GOOD(x) (x + 1)
693 #define RT(x) (x + 7)
694
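/* Map a continuous speed value to a setting. |map| holds (value, upper speed
 * bound) pairs terminated by INT_MAX; the value whose following bound first
 * exceeds |speed| is returned. For example, with thresh_mult_map_znn below,
 * speed GOOD(2) returns 1500.
 */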
static int speed_map(int speed, const int *map) {
696 int res;
697
698 do {
699 res = *map++;
700 } while (speed >= *map++);
701 return res;
702 }
703
704 static const int thresh_mult_map_znn[] = {
705 /* map common to zero, nearest, and near */
706 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
707 };
708
709 static const int thresh_mult_map_vhpred[] = { 1000, GOOD(2), 1500, GOOD(3),
710 2000, RT(0), 1000, RT(1),
711 2000, RT(7), INT_MAX, INT_MAX };
712
713 static const int thresh_mult_map_bpred[] = { 2000, GOOD(0), 2500, GOOD(2),
714 5000, GOOD(3), 7500, RT(0),
715 2500, RT(1), 5000, RT(6),
716 INT_MAX, INT_MAX };
717
718 static const int thresh_mult_map_tm[] = { 1000, GOOD(2), 1500, GOOD(3),
719 2000, RT(0), 0, RT(1),
720 1000, RT(2), 2000, RT(7),
721 INT_MAX, INT_MAX };
722
723 static const int thresh_mult_map_new1[] = { 1000, GOOD(2), 2000,
724 RT(0), 2000, INT_MAX };
725
726 static const int thresh_mult_map_new2[] = { 1000, GOOD(2), 2000, GOOD(3),
727 2500, GOOD(5), 4000, RT(0),
728 2000, RT(2), 2500, RT(5),
729 4000, INT_MAX };
730
731 static const int thresh_mult_map_split1[] = {
732 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
733 RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
734 };
735
736 static const int thresh_mult_map_split2[] = {
737 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
738 RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
739 };
740
741 static const int mode_check_freq_map_zn2[] = {
742 /* {zero,nearest}{2,3} */
743 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
744 };
745
746 static const int mode_check_freq_map_vhbpred[] = {
747 0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(5), 4, INT_MAX
748 };
749
750 static const int mode_check_freq_map_near2[] = {
751 0, GOOD(5), 2, RT(0), 0, RT(3), 2,
752 RT(10), 1 << 2, RT(11), 1 << 3, RT(12), 1 << 4, INT_MAX
753 };
754
755 static const int mode_check_freq_map_new1[] = {
756 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
757 };
758
759 static const int mode_check_freq_map_new2[] = { 0, GOOD(5), 4, RT(0),
760 0, RT(3), 4, RT(10),
761 1 << 3, RT(11), 1 << 4, RT(12),
762 1 << 5, INT_MAX };
763
764 static const int mode_check_freq_map_split1[] = {
765 0, GOOD(2), 2, GOOD(3), 7, RT(1), 2, RT(2), 7, INT_MAX
766 };
767
768 static const int mode_check_freq_map_split2[] = {
769 0, GOOD(1), 2, GOOD(2), 4, GOOD(3), 15, RT(1), 4, RT(2), 15, INT_MAX
770 };
771
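/* Configure the encoder's speed/quality trade-offs (mode search thresholds,
 * mode check frequencies, motion search, quantizer and DCT variants) from the
 * compressor mode (best/good/realtime) and the current Speed setting.
 */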
void vp8_set_speed_features(VP8_COMP *cpi) {
773 SPEED_FEATURES *sf = &cpi->sf;
774 int Mode = cpi->compressor_speed;
775 int Speed = cpi->Speed;
776 int Speed2;
777 int i;
778 VP8_COMMON *cm = &cpi->common;
779 int last_improved_quant = sf->improved_quant;
780 int ref_frames;
781
782 /* Initialise default mode frequency sampling variables */
783 for (i = 0; i < MAX_MODES; ++i) {
784 cpi->mode_check_freq[i] = 0;
785 }
786
787 cpi->mb.mbs_tested_so_far = 0;
788 cpi->mb.mbs_zero_last_dot_suppress = 0;
789
790 /* best quality defaults */
791 sf->RD = 1;
792 sf->search_method = NSTEP;
793 sf->improved_quant = 1;
794 sf->improved_dct = 1;
795 sf->auto_filter = 1;
796 sf->recode_loop = 1;
797 sf->quarter_pixel_search = 1;
798 sf->half_pixel_search = 1;
799 sf->iterative_sub_pixel = 1;
800 sf->optimize_coefficients = 1;
801 sf->use_fastquant_for_pick = 0;
802 sf->no_skip_block4x4_search = 1;
803
804 sf->first_step = 0;
805 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
806 sf->improved_mv_pred = 1;
807
808 /* default thresholds to 0 */
809 for (i = 0; i < MAX_MODES; ++i) sf->thresh_mult[i] = 0;
810
811 /* Count enabled references */
812 ref_frames = 1;
813 if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frames++;
814 if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frames++;
815 if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frames++;
816
817 /* Convert speed to continuous range, with clamping */
818 if (Mode == 0) {
819 Speed = 0;
820 } else if (Mode == 2) {
821 Speed = RT(Speed);
822 } else {
823 if (Speed > 5) Speed = 5;
824 Speed = GOOD(Speed);
825 }
826
827 sf->thresh_mult[THR_ZERO1] = sf->thresh_mult[THR_NEAREST1] =
828 sf->thresh_mult[THR_NEAR1] = sf->thresh_mult[THR_DC] = 0; /* always */
829
830 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO3] =
831 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST3] =
832 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR3] =
833 speed_map(Speed, thresh_mult_map_znn);
834
835 sf->thresh_mult[THR_V_PRED] = sf->thresh_mult[THR_H_PRED] =
836 speed_map(Speed, thresh_mult_map_vhpred);
837 sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
838 sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
839 sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
840 sf->thresh_mult[THR_NEW2] = sf->thresh_mult[THR_NEW3] =
841 speed_map(Speed, thresh_mult_map_new2);
842 sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
843 sf->thresh_mult[THR_SPLIT2] = sf->thresh_mult[THR_SPLIT3] =
844 speed_map(Speed, thresh_mult_map_split2);
845
846 // Special case for temporal layers.
847 // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
848 // used as second reference. We don't modify thresholds for ALTREF case
849 // since ALTREF is usually used as long-term reference in temporal layers.
850 if ((cpi->Speed <= 6) && (cpi->oxcf.number_of_layers > 1) &&
851 (cpi->ref_frame_flags & VP8_LAST_FRAME) &&
852 (cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
853 if (cpi->closest_reference_frame == GOLDEN_FRAME) {
854 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
855 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
856 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
857 } else {
858 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
859 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
860 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;
861 }
862 }
863
864 cpi->mode_check_freq[THR_ZERO1] = cpi->mode_check_freq[THR_NEAREST1] =
865 cpi->mode_check_freq[THR_NEAR1] = cpi->mode_check_freq[THR_TM] =
866 cpi->mode_check_freq[THR_DC] = 0; /* always */
867
868 cpi->mode_check_freq[THR_ZERO2] = cpi->mode_check_freq[THR_ZERO3] =
869 cpi->mode_check_freq[THR_NEAREST2] = cpi->mode_check_freq[THR_NEAREST3] =
870 speed_map(Speed, mode_check_freq_map_zn2);
871
872 cpi->mode_check_freq[THR_NEAR2] = cpi->mode_check_freq[THR_NEAR3] =
873 speed_map(Speed, mode_check_freq_map_near2);
874
875 cpi->mode_check_freq[THR_V_PRED] = cpi->mode_check_freq[THR_H_PRED] =
876 cpi->mode_check_freq[THR_B_PRED] =
877 speed_map(Speed, mode_check_freq_map_vhbpred);
878
879 // For real-time mode at speed 10 keep the mode_check_freq threshold
880 // for NEW1 similar to that of speed 9.
881 Speed2 = Speed;
882 if (cpi->Speed == 10 && Mode == 2) Speed2 = RT(9);
883 cpi->mode_check_freq[THR_NEW1] = speed_map(Speed2, mode_check_freq_map_new1);
884
885 cpi->mode_check_freq[THR_NEW2] = cpi->mode_check_freq[THR_NEW3] =
886 speed_map(Speed, mode_check_freq_map_new2);
887
888 cpi->mode_check_freq[THR_SPLIT1] =
889 speed_map(Speed, mode_check_freq_map_split1);
890 cpi->mode_check_freq[THR_SPLIT2] = cpi->mode_check_freq[THR_SPLIT3] =
891 speed_map(Speed, mode_check_freq_map_split2);
892 Speed = cpi->Speed;
893 switch (Mode) {
894 #if !CONFIG_REALTIME_ONLY
895 case 0: /* best quality mode */
896 sf->first_step = 0;
897 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
898 break;
899 case 1:
900 case 3:
901 if (Speed > 0) {
902 /* Disable coefficient optimization above speed 0 */
903 sf->optimize_coefficients = 0;
904 sf->use_fastquant_for_pick = 1;
905 sf->no_skip_block4x4_search = 0;
906
907 sf->first_step = 1;
908 }
909
910 if (Speed > 2) {
911 sf->improved_quant = 0;
912 sf->improved_dct = 0;
913
914 /* Only do recode loop on key frames, golden frames and
915 * alt ref frames
916 */
917 sf->recode_loop = 2;
918 }
919
920 if (Speed > 3) {
921 sf->auto_filter = 1;
922 sf->recode_loop = 0; /* recode loop off */
923 sf->RD = 0; /* Turn rd off */
924 }
925
926 if (Speed > 4) {
927 sf->auto_filter = 0; /* Faster selection of loop filter */
928 }
929
930 break;
931 #endif
932 case 2:
933 sf->optimize_coefficients = 0;
934 sf->recode_loop = 0;
935 sf->auto_filter = 1;
936 sf->iterative_sub_pixel = 1;
937 sf->search_method = NSTEP;
938
939 if (Speed > 0) {
940 sf->improved_quant = 0;
941 sf->improved_dct = 0;
942
943 sf->use_fastquant_for_pick = 1;
944 sf->no_skip_block4x4_search = 0;
945 sf->first_step = 1;
946 }
947
948 if (Speed > 2) sf->auto_filter = 0; /* Faster selection of loop filter */
949
950 if (Speed > 3) {
951 sf->RD = 0;
952 sf->auto_filter = 1;
953 }
954
955 if (Speed > 4) {
956 sf->auto_filter = 0; /* Faster selection of loop filter */
957 sf->search_method = HEX;
958 sf->iterative_sub_pixel = 0;
959 }
960
961 if (Speed > 6) {
962 unsigned int sum = 0;
963 unsigned int total_mbs = cm->MBs;
964 int thresh;
965 unsigned int total_skip;
966
967 int min = 2000;
968
969 if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout;
970
971 min >>= 7;
972
973 for (i = 0; i < min; ++i) {
974 sum += cpi->mb.error_bins[i];
975 }
976
977 total_skip = sum;
978 sum = 0;
979
980 /* i starts from 2 to make sure thresh started from 2048 */
981 for (; i < 1024; ++i) {
982 sum += cpi->mb.error_bins[i];
983
984 if (10 * sum >=
985 (unsigned int)(cpi->Speed - 6) * (total_mbs - total_skip)) {
986 break;
987 }
988 }
989
990 i--;
991 thresh = (i << 7);
992
993 if (thresh < 2000) thresh = 2000;
994
995 if (ref_frames > 1) {
996 sf->thresh_mult[THR_NEW1] = thresh;
997 sf->thresh_mult[THR_NEAREST1] = thresh >> 1;
998 sf->thresh_mult[THR_NEAR1] = thresh >> 1;
999 }
1000
1001 if (ref_frames > 2) {
1002 sf->thresh_mult[THR_NEW2] = thresh << 1;
1003 sf->thresh_mult[THR_NEAREST2] = thresh;
1004 sf->thresh_mult[THR_NEAR2] = thresh;
1005 }
1006
1007 if (ref_frames > 3) {
1008 sf->thresh_mult[THR_NEW3] = thresh << 1;
1009 sf->thresh_mult[THR_NEAREST3] = thresh;
1010 sf->thresh_mult[THR_NEAR3] = thresh;
1011 }
1012
1013 sf->improved_mv_pred = 0;
1014 }
1015
1016 if (Speed > 8) sf->quarter_pixel_search = 0;
1017
1018 if (cm->version == 0) {
1019 cm->filter_type = NORMAL_LOOPFILTER;
1020
1021 if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER;
1022 } else {
1023 cm->filter_type = SIMPLE_LOOPFILTER;
1024 }
1025
1026 /* This has a big hit on quality. Last resort */
1027 if (Speed >= 15) sf->half_pixel_search = 0;
1028
1029 memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
1030
1031 }; /* switch */
1032
1033 /* Slow quant, dct and trellis not worthwhile for first pass
1034 * so make sure they are always turned off.
1035 */
1036 if (cpi->pass == 1) {
1037 sf->improved_quant = 0;
1038 sf->optimize_coefficients = 0;
1039 sf->improved_dct = 0;
1040 }
1041
1042 if (cpi->sf.search_method == NSTEP) {
1043 vp8_init3smotion_compensation(&cpi->mb,
1044 cm->yv12_fb[cm->lst_fb_idx].y_stride);
1045 } else if (cpi->sf.search_method == DIAMOND) {
1046 vp8_init_dsmotion_compensation(&cpi->mb,
1047 cm->yv12_fb[cm->lst_fb_idx].y_stride);
1048 }
1049
1050 if (cpi->sf.improved_dct) {
1051 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1052 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1053 } else {
1054 /* No fast FDCT defined for any platform at this time. */
1055 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1056 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1057 }
1058
1059 cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
1060
1061 if (cpi->sf.improved_quant) {
1062 cpi->mb.quantize_b = vp8_regular_quantize_b;
1063 } else {
1064 cpi->mb.quantize_b = vp8_fast_quantize_b;
1065 }
1066 if (cpi->sf.improved_quant != last_improved_quant) vp8cx_init_quantizer(cpi);
1067
1068 if (cpi->sf.iterative_sub_pixel == 1) {
1069 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
1070 } else if (cpi->sf.quarter_pixel_search) {
1071 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
1072 } else if (cpi->sf.half_pixel_search) {
1073 cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
1074 } else {
1075 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1076 }
1077
1078 if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1) {
1079 cpi->mb.optimize = 1;
1080 } else {
1081 cpi->mb.optimize = 0;
1082 }
1083
1084 if (cpi->common.full_pixel) {
1085 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1086 }
1087
1088 #ifdef SPEEDSTATS
1089 frames_at_speed[cpi->Speed]++;
1090 #endif
1091 }
1092 #undef GOOD
1093 #undef RT
1094
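/* Allocate the lookahead (lag) buffers and, when VP8_TEMPORAL_ALT_REF is
 * enabled, the altref working buffer.
 */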
static void alloc_raw_frame_buffers(VP8_COMP *cpi) {
1096 #if VP8_TEMPORAL_ALT_REF
1097 int width = (cpi->oxcf.Width + 15) & ~15;
1098 int height = (cpi->oxcf.Height + 15) & ~15;
1099 #endif
1100
1101 cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
1102 cpi->oxcf.lag_in_frames);
1103 if (!cpi->lookahead) {
1104 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1105 "Failed to allocate lag buffers");
1106 }
1107
1108 #if VP8_TEMPORAL_ALT_REF
1109
1110 if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, width, height,
1111 VP8BORDERINPIXELS)) {
1112 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1113 "Failed to allocate altref buffer");
1114 }
1115
1116 #endif
1117 }
1118
static void dealloc_raw_frame_buffers(VP8_COMP *cpi) {
1120 #if VP8_TEMPORAL_ALT_REF
1121 vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
1122 #endif
1123 vp8_lookahead_destroy(cpi->lookahead);
1124 }
1125
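/* Allocate the per-macroblock partition info array (with a one MB border).
 * Returns 1 on allocation failure, 0 on success.
 */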
static int vp8_alloc_partition_data(VP8_COMP *cpi) {
1127 vpx_free(cpi->mb.pip);
1128
1129 cpi->mb.pip =
1130 vpx_calloc((cpi->common.mb_cols + 1) * (cpi->common.mb_rows + 1),
1131 sizeof(PARTITION_INFO));
1132 if (!cpi->mb.pip) return 1;
1133
1134 cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
1135
1136 return 0;
1137 }
1138
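/* (Re)allocate all encoder buffers that depend on the frame dimensions:
 * frame buffers, token buffer, GF activity maps, MV prediction arrays,
 * segmentation / active maps and, where enabled, multithreading sync and
 * denoiser state.
 */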
void vp8_alloc_compressor_data(VP8_COMP *cpi) {
1140 VP8_COMMON *cm = &cpi->common;
1141
1142 int width = cm->Width;
1143 int height = cm->Height;
1144
1145 if (vp8_alloc_frame_buffers(cm, width, height)) {
1146 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1147 "Failed to allocate frame buffers");
1148 }
1149
1150 if (vp8_alloc_partition_data(cpi)) {
1151 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1152 "Failed to allocate partition data");
1153 }
1154
1155 if ((width & 0xf) != 0) width += 16 - (width & 0xf);
1156
1157 if ((height & 0xf) != 0) height += 16 - (height & 0xf);
1158
1159 if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame, width, height,
1160 VP8BORDERINPIXELS)) {
1161 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1162 "Failed to allocate last frame buffer");
1163 }
1164
1165 if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height,
1166 VP8BORDERINPIXELS)) {
1167 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1168 "Failed to allocate scaled source buffer");
1169 }
1170
1171 vpx_free(cpi->tok);
1172
1173 {
1174 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
1175 unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
1176 #else
1177 unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
1178 #endif
1179 CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
1180 }
1181
1182 /* Data used for real time vc mode to see if gf needs refreshing */
1183 cpi->zeromv_count = 0;
1184
1185 /* Structures used to monitor GF usage */
1186 vpx_free(cpi->gf_active_flags);
1187 CHECK_MEM_ERROR(
1188 cpi->gf_active_flags,
1189 vpx_calloc(sizeof(*cpi->gf_active_flags), cm->mb_rows * cm->mb_cols));
1190 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
1191
1192 vpx_free(cpi->mb_activity_map);
1193 CHECK_MEM_ERROR(
1194 cpi->mb_activity_map,
1195 vpx_calloc(sizeof(*cpi->mb_activity_map), cm->mb_rows * cm->mb_cols));
1196
1197 /* allocate memory for storing last frame's MVs for MV prediction. */
1198 vpx_free(cpi->lfmv);
1199 CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1200 sizeof(*cpi->lfmv)));
1201 vpx_free(cpi->lf_ref_frame_sign_bias);
1202 CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias,
1203 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1204 sizeof(*cpi->lf_ref_frame_sign_bias)));
1205 vpx_free(cpi->lf_ref_frame);
1206 CHECK_MEM_ERROR(cpi->lf_ref_frame,
1207 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1208 sizeof(*cpi->lf_ref_frame)));
1209
1210 /* Create the encoder segmentation map and set all entries to 0 */
1211 vpx_free(cpi->segmentation_map);
1212 CHECK_MEM_ERROR(
1213 cpi->segmentation_map,
1214 vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->segmentation_map)));
1215 cpi->cyclic_refresh_mode_index = 0;
1216 vpx_free(cpi->active_map);
1217 CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cm->mb_rows * cm->mb_cols,
1218 sizeof(*cpi->active_map)));
1219 memset(cpi->active_map, 1, (cm->mb_rows * cm->mb_cols));
1220
1221 #if CONFIG_MULTITHREAD
1222 if (width < 640) {
1223 cpi->mt_sync_range = 1;
1224 } else if (width <= 1280) {
1225 cpi->mt_sync_range = 4;
1226 } else if (width <= 2560) {
1227 cpi->mt_sync_range = 8;
1228 } else {
1229 cpi->mt_sync_range = 16;
1230 }
1231
1232 if (cpi->oxcf.multi_threaded > 1) {
1233 int i;
1234
1235 vpx_free(cpi->mt_current_mb_col);
1236 CHECK_MEM_ERROR(cpi->mt_current_mb_col,
1237 vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));
1238 for (i = 0; i < cm->mb_rows; ++i)
1239 vpx_atomic_init(&cpi->mt_current_mb_col[i], 0);
1240 }
1241
1242 #endif
1243
1244 vpx_free(cpi->tplist);
1245 CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));
1246
1247 #if CONFIG_TEMPORAL_DENOISING
1248 if (cpi->oxcf.noise_sensitivity > 0) {
1249 vp8_denoiser_free(&cpi->denoiser);
1250 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1251 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1252 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1253 "Failed to allocate denoiser");
1254 }
1255 }
1256 #endif
1257 }
1258
1259 /* Quant MOD */
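/* q_trans maps the external quantizer range (0-63) to the internal
 * quantizer index range (0-127).
 */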
1260 static const int q_trans[] = {
1261 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 17, 18, 19,
1262 20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41,
1263 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79,
1264 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127,
1265 };
1266
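/* Map an internal quantizer index (0-127) back to the external 0-63 scale:
 * returns the smallest external value whose q_trans[] entry is >= x,
 * e.g. vp8_reverse_trans(8) == 7 since q_trans[7] == 8.
 */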
int vp8_reverse_trans(int x) {
1268 int i;
1269
1270 for (i = 0; i < 64; ++i) {
1271 if (q_trans[i] >= x) return i;
1272 }
1273
1274 return 63;
1275 }
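/* Recompute the per-frame bandwidth targets and the maximum GF/ARF interval
 * whenever the output frame rate changes.
 */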
void vp8_new_framerate(VP8_COMP *cpi, double framerate) {
1277 if (framerate < .1) framerate = 30;
1278
1279 cpi->framerate = framerate;
1280 cpi->output_framerate = framerate;
1281 cpi->per_frame_bandwidth =
1282 (int)(cpi->oxcf.target_bandwidth / cpi->output_framerate);
1283 cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
1284 cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
1285 cpi->oxcf.two_pass_vbrmin_section / 100);
1286
1287 /* Set Maximum gf/arf interval */
1288 cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
1289
1290 if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12;
1291
1292 /* Extended interval for genuinely static scenes */
1293 cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
1294
  /* Special conditions when alt ref frame enabled in lagged compress mode */
1296 if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
1297 if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) {
1298 cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1299 }
1300
1301 if (cpi->twopass.static_scene_max_gf_interval >
1302 cpi->oxcf.lag_in_frames - 1) {
1303 cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1304 }
1305 }
1306
1307 if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval) {
1308 cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
1309 }
1310 }
1311
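/* One-time setup at encoder creation: copy the configuration, guess an
 * initial framerate from the timebase, set the default reference flags and
 * initialize rate control and temporal layer state.
 */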
static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1313 VP8_COMMON *cm = &cpi->common;
1314
1315 cpi->oxcf = *oxcf;
1316
1317 cpi->auto_gold = 1;
1318 cpi->auto_adjust_gold_quantizer = 1;
1319
1320 cm->version = oxcf->Version;
1321 vp8_setup_version(cm);
1322
1323 /* Frame rate is not available on the first frame, as it's derived from
1324 * the observed timestamps. The actual value used here doesn't matter
1325 * too much, as it will adapt quickly.
1326 */
1327 if (oxcf->timebase.num > 0) {
1328 cpi->framerate =
1329 (double)(oxcf->timebase.den) / (double)(oxcf->timebase.num);
1330 } else {
1331 cpi->framerate = 30;
1332 }
1333
1334 /* If the reciprocal of the timebase seems like a reasonable framerate,
1335 * then use that as a guess, otherwise use 30.
1336 */
1337 if (cpi->framerate > 180) cpi->framerate = 30;
1338
1339 cpi->ref_framerate = cpi->framerate;
1340
1341 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
1342
1343 cm->refresh_golden_frame = 0;
1344 cm->refresh_last_frame = 1;
1345 cm->refresh_entropy_probs = 1;
1346
1347 /* change includes all joint functionality */
1348 vp8_change_config(cpi, oxcf);
1349
1350 /* Initialize active best and worst q and average q values. */
1351 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1352 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1353 cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
1354
1355 /* Initialise the starting buffer levels */
1356 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
1357 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
1358
1359 cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
1360 cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
1361 cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
1362 cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
1363
1364 cpi->total_actual_bits = 0;
1365 cpi->total_target_vs_actual = 0;
1366
  /* Temporal scalability */
1368 if (cpi->oxcf.number_of_layers > 1) {
1369 unsigned int i;
1370 double prev_layer_framerate = 0;
1371
1372 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
1373 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
1374 prev_layer_framerate =
1375 cpi->output_framerate / cpi->oxcf.rate_decimator[i];
1376 }
1377 }
1378
1379 #if VP8_TEMPORAL_ALT_REF
1380 {
1381 int i;
1382
1383 cpi->fixed_divide[0] = 0;
1384
1385 for (i = 1; i < 512; ++i) cpi->fixed_divide[i] = 0x80000 / i;
1386 }
1387 #endif
1388 }
1389
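/* Update the per-layer snapshots of framerate, target bandwidth and buffer
 * levels after a configuration change (temporal layers only).
 */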
static void update_layer_contexts(VP8_COMP *cpi) {
1391 VP8_CONFIG *oxcf = &cpi->oxcf;
1392
1393 /* Update snapshots of the layer contexts to reflect new parameters */
1394 if (oxcf->number_of_layers > 1) {
1395 unsigned int i;
1396 double prev_layer_framerate = 0;
1397
1398 assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
1399 for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i) {
1400 LAYER_CONTEXT *lc = &cpi->layer_context[i];
1401
1402 lc->framerate = cpi->ref_framerate / oxcf->rate_decimator[i];
1403 lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
1404
1405 lc->starting_buffer_level = rescale(
1406 (int)oxcf->starting_buffer_level_in_ms, lc->target_bandwidth, 1000);
1407
1408 if (oxcf->optimal_buffer_level == 0) {
1409 lc->optimal_buffer_level = lc->target_bandwidth / 8;
1410 } else {
1411 lc->optimal_buffer_level = rescale(
1412 (int)oxcf->optimal_buffer_level_in_ms, lc->target_bandwidth, 1000);
1413 }
1414
1415 if (oxcf->maximum_buffer_size == 0) {
1416 lc->maximum_buffer_size = lc->target_bandwidth / 8;
1417 } else {
1418 lc->maximum_buffer_size = rescale((int)oxcf->maximum_buffer_size_in_ms,
1419 lc->target_bandwidth, 1000);
1420 }
1421
1422 /* Work out the average size of a frame within this layer */
1423 if (i > 0) {
1424 lc->avg_frame_size_for_layer =
1425 (int)((oxcf->target_bitrate[i] - oxcf->target_bitrate[i - 1]) *
1426 1000 / (lc->framerate - prev_layer_framerate));
1427 }
1428
1429 prev_layer_framerate = lc->framerate;
1430 }
1431 }
1432 }
1433
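/* Apply a new encoder configuration, possibly at run time: re-derive the
 * pass/speed settings, quantizer limits, buffer levels, frame size and
 * temporal layer state, reallocating frame buffers if the size changed.
 */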
void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1435 VP8_COMMON *cm = &cpi->common;
1436 int last_w, last_h;
1437 unsigned int prev_number_of_layers;
1438
1439 if (!cpi) return;
1440
1441 if (!oxcf) return;
1442
1443 if (cm->version != oxcf->Version) {
1444 cm->version = oxcf->Version;
1445 vp8_setup_version(cm);
1446 }
1447
1448 last_w = cpi->oxcf.Width;
1449 last_h = cpi->oxcf.Height;
1450 prev_number_of_layers = cpi->oxcf.number_of_layers;
1451
1452 cpi->oxcf = *oxcf;
1453
1454 switch (cpi->oxcf.Mode) {
1455 case MODE_REALTIME:
1456 cpi->pass = 0;
1457 cpi->compressor_speed = 2;
1458
1459 if (cpi->oxcf.cpu_used < -16) {
1460 cpi->oxcf.cpu_used = -16;
1461 }
1462
1463 if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16;
1464
1465 break;
1466
1467 case MODE_GOODQUALITY:
1468 cpi->pass = 0;
1469 cpi->compressor_speed = 1;
1470
1471 if (cpi->oxcf.cpu_used < -5) {
1472 cpi->oxcf.cpu_used = -5;
1473 }
1474
1475 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1476
1477 break;
1478
1479 case MODE_BESTQUALITY:
1480 cpi->pass = 0;
1481 cpi->compressor_speed = 0;
1482 break;
1483
1484 case MODE_FIRSTPASS:
1485 cpi->pass = 1;
1486 cpi->compressor_speed = 1;
1487 break;
1488 case MODE_SECONDPASS:
1489 cpi->pass = 2;
1490 cpi->compressor_speed = 1;
1491
1492 if (cpi->oxcf.cpu_used < -5) {
1493 cpi->oxcf.cpu_used = -5;
1494 }
1495
1496 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1497
1498 break;
1499 case MODE_SECONDPASS_BEST:
1500 cpi->pass = 2;
1501 cpi->compressor_speed = 0;
1502 break;
1503 }
1504
1505 if (cpi->pass == 0) cpi->auto_worst_q = 1;
1506
1507 cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
1508 cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
1509 cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
1510
1511 if (oxcf->fixed_q >= 0) {
1512 if (oxcf->worst_allowed_q < 0) {
1513 cpi->oxcf.fixed_q = q_trans[0];
1514 } else {
1515 cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];
1516 }
1517
1518 if (oxcf->alt_q < 0) {
1519 cpi->oxcf.alt_q = q_trans[0];
1520 } else {
1521 cpi->oxcf.alt_q = q_trans[oxcf->alt_q];
1522 }
1523
1524 if (oxcf->key_q < 0) {
1525 cpi->oxcf.key_q = q_trans[0];
1526 } else {
1527 cpi->oxcf.key_q = q_trans[oxcf->key_q];
1528 }
1529
1530 if (oxcf->gold_q < 0) {
1531 cpi->oxcf.gold_q = q_trans[0];
1532 } else {
1533 cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
1534 }
1535 }
1536
1537 cpi->baseline_gf_interval =
1538 cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
1539
1540 // GF behavior for 1 pass CBR, used when error_resilience is off.
1541 if (!cpi->oxcf.error_resilient_mode &&
1542 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1543 cpi->oxcf.Mode == MODE_REALTIME)
1544 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1545
1546 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
1547 cpi->oxcf.token_partitions = 3;
1548 #endif
1549
1550 if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) {
1551 cm->multi_token_partition = (TOKEN_PARTITION)cpi->oxcf.token_partitions;
1552 }
1553
1554 setup_features(cpi);
1555
1556 if (!cpi->use_roi_static_threshold) {
1557 int i;
1558 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
1559 cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
1560 }
1561 }
1562
1563 /* At the moment the first order values may not be > MAXQ */
1564 if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ;
1565
1566 /* local file playback mode == really big buffer */
1567 if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
1568 cpi->oxcf.starting_buffer_level = 60000;
1569 cpi->oxcf.optimal_buffer_level = 60000;
1570 cpi->oxcf.maximum_buffer_size = 240000;
1571 cpi->oxcf.starting_buffer_level_in_ms = 60000;
1572 cpi->oxcf.optimal_buffer_level_in_ms = 60000;
1573 cpi->oxcf.maximum_buffer_size_in_ms = 240000;
1574 }
1575
1576 /* Convert target bandwidth from Kbit/s to Bit/s */
1577 cpi->oxcf.target_bandwidth *= 1000;
1578
1579 cpi->oxcf.starting_buffer_level = rescale(
1580 (int)cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1581
1582 /* Set or reset optimal and maximum buffer levels. */
1583 if (cpi->oxcf.optimal_buffer_level == 0) {
1584 cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
1585 } else {
1586 cpi->oxcf.optimal_buffer_level = rescale(
1587 (int)cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1588 }
1589
1590 if (cpi->oxcf.maximum_buffer_size == 0) {
1591 cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
1592 } else {
1593 cpi->oxcf.maximum_buffer_size = rescale((int)cpi->oxcf.maximum_buffer_size,
1594 cpi->oxcf.target_bandwidth, 1000);
1595 }
1596 // Under a configuration change, where maximum_buffer_size may change,
1597 // keep buffer level clipped to the maximum allowed buffer size.
1598 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
1599 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
1600 cpi->buffer_level = cpi->bits_off_target;
1601 }
1602
1603 /* Set up frame rate and related parameters rate control values. */
1604 vp8_new_framerate(cpi, cpi->framerate);
1605
1606 /* Set absolute upper and lower quality limits */
1607 cpi->worst_quality = cpi->oxcf.worst_allowed_q;
1608 cpi->best_quality = cpi->oxcf.best_allowed_q;
1609
1610 /* active values should only be modified if out of new range */
1611 if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) {
1612 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1613 }
1614 /* less likely */
1615 else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) {
1616 cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
1617 }
1618 if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) {
1619 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1620 }
1621 /* less likely */
1622 else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) {
1623 cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
1624 }
1625
1626 cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
1627
1628 cpi->cq_target_quality = cpi->oxcf.cq_level;
1629
1630 /* Only allow dropped frames in buffered mode */
1631 cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
1632
1633 cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
1634
1635 // Check if the number of temporal layers has changed, and if so reset the
1636 // pattern counter and set/initialize the temporal layer context for the
1637 // new layer configuration.
1638 if (cpi->oxcf.number_of_layers != prev_number_of_layers) {
1639 // If the number of temporal layers has changed we must start at the
1640 // base of the pattern cycle, so set the layer id to 0 and reset
1641 // the temporal pattern counter.
1642 if (cpi->temporal_layer_id > 0) {
1643 cpi->temporal_layer_id = 0;
1644 }
1645 cpi->temporal_pattern_counter = 0;
1646 reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
1647 }
1648
1649 if (!cpi->initial_width) {
1650 cpi->initial_width = cpi->oxcf.Width;
1651 cpi->initial_height = cpi->oxcf.Height;
1652 }
1653
1654 cm->Width = cpi->oxcf.Width;
1655 cm->Height = cpi->oxcf.Height;
1656 assert(cm->Width <= cpi->initial_width);
1657 assert(cm->Height <= cpi->initial_height);
1658
1659 /* TODO(jkoleszar): if an internal spatial resampling is active,
1660 * and we downsize the input image, maybe we should clear the
1661 * internal scale immediately rather than waiting for it to
1662 * correct.
1663 */
1664
1665 /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
1666 if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7;
1667
1668 cm->sharpness_level = cpi->oxcf.Sharpness;
1669
1670 if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL) {
1671 int hr, hs, vr, vs;
1672
1673 Scale2Ratio(cm->horiz_scale, &hr, &hs);
1674 Scale2Ratio(cm->vert_scale, &vr, &vs);
1675
1676 /* always go to the next whole number */
1677 cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
1678 cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
1679 }
1680
1681 if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height) {
1682 cpi->force_next_frame_intra = 1;
1683 }
1684
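/* Reallocate the raw source and compressor buffers when the macroblock-
 * aligned frame dimensions no longer match the existing last-frame buffer,
 * or when nothing has been allocated yet (y_width == 0).
 */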
1685 if (((cm->Width + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
1686 ((cm->Height + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
1687 cm->yv12_fb[cm->lst_fb_idx].y_width == 0) {
1688 dealloc_raw_frame_buffers(cpi);
1689 alloc_raw_frame_buffers(cpi);
1690 vp8_alloc_compressor_data(cpi);
1691 }
1692
1693 if (cpi->oxcf.fixed_q >= 0) {
1694 cpi->last_q[0] = cpi->oxcf.fixed_q;
1695 cpi->last_q[1] = cpi->oxcf.fixed_q;
1696 }
1697
1698 cpi->Speed = cpi->oxcf.cpu_used;
1699
1700 /* Force allow_lag to 0 if lag_in_frames is 0. */
1701 if (cpi->oxcf.lag_in_frames == 0) {
1702 cpi->oxcf.allow_lag = 0;
1703 }
1704 /* Limit on lag buffers as these are not currently dynamically allocated */
1705 else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
1706 cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
1707 }
1708
1709 /* YX Temp */
1710 cpi->alt_ref_source = NULL;
1711 cpi->is_src_frame_alt_ref = 0;
1712
1713 #if CONFIG_TEMPORAL_DENOISING
1714 if (cpi->oxcf.noise_sensitivity) {
1715 if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc) {
1716 int width = (cpi->oxcf.Width + 15) & ~15;
1717 int height = (cpi->oxcf.Height + 15) & ~15;
1718 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1719 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1720 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1721 "Failed to allocate denoiser");
1722 }
1723 }
1724 }
1725 #endif
1726
1727 #if 0
1728 /* Experimental RD Code */
1729 cpi->frame_distortion = 0;
1730 cpi->last_frame_distortion = 0;
1731 #endif
1732 }
1733
1734 #ifndef M_LOG2_E
1735 #define M_LOG2_E 0.693147180559945309417
1736 #endif
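/* log2f is expressed below via the natural log, presumably so the code does
 * not depend on a C99 log2f() being available.
 */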
1737 #define log2f(x) (log(x) / (float)M_LOG2_E)
1738
1739 static void cal_mvsadcosts(int *mvsadcost[2]) {
1740 int i = 1;
1741
1742 mvsadcost[0][0] = 300;
1743 mvsadcost[1][0] = 300;
1744
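/* Fill the rest of the table with a cost that grows with log2 of the motion
 * vector component magnitude, scaled by 256; the table is symmetric about
 * zero so it can be indexed with negative offsets.
 */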
1745 do {
1746 double z = 256 * (2 * (log2f(8 * i) + .6));
1747 mvsadcost[0][i] = (int)z;
1748 mvsadcost[1][i] = (int)z;
1749 mvsadcost[0][-i] = (int)z;
1750 mvsadcost[1][-i] = (int)z;
1751 } while (++i <= mvfp_max);
1752 }
1753
1754 struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
1755 int i;
1756
1757 VP8_COMP *cpi;
1758 VP8_COMMON *cm;
1759
1760 cpi = vpx_memalign(32, sizeof(VP8_COMP));
1761 /* Check that the CPI instance is valid */
1762 if (!cpi) return 0;
1763
1764 cm = &cpi->common;
1765
1766 memset(cpi, 0, sizeof(VP8_COMP));
1767
1768 if (setjmp(cm->error.jmp)) {
1769 cpi->common.error.setjmp = 0;
1770 vp8_remove_compressor(&cpi);
1771 return 0;
1772 }
1773
1774 cpi->common.error.setjmp = 1;
1775
1776 CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site),
1777 (MAX_MVSEARCH_STEPS * 8) + 1));
1778
1779 vp8_create_common(&cpi->common);
1780
1781 init_config(cpi, oxcf);
1782
1783 memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob,
1784 sizeof(vp8cx_base_skip_false_prob));
1785 cpi->common.current_video_frame = 0;
1786 cpi->temporal_pattern_counter = 0;
1787 cpi->temporal_layer_id = -1;
1788 cpi->kf_overspend_bits = 0;
1789 cpi->kf_bitrate_adjustment = 0;
1790 cpi->frames_till_gf_update_due = 0;
1791 cpi->gf_overspend_bits = 0;
1792 cpi->non_gf_bitrate_adjustment = 0;
1793 cpi->prob_last_coded = 128;
1794 cpi->prob_gf_coded = 128;
1795 cpi->prob_intra_coded = 63;
1796
1797 /* Prime the recent reference frame usage counters.
1798 * Hereafter they will be maintained as a sort of moving average
1799 */
1800 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
1801 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
1802 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
1803 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
1804
1805 /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
1806 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
1807
1808 cpi->twopass.gf_decay_rate = 0;
1809 cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
1810
1811 cpi->gold_is_last = 0;
1812 cpi->alt_is_last = 0;
1813 cpi->gold_is_alt = 0;
1814
1815 cpi->active_map_enabled = 0;
1816
1817 cpi->use_roi_static_threshold = 0;
1818
1819 #if 0
1820 /* Experimental code for lagged and one pass */
1821 /* Initialise one_pass GF frames stats */
1822 /* Update stats used for GF selection */
1823 if (cpi->pass == 0)
1824 {
1825 cpi->one_pass_frame_index = 0;
1826
1827 for (i = 0; i < MAX_LAG_BUFFERS; ++i)
1828 {
1829 cpi->one_pass_frame_stats[i].frames_so_far = 0;
1830 cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
1831 cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
1832 cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
1833 cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
1834 cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
1835 cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
1836 cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
1837 cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
1838 }
1839 }
1840 #endif
1841
1842 cpi->mse_source_denoised = 0;
1843
1844 /* Should we use the cyclic refresh method?
1845 * Currently there is no external control for this.
1846 * Enable it for error_resilient_mode, or for 1 pass CBR mode.
1847 */
1848 cpi->cyclic_refresh_mode_enabled =
1849 (cpi->oxcf.error_resilient_mode ||
1850 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1851 cpi->oxcf.Mode <= 2));
1852 cpi->cyclic_refresh_mode_max_mbs_perframe =
1853 (cpi->common.mb_rows * cpi->common.mb_cols) / 7;
1854 if (cpi->oxcf.number_of_layers == 1) {
1855 cpi->cyclic_refresh_mode_max_mbs_perframe =
1856 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
1857 } else if (cpi->oxcf.number_of_layers == 2) {
1858 cpi->cyclic_refresh_mode_max_mbs_perframe =
1859 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
1860 }
1861 cpi->cyclic_refresh_mode_index = 0;
1862 cpi->cyclic_refresh_q = 32;
1863
1864 // GF behavior for 1 pass CBR, used when error_resilience is off.
1865 cpi->gf_update_onepass_cbr = 0;
1866 cpi->gf_noboost_onepass_cbr = 0;
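/* The one-pass CBR GF interval below is tied to the cyclic refresh coverage:
 * roughly twice the number of frames needed for the refresh to sweep the
 * whole frame, clamped to the range [6, 40].
 */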
1867 if (!cpi->oxcf.error_resilient_mode &&
1868 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER && cpi->oxcf.Mode <= 2) {
1869 cpi->gf_update_onepass_cbr = 1;
1870 cpi->gf_noboost_onepass_cbr = 1;
1871 cpi->gf_interval_onepass_cbr =
1872 cpi->cyclic_refresh_mode_max_mbs_perframe > 0
1873 ? (2 * (cpi->common.mb_rows * cpi->common.mb_cols) /
1874 cpi->cyclic_refresh_mode_max_mbs_perframe)
1875 : 10;
1876 cpi->gf_interval_onepass_cbr =
1877 VPXMIN(40, VPXMAX(6, cpi->gf_interval_onepass_cbr));
1878 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1879 }
1880
1881 if (cpi->cyclic_refresh_mode_enabled) {
1882 CHECK_MEM_ERROR(cpi->cyclic_refresh_map,
1883 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1884 } else {
1885 cpi->cyclic_refresh_map = (signed char *)NULL;
1886 }
1887
1888 CHECK_MEM_ERROR(cpi->skin_map, vpx_calloc(cm->mb_rows * cm->mb_cols,
1889 sizeof(cpi->skin_map[0])));
1890
1891 CHECK_MEM_ERROR(cpi->consec_zero_last,
1892 vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
1893 CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias,
1894 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1895
1896 #ifdef VP8_ENTROPY_STATS
1897 init_context_counters();
1898 #endif
1899
1900 /* Initialize the feed-forward activity masking. */
1901 cpi->activity_avg = 90 << 12;
1902
1903 /* Give a sensible default for the first frame. */
1904 cpi->frames_since_key = 8;
1905 cpi->key_frame_frequency = cpi->oxcf.key_freq;
1906 cpi->this_key_frame_forced = 0;
1907 cpi->next_key_frame_forced = 0;
1908
1909 cpi->source_alt_ref_pending = 0;
1910 cpi->source_alt_ref_active = 0;
1911 cpi->common.refresh_alt_ref_frame = 0;
1912
1913 cpi->force_maxqp = 0;
1914 cpi->frames_since_last_drop_overshoot = 0;
1915
1916 cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
1917 #if CONFIG_INTERNAL_STATS
1918 cpi->b_calculate_ssimg = 0;
1919
1920 cpi->count = 0;
1921 cpi->bytes = 0;
1922
1923 if (cpi->b_calculate_psnr) {
1924 cpi->total_sq_error = 0.0;
1925 cpi->total_sq_error2 = 0.0;
1926 cpi->total_y = 0.0;
1927 cpi->total_u = 0.0;
1928 cpi->total_v = 0.0;
1929 cpi->total = 0.0;
1930 cpi->totalp_y = 0.0;
1931 cpi->totalp_u = 0.0;
1932 cpi->totalp_v = 0.0;
1933 cpi->totalp = 0.0;
1934 cpi->tot_recode_hits = 0;
1935 cpi->summed_quality = 0;
1936 cpi->summed_weights = 0;
1937 }
1938
1939 #endif
1940
1941 cpi->first_time_stamp_ever = 0x7FFFFFFF;
1942
1943 cpi->frames_till_gf_update_due = 0;
1944 cpi->key_frame_count = 1;
1945
1946 cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
1947 cpi->ni_tot_qi = 0;
1948 cpi->ni_frames = 0;
1949 cpi->total_byte_count = 0;
1950
1951 cpi->drop_frame = 0;
1952
1953 cpi->rate_correction_factor = 1.0;
1954 cpi->key_frame_rate_correction_factor = 1.0;
1955 cpi->gf_rate_correction_factor = 1.0;
1956 cpi->twopass.est_max_qcorrection_factor = 1.0;
1957
1958 for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
1959 cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
1960 }
1961
1962 #ifdef OUTPUT_YUV_SRC
1963 yuv_file = fopen("bd.yuv", "ab");
1964 #endif
1965 #ifdef OUTPUT_YUV_DENOISED
1966 yuv_denoised_file = fopen("denoised.yuv", "ab");
1967 #endif
1968 #ifdef OUTPUT_YUV_SKINMAP
1969 yuv_skinmap_file = fopen("skinmap.yuv", "wb");
1970 #endif
1971
1972 #if 0
1973 framepsnr = fopen("framepsnr.stt", "a");
1974 kf_list = fopen("kf_list.stt", "w");
1975 #endif
1976
1977 cpi->output_pkt_list = oxcf->output_pkt_list;
1978
1979 #if !CONFIG_REALTIME_ONLY
1980
1981 if (cpi->pass == 1) {
1982 vp8_init_first_pass(cpi);
1983 } else if (cpi->pass == 2) {
1984 size_t packet_sz = sizeof(FIRSTPASS_STATS);
1985 int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
1986
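/* The last packet of the first-pass stats buffer holds the aggregate totals;
 * stats_in_end is left pointing at that final packet for use by
 * vp8_init_second_pass().
 */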
1987 cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
1988 cpi->twopass.stats_in = cpi->twopass.stats_in_start;
1989 cpi->twopass.stats_in_end =
1990 (void *)((char *)cpi->twopass.stats_in + (packets - 1) * packet_sz);
1991 vp8_init_second_pass(cpi);
1992 }
1993
1994 #endif
1995
1996 if (cpi->compressor_speed == 2) {
1997 cpi->avg_encode_time = 0;
1998 cpi->avg_pick_mode_time = 0;
1999 }
2000
2001 vp8_set_speed_features(cpi);
2002
2003 /* Set starting values of RD threshold multipliers (128 = *1) */
2004 for (i = 0; i < MAX_MODES; ++i) {
2005 cpi->mb.rd_thresh_mult[i] = 128;
2006 }
2007
2008 #ifdef VP8_ENTROPY_STATS
2009 init_mv_ref_counts();
2010 #endif
2011
2012 #if CONFIG_MULTITHREAD
2013 if (vp8cx_create_encoder_threads(cpi)) {
2014 vp8_remove_compressor(&cpi);
2015 return 0;
2016 }
2017 #endif
2018
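/* Hook up the SAD and variance kernels used by motion search and mode
 * selection for each supported block partition size.
 */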
2019 cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
2020 cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
2021 cpi->fn_ptr[BLOCK_16X16].svf = vpx_sub_pixel_variance16x16;
2022 cpi->fn_ptr[BLOCK_16X16].sdx3f = vpx_sad16x16x3;
2023 cpi->fn_ptr[BLOCK_16X16].sdx8f = vpx_sad16x16x8;
2024 cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;
2025
2026 cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
2027 cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
2028 cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8;
2029 cpi->fn_ptr[BLOCK_16X8].sdx3f = vpx_sad16x8x3;
2030 cpi->fn_ptr[BLOCK_16X8].sdx8f = vpx_sad16x8x8;
2031 cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;
2032
2033 cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
2034 cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
2035 cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16;
2036 cpi->fn_ptr[BLOCK_8X16].sdx3f = vpx_sad8x16x3;
2037 cpi->fn_ptr[BLOCK_8X16].sdx8f = vpx_sad8x16x8;
2038 cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;
2039
2040 cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
2041 cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
2042 cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8;
2043 cpi->fn_ptr[BLOCK_8X8].sdx3f = vpx_sad8x8x3;
2044 cpi->fn_ptr[BLOCK_8X8].sdx8f = vpx_sad8x8x8;
2045 cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;
2046
2047 cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
2048 cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
2049 cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4;
2050 cpi->fn_ptr[BLOCK_4X4].sdx3f = vpx_sad4x4x3;
2051 cpi->fn_ptr[BLOCK_4X4].sdx8f = vpx_sad4x4x8;
2052 cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;
2053
2054 #if ARCH_X86 || ARCH_X86_64
2055 cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
2056 cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
2057 cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
2058 cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
2059 cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
2060 #endif
2061
2062 cpi->full_search_sad = vp8_full_search_sad;
2063 cpi->diamond_search_sad = vp8_diamond_search_sad;
2064 cpi->refining_search_sad = vp8_refining_search_sad;
2065
2066 /* make sure frame 1 is okay */
2067 cpi->mb.error_bins[0] = cpi->common.MBs;
2068
2069 /* vp8cx_init_quantizer() is first called here. Add check in
2070 * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
2071 * called later when needed. This will avoid unnecessary calls of
2072 * vp8cx_init_quantizer() for every frame.
2073 */
2074 vp8cx_init_quantizer(cpi);
2075
2076 vp8_loop_filter_init(cm);
2077
2078 cpi->common.error.setjmp = 0;
2079
2080 #if CONFIG_MULTI_RES_ENCODING
2081
2082 /* Calculate # of MBs in a row in lower-resolution level image. */
2083 if (cpi->oxcf.mr_encoder_id > 0) vp8_cal_low_res_mb_cols(cpi);
2084
2085 #endif
2086
2087 /* setup RD costs to MACROBLOCK struct */
2088
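/* The mv cost tables are indexed by signed vector components, so point into
 * the middle of each array (offset mv_max / mvfp_max + 1) to allow negative
 * indices.
 */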
2089 cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max + 1];
2090 cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max + 1];
2091 cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max + 1];
2092 cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max + 1];
2093
2094 cal_mvsadcosts(cpi->mb.mvsadcost);
2095
2096 cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
2097 cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
2098 cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
2099 cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
2100 cpi->mb.token_costs = cpi->rd_costs.token_costs;
2101
2102 /* setup block ptrs & offsets */
2103 vp8_setup_block_ptrs(&cpi->mb);
2104 vp8_setup_block_dptrs(&cpi->mb.e_mbd);
2105
2106 return cpi;
2107 }
2108
2109 void vp8_remove_compressor(VP8_COMP **ptr) {
2110 VP8_COMP *cpi = *ptr;
2111
2112 if (!cpi) return;
2113
2114 if (cpi && (cpi->common.current_video_frame > 0)) {
2115 #if !CONFIG_REALTIME_ONLY
2116
2117 if (cpi->pass == 2) {
2118 vp8_end_second_pass(cpi);
2119 }
2120
2121 #endif
2122
2123 #ifdef VP8_ENTROPY_STATS
2124 print_context_counters();
2125 print_tree_update_probs();
2126 print_mode_context();
2127 #endif
2128
2129 #if CONFIG_INTERNAL_STATS
2130
2131 if (cpi->pass != 1) {
2132 FILE *f = fopen("opsnr.stt", "a");
2133 double time_encoded =
2134 (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
2135 10000000.000;
2136 double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
2137
2138 if (cpi->b_calculate_psnr) {
2139 if (cpi->oxcf.number_of_layers > 1) {
2140 int i;
2141
2142 fprintf(f,
2143 "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2144 "GLPsnrP\tVPXSSIM\n");
2145 for (i = 0; i < (int)cpi->oxcf.number_of_layers; ++i) {
2146 double dr =
2147 (double)cpi->bytes_in_layer[i] * 8.0 / 1000.0 / time_encoded;
2148 double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
2149 cpi->common.Width * cpi->common.Height;
2150 double total_psnr =
2151 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2[i]);
2152 double total_psnr2 =
2153 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2_p[i]);
2154 double total_ssim =
2155 100 * pow(cpi->sum_ssim[i] / cpi->sum_weights[i], 8.0);
2156
2157 fprintf(f,
2158 "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2159 "%7.3f\t%7.3f\n",
2160 i, dr, cpi->sum_psnr[i] / cpi->frames_in_layer[i],
2161 total_psnr, cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
2162 total_psnr2, total_ssim);
2163 }
2164 } else {
2165 double samples =
2166 3.0 / 2 * cpi->count * cpi->common.Width * cpi->common.Height;
2167 double total_psnr =
2168 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error);
2169 double total_psnr2 =
2170 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error2);
2171 double total_ssim =
2172 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
2173
2174 fprintf(f,
2175 "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2176 "GLPsnrP\tVPXSSIM\n");
2177 fprintf(f,
2178 "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2179 "%7.3f\n",
2180 dr, cpi->total / cpi->count, total_psnr,
2181 cpi->totalp / cpi->count, total_psnr2, total_ssim);
2182 }
2183 }
2184 fclose(f);
2185 #if 0
2186 f = fopen("qskip.stt", "a");
2187 fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
2188 fclose(f);
2189 #endif
2190 }
2191
2192 #endif
2193
2194 #ifdef SPEEDSTATS
2195
2196 if (cpi->compressor_speed == 2) {
2197 int i;
2198 FILE *f = fopen("cxspeed.stt", "a");
2199 cnt_pm /= cpi->common.MBs;
2200
2201 for (i = 0; i < 16; ++i) fprintf(f, "%5d", frames_at_speed[i]);
2202
2203 fprintf(f, "\n");
2204 fclose(f);
2205 }
2206
2207 #endif
2208
2209 #ifdef MODE_STATS
2210 {
2211 extern int count_mb_seg[4];
2212 FILE *f = fopen("modes.stt", "a");
2213 double dr = (double)cpi->framerate * (double)bytes * (double)8 /
2214 (double)count / (double)1000;
2215 fprintf(f, "intra_mode in Intra Frames:\n");
2216 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1],
2217 y_modes[2], y_modes[3], y_modes[4]);
2218 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1],
2219 uv_modes[2], uv_modes[3]);
2220 fprintf(f, "B: ");
2221 {
2222 int i;
2223
2224 for (i = 0; i < 10; ++i) fprintf(f, "%8d, ", b_modes[i]);
2225
2226 fprintf(f, "\n");
2227 }
2228
2229 fprintf(f, "Modes in Inter Frames:\n");
2230 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
2231 inter_y_modes[0], inter_y_modes[1], inter_y_modes[2],
2232 inter_y_modes[3], inter_y_modes[4], inter_y_modes[5],
2233 inter_y_modes[6], inter_y_modes[7], inter_y_modes[8],
2234 inter_y_modes[9]);
2235 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0],
2236 inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
2237 fprintf(f, "B: ");
2238 {
2239 int i;
2240
2241 for (i = 0; i < 15; ++i) fprintf(f, "%8d, ", inter_b_modes[i]);
2242
2243 fprintf(f, "\n");
2244 }
2245 fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1],
2246 count_mb_seg[2], count_mb_seg[3]);
2247 fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4],
2248 inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4],
2249 inter_b_modes[NEW4X4]);
2250
2251 fclose(f);
2252 }
2253 #endif
2254
2255 #ifdef VP8_ENTROPY_STATS
2256 {
2257 int i, j, k;
2258 FILE *fmode = fopen("modecontext.c", "w");
2259
2260 fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
2261 fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
2262 fprintf(fmode,
2263 "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");
2264
2265 for (i = 0; i < 10; ++i) {
2266 fprintf(fmode, " { /* Above Mode : %d */\n", i);
2267
2268 for (j = 0; j < 10; ++j) {
2269 fprintf(fmode, " {");
2270
2271 for (k = 0; k < 10; ++k) {
2272 if (!intra_mode_stats[i][j][k])
2273 fprintf(fmode, " %5d, ", 1);
2274 else
2275 fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
2276 }
2277
2278 fprintf(fmode, "}, /* left_mode %d */\n", j);
2279 }
2280
2281 fprintf(fmode, " },\n");
2282 }
2283
2284 fprintf(fmode, "};\n");
2285 fclose(fmode);
2286 }
2287 #endif
2288
2289 #if defined(SECTIONBITS_OUTPUT)
2290
2291 if (0) {
2292 int i;
2293 FILE *f = fopen("tokenbits.stt", "a");
2294
2295 for (i = 0; i < 28; ++i) fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
2296
2297 fprintf(f, "\n");
2298 fclose(f);
2299 }
2300
2301 #endif
2302
2303 #if 0
2304 {
2305 printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
2306 printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
2307 printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
2308 }
2309 #endif
2310 }
2311
2312 #if CONFIG_MULTITHREAD
2313 vp8cx_remove_encoder_threads(cpi);
2314 #endif
2315
2316 #if CONFIG_TEMPORAL_DENOISING
2317 vp8_denoiser_free(&cpi->denoiser);
2318 #endif
2319 dealloc_compressor_data(cpi);
2320 vpx_free(cpi->mb.ss);
2321 vpx_free(cpi->tok);
2322 vpx_free(cpi->skin_map);
2323 vpx_free(cpi->cyclic_refresh_map);
2324 vpx_free(cpi->consec_zero_last);
2325 vpx_free(cpi->consec_zero_last_mvbias);
2326
2327 vp8_remove_common(&cpi->common);
2328 vpx_free(cpi);
2329 *ptr = 0;
2330
2331 #ifdef OUTPUT_YUV_SRC
2332 fclose(yuv_file);
2333 #endif
2334 #ifdef OUTPUT_YUV_DENOISED
2335 fclose(yuv_denoised_file);
2336 #endif
2337 #ifdef OUTPUT_YUV_SKINMAP
2338 fclose(yuv_skinmap_file);
2339 #endif
2340
2341 #if 0
2342
2343 if (keyfile)
2344 fclose(keyfile);
2345
2346 if (framepsnr)
2347 fclose(framepsnr);
2348
2349 if (kf_list)
2350 fclose(kf_list);
2351
2352 #endif
2353 }
2354
2355 static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
2356 unsigned char *recon, int recon_stride,
2357 unsigned int cols, unsigned int rows) {
2358 unsigned int row, col;
2359 uint64_t total_sse = 0;
2360 int diff;
2361
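/* Sum SSE over full 16x16 blocks with the optimized MSE kernel; any
 * right/bottom remainder that does not fill a whole block is handled by the
 * scalar loops below.
 */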
2362 for (row = 0; row + 16 <= rows; row += 16) {
2363 for (col = 0; col + 16 <= cols; col += 16) {
2364 unsigned int sse;
2365
2366 vpx_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
2367 total_sse += sse;
2368 }
2369
2370 /* Handle odd-sized width */
2371 if (col < cols) {
2372 unsigned int border_row, border_col;
2373 unsigned char *border_orig = orig;
2374 unsigned char *border_recon = recon;
2375
2376 for (border_row = 0; border_row < 16; ++border_row) {
2377 for (border_col = col; border_col < cols; ++border_col) {
2378 diff = border_orig[border_col] - border_recon[border_col];
2379 total_sse += diff * diff;
2380 }
2381
2382 border_orig += orig_stride;
2383 border_recon += recon_stride;
2384 }
2385 }
2386
2387 orig += orig_stride * 16;
2388 recon += recon_stride * 16;
2389 }
2390
2391 /* Handle odd-sized height */
2392 for (; row < rows; ++row) {
2393 for (col = 0; col < cols; ++col) {
2394 diff = orig[col] - recon[col];
2395 total_sse += diff * diff;
2396 }
2397
2398 orig += orig_stride;
2399 recon += recon_stride;
2400 }
2401
2402 vpx_clear_system_state();
2403 return total_sse;
2404 }
2405
2406 static void generate_psnr_packet(VP8_COMP *cpi) {
2407 YV12_BUFFER_CONFIG *orig = cpi->Source;
2408 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
2409 struct vpx_codec_cx_pkt pkt;
2410 uint64_t sse;
2411 int i;
2412 unsigned int width = cpi->common.Width;
2413 unsigned int height = cpi->common.Height;
2414
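/* Index 0 accumulates the combined Y+U+V totals while indices 1..3 hold the
 * per-plane (Y, U, V) values; chroma dimensions are halved for 4:2:0.
 */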
2415 pkt.kind = VPX_CODEC_PSNR_PKT;
2416 sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
2417 recon->y_stride, width, height);
2418 pkt.data.psnr.sse[0] = sse;
2419 pkt.data.psnr.sse[1] = sse;
2420 pkt.data.psnr.samples[0] = width * height;
2421 pkt.data.psnr.samples[1] = width * height;
2422
2423 width = (width + 1) / 2;
2424 height = (height + 1) / 2;
2425
2426 sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
2427 recon->uv_stride, width, height);
2428 pkt.data.psnr.sse[0] += sse;
2429 pkt.data.psnr.sse[2] = sse;
2430 pkt.data.psnr.samples[0] += width * height;
2431 pkt.data.psnr.samples[2] = width * height;
2432
2433 sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
2434 recon->uv_stride, width, height);
2435 pkt.data.psnr.sse[0] += sse;
2436 pkt.data.psnr.sse[3] = sse;
2437 pkt.data.psnr.samples[0] += width * height;
2438 pkt.data.psnr.samples[3] = width * height;
2439
2440 for (i = 0; i < 4; ++i) {
2441 pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
2442 (double)(pkt.data.psnr.sse[i]));
2443 }
2444
2445 vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
2446 }
2447
2448 int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags) {
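/* ref_frame_flags is a bitmask of VP8_LAST_FRAME | VP8_GOLD_FRAME |
 * VP8_ALTR_FRAME, so any value above 7 is invalid.
 */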
2449 if (ref_frame_flags > 7) return -1;
2450
2451 cpi->ref_frame_flags = ref_frame_flags;
2452 return 0;
2453 }
2454 int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) {
2455 if (ref_frame_flags > 7) return -1;
2456
2457 cpi->common.refresh_golden_frame = 0;
2458 cpi->common.refresh_alt_ref_frame = 0;
2459 cpi->common.refresh_last_frame = 0;
2460
2461 if (ref_frame_flags & VP8_LAST_FRAME) cpi->common.refresh_last_frame = 1;
2462
2463 if (ref_frame_flags & VP8_GOLD_FRAME) cpi->common.refresh_golden_frame = 1;
2464
2465 if (ref_frame_flags & VP8_ALTR_FRAME) cpi->common.refresh_alt_ref_frame = 1;
2466
2467 return 0;
2468 }
2469
2470 int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2471 YV12_BUFFER_CONFIG *sd) {
2472 VP8_COMMON *cm = &cpi->common;
2473 int ref_fb_idx;
2474
2475 if (ref_frame_flag == VP8_LAST_FRAME) {
2476 ref_fb_idx = cm->lst_fb_idx;
2477 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2478 ref_fb_idx = cm->gld_fb_idx;
2479 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2480 ref_fb_idx = cm->alt_fb_idx;
2481 } else {
2482 return -1;
2483 }
2484
2485 vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
2486
2487 return 0;
2488 }
2489 int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2490 YV12_BUFFER_CONFIG *sd) {
2491 VP8_COMMON *cm = &cpi->common;
2492
2493 int ref_fb_idx;
2494
2495 if (ref_frame_flag == VP8_LAST_FRAME) {
2496 ref_fb_idx = cm->lst_fb_idx;
2497 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2498 ref_fb_idx = cm->gld_fb_idx;
2499 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2500 ref_fb_idx = cm->alt_fb_idx;
2501 } else {
2502 return -1;
2503 }
2504
2505 vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
2506
2507 return 0;
2508 }
2509 int vp8_update_entropy(VP8_COMP *cpi, int update) {
2510 VP8_COMMON *cm = &cpi->common;
2511 cm->refresh_entropy_probs = update;
2512
2513 return 0;
2514 }
2515
2516 static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
2517 VP8_COMMON *cm = &cpi->common;
2518
2519 /* are we resizing the image */
2520 if (cm->horiz_scale != 0 || cm->vert_scale != 0) {
2521 #if CONFIG_SPATIAL_RESAMPLING
2522 int hr, hs, vr, vs;
2523 int tmp_height;
2524
2525 if (cm->vert_scale == 3) {
2526 tmp_height = 9;
2527 } else {
2528 tmp_height = 11;
2529 }
2530
2531 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2532 Scale2Ratio(cm->vert_scale, &vr, &vs);
2533
2534 vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
2535 tmp_height, hs, hr, vs, vr, 0);
2536
2537 vp8_yv12_extend_frame_borders(&cpi->scaled_source);
2538 cpi->Source = &cpi->scaled_source;
2539 #endif
2540 } else {
2541 cpi->Source = sd;
2542 }
2543 }
2544
2545 static int resize_key_frame(VP8_COMP *cpi) {
2546 #if CONFIG_SPATIAL_RESAMPLING
2547 VP8_COMMON *cm = &cpi->common;
2548
2549 /* Do we need to apply resampling for one pass cbr?
2550 * In one pass this is more limited than in two pass cbr.
2551 * The test and any change is only made once per key frame sequence.
2552 */
2553 if (cpi->oxcf.allow_spatial_resampling &&
2554 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) {
2555 int hr, hs, vr, vs;
2556 int new_width, new_height;
2557
2558 /* If we are below the resample DOWN watermark then scale down a
2559 * notch.
2560 */
2561 if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark *
2562 cpi->oxcf.optimal_buffer_level / 100)) {
2563 cm->horiz_scale =
2564 (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
2565 cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
2566 }
2567 /* Should we now start scaling back up */
2568 else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark *
2569 cpi->oxcf.optimal_buffer_level / 100)) {
2570 cm->horiz_scale =
2571 (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
2572 cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
2573 }
2574
2575 /* Get the new height and width */
2576 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2577 Scale2Ratio(cm->vert_scale, &vr, &vs);
2578 new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
2579 new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
2580
2581 /* If the image size has changed we need to reallocate the buffers
2582 * and resample the source image
2583 */
2584 if ((cm->Width != new_width) || (cm->Height != new_height)) {
2585 cm->Width = new_width;
2586 cm->Height = new_height;
2587 vp8_alloc_compressor_data(cpi);
2588 scale_and_extend_source(cpi->un_scaled_source, cpi);
2589 return 1;
2590 }
2591 }
2592
2593 #endif
2594 return 0;
2595 }
2596
2597 static void update_alt_ref_frame_stats(VP8_COMP *cpi) {
2598 VP8_COMMON *cm = &cpi->common;
2599
2600 /* Select an interval before next GF or altref */
2601 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2602
2603 if ((cpi->pass != 2) && cpi->frames_till_gf_update_due) {
2604 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2605
2606 /* Set the bits per frame that we should try and recover in
2607 * subsequent inter frames to account for the extra GF spend...
2608 * note that this does not apply for GF updates that occur
2609 * coincident with a key frame as the extra cost of key frames is
2610 * dealt with elsewhere.
2611 */
2612 cpi->gf_overspend_bits += cpi->projected_frame_size;
2613 cpi->non_gf_bitrate_adjustment =
2614 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2615 }
2616
2617 /* Update data structure that monitors level of reference to last GF */
2618 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2619 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2620
2621 /* This frame's refresh means later frames don't refresh unless requested by the user */
2622 cpi->frames_since_golden = 0;
2623
2624 /* Clear the alternate reference update pending flag. */
2625 cpi->source_alt_ref_pending = 0;
2626
2627 /* Set the alternate reference frame active flag */
2628 cpi->source_alt_ref_active = 1;
2629 }
2630 static void update_golden_frame_stats(VP8_COMP *cpi) {
2631 VP8_COMMON *cm = &cpi->common;
2632
2633 /* Update the Golden frame usage counts. */
2634 if (cm->refresh_golden_frame) {
2635 /* Select an interval before next GF */
2636 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2637
2638 if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0)) {
2639 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2640
2641 /* Set the bits per frame that we should try and recover in
2642 * subsequent inter frames to account for the extra GF spend...
2643 * note that this does not apply for GF updates that occur
2644 * coincident with a key frame as the extra cost of key frames
2645 * is dealt with elsewhere.
2646 */
2647 if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active) {
2648 /* Calculate GF bits to be recovered
2649 * Projected size - av frame bits available for inter
2650 * frames for clip as a whole
2651 */
2652 cpi->gf_overspend_bits +=
2653 (cpi->projected_frame_size - cpi->inter_frame_target);
2654 }
2655
2656 cpi->non_gf_bitrate_adjustment =
2657 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2658 }
2659
2660 /* Update data structure that monitors level of reference to last GF */
2661 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2662 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2663
2664 /* This frame's refresh means later frames don't refresh unless requested
2665 * by the user
2666 */
2667 cm->refresh_golden_frame = 0;
2668 cpi->frames_since_golden = 0;
2669
2670 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
2671 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
2672 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
2673 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
2674
2675 /* ******** Fixed Q test code only ************ */
2676 /* If we are going to use the ALT reference for the next group of
2677 * frames set a flag to say so.
2678 */
2679 if (cpi->oxcf.fixed_q >= 0 && cpi->oxcf.play_alternate &&
2680 !cpi->common.refresh_alt_ref_frame) {
2681 cpi->source_alt_ref_pending = 1;
2682 cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
2683 }
2684
2685 if (!cpi->source_alt_ref_pending) cpi->source_alt_ref_active = 0;
2686
2687 /* Decrement count down till next gf */
2688 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2689
2690 } else if (!cpi->common.refresh_alt_ref_frame) {
2691 /* Decrement count down till next gf */
2692 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2693
2694 if (cpi->frames_till_alt_ref_frame) cpi->frames_till_alt_ref_frame--;
2695
2696 cpi->frames_since_golden++;
2697
2698 if (cpi->frames_since_golden > 1) {
2699 cpi->recent_ref_frame_usage[INTRA_FRAME] +=
2700 cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
2701 cpi->recent_ref_frame_usage[LAST_FRAME] +=
2702 cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
2703 cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
2704 cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
2705 cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
2706 cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
2707 }
2708 }
2709 }
2710
2711 /* This function updates the reference frame probability estimates that
2712 * will be used during mode selection
2713 */
2714 static void update_rd_ref_frame_probs(VP8_COMP *cpi) {
2715 VP8_COMMON *cm = &cpi->common;
2716
2717 const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
2718 const int rf_intra = rfct[INTRA_FRAME];
2719 const int rf_inter =
2720 rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
2721
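/* prob_intra_coded, prob_last_coded and prob_gf_coded are 8-bit
 * probabilities used to cost the reference frame choice during mode
 * selection (and signalled in the frame header).
 */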
2722 if (cm->frame_type == KEY_FRAME) {
2723 cpi->prob_intra_coded = 255;
2724 cpi->prob_last_coded = 128;
2725 cpi->prob_gf_coded = 128;
2726 } else if (!(rf_intra + rf_inter)) {
2727 cpi->prob_intra_coded = 63;
2728 cpi->prob_last_coded = 128;
2729 cpi->prob_gf_coded = 128;
2730 }
2731
2732 /* update reference frame costs since we can do better than what we got
2733 * last frame.
2734 */
2735 if (cpi->oxcf.number_of_layers == 1) {
2736 if (cpi->common.refresh_alt_ref_frame) {
2737 cpi->prob_intra_coded += 40;
2738 if (cpi->prob_intra_coded > 255) cpi->prob_intra_coded = 255;
2739 cpi->prob_last_coded = 200;
2740 cpi->prob_gf_coded = 1;
2741 } else if (cpi->frames_since_golden == 0) {
2742 cpi->prob_last_coded = 214;
2743 } else if (cpi->frames_since_golden == 1) {
2744 cpi->prob_last_coded = 192;
2745 cpi->prob_gf_coded = 220;
2746 } else if (cpi->source_alt_ref_active) {
2747 cpi->prob_gf_coded -= 20;
2748
2749 if (cpi->prob_gf_coded < 10) cpi->prob_gf_coded = 10;
2750 }
2751 if (!cpi->source_alt_ref_active) cpi->prob_gf_coded = 255;
2752 }
2753 }
2754
2755 #if !CONFIG_REALTIME_ONLY
2756 /* 1 = key, 0 = inter */
2757 static int decide_key_frame(VP8_COMP *cpi) {
2758 VP8_COMMON *cm = &cpi->common;
2759
2760 int code_key_frame = 0;
2761
2762 cpi->kf_boost = 0;
2763
2764 if (cpi->Speed > 11) return 0;
2765
2766 /* Clear down mmx registers */
2767 vpx_clear_system_state();
2768
2769 if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0)) {
2770 double change = 1.0 *
2771 abs((int)(cpi->mb.intra_error - cpi->last_intra_error)) /
2772 (1 + cpi->last_intra_error);
2773 double change2 =
2774 1.0 *
2775 abs((int)(cpi->mb.prediction_error - cpi->last_prediction_error)) /
2776 (1 + cpi->last_prediction_error);
2777 double minerror = cm->MBs * 256;
2778
2779 cpi->last_intra_error = cpi->mb.intra_error;
2780 cpi->last_prediction_error = cpi->mb.prediction_error;
2781
2782 if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15 &&
2783 cpi->mb.prediction_error > minerror &&
2784 (change > .25 || change2 > .25)) {
2785 /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra >
2786 * cpi->last_frame_percent_intra + 3*/
2787 return 1;
2788 }
2789
2790 return 0;
2791 }
2792
2793 /* If the following are true we might as well code a key frame */
2794 if (((cpi->this_frame_percent_intra == 100) &&
2795 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
2796 ((cpi->this_frame_percent_intra > 95) &&
2797 (cpi->this_frame_percent_intra >=
2798 (cpi->last_frame_percent_intra + 5)))) {
2799 code_key_frame = 1;
2800 }
2801 /* In addition, if the following are true and this is not a golden frame,
2802 * then code a key frame. Note that on golden frames there often seems
2803 * to be a pop in intra usage anyway, hence this restriction is
2804 * designed to prevent spurious key frames. The intra pop needs to be
2805 * investigated.
2806 */
2807 else if (((cpi->this_frame_percent_intra > 60) &&
2808 (cpi->this_frame_percent_intra >
2809 (cpi->last_frame_percent_intra * 2))) ||
2810 ((cpi->this_frame_percent_intra > 75) &&
2811 (cpi->this_frame_percent_intra >
2812 (cpi->last_frame_percent_intra * 3 / 2))) ||
2813 ((cpi->this_frame_percent_intra > 90) &&
2814 (cpi->this_frame_percent_intra >
2815 (cpi->last_frame_percent_intra + 10)))) {
2816 if (!cm->refresh_golden_frame) code_key_frame = 1;
2817 }
2818
2819 return code_key_frame;
2820 }
2821
2822 static void Pass1Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
2823 unsigned int *frame_flags) {
2824 (void)size;
2825 (void)dest;
2826 (void)frame_flags;
2827 vp8_set_quantizer(cpi, 26);
2828
2829 vp8_first_pass(cpi);
2830 }
2831 #endif
2832
2833 #if 0
2834 void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
2835 {
2836
2837 /* write the frame */
2838 FILE *yframe;
2839 int i;
2840 char filename[255];
2841
2842 sprintf(filename, "cx\\y%04d.raw", this_frame);
2843 yframe = fopen(filename, "wb");
2844
2845 for (i = 0; i < frame->y_height; ++i)
2846 fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
2847
2848 fclose(yframe);
2849 sprintf(filename, "cx\\u%04d.raw", this_frame);
2850 yframe = fopen(filename, "wb");
2851
2852 for (i = 0; i < frame->uv_height; ++i)
2853 fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2854
2855 fclose(yframe);
2856 sprintf(filename, "cx\\v%04d.raw", this_frame);
2857 yframe = fopen(filename, "wb");
2858
2859 for (i = 0; i < frame->uv_height; ++i)
2860 fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2861
2862 fclose(yframe);
2863 }
2864 #endif
2865 /* return of 0 means drop frame */
2866
2867 #if !CONFIG_REALTIME_ONLY
2868 /* Function to test for conditions that indicate we should loop
2869 * back and recode a frame.
2870 */
2871 static int recode_loop_test(VP8_COMP *cpi, int high_limit, int low_limit, int q,
2872 int maxq, int minq) {
2873 int force_recode = 0;
2874 VP8_COMMON *cm = &cpi->common;
2875
2876 /* Is frame recode allowed at all?
2877 * Yes if either recode mode 1 is selected, or mode 2 is selected
2878 * and the frame is a key frame, golden frame or alt_ref_frame
2879 */
2880 if ((cpi->sf.recode_loop == 1) ||
2881 ((cpi->sf.recode_loop == 2) &&
2882 ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
2883 cm->refresh_alt_ref_frame))) {
2884 /* General over and under shoot tests */
2885 if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
2886 ((cpi->projected_frame_size < low_limit) && (q > minq))) {
2887 force_recode = 1;
2888 }
2889 /* Special Constrained quality tests */
2890 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
2891 /* Undershoot and below auto cq level */
2892 if ((q > cpi->cq_target_quality) &&
2893 (cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3))) {
2894 force_recode = 1;
2895 }
2896 /* Severe undershoot and between auto and user cq level */
2897 else if ((q > cpi->oxcf.cq_level) &&
2898 (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
2899 (cpi->active_best_quality > cpi->oxcf.cq_level)) {
2900 force_recode = 1;
2901 cpi->active_best_quality = cpi->oxcf.cq_level;
2902 }
2903 }
2904 }
2905
2906 return force_recode;
2907 }
2908 #endif // !CONFIG_REALTIME_ONLY
2909
2910 static void update_reference_frames(VP8_COMP *cpi) {
2911 VP8_COMMON *cm = &cpi->common;
2912 YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
2913
2914 /* At this point the new frame has been encoded.
2915 * If any buffer copy / swapping is signaled it should be done here.
2916 */
2917
2918 if (cm->frame_type == KEY_FRAME) {
2919 yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME;
2920
2921 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2922 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2923
2924 cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
2925
2926 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2927 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2928 } else {
2929 if (cm->refresh_alt_ref_frame) {
2930 assert(!cm->copy_buffer_to_arf);
2931
2932 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
2933 cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2934 cm->alt_fb_idx = cm->new_fb_idx;
2935
2936 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2937 } else if (cm->copy_buffer_to_arf) {
2938 assert(!(cm->copy_buffer_to_arf & ~0x3));
2939
2940 if (cm->copy_buffer_to_arf == 1) {
2941 if (cm->alt_fb_idx != cm->lst_fb_idx) {
2942 yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
2943 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2944 cm->alt_fb_idx = cm->lst_fb_idx;
2945
2946 cpi->current_ref_frames[ALTREF_FRAME] =
2947 cpi->current_ref_frames[LAST_FRAME];
2948 }
2949 } else {
2950 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2951 yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
2952 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2953 cm->alt_fb_idx = cm->gld_fb_idx;
2954
2955 cpi->current_ref_frames[ALTREF_FRAME] =
2956 cpi->current_ref_frames[GOLDEN_FRAME];
2957 }
2958 }
2959 }
2960
2961 if (cm->refresh_golden_frame) {
2962 assert(!cm->copy_buffer_to_gf);
2963
2964 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
2965 cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2966 cm->gld_fb_idx = cm->new_fb_idx;
2967
2968 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2969 } else if (cm->copy_buffer_to_gf) {
2970 assert(!(cm->copy_buffer_to_arf & ~0x3));
2971
2972 if (cm->copy_buffer_to_gf == 1) {
2973 if (cm->gld_fb_idx != cm->lst_fb_idx) {
2974 yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
2975 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2976 cm->gld_fb_idx = cm->lst_fb_idx;
2977
2978 cpi->current_ref_frames[GOLDEN_FRAME] =
2979 cpi->current_ref_frames[LAST_FRAME];
2980 }
2981 } else {
2982 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2983 yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
2984 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2985 cm->gld_fb_idx = cm->alt_fb_idx;
2986
2987 cpi->current_ref_frames[GOLDEN_FRAME] =
2988 cpi->current_ref_frames[ALTREF_FRAME];
2989 }
2990 }
2991 }
2992 }
2993
2994 if (cm->refresh_last_frame) {
2995 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
2996 cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
2997 cm->lst_fb_idx = cm->new_fb_idx;
2998
2999 cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
3000 }
3001
3002 #if CONFIG_TEMPORAL_DENOISING
3003 if (cpi->oxcf.noise_sensitivity) {
3004 /* We shouldn't have to keep multiple copies, as we know in advance which
3005 * buffer we should start from - for now, to get something up and running,
3006 * I've chosen to copy the buffers
3007 */
3008 if (cm->frame_type == KEY_FRAME) {
3009 int i;
3010 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
3011 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_running_avg[i]);
3012 } else {
3013 vp8_yv12_extend_frame_borders(
3014 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
3015
3016 if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf) {
3017 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3018 &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
3019 }
3020 if (cm->refresh_golden_frame || cm->copy_buffer_to_gf) {
3021 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3022 &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
3023 }
3024 if (cm->refresh_last_frame) {
3025 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3026 &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
3027 }
3028 }
3029 if (cpi->oxcf.noise_sensitivity == 4)
3030 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source);
3031 }
3032 #endif
3033 }
3034
3035 static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source,
3036 YV12_BUFFER_CONFIG *dest,
3037 VP8_COMP *cpi) {
3038 int i, j;
3039 int Total = 0;
3040 int num_blocks = 0;
3041 int skip = 2;
3042 int min_consec_zero_last = 10;
3043 int tot_num_blocks = (source->y_height * source->y_width) >> 8;
3044 unsigned char *src = source->y_buffer;
3045 unsigned char *dst = dest->y_buffer;
3046
3047 /* Loop through the Y plane, every |skip| blocks along rows and columns,
3048 * summing the square differences, and only for blocks that have been
3049 * in zero_last mode for at least |min_consec_zero_last| frames in a row.
3050 */
3051 for (i = 0; i < source->y_height; i += 16 * skip) {
3052 int block_index_row = (i >> 4) * cpi->common.mb_cols;
3053 for (j = 0; j < source->y_width; j += 16 * skip) {
3054 int index = block_index_row + (j >> 4);
3055 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3056 unsigned int sse;
3057 Total += vpx_mse16x16(src + j, source->y_stride, dst + j,
3058 dest->y_stride, &sse);
3059 num_blocks++;
3060 }
3061 }
3062 src += 16 * skip * source->y_stride;
3063 dst += 16 * skip * dest->y_stride;
3064 }
3065 // Only return non-zero if we have at least ~1/16 samples for estimate.
3066 if (num_blocks > (tot_num_blocks >> 4)) {
3067 assert(num_blocks != 0);
3068 return (Total / num_blocks);
3069 } else {
3070 return 0;
3071 }
3072 }
3073
3074 #if CONFIG_TEMPORAL_DENOISING
3075 static void process_denoiser_mode_change(VP8_COMP *cpi) {
3076 const VP8_COMMON *const cm = &cpi->common;
3077 int i, j;
3078 int total = 0;
3079 int num_blocks = 0;
3080 // Number of blocks skipped along row/column in computing the
3081 // nmse (normalized mean square error) of source.
3082 int skip = 2;
3083 // Only select blocks for computing nmse that have been encoded
3084 // as ZERO LAST for at least min_consec_zero_last frames in a row.
3085 // Scale with number of temporal layers.
3086 int min_consec_zero_last = 12 / cpi->oxcf.number_of_layers;
3087 // Decision is tested for changing the denoising mode every
3088 // num_mode_change times this function is called. Note that this
3089 // function is called every 8 frames, so (8 * num_mode_change) is the number
3090 // of frames over which a switch of denoising mode is considered.
3091 int num_mode_change = 20;
3092 // Framerate factor, to compensate for larger mse at lower framerates.
3093 // Use ref_framerate, which is full source framerate for temporal layers.
3094 // TODO(marpan): Adjust this factor.
3095 int fac_framerate = cpi->ref_framerate < 25.0f ? 80 : 100;
3096 int tot_num_blocks = cm->mb_rows * cm->mb_cols;
3097 int ystride = cpi->Source->y_stride;
3098 unsigned char *src = cpi->Source->y_buffer;
3099 unsigned char *dst = cpi->denoiser.yv12_last_source.y_buffer;
3100 static const unsigned char const_source[16] = { 128, 128, 128, 128, 128, 128,
3101 128, 128, 128, 128, 128, 128,
3102 128, 128, 128, 128 };
3103 int bandwidth = (int)(cpi->target_bandwidth);
3104 // For temporal layers, use full bandwidth (top layer).
3105 if (cpi->oxcf.number_of_layers > 1) {
3106 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->oxcf.number_of_layers - 1];
3107 bandwidth = (int)(lc->target_bandwidth);
3108 }
3109 // Loop through the Y plane, every skip blocks along rows and columns,
3110 // summing the normalized mean square error, only for blocks that have
3111 // been encoded as ZEROMV LAST at least min_consec_zero_last frames in
3112 // a row and have small sum difference between current and previous frame.
3113 // Normalization here is by the contrast of the current frame block.
3114 for (i = 0; i < cm->Height; i += 16 * skip) {
3115 int block_index_row = (i >> 4) * cm->mb_cols;
3116 for (j = 0; j < cm->Width; j += 16 * skip) {
3117 int index = block_index_row + (j >> 4);
3118 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3119 unsigned int sse;
3120 const unsigned int var =
3121 vpx_variance16x16(src + j, ystride, dst + j, ystride, &sse);
3122 // Only consider this block as valid for noise measurement
3123 // if the sum_diff average of the current and previous frame
3124 // is small (to avoid effects from lighting change).
3125 if ((sse - var) < 128) {
3126 unsigned int sse2;
3127 const unsigned int act =
3128 vpx_variance16x16(src + j, ystride, const_source, 0, &sse2);
3129 if (act > 0) total += sse / act;
3130 num_blocks++;
3131 }
3132 }
3133 }
3134 src += 16 * skip * ystride;
3135 dst += 16 * skip * ystride;
3136 }
3137 total = total * fac_framerate / 100;
3138
3139 // Only consider this frame as valid sample if we have computed nmse over
3140 // at least ~1/16 blocks, and Total > 0 (Total == 0 can happen if the
3141 // application inputs duplicate frames, or contrast is all zero).
3142 if (total > 0 && (num_blocks > (tot_num_blocks >> 4))) {
3143 // Update the recursive mean square source_diff.
3144 total = (total << 8) / num_blocks;
3145 if (cpi->denoiser.nmse_source_diff_count == 0) {
3146 // First sample in new interval.
3147 cpi->denoiser.nmse_source_diff = total;
3148 cpi->denoiser.qp_avg = cm->base_qindex;
3149 } else {
3150 // For subsequent samples, use average with weight ~1/4 for new sample.
3151 cpi->denoiser.nmse_source_diff =
3152 (int)((total + 3 * cpi->denoiser.nmse_source_diff) >> 2);
3153 cpi->denoiser.qp_avg =
3154 (int)((cm->base_qindex + 3 * cpi->denoiser.qp_avg) >> 2);
3155 }
3156 cpi->denoiser.nmse_source_diff_count++;
3157 }
3158 // Check for changing the denoiser mode, when we have obtained #samples =
3159 // num_mode_change. Condition the change also on the bitrate and QP.
3160 if (cpi->denoiser.nmse_source_diff_count == num_mode_change) {
3161 // Check for going up: from normal to aggressive mode.
3162 if ((cpi->denoiser.denoiser_mode == kDenoiserOnYUV) &&
3163 (cpi->denoiser.nmse_source_diff >
3164 cpi->denoiser.threshold_aggressive_mode) &&
3165 (cpi->denoiser.qp_avg < cpi->denoiser.qp_threshold_up &&
3166 bandwidth > cpi->denoiser.bitrate_threshold)) {
3167 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUVAggressive);
3168 } else {
3169 // Check for going down: from aggressive to normal mode.
3170 if (((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3171 (cpi->denoiser.nmse_source_diff <
3172 cpi->denoiser.threshold_aggressive_mode)) ||
3173 ((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3174 (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down ||
3175 bandwidth < cpi->denoiser.bitrate_threshold))) {
3176 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3177 }
3178 }
3179 // Reset metric and counter for next interval.
3180 cpi->denoiser.nmse_source_diff = 0;
3181 cpi->denoiser.qp_avg = 0;
3182 cpi->denoiser.nmse_source_diff_count = 0;
3183 }
3184 }
3185 #endif
3186
3187 void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) {
3188 const FRAME_TYPE frame_type = cm->frame_type;
3189
3190 int update_any_ref_buffers = 1;
3191 if (cpi->common.refresh_last_frame == 0 &&
3192 cpi->common.refresh_golden_frame == 0 &&
3193 cpi->common.refresh_alt_ref_frame == 0) {
3194 update_any_ref_buffers = 0;
3195 }
3196
3197 if (cm->no_lpf) {
3198 cm->filter_level = 0;
3199 } else {
3200 struct vpx_usec_timer timer;
3201
3202 vpx_clear_system_state();
3203
3204 vpx_usec_timer_start(&timer);
3205 if (cpi->sf.auto_filter == 0) {
3206 #if CONFIG_TEMPORAL_DENOISING
3207 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3208 // Use the denoised buffer for selecting base loop filter level.
3209 // Denoised signal for current frame is stored in INTRA_FRAME.
3210 // No denoising on key frames.
3211 vp8cx_pick_filter_level_fast(
3212 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
3213 } else {
3214 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3215 }
3216 #else
3217 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3218 #endif
3219 } else {
3220 #if CONFIG_TEMPORAL_DENOISING
3221 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3222 // Use the denoised buffer for selecting base loop filter level.
3223 // Denoised signal for current frame is stored in INTRA_FRAME.
3224 // No denoising on key frames.
3225 vp8cx_pick_filter_level(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3226 cpi);
3227 } else {
3228 vp8cx_pick_filter_level(cpi->Source, cpi);
3229 }
3230 #else
3231 vp8cx_pick_filter_level(cpi->Source, cpi);
3232 #endif
3233 }
3234
3235 if (cm->filter_level > 0) {
3236 vp8cx_set_alt_lf_level(cpi, cm->filter_level);
3237 }
3238
3239 vpx_usec_timer_mark(&timer);
3240 cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
3241 }
3242
3243 #if CONFIG_MULTITHREAD
3244 if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
3245 sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
3246 }
3247 #endif
3248
3249 // No need to apply loop-filter if the encoded frame does not update
3250 // any reference buffers.
3251 if (cm->filter_level > 0 && update_any_ref_buffers) {
3252 vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
3253 }
3254
3255 vp8_yv12_extend_frame_borders(cm->frame_to_show);
3256 }
3257
3258 static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
3259 unsigned char *dest,
3260 unsigned char *dest_end,
3261 unsigned int *frame_flags) {
3262 int Q;
3263 int frame_over_shoot_limit;
3264 int frame_under_shoot_limit;
3265
3266 int Loop = 0;
3267 int loop_count;
3268
3269 VP8_COMMON *cm = &cpi->common;
3270 int active_worst_qchanged = 0;
3271
3272 #if !CONFIG_REALTIME_ONLY
3273 int q_low;
3274 int q_high;
3275 int zbin_oq_high;
3276 int zbin_oq_low = 0;
3277 int top_index;
3278 int bottom_index;
3279 int overshoot_seen = 0;
3280 int undershoot_seen = 0;
3281 #endif
3282
3283 int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
3284 cpi->oxcf.optimal_buffer_level / 100);
3285 int drop_mark75 = drop_mark * 2 / 3;
3286 int drop_mark50 = drop_mark / 4;
3287 int drop_mark25 = drop_mark / 8;
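/* The marks above are buffer-level thresholds for the frame-dropping and
 * decimation logic below: drop_mark is drop_frames_water_mark percent of the
 * optimal buffer level, and (despite their names) drop_mark75, drop_mark50 and
 * drop_mark25 are 2/3, 1/4 and 1/8 of that value respectively.
 */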
3288
3289 /* Clear down mmx registers to allow floating point in what follows */
3290 vpx_clear_system_state();
3291
3292 if (cpi->force_next_frame_intra) {
3293 cm->frame_type = KEY_FRAME; /* delayed intra frame */
3294 cpi->force_next_frame_intra = 0;
3295 }
3296
3297 /* For an alt ref frame in 2 pass we skip the call to the second pass
3298 * function that sets the target bandwidth
3299 */
3300 switch (cpi->pass) {
3301 #if !CONFIG_REALTIME_ONLY
3302 case 2:
3303 if (cpi->common.refresh_alt_ref_frame) {
3304 /* Per frame bit target for the alt ref frame */
3305 cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
3306 /* per second target bitrate */
3307 cpi->target_bandwidth =
3308 (int)(cpi->twopass.gf_bits * cpi->output_framerate);
3309 }
3310 break;
3311 #endif // !CONFIG_REALTIME_ONLY
3312 default:
3313 cpi->per_frame_bandwidth =
3314 (int)(cpi->target_bandwidth / cpi->output_framerate);
3315 break;
3316 }
3317
3318 /* By default, turn off buffer-to-buffer copying */
3319 cm->copy_buffer_to_gf = 0;
3320 cm->copy_buffer_to_arf = 0;
3321
3322 /* Clear zbin over-quant value and mode boost values. */
3323 cpi->mb.zbin_over_quant = 0;
3324 cpi->mb.zbin_mode_boost = 0;
3325
3326 /* Enable or disable mode based tweaking of the zbin.
3327 * For 2 pass this is only used where GF/ARF prediction quality
3328 * is above a threshold.
3329 */
3330 cpi->mb.zbin_mode_boost_enabled = 1;
3331 if (cpi->pass == 2) {
3332 if (cpi->gfu_boost <= 400) {
3333 cpi->mb.zbin_mode_boost_enabled = 0;
3334 }
3335 }
3336
3337 /* Current default encoder behaviour for the altref sign bias */
3338 if (cpi->source_alt_ref_active) {
3339 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
3340 } else {
3341 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
3342 }
3343
3344 /* Check to see if a key frame is signaled
3345 * For two pass with auto key frame enabled cm->frame_type may already
3346 * be set, but not for one pass.
3347 */
3348 if ((cm->current_video_frame == 0) || (cm->frame_flags & FRAMEFLAGS_KEY) ||
3349 (cpi->oxcf.auto_key &&
3350 (cpi->frames_since_key % cpi->key_frame_frequency == 0))) {
3351 /* Key frame from VFW/auto-keyframe/first frame */
3352 cm->frame_type = KEY_FRAME;
3353 #if CONFIG_TEMPORAL_DENOISING
3354 if (cpi->oxcf.noise_sensitivity == 4) {
3355 // For adaptive mode, reset denoiser to normal mode on key frame.
3356 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3357 }
3358 #endif
3359 }
3360
3361 #if CONFIG_MULTI_RES_ENCODING
3362 if (cpi->oxcf.mr_total_resolutions > 1) {
3363 LOWER_RES_FRAME_INFO *low_res_frame_info =
3364 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
3365
3366 if (cpi->oxcf.mr_encoder_id) {
3367 // TODO(marpan): This constraint shouldn't be needed, as we would like
3368 // to allow for key frame setting (forced or periodic) defined per
3369 // spatial layer. For now, keep this in.
3370 cm->frame_type = low_res_frame_info->frame_type;
3371
3372 // Check if lower resolution is available for motion vector reuse.
3373 if (cm->frame_type != KEY_FRAME) {
3374 cpi->mr_low_res_mv_avail = 1;
3375 cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
3376
3377 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
3378 cpi->mr_low_res_mv_avail &=
3379 (cpi->current_ref_frames[LAST_FRAME] ==
3380 low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
3381
3382 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
3383 cpi->mr_low_res_mv_avail &=
3384 (cpi->current_ref_frames[GOLDEN_FRAME] ==
3385 low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
3386
3387 // Don't use altref to determine whether low res is available.
3388 // TODO(marpan): Should we apply this type of condition on a
3389 // per-reference-frame basis?
3390 /*
3391 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
3392 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
3393 == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
3394 */
3395 }
3396 }
3397
3398 // On a key frame: For the lowest resolution, keep track of the key frame
3399 // counter value. For the higher resolutions, reset the current video
3400 // frame counter to that of the lowest resolution.
3401 // This is done to handle the case where we may stop/start encoding
3402 // higher layer(s). The restart-encoding of higher layer is only signaled
3403 // by a key frame for now.
3404 // TODO (marpan): Add flag to indicate restart-encoding of higher layer.
3405 if (cm->frame_type == KEY_FRAME) {
3406 if (cpi->oxcf.mr_encoder_id) {
3407 // If the initial starting value of the buffer level is zero (this can
3408 // happen because we may have not started encoding this higher stream),
3409 // then reset it to non-zero value based on |starting_buffer_level|.
3410 if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) {
3411 unsigned int i;
3412 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
3413 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
3414 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
3415 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3416 lc->bits_off_target = lc->starting_buffer_level;
3417 lc->buffer_level = lc->starting_buffer_level;
3418 }
3419 }
3420 cpi->common.current_video_frame =
3421 low_res_frame_info->key_frame_counter_value;
3422 } else {
3423 low_res_frame_info->key_frame_counter_value =
3424 cpi->common.current_video_frame;
3425 }
3426 }
3427 }
3428 #endif
3429
3430 // Find the reference frame closest to the current frame.
3431 cpi->closest_reference_frame = LAST_FRAME;
3432 if (cm->frame_type != KEY_FRAME) {
3433 int i;
3434 MV_REFERENCE_FRAME closest_ref = INTRA_FRAME;
3435 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
3436 closest_ref = LAST_FRAME;
3437 } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
3438 closest_ref = GOLDEN_FRAME;
3439 } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
3440 closest_ref = ALTREF_FRAME;
3441 }
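/* Scan the remaining references: the loop index maps onto the
 * ref_frame_flags bits (1 = LAST, 2 = GOLDEN, and i == 3 is remapped to
 * 4 = ALTREF), and a reference counts as closer if fewer frames have
 * passed since it was last refreshed.
 */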
3442 for (i = 1; i <= 3; ++i) {
3443 vpx_ref_frame_type_t ref_frame_type =
3444 (vpx_ref_frame_type_t)((i == 3) ? 4 : i);
3445 if (cpi->ref_frame_flags & ref_frame_type) {
3446 if ((cm->current_video_frame - cpi->current_ref_frames[i]) <
3447 (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
3448 closest_ref = i;
3449 }
3450 }
3451 }
3452 cpi->closest_reference_frame = closest_ref;
3453 }
3454
3455 /* Set various flags etc to special state if it is a key frame */
3456 if (cm->frame_type == KEY_FRAME) {
3457 int i;
3458
3459 // Set the loop filter deltas and segmentation map update
3460 setup_features(cpi);
3461
3462 /* The alternate reference frame cannot be active for a key frame */
3463 cpi->source_alt_ref_active = 0;
3464
3465 /* Reset the RD threshold multipliers to default of * 1 (128) */
3466 for (i = 0; i < MAX_MODES; ++i) {
3467 cpi->mb.rd_thresh_mult[i] = 128;
3468 }
3469
3470 // Reset the zero_last counter to 0 on key frame.
3471 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3472 memset(cpi->consec_zero_last_mvbias, 0,
3473 (cpi->common.mb_rows * cpi->common.mb_cols));
3474 }
3475
3476 #if 0
3477 /* Experimental code for lagged compress and one pass
3478 * Initialise one_pass GF frames stats
3479 * Update stats used for GF selection
3480 */
3481 {
3482 cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
3483
3484 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
3485 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
3486 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
3487 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
3488 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
3489 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
3490 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
3491 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
3492 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
3493 }
3494 #endif
3495
3496 update_rd_ref_frame_probs(cpi);
3497
3498 if (cpi->drop_frames_allowed) {
3499 /* The reset to decimation 0 is only done here for one pass.
3500 * Once it is set two pass leaves decimation on till the next kf.
3501 */
3502 if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0)) {
3503 cpi->decimation_factor--;
3504 }
3505
3506 if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0) {
3507 cpi->decimation_factor = 1;
3508
3509 } else if (cpi->buffer_level < drop_mark25 &&
3510 (cpi->decimation_factor == 2 || cpi->decimation_factor == 3)) {
3511 cpi->decimation_factor = 3;
3512 } else if (cpi->buffer_level < drop_mark50 &&
3513 (cpi->decimation_factor == 1 || cpi->decimation_factor == 2)) {
3514 cpi->decimation_factor = 2;
3515 } else if (cpi->buffer_level < drop_mark75 &&
3516 (cpi->decimation_factor == 0 || cpi->decimation_factor == 1)) {
3517 cpi->decimation_factor = 1;
3518 }
3519 }
3520
3521 /* The following decimates the frame rate according to a regular
3522 * pattern (i.e. to 1/2 or 2/3 frame rate) This can be used to help
3523 * prevent buffer under-run in CBR mode. Alternatively it might be
3524 * desirable in some situations to drop frame rate but throw more bits
3525 * at each frame.
3526 *
3527 * Note that dropping a key frame can be problematic if spatial
3528 * resampling is also active
3529 */
3530 if (cpi->decimation_factor > 0) {
3531 switch (cpi->decimation_factor) {
3532 case 1:
3533 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
3534 break;
3535 case 2:
3536 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3537 break;
3538 case 3:
3539 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3540 break;
3541 }
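/* Each coded frame gets a larger per-frame budget to partially make up for
 * the dropped frames: 3/2 for decimation factor 1 and a more conservative
 * 5/4 for factors 2 and 3. Because the boost is smaller than the raw drop
 * ratio, dropping frames still lets the buffer level recover.
 */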
3542
3543 /* Note that we should not throw out a key frame (especially when
3544 * spatial resampling is enabled).
3545 */
3546 if (cm->frame_type == KEY_FRAME) {
3547 cpi->decimation_count = cpi->decimation_factor;
3548 } else if (cpi->decimation_count > 0) {
3549 cpi->decimation_count--;
3550
3551 cpi->bits_off_target += cpi->av_per_frame_bandwidth;
3552 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
3553 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
3554 }
3555
3556 #if CONFIG_MULTI_RES_ENCODING
3557 vp8_store_drop_frame_info(cpi);
3558 #endif
3559
3560 cm->current_video_frame++;
3561 cpi->frames_since_key++;
3562 // We advance the temporal pattern for dropped frames.
3563 cpi->temporal_pattern_counter++;
3564
3565 #if CONFIG_INTERNAL_STATS
3566 cpi->count++;
3567 #endif
3568
3569 cpi->buffer_level = cpi->bits_off_target;
3570
3571 if (cpi->oxcf.number_of_layers > 1) {
3572 unsigned int i;
3573
3574 /* Propagate bits saved by dropping the frame to higher
3575 * layers
3576 */
3577 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
3578 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3579 lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
3580 if (lc->bits_off_target > lc->maximum_buffer_size) {
3581 lc->bits_off_target = lc->maximum_buffer_size;
3582 }
3583 lc->buffer_level = lc->bits_off_target;
3584 }
3585 }
3586
3587 return;
3588 } else {
3589 cpi->decimation_count = cpi->decimation_factor;
3590 }
3591 } else {
3592 cpi->decimation_count = 0;
3593 }
3594
3595 /* Decide how big to make the frame */
3596 if (!vp8_pick_frame_size(cpi)) {
3597 /* TODO: the two drop_frame-and-return code paths could be put together. */
3598 #if CONFIG_MULTI_RES_ENCODING
3599 vp8_store_drop_frame_info(cpi);
3600 #endif
3601 cm->current_video_frame++;
3602 cpi->frames_since_key++;
3603 // We advance the temporal pattern for dropped frames.
3604 cpi->temporal_pattern_counter++;
3605 return;
3606 }
3607
3608 /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
3609 * This has a knock-on effect on active best quality as well.
3610 * For CBR if the buffer reaches its maximum level then we can no longer
3611 * save up bits for later frames so we might as well use them up
3612 * on the current frame.
3613 */
3614 if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
3615 (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) &&
3616 cpi->buffered_mode) {
3617 /* Max adjustment is 1/4 */
3618 int Adjustment = cpi->active_worst_quality / 4;
3619
3620 if (Adjustment) {
3621 int buff_lvl_step;
3622
3623 if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size) {
3624 buff_lvl_step = (int)((cpi->oxcf.maximum_buffer_size -
3625 cpi->oxcf.optimal_buffer_level) /
3626 Adjustment);
3627
3628 if (buff_lvl_step) {
3629 Adjustment =
3630 (int)((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) /
3631 buff_lvl_step);
3632 } else {
3633 Adjustment = 0;
3634 }
3635 }
3636
3637 cpi->active_worst_quality -= Adjustment;
3638
3639 if (cpi->active_worst_quality < cpi->active_best_quality) {
3640 cpi->active_worst_quality = cpi->active_best_quality;
3641 }
3642 }
3643 }
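/* The adjustment above is proportional to how far the buffer level has
 * risen past the optimal point: the range [optimal, maximum] is split into
 * (active_worst_quality / 4) steps and one unit of Q is taken off for each
 * step crossed, capped at a 1/4 reduction when the buffer is full.
 */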
3644
3645 /* Set an active best quality and if necessary active worst quality
3646 * There is some odd behavior for one pass here that needs attention.
3647 */
3648 if ((cpi->pass == 2) || (cpi->ni_frames > 150)) {
3649 vpx_clear_system_state();
3650
3651 Q = cpi->active_worst_quality;
3652
3653 if (cm->frame_type == KEY_FRAME) {
3654 if (cpi->pass == 2) {
3655 if (cpi->gfu_boost > 600) {
3656 cpi->active_best_quality = kf_low_motion_minq[Q];
3657 } else {
3658 cpi->active_best_quality = kf_high_motion_minq[Q];
3659 }
3660
3661 /* Special case for key frames forced because we have reached
3662 * the maximum key frame interval. Here force the Q to a range
3663 * based on the ambient Q to reduce the risk of popping
3664 */
3665 if (cpi->this_key_frame_forced) {
3666 if (cpi->active_best_quality > cpi->avg_frame_qindex * 7 / 8) {
3667 cpi->active_best_quality = cpi->avg_frame_qindex * 7 / 8;
3668 } else if (cpi->active_best_quality < (cpi->avg_frame_qindex >> 2)) {
3669 cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
3670 }
3671 }
3672 }
3673 /* One pass more conservative */
3674 else {
3675 cpi->active_best_quality = kf_high_motion_minq[Q];
3676 }
3677 }
3678
3679 else if (cpi->oxcf.number_of_layers == 1 &&
3680 (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)) {
3681 /* Use the lower of cpi->active_worst_quality and recent
3682 * average Q as basis for GF/ARF Q limit unless last frame was
3683 * a key frame.
3684 */
3685 if ((cpi->frames_since_key > 1) &&
3686 (cpi->avg_frame_qindex < cpi->active_worst_quality)) {
3687 Q = cpi->avg_frame_qindex;
3688 }
3689
3690 /* For constrained quality don't allow Q less than the cq level */
3691 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3692 (Q < cpi->cq_target_quality)) {
3693 Q = cpi->cq_target_quality;
3694 }
3695
3696 if (cpi->pass == 2) {
3697 if (cpi->gfu_boost > 1000) {
3698 cpi->active_best_quality = gf_low_motion_minq[Q];
3699 } else if (cpi->gfu_boost < 400) {
3700 cpi->active_best_quality = gf_high_motion_minq[Q];
3701 } else {
3702 cpi->active_best_quality = gf_mid_motion_minq[Q];
3703 }
3704
3705 /* Constrained quality uses a slightly lower active best. */
3706 if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3707 cpi->active_best_quality = cpi->active_best_quality * 15 / 16;
3708 }
3709 }
3710 /* One pass more conservative */
3711 else {
3712 cpi->active_best_quality = gf_high_motion_minq[Q];
3713 }
3714 } else {
3715 cpi->active_best_quality = inter_minq[Q];
3716
3717 /* For the constant/constrained quality mode we don't want
3718 * q to fall below the cq level.
3719 */
3720 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3721 (cpi->active_best_quality < cpi->cq_target_quality)) {
3722 /* If we are strongly undershooting the target rate in the last
3723 * frames then use the user passed in cq value not the auto
3724 * cq value.
3725 */
3726 if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth) {
3727 cpi->active_best_quality = cpi->oxcf.cq_level;
3728 } else {
3729 cpi->active_best_quality = cpi->cq_target_quality;
3730 }
3731 }
3732 }
3733
3734 /* If CBR and the buffer is nearly full then it is reasonable to allow
3735 * higher quality on the frames to prevent bits just going to waste.
3736 */
3737 if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
3738 /* Note that the use of >= here eliminates the risk of a divide
3739 * by 0 error in the else if clause
3740 */
3741 if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) {
3742 cpi->active_best_quality = cpi->best_quality;
3743
3744 } else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level) {
3745 int Fraction =
3746 (int)(((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) /
3747 (cpi->oxcf.maximum_buffer_size -
3748 cpi->oxcf.optimal_buffer_level));
3749 int min_qadjustment =
3750 ((cpi->active_best_quality - cpi->best_quality) * Fraction) / 128;
3751
3752 cpi->active_best_quality -= min_qadjustment;
3753 }
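/* Fraction above is the buffer's position between the optimal and maximum
 * levels scaled to 0..128, so active_best_quality is interpolated linearly
 * towards best_quality as the buffer fills, and is pinned to best_quality
 * outright once the buffer is at its maximum (first branch).
 */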
3754 }
3755 }
3756 /* Make sure constrained quality mode limits are adhered to for the first
3757 * few frames of one pass encodes
3758 */
3759 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3760 if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
3761 cpi->common.refresh_alt_ref_frame) {
3762 cpi->active_best_quality = cpi->best_quality;
3763 } else if (cpi->active_best_quality < cpi->cq_target_quality) {
3764 cpi->active_best_quality = cpi->cq_target_quality;
3765 }
3766 }
3767
3768 /* Clip the active best and worst quality values to limits */
3769 if (cpi->active_worst_quality > cpi->worst_quality) {
3770 cpi->active_worst_quality = cpi->worst_quality;
3771 }
3772
3773 if (cpi->active_best_quality < cpi->best_quality) {
3774 cpi->active_best_quality = cpi->best_quality;
3775 }
3776
3777 if (cpi->active_worst_quality < cpi->active_best_quality) {
3778 cpi->active_worst_quality = cpi->active_best_quality;
3779 }
3780
3781 /* Determine initial Q to try */
3782 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3783
3784 #if !CONFIG_REALTIME_ONLY
3785
3786 /* Set highest allowed value for Zbin over quant */
3787 if (cm->frame_type == KEY_FRAME) {
3788 zbin_oq_high = 0;
3789 } else if ((cpi->oxcf.number_of_layers == 1) &&
3790 ((cm->refresh_alt_ref_frame ||
3791 (cm->refresh_golden_frame && !cpi->source_alt_ref_active)))) {
3792 zbin_oq_high = 16;
3793 } else {
3794 zbin_oq_high = ZBIN_OQ_MAX;
3795 }
3796 #endif
3797
3798 compute_skin_map(cpi);
3799
3800 /* Setup background Q adjustment for error resilient mode.
3801 * For multi-layer encodes only enable this for the base layer.
3802 */
3803 if (cpi->cyclic_refresh_mode_enabled) {
3804 // Special case for screen_content_mode with golden frame updates.
3805 int disable_cr_gf =
3806 (cpi->oxcf.screen_content_mode == 2 && cm->refresh_golden_frame);
3807 if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf) {
3808 cyclic_background_refresh(cpi, Q, 0);
3809 } else {
3810 disable_segmentation(cpi);
3811 }
3812 }
3813
3814 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3815 &frame_over_shoot_limit);
3816
3817 #if !CONFIG_REALTIME_ONLY
3818 /* Limit Q range for the adaptive loop. */
3819 bottom_index = cpi->active_best_quality;
3820 top_index = cpi->active_worst_quality;
3821 q_low = cpi->active_best_quality;
3822 q_high = cpi->active_worst_quality;
3823 #endif
3824
3825 vp8_save_coding_context(cpi);
3826
3827 loop_count = 0;
3828
3829 scale_and_extend_source(cpi->un_scaled_source, cpi);
3830
3831 #if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC
3832 // Option to apply spatial blur under the aggressive or adaptive
3833 // (temporal denoising) mode.
3834 if (cpi->oxcf.noise_sensitivity >= 3) {
3835 if (cpi->denoiser.denoise_pars.spatial_blur != 0) {
3836 vp8_de_noise(cm, cpi->Source, cpi->Source,
3837 cpi->denoiser.denoise_pars.spatial_blur, 1, 0, 0);
3838 }
3839 }
3840 #endif
3841
3842 #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)
3843
3844 if (cpi->oxcf.noise_sensitivity > 0) {
3845 unsigned char *src;
3846 int l = 0;
3847
3848 switch (cpi->oxcf.noise_sensitivity) {
3849 case 1: l = 20; break;
3850 case 2: l = 40; break;
3851 case 3: l = 60; break;
3852 case 4: l = 80; break;
3853 case 5: l = 100; break;
3854 case 6: l = 150; break;
3855 }
3856
3857 if (cm->frame_type == KEY_FRAME) {
3858 vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1);
3859 } else {
3860 vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1);
3861
3862 src = cpi->Source->y_buffer;
3863
3864 if (cpi->Source->y_stride < 0) {
3865 src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
3866 }
3867 }
3868 }
3869
3870 #endif
3871
3872 #ifdef OUTPUT_YUV_SRC
3873 vpx_write_yuv_frame(yuv_file, cpi->Source);
3874 #endif
3875
3876 do {
3877 vpx_clear_system_state();
3878
3879 vp8_set_quantizer(cpi, Q);
3880
3881 /* setup skip prob for costing in mode/mv decision */
3882 if (cpi->common.mb_no_coeff_skip) {
3883 cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
3884
3885 if (cm->frame_type != KEY_FRAME) {
3886 if (cpi->common.refresh_alt_ref_frame) {
3887 if (cpi->last_skip_false_probs[2] != 0) {
3888 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3889 }
3890
3891 /*
3892 if(cpi->last_skip_false_probs[2]!=0 && abs(Q-
3893 cpi->last_skip_probs_q[2])<=16 )
3894 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3895 else if (cpi->last_skip_false_probs[2]!=0)
3896 cpi->prob_skip_false = (cpi->last_skip_false_probs[2] +
3897 cpi->prob_skip_false ) / 2;
3898 */
3899 } else if (cpi->common.refresh_golden_frame) {
3900 if (cpi->last_skip_false_probs[1] != 0) {
3901 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3902 }
3903
3904 /*
3905 if(cpi->last_skip_false_probs[1]!=0 && abs(Q-
3906 cpi->last_skip_probs_q[1])<=16 )
3907 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3908 else if (cpi->last_skip_false_probs[1]!=0)
3909 cpi->prob_skip_false = (cpi->last_skip_false_probs[1] +
3910 cpi->prob_skip_false ) / 2;
3911 */
3912 } else {
3913 if (cpi->last_skip_false_probs[0] != 0) {
3914 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3915 }
3916
3917 /*
3918 if(cpi->last_skip_false_probs[0]!=0 && abs(Q-
3919 cpi->last_skip_probs_q[0])<=16 )
3920 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3921 else if(cpi->last_skip_false_probs[0]!=0)
3922 cpi->prob_skip_false = (cpi->last_skip_false_probs[0] +
3923 cpi->prob_skip_false ) / 2;
3924 */
3925 }
3926
3927 /* as this is for a cost estimate, let's make sure it does not
3928 * go extreme either way
3929 */
3930 if (cpi->prob_skip_false < 5) cpi->prob_skip_false = 5;
3931
3932 if (cpi->prob_skip_false > 250) cpi->prob_skip_false = 250;
3933
3934 if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref) {
3935 cpi->prob_skip_false = 1;
3936 }
3937 }
3938
3939 #if 0
3940
3941 if (cpi->pass != 1)
3942 {
3943 FILE *f = fopen("skip.stt", "a");
3944 fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
3945 fclose(f);
3946 }
3947
3948 #endif
3949 }
3950
3951 if (cm->frame_type == KEY_FRAME) {
3952 if (resize_key_frame(cpi)) {
3953 /* If the frame size has changed, need to reset Q, quantizer,
3954 * and background refresh.
3955 */
3956 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3957 if (cpi->cyclic_refresh_mode_enabled) {
3958 if (cpi->current_layer == 0) {
3959 cyclic_background_refresh(cpi, Q, 0);
3960 } else {
3961 disable_segmentation(cpi);
3962 }
3963 }
3964 // Reset the zero_last counter to 0 on key frame.
3965 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3966 memset(cpi->consec_zero_last_mvbias, 0,
3967 (cpi->common.mb_rows * cpi->common.mb_cols));
3968 vp8_set_quantizer(cpi, Q);
3969 }
3970
3971 vp8_setup_key_frame(cpi);
3972 }
3973
3974 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
3975 {
3976 if (cpi->oxcf.error_resilient_mode) cm->refresh_entropy_probs = 0;
3977
3978 if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
3979 if (cm->frame_type == KEY_FRAME) cm->refresh_entropy_probs = 1;
3980 }
3981
3982 if (cm->refresh_entropy_probs == 0) {
3983 /* save a copy for later refresh */
3984 memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
3985 }
3986
3987 vp8_update_coef_context(cpi);
3988
3989 vp8_update_coef_probs(cpi);
3990
3991 /* transform / motion compensation build reconstruction frame
3992 * +pack coef partitions
3993 */
3994 vp8_encode_frame(cpi);
3995
3996 /* cpi->projected_frame_size is not needed for RT mode */
3997 }
3998 #else
3999 /* transform / motion compensation build reconstruction frame */
4000 vp8_encode_frame(cpi);
4001
4002 if (cpi->pass == 0 && cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
4003 if (vp8_drop_encodedframe_overshoot(cpi, Q)) return;
4004 }
4005
4006 cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
4007 cpi->projected_frame_size =
4008 (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
4009 #endif
4010 vpx_clear_system_state();
4011
4012 /* Test to see if the stats generated for this frame indicate that
4013 * we should have coded a key frame (assuming that we didn't)!
4014 */
4015
4016 if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME &&
4017 cpi->compressor_speed != 2) {
4018 #if !CONFIG_REALTIME_ONLY
4019 if (decide_key_frame(cpi)) {
4020 /* Reset all our sizing numbers and recode */
4021 cm->frame_type = KEY_FRAME;
4022
4023 vp8_pick_frame_size(cpi);
4024
4025 /* Clear the Alt reference frame active flag when we have
4026 * a key frame
4027 */
4028 cpi->source_alt_ref_active = 0;
4029
4030 // Set the loop filter deltas and segmentation map update
4031 setup_features(cpi);
4032
4033 vp8_restore_coding_context(cpi);
4034
4035 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4036
4037 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
4038 &frame_over_shoot_limit);
4039
4040 /* Limit Q range for the adaptive loop. */
4041 bottom_index = cpi->active_best_quality;
4042 top_index = cpi->active_worst_quality;
4043 q_low = cpi->active_best_quality;
4044 q_high = cpi->active_worst_quality;
4045
4046 loop_count++;
4047 Loop = 1;
4048
4049 continue;
4050 }
4051 #endif
4052 }
4053
4054 vpx_clear_system_state();
4055
4056 if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
4057
4058 /* Are we overshooting and up against the limit of active max Q? */
4059 if (((cpi->pass != 2) ||
4060 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
4061 (Q == cpi->active_worst_quality) &&
4062 (cpi->active_worst_quality < cpi->worst_quality) &&
4063 (cpi->projected_frame_size > frame_over_shoot_limit)) {
4064 int over_size_percent =
4065 ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) /
4066 frame_over_shoot_limit;
4067
4068 /* If so is there any scope for relaxing it */
4069 while ((cpi->active_worst_quality < cpi->worst_quality) &&
4070 (over_size_percent > 0)) {
4071 cpi->active_worst_quality++;
4072 /* Assume 1 qstep = about 4% on frame size. */
4073 over_size_percent = (int)(over_size_percent * 0.96);
4074 }
4075 #if !CONFIG_REALTIME_ONLY
4076 top_index = cpi->active_worst_quality;
4077 #endif // !CONFIG_REALTIME_ONLY
4078 /* If we have updated the active max Q do not call
4079 * vp8_update_rate_correction_factors() in this loop.
4080 */
4081 active_worst_qchanged = 1;
4082 } else {
4083 active_worst_qchanged = 0;
4084 }
4085
4086 #if CONFIG_REALTIME_ONLY
4087 Loop = 0;
4088 #else
4089 /* Special case handling for forced key frames */
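/* For a forced key frame, binary-search Q between q_low and q_high using
 * the reconstruction error: if the key frame's SSE is worse than 7/8 of the
 * ambient error the upper bound is lowered, and if it is better than half
 * the ambient error the lower bound is raised, recoding until Q settles.
 */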
4090 if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
4091 int last_q = Q;
4092 int kf_err = vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4093
4094 /* The key frame is not good enough */
4095 if (kf_err > ((cpi->ambient_err * 7) >> 3)) {
4096 /* Lower q_high */
4097 q_high = (Q > q_low) ? (Q - 1) : q_low;
4098
4099 /* Adjust Q */
4100 Q = (q_high + q_low) >> 1;
4101 }
4102 /* The key frame is much better than the previous frame */
4103 else if (kf_err < (cpi->ambient_err >> 1)) {
4104 /* Raise q_low */
4105 q_low = (Q < q_high) ? (Q + 1) : q_high;
4106
4107 /* Adjust Q */
4108 Q = (q_high + q_low + 1) >> 1;
4109 }
4110
4111 /* Clamp Q to upper and lower limits: */
4112 if (Q > q_high) {
4113 Q = q_high;
4114 } else if (Q < q_low) {
4115 Q = q_low;
4116 }
4117
4118 Loop = Q != last_q;
4119 }
4120
4121 /* Is the projected frame size out of range and are we allowed
4122 * to attempt to recode.
4123 */
4124 else if (recode_loop_test(cpi, frame_over_shoot_limit,
4125 frame_under_shoot_limit, Q, top_index,
4126 bottom_index)) {
4127 int last_q = Q;
4128 int Retries = 0;
4129
4130 /* Frame size out of permitted range. Update correction factor
4131 * & compute new Q to try...
4132 */
4133
4134 /* Frame is too large */
4135 if (cpi->projected_frame_size > cpi->this_frame_target) {
4136 /* Raise q_low to at least the current value */
4137 q_low = (Q < q_high) ? (Q + 1) : q_high;
4138
4139 /* If we are using over quant do the same for zbin_oq_low */
4140 if (cpi->mb.zbin_over_quant > 0) {
4141 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4142 ? (cpi->mb.zbin_over_quant + 1)
4143 : zbin_oq_high;
4144 }
4145
4146 if (undershoot_seen) {
4147 /* Update rate_correction_factor unless
4148 * cpi->active_worst_quality has changed.
4149 */
4150 if (!active_worst_qchanged) {
4151 vp8_update_rate_correction_factors(cpi, 1);
4152 }
4153
4154 Q = (q_high + q_low + 1) / 2;
4155
4156 /* Adjust cpi->zbin_over_quant (only allowed when Q
4157 * is max)
4158 */
4159 if (Q < MAXQ) {
4160 cpi->mb.zbin_over_quant = 0;
4161 } else {
4162 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4163 ? (cpi->mb.zbin_over_quant + 1)
4164 : zbin_oq_high;
4165 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4166 }
4167 } else {
4168 /* Update rate_correction_factor unless
4169 * cpi->active_worst_quality has changed.
4170 */
4171 if (!active_worst_qchanged) {
4172 vp8_update_rate_correction_factors(cpi, 0);
4173 }
4174
4175 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4176
4177 while (((Q < q_low) || (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
4178 (Retries < 10)) {
4179 vp8_update_rate_correction_factors(cpi, 0);
4180 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4181 Retries++;
4182 }
4183 }
4184
4185 overshoot_seen = 1;
4186 }
4187 /* Frame is too small */
4188 else {
4189 if (cpi->mb.zbin_over_quant == 0) {
4190 /* Lower q_high if not using over quant */
4191 q_high = (Q > q_low) ? (Q - 1) : q_low;
4192 } else {
4193 /* else lower zbin_oq_high */
4194 zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low)
4195 ? (cpi->mb.zbin_over_quant - 1)
4196 : zbin_oq_low;
4197 }
4198
4199 if (overshoot_seen) {
4200 /* Update rate_correction_factor unless
4201 * cpi->active_worst_quality has changed.
4202 */
4203 if (!active_worst_qchanged) {
4204 vp8_update_rate_correction_factors(cpi, 1);
4205 }
4206
4207 Q = (q_high + q_low) / 2;
4208
4209 /* Adjust cpi->zbin_over_quant (only allowed when Q
4210 * is max)
4211 */
4212 if (Q < MAXQ) {
4213 cpi->mb.zbin_over_quant = 0;
4214 } else {
4215 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4216 }
4217 } else {
4218 /* Update rate_correction_factor unless
4219 * cpi->active_worst_quality has changed.
4220 */
4221 if (!active_worst_qchanged) {
4222 vp8_update_rate_correction_factors(cpi, 0);
4223 }
4224
4225 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4226
4227 /* Special case reset for qlow for constrained quality.
4228 * This should only trigger where there is very substantial
4229 * undershoot on a frame and the auto cq level is above
4230 * the user passed in value.
4231 */
4232 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4233 (Q < q_low)) {
4234 q_low = Q;
4235 }
4236
4237 while (((Q > q_high) || (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
4238 (Retries < 10)) {
4239 vp8_update_rate_correction_factors(cpi, 0);
4240 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4241 Retries++;
4242 }
4243 }
4244
4245 undershoot_seen = 1;
4246 }
4247
4248 /* Clamp Q to upper and lower limits: */
4249 if (Q > q_high) {
4250 Q = q_high;
4251 } else if (Q < q_low) {
4252 Q = q_low;
4253 }
4254
4255 /* Clamp cpi->zbin_over_quant */
4256 cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low)
4257 ? zbin_oq_low
4258 : (cpi->mb.zbin_over_quant > zbin_oq_high)
4259 ? zbin_oq_high
4260 : cpi->mb.zbin_over_quant;
4261
4262 Loop = Q != last_q;
4263 } else {
4264 Loop = 0;
4265 }
4266 #endif // CONFIG_REALTIME_ONLY
4267
4268 if (cpi->is_src_frame_alt_ref) Loop = 0;
4269
4270 if (Loop == 1) {
4271 vp8_restore_coding_context(cpi);
4272 loop_count++;
4273 #if CONFIG_INTERNAL_STATS
4274 cpi->tot_recode_hits++;
4275 #endif
4276 }
4277 } while (Loop == 1);
4278
4279 #if defined(DROP_UNCODED_FRAMES)
4280 /* if there are no coded macroblocks at all drop this frame */
4281 if (cpi->common.MBs == cpi->mb.skip_true_count &&
4282 (cpi->drop_frame_count & 7) != 7 && cm->frame_type != KEY_FRAME) {
4283 cpi->common.current_video_frame++;
4284 cpi->frames_since_key++;
4285 cpi->drop_frame_count++;
4286 // We advance the temporal pattern for dropped frames.
4287 cpi->temporal_pattern_counter++;
4288 return;
4289 }
4290 cpi->drop_frame_count = 0;
4291 #endif
4292
4293 #if 0
4294 /* Experimental code for lagged and one pass
4295 * Update stats used for one pass GF selection
4296 */
4297 {
4298 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
4299 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
4300 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
4301 }
4302 #endif
4303
4304 /* Special case code to reduce pulsing when key frames are forced at a
4305 * fixed interval. Note the reconstruction error if it is the frame before
4306 * the forced key frame
4307 */
4308 if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
4309 cpi->ambient_err =
4310 vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4311 }
4312
4313 /* This frame's MVs are saved and will be used in the next frame's MV
4314 * predictor. The last frame has one more line (at the bottom) and one more
4315 * column (at the right) than cm->mip. The edge elements are initialized to 0.
4316 */
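/* The destination arrays (lfmv, lf_ref_frame, lf_ref_frame_sign_bias) are
 * sized for this enlarged grid, hence the row stride of
 * (mode_info_stride + 1) in the indexing below.
 */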
4317 #if CONFIG_MULTI_RES_ENCODING
4318 if (!cpi->oxcf.mr_encoder_id && cm->show_frame)
4319 #else
4320 if (cm->show_frame) /* do not save for altref frame */
4321 #endif
4322 {
4323 int mb_row;
4324 int mb_col;
4325 /* Point to beginning of allocated MODE_INFO arrays. */
4326 MODE_INFO *tmp = cm->mip;
4327
4328 if (cm->frame_type != KEY_FRAME) {
4329 for (mb_row = 0; mb_row < cm->mb_rows + 1; ++mb_row) {
4330 for (mb_col = 0; mb_col < cm->mb_cols + 1; ++mb_col) {
4331 if (tmp->mbmi.ref_frame != INTRA_FRAME) {
4332 cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int =
4333 tmp->mbmi.mv.as_int;
4334 }
4335
4336 cpi->lf_ref_frame_sign_bias[mb_col +
4337 mb_row * (cm->mode_info_stride + 1)] =
4338 cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
4339 cpi->lf_ref_frame[mb_col + mb_row * (cm->mode_info_stride + 1)] =
4340 tmp->mbmi.ref_frame;
4341 tmp++;
4342 }
4343 }
4344 }
4345 }
4346
4347 /* Count last ref frame 0,0 usage on current encoded frame. */
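/* cm->mi is laid out with one extra border column (mode_info_stride is
 * mb_cols + 1), so the loop below does an additional tmp++ at the end of
 * each row to step over that border entry.
 */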
4348 {
4349 int mb_row;
4350 int mb_col;
4351 /* Point to beginning of MODE_INFO arrays. */
4352 MODE_INFO *tmp = cm->mi;
4353
4354 cpi->zeromv_count = 0;
4355
4356 if (cm->frame_type != KEY_FRAME) {
4357 for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
4358 for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
4359 if (tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME) {
4360 cpi->zeromv_count++;
4361 }
4362 tmp++;
4363 }
4364 tmp++;
4365 }
4366 }
4367 }
4368
4369 #if CONFIG_MULTI_RES_ENCODING
4370 vp8_cal_dissimilarity(cpi);
4371 #endif
4372
4373 /* Update the GF usage maps.
4374 * This is done after completing the compression of a frame when all
4375 * modes etc. are finalized but before loop filter
4376 */
4377 if (cpi->oxcf.number_of_layers == 1) {
4378 vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
4379 }
4380
4381 if (cm->frame_type == KEY_FRAME) cm->refresh_last_frame = 1;
4382
4383 #if 0
4384 {
4385 FILE *f = fopen("gfactive.stt", "a");
4386 fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
4387 fclose(f);
4388 }
4389 #endif
4390
4391 /* For inter frames the current default behavior is that when
4392 * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
4393 * This is purely an encoder decision at present.
4394 */
4395 if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame) {
4396 cm->copy_buffer_to_arf = 2;
4397 } else {
4398 cm->copy_buffer_to_arf = 0;
4399 }
4400
4401 cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
4402
4403 #if CONFIG_TEMPORAL_DENOISING
4404 // Get some measure of the amount of noise, by measuring the (partial) mse
4405 // between source and denoised buffer, for y channel. Partial refers to
4406 // computing the sse for a sub-sample of the frame (i.e., skip x blocks
4407 // along row/column), and only for blocks in that set that are
4408 // consecutive ZEROMV_LAST mode.
4409 // Do this every ~8 frames, to further reduce complexity.
4410 // TODO(marpan): Keep this for now for the case
4411 // cpi->oxcf.noise_sensitivity < 4;
4412 // it should be removed in favor of the process_denoiser_mode_change()
4413 // function below.
4414 if (cpi->oxcf.noise_sensitivity > 0 && cpi->oxcf.noise_sensitivity < 4 &&
4415 !cpi->oxcf.screen_content_mode && cpi->frames_since_key % 8 == 0 &&
4416 cm->frame_type != KEY_FRAME) {
4417 cpi->mse_source_denoised = measure_square_diff_partial(
4418 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi);
4419 }
4420
4421 // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse
4422 // of source diff (between current and previous frame), and determine if we
4423 // should switch the denoiser mode. Sampling refers to computing the mse for
4424 // a sub-sample of the frame (i.e., skip x blocks along row/column), and
4425 // only for blocks in that set that have used ZEROMV LAST, along with some
4426 // constraint on the sum diff between blocks. This process is called every
4427 // ~8 frames, to further reduce complexity.
4428 if (cpi->oxcf.noise_sensitivity == 4 && !cpi->oxcf.screen_content_mode &&
4429 cpi->frames_since_key % 8 == 0 && cm->frame_type != KEY_FRAME) {
4430 process_denoiser_mode_change(cpi);
4431 }
4432 #endif
4433
4434 #ifdef OUTPUT_YUV_SKINMAP
4435 if (cpi->common.current_video_frame > 1) {
4436 vp8_compute_skin_map(cpi, yuv_skinmap_file);
4437 }
4438 #endif
4439
4440 #if CONFIG_MULTITHREAD
4441 if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
4442 /* start loopfilter in separate thread */
4443 sem_post(&cpi->h_event_start_lpf);
4444 cpi->b_lpf_running = 1;
4445 /* wait for the filter_level to be picked so that we can continue with
4446 * stream packing */
4447 sem_wait(&cpi->h_event_end_lpf);
4448 } else
4449 #endif
4450 {
4451 vp8_loopfilter_frame(cpi, cm);
4452 }
4453
4454 update_reference_frames(cpi);
4455
4456 #ifdef OUTPUT_YUV_DENOISED
4457 vpx_write_yuv_frame(yuv_denoised_file,
4458 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
4459 #endif
4460
4461 #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
4462 if (cpi->oxcf.error_resilient_mode) {
4463 cm->refresh_entropy_probs = 0;
4464 }
4465 #endif
4466
4467 /* build the bitstream */
4468 vp8_pack_bitstream(cpi, dest, dest_end, size);
4469
4470 /* Move storing frame_type out of the above loop since it is also
4471 * needed by motion search as well as by the loop filter */
4472 cm->last_frame_type = cm->frame_type;
4473
4474 /* Update rate control heuristics */
4475 cpi->total_byte_count += (*size);
4476 cpi->projected_frame_size = (int)(*size) << 3;
4477
4478 if (cpi->oxcf.number_of_layers > 1) {
4479 unsigned int i;
4480 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4481 cpi->layer_context[i].total_byte_count += (*size);
4482 }
4483 }
4484
4485 if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 2);
4486
4487 cpi->last_q[cm->frame_type] = cm->base_qindex;
4488
4489 if (cm->frame_type == KEY_FRAME) {
4490 vp8_adjust_key_frame_context(cpi);
4491 }
4492
4493 /* Keep a record of ambient average Q. */
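/* avg_frame_qindex is an exponential moving average: 3/4 of the previous
 * average plus 1/4 of this frame's base Q, with +2 for rounding. For
 * example, an average of 40 and a base_qindex of 60 give
 * (2 + 120 + 60) >> 2 = 45.
 */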
4494 if (cm->frame_type != KEY_FRAME) {
4495 cpi->avg_frame_qindex =
4496 (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
4497 }
4498
4499 /* Keep a record from which we can calculate the average Q excluding
4500 * GF updates and key frames
4501 */
4502 if ((cm->frame_type != KEY_FRAME) &&
4503 ((cpi->oxcf.number_of_layers > 1) ||
4504 (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame))) {
4505 cpi->ni_frames++;
4506
4507 /* Calculate the average Q for normal inter frames (not key or GFU
4508 * frames).
4509 */
4510 if (cpi->pass == 2) {
4511 cpi->ni_tot_qi += Q;
4512 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4513 } else {
4514 /* Damp value for first few frames */
4515 if (cpi->ni_frames > 150) {
4516 cpi->ni_tot_qi += Q;
4517 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4518 }
4519 /* For one pass, early in the clip ... average the current frame Q
4520 * value with the worstq entered by the user as a dampening measure
4521 */
4522 else {
4523 cpi->ni_tot_qi += Q;
4524 cpi->ni_av_qi =
4525 ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
4526 }
4527
4528 /* If the average Q is higher than what was used in the last
4529 * frame (after going through the recode loop to keep the frame
4530 * size within range) then use the last frame value - 1. The -1
4531 * is designed to stop Q and hence the data rate, from
4532 * progressively falling away during difficult sections, but at
4533 * the same time reduce the number of iterations around the
4534 * recode loop.
4535 */
4536 if (Q > cpi->ni_av_qi) cpi->ni_av_qi = Q - 1;
4537 }
4538 }
4539
4540 /* Update the buffer level variable. */
4541 /* Non-viewable frames are a special case and are treated as pure overhead. */
4542 if (!cm->show_frame) {
4543 cpi->bits_off_target -= cpi->projected_frame_size;
4544 } else {
4545 cpi->bits_off_target +=
4546 cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
4547 }
4548
4549 /* Clip the buffer level to the maximum specified buffer size */
4550 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
4551 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
4552 }
4553
4554 // If the frame dropper is not enabled, don't let the buffer level go below
4555 // some threshold, given here by -|maximum_buffer_size|. For now we only do
4556 // this for screen content input.
4557 if (cpi->drop_frames_allowed == 0 && cpi->oxcf.screen_content_mode &&
4558 cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size) {
4559 cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size;
4560 }
4561
4562 /* Rolling monitors of whether we are over or underspending, used to
4563 * help regulate min and max Q in two pass.
4564 */
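/* These are exponential moving averages: the short rolling monitors weight
 * the previous value by 3/4 and the new frame by 1/4, while the long
 * rolling monitors use weights of 31/32 and 1/32 for a much slower-moving
 * estimate; the +2 and +16 terms round to nearest.
 */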
4565 cpi->rolling_target_bits =
4566 ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
4567 cpi->rolling_actual_bits =
4568 ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
4569 cpi->long_rolling_target_bits =
4570 ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
4571 cpi->long_rolling_actual_bits =
4572 ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) /
4573 32;
4574
4575 /* Actual bits spent */
4576 cpi->total_actual_bits += cpi->projected_frame_size;
4577
4578 /* Debug stats */
4579 cpi->total_target_vs_actual +=
4580 (cpi->this_frame_target - cpi->projected_frame_size);
4581
4582 cpi->buffer_level = cpi->bits_off_target;
4583
4584 /* Propagate values to higher temporal layers */
4585 if (cpi->oxcf.number_of_layers > 1) {
4586 unsigned int i;
4587
4588 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4589 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4590 int bits_off_for_this_layer = (int)(lc->target_bandwidth / lc->framerate -
4591 cpi->projected_frame_size);
4592
4593 lc->bits_off_target += bits_off_for_this_layer;
4594
4595 /* Clip buffer level to maximum buffer size for the layer */
4596 if (lc->bits_off_target > lc->maximum_buffer_size) {
4597 lc->bits_off_target = lc->maximum_buffer_size;
4598 }
4599
4600 lc->total_actual_bits += cpi->projected_frame_size;
4601 lc->total_target_vs_actual += bits_off_for_this_layer;
4602 lc->buffer_level = lc->bits_off_target;
4603 }
4604 }
4605
4606 /* Update bits left to the kf and gf groups to account for overshoot
4607 * or undershoot on these frames
4608 */
4609 if (cm->frame_type == KEY_FRAME) {
4610 cpi->twopass.kf_group_bits +=
4611 cpi->this_frame_target - cpi->projected_frame_size;
4612
4613 if (cpi->twopass.kf_group_bits < 0) cpi->twopass.kf_group_bits = 0;
4614 } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) {
4615 cpi->twopass.gf_group_bits +=
4616 cpi->this_frame_target - cpi->projected_frame_size;
4617
4618 if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0;
4619 }
4620
4621 if (cm->frame_type != KEY_FRAME) {
4622 if (cpi->common.refresh_alt_ref_frame) {
4623 cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
4624 cpi->last_skip_probs_q[2] = cm->base_qindex;
4625 } else if (cpi->common.refresh_golden_frame) {
4626 cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
4627 cpi->last_skip_probs_q[1] = cm->base_qindex;
4628 } else {
4629 cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
4630 cpi->last_skip_probs_q[0] = cm->base_qindex;
4631
4632 /* update the baseline */
4633 cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
4634 }
4635 }
4636
4637 #if 0 && CONFIG_INTERNAL_STATS
4638 {
4639 FILE *f = fopen("tmp.stt", "a");
4640
4641 vpx_clear_system_state();
4642
4643 if (cpi->twopass.total_left_stats.coded_error != 0.0)
4644 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4645 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4646 "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
4647 cpi->common.current_video_frame, cpi->this_frame_target,
4648 cpi->projected_frame_size,
4649 (cpi->projected_frame_size - cpi->this_frame_target),
4650 cpi->total_target_vs_actual,
4651 cpi->buffer_level,
4652 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4653 cpi->total_actual_bits, cm->base_qindex,
4654 cpi->active_best_quality, cpi->active_worst_quality,
4655 cpi->ni_av_qi, cpi->cq_target_quality,
4656 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4657 cm->frame_type, cpi->gfu_boost,
4658 cpi->twopass.est_max_qcorrection_factor,
4659 cpi->twopass.bits_left,
4660 cpi->twopass.total_left_stats.coded_error,
4661 (double)cpi->twopass.bits_left /
4662 cpi->twopass.total_left_stats.coded_error,
4663 cpi->tot_recode_hits);
4664 else
4665 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4666 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4667 "%8.2lf %"PRId64" %10.3lf %8d\n",
4668 cpi->common.current_video_frame, cpi->this_frame_target,
4669 cpi->projected_frame_size,
4670 (cpi->projected_frame_size - cpi->this_frame_target),
4671 cpi->total_target_vs_actual,
4672 cpi->buffer_level,
4673 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4674 cpi->total_actual_bits, cm->base_qindex,
4675 cpi->active_best_quality, cpi->active_worst_quality,
4676 cpi->ni_av_qi, cpi->cq_target_quality,
4677 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4678 cm->frame_type, cpi->gfu_boost,
4679 cpi->twopass.est_max_qcorrection_factor,
4680 cpi->twopass.bits_left,
4681 cpi->twopass.total_left_stats.coded_error,
4682 cpi->tot_recode_hits);
4683
4684 fclose(f);
4685
4686 {
4687 FILE *fmodes = fopen("Modes.stt", "a");
4688
4689 fprintf(fmodes, "%6d:%1d:%1d:%1d ",
4690 cpi->common.current_video_frame,
4691 cm->frame_type, cm->refresh_golden_frame,
4692 cm->refresh_alt_ref_frame);
4693
4694 fprintf(fmodes, "\n");
4695
4696 fclose(fmodes);
4697 }
4698 }
4699
4700 #endif
4701
4702 if (cm->refresh_golden_frame == 1) {
4703 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
4704 } else {
4705 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_GOLDEN;
4706 }
4707
4708 if (cm->refresh_alt_ref_frame == 1) {
4709 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
4710 } else {
4711 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_ALTREF;
4712 }
4713
4714 if (cm->refresh_last_frame & cm->refresh_golden_frame) { /* both refreshed */
4715 cpi->gold_is_last = 1;
4716 } else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) {
4717 /* 1 refreshed but not the other */
4718 cpi->gold_is_last = 0;
4719 }
4720
4721 if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) { /* both refreshed */
4722 cpi->alt_is_last = 1;
4723 } else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) {
4724 /* 1 refreshed but not the other */
4725 cpi->alt_is_last = 0;
4726 }
4727
4728 if (cm->refresh_alt_ref_frame &
4729 cm->refresh_golden_frame) { /* both refreshed */
4730 cpi->gold_is_alt = 1;
4731 } else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) {
4732 /* 1 refreshed but not the other */
4733 cpi->gold_is_alt = 0;
4734 }
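/* Build the set of distinct reference buffers available to the next frame:
 * when two references currently point at the same buffer, one of the
 * duplicate flags is cleared so the same buffer is not searched twice.
 */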
4735
4736 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
4737
4738 if (cpi->gold_is_last) cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
4739
4740 if (cpi->alt_is_last) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4741
4742 if (cpi->gold_is_alt) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4743
4744 if (!cpi->oxcf.error_resilient_mode) {
4745 if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame &&
4746 (cm->frame_type != KEY_FRAME)) {
4747 /* Update the alternate reference frame stats as appropriate. */
4748 update_alt_ref_frame_stats(cpi);
4749 } else {
4750 /* Update the Golden frame stats as appropriate. */
4751 update_golden_frame_stats(cpi);
4752 }
4753 }
4754
4755 if (cm->frame_type == KEY_FRAME) {
4756 /* Tell the caller that the frame was coded as a key frame */
4757 *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
4758
4759 /* As this frame is a key frame the next defaults to an inter frame. */
4760 cm->frame_type = INTER_FRAME;
4761
4762 cpi->last_frame_percent_intra = 100;
4763 } else {
4764 *frame_flags = cm->frame_flags & ~FRAMEFLAGS_KEY;
4765
4766 cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
4767 }
4768
4769 /* Clear the one shot update flags for segmentation map and mode/ref
4770 * loop filter deltas.
4771 */
4772 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
4773 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
4774 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
4775
4776 /* Don't increment frame counters if this was an altref buffer update,
4777 * not a real frame
4778 */
4779 if (cm->show_frame) {
4780 cm->current_video_frame++;
4781 cpi->frames_since_key++;
4782 cpi->temporal_pattern_counter++;
4783 }
4784
4785 /* reset to normal state now that we are done. */
4786
4787 #if 0
4788 {
4789 char filename[512];
4790 FILE *recon_file;
4791 sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
4792 recon_file = fopen(filename, "wb");
4793 fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
4794 cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
4795 fclose(recon_file);
4796 }
4797 #endif
4798
4799 /* DEBUG */
4800 /* vpx_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
4801 }
4802 #if !CONFIG_REALTIME_ONLY
4803 static void Pass2Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
4804 unsigned char *dest_end, unsigned int *frame_flags) {
4805 if (!cpi->common.refresh_alt_ref_frame) vp8_second_pass(cpi);
4806
4807 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
4808 cpi->twopass.bits_left -= 8 * (int)(*size);
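/* twopass.bits_left is the bit budget remaining for the rest of the clip:
 * the actual frame size is charged against it above, and for non-alt-ref
 * frames the per-frame share of the guaranteed VBR minimum section rate is
 * credited back below, so effectively only spending above that minimum
 * consumes budget.
 */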
4809
4810 if (!cpi->common.refresh_alt_ref_frame) {
4811 double two_pass_min_rate =
4812 (double)(cpi->oxcf.target_bandwidth *
4813 cpi->oxcf.two_pass_vbrmin_section / 100);
4814 cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
4815 }
4816 }
4817 #endif
4818
4819 int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags,
4820 YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
4821 int64_t end_time) {
4822 struct vpx_usec_timer timer;
4823 int res = 0;
4824
4825 vpx_usec_timer_start(&timer);
4826
4827 /* Reinit the lookahead buffer if the frame size changes */
4828 if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height) {
4829 assert(cpi->oxcf.lag_in_frames < 2);
4830 dealloc_raw_frame_buffers(cpi);
4831 alloc_raw_frame_buffers(cpi);
4832 }
4833
4834 if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags,
4835 cpi->active_map_enabled ? cpi->active_map : NULL)) {
4836 res = -1;
4837 }
4838 vpx_usec_timer_mark(&timer);
4839 cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
4840
4841 return res;
4842 }
4843
4844 static int frame_is_reference(const VP8_COMP *cpi) {
4845 const VP8_COMMON *cm = &cpi->common;
4846 const MACROBLOCKD *xd = &cpi->mb.e_mbd;
4847
4848 return cm->frame_type == KEY_FRAME || cm->refresh_last_frame ||
4849 cm->refresh_golden_frame || cm->refresh_alt_ref_frame ||
4850 cm->copy_buffer_to_gf || cm->copy_buffer_to_arf ||
4851 cm->refresh_entropy_probs || xd->mode_ref_lf_delta_update ||
4852 xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
4853 }
4854
4855 int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
4856 size_t *size, unsigned char *dest,
4857 unsigned char *dest_end, int64_t *time_stamp,
4858 int64_t *time_end, int flush) {
4859 VP8_COMMON *cm;
4860 struct vpx_usec_timer tsctimer;
4861 struct vpx_usec_timer ticktimer;
4862 struct vpx_usec_timer cmptimer;
4863 YV12_BUFFER_CONFIG *force_src_buffer = NULL;
4864
4865 if (!cpi) return -1;
4866
4867 cm = &cpi->common;
4868
4869 if (setjmp(cpi->common.error.jmp)) {
4870 cpi->common.error.setjmp = 0;
4871 vpx_clear_system_state();
4872 return VPX_CODEC_CORRUPT_FRAME;
4873 }
4874
4875 cpi->common.error.setjmp = 1;
4876
4877 vpx_usec_timer_start(&cmptimer);
4878
4879 cpi->source = NULL;
4880
4881 #if !CONFIG_REALTIME_ONLY
4882 /* Should we code an alternate reference frame */
4883 if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.play_alternate &&
4884 cpi->source_alt_ref_pending) {
4885 if ((cpi->source = vp8_lookahead_peek(
4886 cpi->lookahead, cpi->frames_till_gf_update_due, PEEK_FORWARD))) {
4887 cpi->alt_ref_source = cpi->source;
4888 if (cpi->oxcf.arnr_max_frames > 0) {
4889 vp8_temporal_filter_prepare_c(cpi, cpi->frames_till_gf_update_due);
4890 force_src_buffer = &cpi->alt_ref_buffer;
4891 }
4892 cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
4893 cm->refresh_alt_ref_frame = 1;
4894 cm->refresh_golden_frame = 0;
4895 cm->refresh_last_frame = 0;
4896 cm->show_frame = 0;
4897 /* Clear Pending alt Ref flag. */
4898 cpi->source_alt_ref_pending = 0;
4899 cpi->is_src_frame_alt_ref = 0;
4900 }
4901 }
4902 #endif
4903
4904 if (!cpi->source) {
4905 /* Read last frame source if we are encoding first pass. */
4906 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4907 if ((cpi->last_source =
4908 vp8_lookahead_peek(cpi->lookahead, 1, PEEK_BACKWARD)) == NULL) {
4909 return -1;
4910 }
4911 }
4912
4913 if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) {
4914 cm->show_frame = 1;
4915
4916 cpi->is_src_frame_alt_ref =
4917 cpi->alt_ref_source && (cpi->source == cpi->alt_ref_source);
4918
4919 if (cpi->is_src_frame_alt_ref) cpi->alt_ref_source = NULL;
4920 }
4921 }
4922
4923 if (cpi->source) {
4924 cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
4925 cpi->un_scaled_source = cpi->Source;
4926 *time_stamp = cpi->source->ts_start;
4927 *time_end = cpi->source->ts_end;
4928 *frame_flags = cpi->source->flags;
4929
4930 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4931 cpi->last_frame_unscaled_source = &cpi->last_source->img;
4932 }
4933 } else {
4934 *size = 0;
4935 #if !CONFIG_REALTIME_ONLY
4936
4937 if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
4938 vp8_end_first_pass(cpi); /* get last stats packet */
4939 cpi->twopass.first_pass_done = 1;
4940 }
4941
4942 #endif
4943
4944 return -1;
4945 }
4946
4947 if (cpi->source->ts_start < cpi->first_time_stamp_ever) {
4948 cpi->first_time_stamp_ever = cpi->source->ts_start;
4949 cpi->last_end_time_stamp_seen = cpi->source->ts_start;
4950 }
4951
4952 /* adjust frame rates based on timestamps given */
4953 if (cm->show_frame) {
4954 int64_t this_duration;
4955 int step = 0;
4956
4957 if (cpi->source->ts_start == cpi->first_time_stamp_ever) {
4958 this_duration = cpi->source->ts_end - cpi->source->ts_start;
4959 step = 1;
4960 } else {
4961 int64_t last_duration;
4962
4963 this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
4964 last_duration = cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
4965 /* do a step update if the duration changes by 10% */
4966 if (last_duration) {
4967 step = (int)(((this_duration - last_duration) * 10 / last_duration));
4968 }
4969 }
4970
4971 if (this_duration) {
4972 if (step) {
4973 cpi->ref_framerate = 10000000.0 / this_duration;
4974 } else {
4975 double avg_duration, interval;
4976
4977 /* Average this frame's rate into the last second's average
4978 * frame rate. If we haven't seen 1 second yet, then average
4979 * over the whole interval seen.
4980 */
4981 interval = (double)(cpi->source->ts_end - cpi->first_time_stamp_ever);
4982 if (interval > 10000000.0) interval = 10000000;
4983
4984 avg_duration = 10000000.0 / cpi->ref_framerate;
4985 avg_duration *= (interval - avg_duration + this_duration);
4986 avg_duration /= interval;
4987
4988 cpi->ref_framerate = 10000000.0 / avg_duration;
4989 }
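/* Illustrative sketch of the blend above (kept compiled out; all names are
 * local to this sketch): with a previous ref_framerate of 30 fps and a new
 * 40 ms frame, one average-length frame is dropped from the 1-second window,
 * the new duration is added, and the result is renormalised by the window.
 */
#if 0
{
  double prev_fps = 30.0;
  double avg_us = 10000000.0 / prev_fps; /* ~333333 us per frame */
  double this_us = 400000.0;             /* the new 40 ms frame */
  double interval_us = 10000000.0;       /* interval, capped at one second */
  avg_us = avg_us * (interval_us - avg_us + this_us) / interval_us;
  /* avg_us is now ~335556 us, so ref_framerate would move to ~29.8 fps. */
}
#endif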
4990 #if CONFIG_MULTI_RES_ENCODING
4991 if (cpi->oxcf.mr_total_resolutions > 1) {
4992 LOWER_RES_FRAME_INFO *low_res_frame_info =
4993 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
4994 // Frame rate should be the same for all spatial layers in
4995 // multi-res-encoding (simulcast), so we constrain the frame rate for
4996 // higher layers to be that of the lowest resolution. This is needed
4997 // as the application may decide to skip encoding a high layer and
4998 // then start again, in which case a big jump in time-stamps will
4999 // be received for that high layer, which will yield an incorrect
5000 // frame rate (from time-stamp adjustment in above calculation).
5001 if (cpi->oxcf.mr_encoder_id) {
5002 cpi->ref_framerate = low_res_frame_info->low_res_framerate;
5003 } else {
5004 // Keep track of frame rate for lowest resolution.
5005 low_res_frame_info->low_res_framerate = cpi->ref_framerate;
5006 }
5007 }
5008 #endif
5009 if (cpi->oxcf.number_of_layers > 1) {
5010 unsigned int i;
5011
5012 /* Update frame rates for each layer */
5013 assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
5014 for (i = 0; i < cpi->oxcf.number_of_layers && i < VPX_TS_MAX_LAYERS;
5015 ++i) {
5016 LAYER_CONTEXT *lc = &cpi->layer_context[i];
5017 lc->framerate = cpi->ref_framerate / cpi->oxcf.rate_decimator[i];
5018 }
5019 } else {
5020 vp8_new_framerate(cpi, cpi->ref_framerate);
5021 }
5022 }
5023
5024 cpi->last_time_stamp_seen = cpi->source->ts_start;
5025 cpi->last_end_time_stamp_seen = cpi->source->ts_end;
5026 }
5027
5028 if (cpi->oxcf.number_of_layers > 1) {
5029 int layer;
5030
5031 update_layer_contexts(cpi);
5032
5033 /* Restore layer specific context & set frame rate */
5034 if (cpi->temporal_layer_id >= 0) {
5035 layer = cpi->temporal_layer_id;
5036 } else {
5037 layer =
5038 cpi->oxcf
5039 .layer_id[cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
5040 }
5041 restore_layer_context(cpi, layer);
5042 vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
5043 }
5044
5045 if (cpi->compressor_speed == 2) {
5046 vpx_usec_timer_start(&tsctimer);
5047 vpx_usec_timer_start(&ticktimer);
5048 }
5049
5050 cpi->lf_zeromv_pct = (cpi->zeromv_count * 100) / cm->MBs;
5051
5052 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
5053 {
5054 int i;
5055 const int num_part = (1 << cm->multi_token_partition);
5056 /* the available bytes in dest */
5057 const unsigned long dest_size = dest_end - dest;
5058 const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);
5059
5060 unsigned char *dp = dest;
5061
5062 cpi->partition_d[0] = dp;
5063 dp += dest_size / 10; /* reserve 1/10 for control partition */
5064 cpi->partition_d_end[0] = dp;
5065
5066 for (i = 0; i < num_part; ++i) {
5067 cpi->partition_d[i + 1] = dp;
5068 dp += tok_part_buff_size;
5069 cpi->partition_d_end[i + 1] = dp;
5070 }
5071 }
5072 #endif
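/* Worked example of the partition budget above (numbers are hypothetical):
 * with dest_size = 100000 bytes and num_part = 8, the control partition is
 * reserved 100000 / 10 = 10000 bytes and each token partition gets
 * (100000 * 9) / (10 * 8) = 11250 bytes, so 10000 + 8 * 11250 accounts for
 * the full 100000-byte buffer.
 */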
5073
5074 /* start with a 0 size frame */
5075 *size = 0;
5076
5077 /* Clear down mmx registers */
5078 vpx_clear_system_state();
5079
5080 cm->frame_type = INTER_FRAME;
5081 cm->frame_flags = *frame_flags;
5082
5083 #if 0
5084
5085 if (cm->refresh_alt_ref_frame)
5086 {
5087 cm->refresh_golden_frame = 0;
5088 cm->refresh_last_frame = 0;
5089 }
5090 else
5091 {
5092 cm->refresh_golden_frame = 0;
5093 cm->refresh_last_frame = 1;
5094 }
5095
5096 #endif
5097 /* find a free buffer for the new frame */
5098 {
5099 int i = 0;
5100 for (; i < NUM_YV12_BUFFERS; ++i) {
5101 if (!cm->yv12_fb[i].flags) {
5102 cm->new_fb_idx = i;
5103 break;
5104 }
5105 }
5106
5107 assert(i < NUM_YV12_BUFFERS);
5108 }
5109 switch (cpi->pass) {
5110 #if !CONFIG_REALTIME_ONLY
5111 case 1: Pass1Encode(cpi, size, dest, frame_flags); break;
5112 case 2: Pass2Encode(cpi, size, dest, dest_end, frame_flags); break;
5113 #endif // !CONFIG_REALTIME_ONLY
5114 default:
5115 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
5116 break;
5117 }
5118
5119 if (cpi->compressor_speed == 2) {
5120 unsigned int duration, duration2;
5121 vpx_usec_timer_mark(&tsctimer);
5122 vpx_usec_timer_mark(&ticktimer);
5123
5124 duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
5125 duration2 = (unsigned int)((double)duration / 2);
5126
5127 if (cm->frame_type != KEY_FRAME) {
5128 if (cpi->avg_encode_time == 0) {
5129 cpi->avg_encode_time = duration;
5130 } else {
5131 cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
5132 }
5133 }
5134
5135 if (duration2) {
5136 {
5137 if (cpi->avg_pick_mode_time == 0) {
5138 cpi->avg_pick_mode_time = duration2;
5139 } else {
5140 cpi->avg_pick_mode_time =
5141 (7 * cpi->avg_pick_mode_time + duration2) >> 3;
5142 }
5143 }
5144 }
5145 }
5146
5147 if (cm->refresh_entropy_probs == 0) {
5148 memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
5149 }
5150
5151 /* Save the contexts separately for alt ref, gold and last. */
5152 /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
5153 if (cm->refresh_alt_ref_frame) memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));
5154
5155 if (cm->refresh_golden_frame) memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));
5156
5157 if (cm->refresh_last_frame) memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
5158
5159 /* If it's a dropped frame, honor the refresh requests on subsequent frames. */
5160 if (*size > 0) {
5161 cpi->droppable = !frame_is_reference(cpi);
5162
5163 /* return to normal state */
5164 cm->refresh_entropy_probs = 1;
5165 cm->refresh_alt_ref_frame = 0;
5166 cm->refresh_golden_frame = 0;
5167 cm->refresh_last_frame = 1;
5168 cm->frame_type = INTER_FRAME;
5169 }
5170
5171 /* Save layer specific state */
5172 if (cpi->oxcf.number_of_layers > 1) save_layer_context(cpi);
5173
5174 vpx_usec_timer_mark(&cmptimer);
5175 cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
5176
5177 if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) {
5178 generate_psnr_packet(cpi);
5179 }
5180
5181 #if CONFIG_INTERNAL_STATS
5182
5183 if (cpi->pass != 1) {
5184 cpi->bytes += *size;
5185
5186 if (cm->show_frame) {
5187 cpi->common.show_frame_mi = cpi->common.mi;
5188 cpi->count++;
5189
5190 if (cpi->b_calculate_psnr) {
5191 uint64_t ye, ue, ve;
5192 double frame_psnr;
5193 YV12_BUFFER_CONFIG *orig = cpi->Source;
5194 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
5195 unsigned int y_width = cpi->common.Width;
5196 unsigned int y_height = cpi->common.Height;
5197 unsigned int uv_width = (y_width + 1) / 2;
5198 unsigned int uv_height = (y_height + 1) / 2;
5199 int y_samples = y_height * y_width;
5200 int uv_samples = uv_height * uv_width;
5201 int t_samples = y_samples + 2 * uv_samples;
5202 double sq_error;
5203
5204 ye = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
5205 recon->y_stride, y_width, y_height);
5206
5207 ue = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
5208 recon->uv_stride, uv_width, uv_height);
5209
5210 ve = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
5211 recon->uv_stride, uv_width, uv_height);
5212
5213 sq_error = (double)(ye + ue + ve);
5214
5215 frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);
5216
5217 cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5218 cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5219 cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5220 cpi->total_sq_error += sq_error;
5221 cpi->total += frame_psnr;
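/* For reference, vpx_sse_to_psnr() is understood to compute
 *   PSNR = 10 * log10(samples * 255^2 / sse), capped at a maximum value.
 * E.g. for a 640x480 frame t_samples = 460800, so an average squared error
 * of 1 per sample (sq_error = 460800) yields 10 * log10(65025) ~= 48.1 dB.
 */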
5222 #if CONFIG_POSTPROC
5223 {
5224 YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
5225 double sq_error2;
5226 double frame_psnr2, frame_ssim2 = 0;
5227 double weight = 0;
5228
5229 vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer,
5230 cm->filter_level * 10 / 6, 1, 0);
5231 vpx_clear_system_state();
5232
5233 ye = calc_plane_error(orig->y_buffer, orig->y_stride, pp->y_buffer,
5234 pp->y_stride, y_width, y_height);
5235
5236 ue = calc_plane_error(orig->u_buffer, orig->uv_stride, pp->u_buffer,
5237 pp->uv_stride, uv_width, uv_height);
5238
5239 ve = calc_plane_error(orig->v_buffer, orig->uv_stride, pp->v_buffer,
5240 pp->uv_stride, uv_width, uv_height);
5241
5242 sq_error2 = (double)(ye + ue + ve);
5243
5244 frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);
5245
5246 cpi->totalp_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5247 cpi->totalp_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5248 cpi->totalp_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5249 cpi->total_sq_error2 += sq_error2;
5250 cpi->totalp += frame_psnr2;
5251
5252 frame_ssim2 =
5253 vpx_calc_ssim(cpi->Source, &cm->post_proc_buffer, &weight);
5254
5255 cpi->summed_quality += frame_ssim2 * weight;
5256 cpi->summed_weights += weight;
5257
5258 if (cpi->oxcf.number_of_layers > 1) {
5259 unsigned int i;
5260
5261 for (i = cpi->current_layer; i < cpi->oxcf.number_of_layers; ++i) {
5262 cpi->frames_in_layer[i]++;
5263
5264 cpi->bytes_in_layer[i] += *size;
5265 cpi->sum_psnr[i] += frame_psnr;
5266 cpi->sum_psnr_p[i] += frame_psnr2;
5267 cpi->total_error2[i] += sq_error;
5268 cpi->total_error2_p[i] += sq_error2;
5269 cpi->sum_ssim[i] += frame_ssim2 * weight;
5270 cpi->sum_weights[i] += weight;
5271 }
5272 }
5273 }
5274 #endif
5275 }
5276 }
5277 }
5278
5279 #if 0
5280
5281 if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
5282 {
5283 skiptruecount += cpi->skip_true_count;
5284 skipfalsecount += cpi->skip_false_count;
5285 }
5286
5287 #endif
5288 #if 0
5289
5290 if (cpi->pass != 1)
5291 {
5292 FILE *f = fopen("skip.stt", "a");
5293 fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);
5294
5295 if (cpi->is_src_frame_alt_ref == 1)
5296 fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size);
5297
5298 fclose(f);
5299 }
5300
5301 #endif
5302 #endif
5303
5304 cpi->common.error.setjmp = 0;
5305
5306 #if CONFIG_MULTITHREAD
5307 /* Wait for the loop filter (lpf) thread to finish. */
5308 if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) && cpi->b_lpf_running) {
5309 sem_wait(&cpi->h_event_end_lpf);
5310 cpi->b_lpf_running = 0;
5311 }
5312 #endif
5313
5314 return 0;
5315 }
5316
5317 int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest,
5318 vp8_ppflags_t *flags) {
5319 if (cpi->common.refresh_alt_ref_frame) {
5320 return -1;
5321 } else {
5322 int ret;
5323
5324 #if CONFIG_POSTPROC
5325 cpi->common.show_frame_mi = cpi->common.mi;
5326 ret = vp8_post_proc_frame(&cpi->common, dest, flags);
5327 #else
5328 (void)flags;
5329
5330 if (cpi->common.frame_to_show) {
5331 *dest = *cpi->common.frame_to_show;
5332 dest->y_width = cpi->common.Width;
5333 dest->y_height = cpi->common.Height;
5334 dest->uv_height = cpi->common.Height / 2;
5335 ret = 0;
5336 } else {
5337 ret = -1;
5338 }
5339
5340 #endif
5341 vpx_clear_system_state();
5342 return ret;
5343 }
5344 }
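/* Illustrative usage sketch (kept compiled out): fetching the preview frame
 * with deblocking requested. Field names follow vp8_ppflags_t from vpx/vp8.h;
 * the helper name and settings below are hypothetical.
 */
#if 0
static int example_grab_preview(VP8_COMP *cpi, YV12_BUFFER_CONFIG *preview) {
  vp8_ppflags_t flags = { 0 };
  flags.post_proc_flag = VP8D_DEBLOCK; /* request deblocking only */
  flags.deblocking_level = 5;
  return vp8_get_preview_raw_frame(cpi, preview, &flags);
}
#endif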
5345
5346 int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
5347 unsigned int cols, int delta_q[4], int delta_lf[4],
5348 unsigned int threshold[4]) {
5349 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
5350 int internal_delta_q[MAX_MB_SEGMENTS];
5351 const int range = 63;
5352 int i;
5353
5354 // Check that the number of rows and columns match.
5355 if (cpi->common.mb_rows != (int)rows || cpi->common.mb_cols != (int)cols) {
5356 return -1;
5357 }
5358
5359 // Range check the delta Q values and convert the external Q range values
5360 // to internal ones.
5361 if ((abs(delta_q[0]) > range) || (abs(delta_q[1]) > range) ||
5362 (abs(delta_q[2]) > range) || (abs(delta_q[3]) > range)) {
5363 return -1;
5364 }
5365
5366 // Range check the delta lf values
5367 if ((abs(delta_lf[0]) > range) || (abs(delta_lf[1]) > range) ||
5368 (abs(delta_lf[2]) > range) || (abs(delta_lf[3]) > range)) {
5369 return -1;
5370 }
5371
5372 // Disable segmentation if no map is given or no deltas/thresholds are set.
5373 if (!map || (delta_q[0] == 0 && delta_q[1] == 0 && delta_q[2] == 0 &&
5374 delta_q[3] == 0 && delta_lf[0] == 0 && delta_lf[1] == 0 &&
5375 delta_lf[2] == 0 && delta_lf[3] == 0 && threshold[0] == 0 &&
5376 threshold[1] == 0 && threshold[2] == 0 && threshold[3] == 0)) {
5377 disable_segmentation(cpi);
5378 return 0;
5379 }
5380
5381 // Translate the external delta q values to internal values.
5382 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
5383 internal_delta_q[i] =
5384 (delta_q[i] >= 0) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
5385 }
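/* Note: q_trans[] maps the external quantizer range (0-63) onto the internal
 * quantizer index range (0-127); the sign is preserved, so an external delta
 * of -10 becomes -q_trans[10].
 */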
5386
5387 /* Set the segmentation Map */
5388 set_segmentation_map(cpi, map);
5389
5390 /* Activate segmentation. */
5391 enable_segmentation(cpi);
5392
5393 /* Set up the quant segment data */
5394 feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
5395 feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
5396 feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
5397 feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];
5398
5399 /* Set up the loop filter segment data */
5400 feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
5401 feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
5402 feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
5403 feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];
5404
5405 cpi->segment_encode_breakout[0] = threshold[0];
5406 cpi->segment_encode_breakout[1] = threshold[1];
5407 cpi->segment_encode_breakout[2] = threshold[2];
5408 cpi->segment_encode_breakout[3] = threshold[3];
5409
5410 /* Initialise the feature data structure */
5411 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
5412
5413 if (threshold[0] != 0 || threshold[1] != 0 || threshold[2] != 0 ||
5414 threshold[3] != 0)
5415 cpi->use_roi_static_threshold = 1;
5416 cpi->cyclic_refresh_mode_enabled = 0;
5417
5418 return 0;
5419 }
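/* Illustrative usage sketch (kept compiled out): place the top-left
 * macroblock in segment 1 with a lower Q and leave everything else in
 * segment 0. In the public API this is normally reached through the
 * VP8E_SET_ROI_MAP codec control; the helper name below is hypothetical.
 */
#if 0
static int example_set_simple_roi(VP8_COMP *cpi) {
  const unsigned int rows = (unsigned int)cpi->common.mb_rows;
  const unsigned int cols = (unsigned int)cpi->common.mb_cols;
  int delta_q[4] = { 0, -10, 0, 0 };          /* segment 1 gets a lower Q */
  int delta_lf[4] = { 0, 0, 0, 0 };
  unsigned int threshold[4] = { 0, 0, 0, 0 };
  unsigned char *roi_map = (unsigned char *)vpx_calloc(rows * cols, 1);
  int ret;

  if (!roi_map) return -1;
  roi_map[0] = 1; /* top-left macroblock -> segment 1 */
  ret = vp8_set_roimap(cpi, roi_map, rows, cols, delta_q, delta_lf, threshold);
  vpx_free(roi_map);
  return ret;
}
#endif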
5420
5421 int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
5422 unsigned int cols) {
5423 if ((int)rows == cpi->common.mb_rows && (int)cols == cpi->common.mb_cols) {
5424 if (map) {
5425 memcpy(cpi->active_map, map, rows * cols);
5426 cpi->active_map_enabled = 1;
5427 } else {
5428 cpi->active_map_enabled = 0;
5429 }
5430
5431 return 0;
5432 } else {
5433 return -1;
5434 }
5435 }
5436
5437 int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode,
5438 VPX_SCALING vert_mode) {
5439 if (horiz_mode <= ONETWO) {
5440 cpi->common.horiz_scale = horiz_mode;
5441 } else {
5442 return -1;
5443 }
5444
5445 if (vert_mode <= ONETWO) {
5446 cpi->common.vert_scale = vert_mode;
5447 } else {
5448 return -1;
5449 }
5450
5451 return 0;
5452 }
5453
5454 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
5455 int i, j;
5456 int Total = 0;
5457
5458 unsigned char *src = source->y_buffer;
5459 unsigned char *dst = dest->y_buffer;
5460
5461 /* Loop through the Y plane of the raw and reconstructed data, summing the
5462 * squared differences over 16x16 blocks.
5463 */
5464 for (i = 0; i < source->y_height; i += 16) {
5465 for (j = 0; j < source->y_width; j += 16) {
5466 unsigned int sse;
5467 Total += vpx_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
5468 &sse);
5469 }
5470
5471 src += 16 * source->y_stride;
5472 dst += 16 * dest->y_stride;
5473 }
5474
5475 return Total;
5476 }
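/* Illustrative sketch (kept compiled out): converting the summed Y-plane SSE
 * above into a PSNR figure via vpx_sse_to_psnr(), assuming the frame
 * dimensions are multiples of 16 so every sample is covered. The helper name
 * is hypothetical.
 */
#if 0
static double example_y_psnr(YV12_BUFFER_CONFIG *source,
                             YV12_BUFFER_CONFIG *dest) {
  const int sse = vp8_calc_ss_err(source, dest);
  const double samples = (double)source->y_height * source->y_width;
  return vpx_sse_to_psnr(samples, 255.0, (double)sse);
}
#endif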
5477
5478 int vp8_get_quantizer(VP8_COMP *cpi) { return cpi->common.base_qindex; }
5479