1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "vpx_config.h"
12 #include "./vpx_scale_rtcd.h"
13 #include "./vpx_dsp_rtcd.h"
14 #include "./vp8_rtcd.h"
15 #include "bitstream.h"
16 #include "vp8/common/onyxc_int.h"
17 #include "vp8/common/blockd.h"
18 #include "onyx_int.h"
19 #include "vp8/common/systemdependent.h"
20 #include "vp8/common/vp8_skin_detection.h"
21 #include "vp8/encoder/quantize.h"
22 #include "vp8/common/alloccommon.h"
23 #include "mcomp.h"
24 #include "firstpass.h"
25 #include "vpx_dsp/psnr.h"
26 #include "vpx_scale/vpx_scale.h"
27 #include "vp8/common/extend.h"
28 #include "ratectrl.h"
29 #include "vp8/common/quant_common.h"
30 #include "segmentation.h"
31 #if CONFIG_POSTPROC
32 #include "vp8/common/postproc.h"
33 #endif
34 #include "vpx_mem/vpx_mem.h"
35 #include "vp8/common/reconintra.h"
36 #include "vp8/common/swapyv12buffer.h"
37 #include "vp8/common/threading.h"
38 #include "vpx_ports/system_state.h"
39 #include "vpx_ports/vpx_once.h"
40 #include "vpx_ports/vpx_timer.h"
41 #include "vpx_util/vpx_write_yuv_frame.h"
42 #if VPX_ARCH_ARM
43 #include "vpx_ports/arm.h"
44 #endif
45 #if CONFIG_MULTI_RES_ENCODING
46 #include "mr_dissim.h"
47 #endif
48 #include "encodeframe.h"
49 #if CONFIG_MULTITHREAD
50 #include "ethreading.h"
51 #endif
52 #include "picklpf.h"
53 #if !CONFIG_REALTIME_ONLY
54 #include "temporal_filter.h"
55 #endif
56
57 #include <assert.h>
58 #include <math.h>
59 #include <stdio.h>
60 #include <limits.h>
61
62 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
63 extern int vp8_update_coef_context(VP8_COMP *cpi);
64 #endif
65
66 extern unsigned int vp8_get_processor_freq(void);
67
68 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
69
70 static void set_default_lf_deltas(VP8_COMP *cpi);
71
72 extern const int vp8_gf_interval_table[101];
73
74 #if CONFIG_INTERNAL_STATS
75 #include <math.h>
76 #include "vpx_dsp/ssim.h"
77 #endif
78
79 #ifdef OUTPUT_YUV_SRC
80 FILE *yuv_file;
81 #endif
82 #ifdef OUTPUT_YUV_DENOISED
83 FILE *yuv_denoised_file;
84 #endif
85 #ifdef OUTPUT_YUV_SKINMAP
86 static FILE *yuv_skinmap_file = NULL;
87 #endif
88
89 #if 0
90 FILE *framepsnr;
91 FILE *kf_list;
92 FILE *keyfile;
93 #endif
94
95 #if 0
96 extern int skip_true_count;
97 extern int skip_false_count;
98 #endif
99
100 #ifdef SPEEDSTATS
101 unsigned int frames_at_speed[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0 };
103 unsigned int tot_pm = 0;
104 unsigned int cnt_pm = 0;
105 unsigned int tot_ef = 0;
106 unsigned int cnt_ef = 0;
107 #endif
108
109 #ifdef MODE_STATS
110 extern unsigned __int64 Sectionbits[50];
111 extern int y_modes[5];
112 extern int uv_modes[4];
113 extern int b_modes[10];
114
115 extern int inter_y_modes[10];
116 extern int inter_uv_modes[4];
117 extern unsigned int inter_b_modes[15];
118 #endif
119
120 extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
121
122 extern const int qrounding_factors[129];
123 extern const int qzbin_factors[129];
124 extern void vp8cx_init_quantizer(VP8_COMP *cpi);
125 extern const int vp8cx_base_skip_false_prob[128];
126
127 /* Tables relating active max Q to active min Q */
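/* Each table is indexed by the frame's active worst quality (qindex 0 to
 * QINDEX_RANGE - 1) and gives a floor for the active best quality, with
 * separate curves per frame type (key / golden / inter) and motion level. */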
128 static const unsigned char kf_low_motion_minq[QINDEX_RANGE] = {
129 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
130 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
131 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
132 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
133 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 10, 10, 11,
134 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
135 17, 17, 18, 18, 18, 18, 19, 20, 20, 21, 21, 22, 23, 23
136 };
137 static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = {
138 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
139 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
140 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5,
141 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10,
142 10, 10, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16,
143 16, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
144 22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30
145 };
146 static const unsigned char gf_low_motion_minq[QINDEX_RANGE] = {
147 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3,
148 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8,
149 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
150 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
151 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34,
152 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 44,
153 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58
154 };
155 static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] = {
156 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5,
157 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11,
158 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18,
159 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
160 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37,
161 37, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 45, 46, 47, 48, 49, 50,
162 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
163 };
164 static const unsigned char gf_high_motion_minq[QINDEX_RANGE] = {
165 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5,
166 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11,
167 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21,
168 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30,
169 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40,
170 40, 41, 41, 42, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
171 57, 58, 59, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80
172 };
173 static const unsigned char inter_minq[QINDEX_RANGE] = {
174 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
175 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24,
176 24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38,
177 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53,
178 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69,
179 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86,
180 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
181 };
182
183 #ifdef PACKET_TESTING
184 extern FILE *vpxlogc;
185 #endif
186
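/* One LAYER_CONTEXT is kept per temporal layer. The save/restore pair below
 * snapshots and reinstates the layer-dependent rate control and buffer state
 * so that a single VP8_COMP can be time-multiplexed across layers. */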
187 void vp8_save_layer_context(VP8_COMP *cpi) {
188 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
189
190 /* Save layer dependent coding state */
191 lc->target_bandwidth = cpi->target_bandwidth;
192 lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
193 lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
194 lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
195 lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
196 lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
197 lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
198 lc->buffer_level = cpi->buffer_level;
199 lc->bits_off_target = cpi->bits_off_target;
200 lc->total_actual_bits = cpi->total_actual_bits;
201 lc->worst_quality = cpi->worst_quality;
202 lc->active_worst_quality = cpi->active_worst_quality;
203 lc->best_quality = cpi->best_quality;
204 lc->active_best_quality = cpi->active_best_quality;
205 lc->ni_av_qi = cpi->ni_av_qi;
206 lc->ni_tot_qi = cpi->ni_tot_qi;
207 lc->ni_frames = cpi->ni_frames;
208 lc->avg_frame_qindex = cpi->avg_frame_qindex;
209 lc->rate_correction_factor = cpi->rate_correction_factor;
210 lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
211 lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
212 lc->zbin_over_quant = cpi->mb.zbin_over_quant;
213 lc->inter_frame_target = cpi->inter_frame_target;
214 lc->total_byte_count = cpi->total_byte_count;
215 lc->filter_level = cpi->common.filter_level;
216 lc->frames_since_last_drop_overshoot = cpi->frames_since_last_drop_overshoot;
217 lc->force_maxqp = cpi->force_maxqp;
218 lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
219 lc->last_q[0] = cpi->last_q[0];
220 lc->last_q[1] = cpi->last_q[1];
221
222 memcpy(lc->count_mb_ref_frame_usage, cpi->mb.count_mb_ref_frame_usage,
223 sizeof(cpi->mb.count_mb_ref_frame_usage));
224 }
225
226 void vp8_restore_layer_context(VP8_COMP *cpi, const int layer) {
227 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
228
229 /* Restore layer dependent coding state */
230 cpi->current_layer = layer;
231 cpi->target_bandwidth = lc->target_bandwidth;
232 cpi->oxcf.target_bandwidth = lc->target_bandwidth;
233 cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
234 cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
235 cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
236 cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
237 cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
238 cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
239 cpi->buffer_level = lc->buffer_level;
240 cpi->bits_off_target = lc->bits_off_target;
241 cpi->total_actual_bits = lc->total_actual_bits;
242 cpi->active_worst_quality = lc->active_worst_quality;
243 cpi->active_best_quality = lc->active_best_quality;
244 cpi->ni_av_qi = lc->ni_av_qi;
245 cpi->ni_tot_qi = lc->ni_tot_qi;
246 cpi->ni_frames = lc->ni_frames;
247 cpi->avg_frame_qindex = lc->avg_frame_qindex;
248 cpi->rate_correction_factor = lc->rate_correction_factor;
249 cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
250 cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
251 cpi->mb.zbin_over_quant = lc->zbin_over_quant;
252 cpi->inter_frame_target = lc->inter_frame_target;
253 cpi->total_byte_count = lc->total_byte_count;
254 cpi->common.filter_level = lc->filter_level;
255 cpi->frames_since_last_drop_overshoot = lc->frames_since_last_drop_overshoot;
256 cpi->force_maxqp = lc->force_maxqp;
257 cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
258 cpi->last_q[0] = lc->last_q[0];
259 cpi->last_q[1] = lc->last_q[1];
260
261 memcpy(cpi->mb.count_mb_ref_frame_usage, lc->count_mb_ref_frame_usage,
262 sizeof(cpi->mb.count_mb_ref_frame_usage));
263 }
264
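/* Computes val * num / denom using 64-bit intermediates to avoid integer
 * overflow; used below to convert buffer levels expressed in milliseconds
 * into bits at a given target bandwidth.
 * Illustrative example: rescale(500, 800000, 1000) == 400000. */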
265 static int rescale(int val, int num, int denom) {
266 int64_t llnum = num;
267 int64_t llden = denom;
268 int64_t llval = val;
269
270 return (int)(llval * llnum / llden);
271 }
272
273 void vp8_init_temporal_layer_context(VP8_COMP *cpi, VP8_CONFIG *oxcf,
274 const int layer,
275 double prev_layer_framerate) {
276 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
277
278 lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
279 lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
280
281 lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
282 lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
283 lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
284
285 lc->starting_buffer_level =
286 rescale((int)(oxcf->starting_buffer_level), lc->target_bandwidth, 1000);
287
288 if (oxcf->optimal_buffer_level == 0) {
289 lc->optimal_buffer_level = lc->target_bandwidth / 8;
290 } else {
291 lc->optimal_buffer_level =
292 rescale((int)(oxcf->optimal_buffer_level), lc->target_bandwidth, 1000);
293 }
294
295 if (oxcf->maximum_buffer_size == 0) {
296 lc->maximum_buffer_size = lc->target_bandwidth / 8;
297 } else {
298 lc->maximum_buffer_size =
299 rescale((int)(oxcf->maximum_buffer_size), lc->target_bandwidth, 1000);
300 }
301
302 /* Work out the average size of a frame within this layer */
303 if (layer > 0) {
304 lc->avg_frame_size_for_layer =
305 (int)round((cpi->oxcf.target_bitrate[layer] -
306 cpi->oxcf.target_bitrate[layer - 1]) *
307 1000 / (lc->framerate - prev_layer_framerate));
308 }
309
310 lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
311 lc->active_best_quality = cpi->oxcf.best_allowed_q;
312 lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
313
314 lc->buffer_level = lc->starting_buffer_level;
315 lc->bits_off_target = lc->starting_buffer_level;
316
317 lc->total_actual_bits = 0;
318 lc->ni_av_qi = 0;
319 lc->ni_tot_qi = 0;
320 lc->ni_frames = 0;
321 lc->rate_correction_factor = 1.0;
322 lc->key_frame_rate_correction_factor = 1.0;
323 lc->gf_rate_correction_factor = 1.0;
324 lc->inter_frame_target = 0;
325 }
326
327 // Upon a run-time change in temporal layers, reset the layer context parameters
328 // for any "new" layers. For "existing" layers, let them inherit the parameters
329 // from the previous layer state (at the same layer #). In future we may want
330 // to better map the previous layer state(s) to the "new" ones.
331 void vp8_reset_temporal_layer_change(VP8_COMP *cpi, VP8_CONFIG *oxcf,
332 const int prev_num_layers) {
333 int i;
334 double prev_layer_framerate = 0;
335 const int curr_num_layers = cpi->oxcf.number_of_layers;
336 // If the previous state was 1 layer, get current layer context from cpi.
337 // We need this to set the layer context for the new layers below.
338 if (prev_num_layers == 1) {
339 cpi->current_layer = 0;
340 vp8_save_layer_context(cpi);
341 }
342 for (i = 0; i < curr_num_layers; ++i) {
343 LAYER_CONTEXT *lc = &cpi->layer_context[i];
344 if (i >= prev_num_layers) {
345 vp8_init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
346 }
347 // The initial buffer levels are set based on their starting levels.
348 // We could set the buffer levels based on the previous state (normalized
349 // properly by the layer bandwidths) but we would need to keep track of
350 // the previous set of layer bandwidths (i.e., target_bitrate[i])
351 // before the layer change. For now, reset to the starting levels.
352 lc->buffer_level =
353 cpi->oxcf.starting_buffer_level_in_ms * cpi->oxcf.target_bitrate[i];
354 lc->bits_off_target = lc->buffer_level;
355 // TODO(marpan): Should we set the rate_correction_factor and
356 // active_worst/best_quality to values derived from the previous layer
357 // state (to smooth-out quality dips/rate fluctuation at transition)?
358
359 // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
360 // is not set for 1 layer, and the vp8_restore_layer_context/save_context()
361 // are not called in the encoding loop, so we need to call it here to
362 // pass the layer context state to |cpi|.
363 if (curr_num_layers == 1) {
364 lc->target_bandwidth = cpi->oxcf.target_bandwidth;
365 lc->buffer_level =
366 cpi->oxcf.starting_buffer_level_in_ms * lc->target_bandwidth / 1000;
367 lc->bits_off_target = lc->buffer_level;
368 vp8_restore_layer_context(cpi, 0);
369 }
370 prev_layer_framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[i];
371 }
372 }
373
374 static void setup_features(VP8_COMP *cpi) {
375 // If segmentation is enabled, set the update flags
376 if (cpi->mb.e_mbd.segmentation_enabled) {
377 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
378 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
379 } else {
380 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
381 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
382 }
383
384 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
385 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
386 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
387 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
388 memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0,
389 sizeof(cpi->mb.e_mbd.ref_lf_deltas));
390 memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0,
391 sizeof(cpi->mb.e_mbd.mode_lf_deltas));
392
393 set_default_lf_deltas(cpi);
394 }
395
396 static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
397
398 static void initialize_enc(void) {
399 vpx_dsp_rtcd();
400 vp8_init_intra_predictors();
401 }
402
403 void vp8_initialize_enc(void) { once(initialize_enc); }
404
405 static void dealloc_compressor_data(VP8_COMP *cpi) {
406 vpx_free(cpi->tplist);
407 cpi->tplist = NULL;
408
409 /* Delete last frame MV storage buffers */
410 vpx_free(cpi->lfmv);
411 cpi->lfmv = 0;
412
413 vpx_free(cpi->lf_ref_frame_sign_bias);
414 cpi->lf_ref_frame_sign_bias = 0;
415
416 vpx_free(cpi->lf_ref_frame);
417 cpi->lf_ref_frame = 0;
418
419 /* Delete segmentation map */
420 vpx_free(cpi->segmentation_map);
421 cpi->segmentation_map = 0;
422
423 vpx_free(cpi->active_map);
424 cpi->active_map = 0;
425
426 vp8_de_alloc_frame_buffers(&cpi->common);
427
428 vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
429 vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
430 dealloc_raw_frame_buffers(cpi);
431
432 vpx_free(cpi->tok);
433 cpi->tok = 0;
434
435 /* Structure used to monitor GF usage */
436 vpx_free(cpi->gf_active_flags);
437 cpi->gf_active_flags = 0;
438
439 /* Activity mask based per mb zbin adjustments */
440 vpx_free(cpi->mb_activity_map);
441 cpi->mb_activity_map = 0;
442
443 vpx_free(cpi->mb.pip);
444 cpi->mb.pip = 0;
445
446 #if CONFIG_MULTITHREAD
447 vpx_free(cpi->mt_current_mb_col);
448 cpi->mt_current_mb_col = NULL;
449 #endif
450 }
451
452 static void enable_segmentation(VP8_COMP *cpi) {
453 /* Set the appropriate feature bit */
454 cpi->mb.e_mbd.segmentation_enabled = 1;
455 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
456 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
457 }
458 static void disable_segmentation(VP8_COMP *cpi) {
459 /* Clear the appropriate feature bit */
460 cpi->mb.e_mbd.segmentation_enabled = 0;
461 }
462
463 /* Valid values for a segment are 0 to 3
464 * Segmentation map is arranged as [Rows][Columns]
465 */
466 static void set_segmentation_map(VP8_COMP *cpi,
467 unsigned char *segmentation_map) {
468 /* Copy in the new segmentation map */
469 memcpy(cpi->segmentation_map, segmentation_map,
470 (cpi->common.mb_rows * cpi->common.mb_cols));
471
472 /* Signal that the map should be updated. */
473 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
474 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
475 }
476
477 /* The values given for each segment can be either deltas (from the default
478 * value chosen for the frame) or absolute values.
479 *
480 * Valid range for abs values is:
481 * (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
482 * Valid range for delta values is:
483 * (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
484 *
485 * abs_delta = SEGMENT_DELTADATA (deltas)
486 * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
487 *
488 */
489 static void set_segment_data(VP8_COMP *cpi, signed char *feature_data,
490 unsigned char abs_delta) {
491 cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
492 memcpy(cpi->segment_feature_data, feature_data,
493 sizeof(cpi->segment_feature_data));
494 }
495
496 /* A simple function to cyclically refresh the background at a lower Q */
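/* Candidate macroblocks are taken from cpi->cyclic_refresh_map, starting at
 * cyclic_refresh_mode_index and wrapping around the frame. Up to
 * cyclic_refresh_mode_max_mbs_perframe of them are placed in segment 1,
 * which is given a lower quantizer (cyclic_refresh_q) and, optionally, a
 * loop filter adjustment via the segment feature data set up at the end. */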
497 static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) {
498 unsigned char *seg_map = cpi->segmentation_map;
499 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
500 int i;
501 int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
502 int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
503
504 cpi->cyclic_refresh_q = Q / 2;
505
506 if (cpi->oxcf.screen_content_mode) {
507 // Modify quality ramp-up based on Q. Above some Q level, increase the
508 // number of blocks to be refreshed, and reduce it below the threshold.
509 // Turn it off under certain conditions, i.e., away from a key frame and
510 // when we are at good quality (low Q) and most of the blocks were
511 // skipped-encoded in the previous frame.
512
513 int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100;
514 if (Q >= qp_thresh) {
515 cpi->cyclic_refresh_mode_max_mbs_perframe =
516 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
517 } else if (cpi->frames_since_key > 250 && Q < 20 &&
518 cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
519 cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
520 } else {
521 cpi->cyclic_refresh_mode_max_mbs_perframe =
522 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
523 }
524 block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
525 }
526
527 // Set every macroblock to be eligible for update.
528 // For key frame this will reset seg map to 0.
529 memset(cpi->segmentation_map, 0, mbs_in_frame);
530
531 if (cpi->common.frame_type != KEY_FRAME && block_count > 0) {
532 /* Cycle through the macro_block rows */
533 /* MB loop to set local segmentation map */
534 i = cpi->cyclic_refresh_mode_index;
535 assert(i < mbs_in_frame);
536 do {
537 /* If the MB is a candidate for clean up then mark it for
538 * possible boost/refresh (segment 1). The segment id may get
539 * reset to 0 later if the MB gets coded as anything other than
540 * last frame 0,0, as only (last frame 0,0) MBs are eligible for
541 * refresh: that is to say MBs likely to be background blocks.
542 */
543 if (cpi->cyclic_refresh_map[i] == 0) {
544 seg_map[i] = 1;
545 block_count--;
546 } else if (cpi->cyclic_refresh_map[i] < 0) {
547 cpi->cyclic_refresh_map[i]++;
548 }
549
550 i++;
551 if (i == mbs_in_frame) i = 0;
552
553 } while (block_count && i != cpi->cyclic_refresh_mode_index);
554
555 cpi->cyclic_refresh_mode_index = i;
556
557 #if CONFIG_TEMPORAL_DENOISING
558 if (cpi->oxcf.noise_sensitivity > 0) {
559 if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
560 Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
561 (cpi->frames_since_key >
562 2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
563 // Under aggressive denoising, use segmentation to turn off loop
564 // filter below some qp thresh. The filter is reduced for all
565 // blocks that have been encoded as ZEROMV LAST x frames in a row,
566 // where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
567 // This is to avoid "dot" artifacts that can occur from repeated
568 // loop filtering on noisy input source.
569 cpi->cyclic_refresh_q = Q;
570 // lf_adjustment = -MAX_LOOP_FILTER;
571 lf_adjustment = -40;
572 for (i = 0; i < mbs_in_frame; ++i) {
573 seg_map[i] = (cpi->consec_zero_last[i] >
574 cpi->denoiser.denoise_pars.consec_zerolast)
575 ? 1
576 : 0;
577 }
578 }
579 }
580 #endif
581 }
582
583 /* Activate segmentation. */
584 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
585 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
586 enable_segmentation(cpi);
587
588 /* Set up the quant segment data */
589 feature_data[MB_LVL_ALT_Q][0] = 0;
590 feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
591 feature_data[MB_LVL_ALT_Q][2] = 0;
592 feature_data[MB_LVL_ALT_Q][3] = 0;
593
594 /* Set up the loop segment data */
595 feature_data[MB_LVL_ALT_LF][0] = 0;
596 feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
597 feature_data[MB_LVL_ALT_LF][2] = 0;
598 feature_data[MB_LVL_ALT_LF][3] = 0;
599
600 /* Initialise the feature data structure */
601 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
602 }
603
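/* Builds a per-macroblock skin map from the source Y/U/V planes, sampling
 * 8x8 blocks for resolutions up to CIF and 16x16 blocks otherwise, then
 * despeckles the result by clearing isolated skin blocks and filling
 * isolated non-skin blocks (frame boundary excluded). */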
604 static void compute_skin_map(VP8_COMP *cpi) {
605 int mb_row, mb_col, num_bl;
606 VP8_COMMON *cm = &cpi->common;
607 const uint8_t *src_y = cpi->Source->y_buffer;
608 const uint8_t *src_u = cpi->Source->u_buffer;
609 const uint8_t *src_v = cpi->Source->v_buffer;
610 const int src_ystride = cpi->Source->y_stride;
611 const int src_uvstride = cpi->Source->uv_stride;
612
613 const SKIN_DETECTION_BLOCK_SIZE bsize =
614 (cm->Width * cm->Height <= 352 * 288) ? SKIN_8X8 : SKIN_16X16;
615
616 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
617 num_bl = 0;
618 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
619 const int bl_index = mb_row * cm->mb_cols + mb_col;
620 cpi->skin_map[bl_index] =
621 vp8_compute_skin_block(src_y, src_u, src_v, src_ystride, src_uvstride,
622 bsize, cpi->consec_zero_last[bl_index], 0);
623 num_bl++;
624 src_y += 16;
625 src_u += 8;
626 src_v += 8;
627 }
628 src_y += (src_ystride << 4) - (num_bl << 4);
629 src_u += (src_uvstride << 3) - (num_bl << 3);
630 src_v += (src_uvstride << 3) - (num_bl << 3);
631 }
632
633 // Remove isolated skin blocks (those with no skin neighbors) and isolated
634 // non-skin blocks (those whose neighbors are all skin). Skip the boundary.
635 for (mb_row = 1; mb_row < cm->mb_rows - 1; mb_row++) {
636 for (mb_col = 1; mb_col < cm->mb_cols - 1; mb_col++) {
637 const int bl_index = mb_row * cm->mb_cols + mb_col;
638 int num_neighbor = 0;
639 int mi, mj;
640 int non_skin_threshold = 8;
641
642 for (mi = -1; mi <= 1; mi += 1) {
643 for (mj = -1; mj <= 1; mj += 1) {
644 int bl_neighbor_index = (mb_row + mi) * cm->mb_cols + mb_col + mj;
645 if (cpi->skin_map[bl_neighbor_index]) num_neighbor++;
646 }
647 }
648
649 if (cpi->skin_map[bl_index] && num_neighbor < 2)
650 cpi->skin_map[bl_index] = 0;
651 if (!cpi->skin_map[bl_index] && num_neighbor == non_skin_threshold)
652 cpi->skin_map[bl_index] = 1;
653 }
654 }
655 }
656
657 static void set_default_lf_deltas(VP8_COMP *cpi) {
658 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
659 cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
660
661 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
662 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
663
664 /* Test of ref frame deltas */
665 cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
666 cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
667 cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
668 cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
669
670 cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
671
672 if (cpi->oxcf.Mode == MODE_REALTIME) {
673 cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
674 } else {
675 cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
676 }
677
678 cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
679 cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
680 }
681
682 /* Convenience macros for mapping speed and mode into a continuous
683 * range
684 */
685 #define GOOD(x) ((x) + 1)
686 #define RT(x) ((x) + 7)
687
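/* The tables below are flat lists of (value, speed bound) pairs on the
 * combined GOOD()/RT() scale defined above. speed_map() returns the value of
 * the first pair whose bound exceeds the given speed; each list is
 * terminated with an INT_MAX bound so the scan always stops. */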
688 static int speed_map(int speed, const int *map) {
689 int res;
690
691 do {
692 res = *map++;
693 } while (speed >= *map++);
694 return res;
695 }
696
697 static const int thresh_mult_map_znn[] = {
698 /* map common to zero, nearest, and near */
699 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
700 };
701
702 static const int thresh_mult_map_vhpred[] = { 1000, GOOD(2), 1500, GOOD(3),
703 2000, RT(0), 1000, RT(1),
704 2000, RT(7), INT_MAX, INT_MAX };
705
706 static const int thresh_mult_map_bpred[] = { 2000, GOOD(0), 2500, GOOD(2),
707 5000, GOOD(3), 7500, RT(0),
708 2500, RT(1), 5000, RT(6),
709 INT_MAX, INT_MAX };
710
711 static const int thresh_mult_map_tm[] = { 1000, GOOD(2), 1500, GOOD(3),
712 2000, RT(0), 0, RT(1),
713 1000, RT(2), 2000, RT(7),
714 INT_MAX, INT_MAX };
715
716 static const int thresh_mult_map_new1[] = { 1000, GOOD(2), 2000,
717 RT(0), 2000, INT_MAX };
718
719 static const int thresh_mult_map_new2[] = { 1000, GOOD(2), 2000, GOOD(3),
720 2500, GOOD(5), 4000, RT(0),
721 2000, RT(2), 2500, RT(5),
722 4000, INT_MAX };
723
724 static const int thresh_mult_map_split1[] = {
725 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
726 RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
727 };
728
729 static const int thresh_mult_map_split2[] = {
730 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
731 RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
732 };
733
734 static const int mode_check_freq_map_zn2[] = {
735 /* {zero,nearest}{2,3} */
736 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
737 };
738
739 static const int mode_check_freq_map_vhbpred[] = { 0, GOOD(5), 2, RT(0),
740 0, RT(3), 2, RT(5),
741 4, INT_MAX };
742
743 static const int mode_check_freq_map_near2[] = {
744 0, GOOD(5), 2, RT(0), 0, RT(3), 2,
745 RT(10), 1 << 2, RT(11), 1 << 3, RT(12), 1 << 4, INT_MAX
746 };
747
748 static const int mode_check_freq_map_new1[] = {
749 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
750 };
751
752 static const int mode_check_freq_map_new2[] = { 0, GOOD(5), 4, RT(0),
753 0, RT(3), 4, RT(10),
754 1 << 3, RT(11), 1 << 4, RT(12),
755 1 << 5, INT_MAX };
756
757 static const int mode_check_freq_map_split1[] = { 0, GOOD(2), 2, GOOD(3),
758 7, RT(1), 2, RT(2),
759 7, INT_MAX };
760
761 static const int mode_check_freq_map_split2[] = { 0, GOOD(1), 2, GOOD(2),
762 4, GOOD(3), 15, RT(1),
763 4, RT(2), 15, INT_MAX };
764
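/* Maps the current compressor mode and speed onto the continuous GOOD()/RT()
 * scale, fills in the SPEED_FEATURES flags, per-mode RD thresholds
 * (thresh_mult) and mode check frequencies, and then selects the matching
 * quantizer, FDCT and sub-pixel search function pointers. */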
765 void vp8_set_speed_features(VP8_COMP *cpi) {
766 SPEED_FEATURES *sf = &cpi->sf;
767 int Mode = cpi->compressor_speed;
768 int Speed = cpi->Speed;
769 int Speed2;
770 int i;
771 VP8_COMMON *cm = &cpi->common;
772 int last_improved_quant = sf->improved_quant;
773 int ref_frames;
774
775 /* Initialise default mode frequency sampling variables */
776 for (i = 0; i < MAX_MODES; ++i) {
777 cpi->mode_check_freq[i] = 0;
778 }
779
780 cpi->mb.mbs_tested_so_far = 0;
781 cpi->mb.mbs_zero_last_dot_suppress = 0;
782
783 /* best quality defaults */
784 sf->RD = 1;
785 sf->search_method = NSTEP;
786 sf->improved_quant = 1;
787 sf->improved_dct = 1;
788 sf->auto_filter = 1;
789 sf->recode_loop = 1;
790 sf->quarter_pixel_search = 1;
791 sf->half_pixel_search = 1;
792 sf->iterative_sub_pixel = 1;
793 sf->optimize_coefficients = 1;
794 sf->use_fastquant_for_pick = 0;
795 sf->no_skip_block4x4_search = 1;
796
797 sf->first_step = 0;
798 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
799 sf->improved_mv_pred = 1;
800
801 /* default thresholds to 0 */
802 for (i = 0; i < MAX_MODES; ++i) sf->thresh_mult[i] = 0;
803
804 /* Count enabled references */
805 ref_frames = 1;
806 if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frames++;
807 if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frames++;
808 if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frames++;
809
810 /* Convert speed to continuous range, with clamping */
811 if (Mode == 0) {
812 Speed = 0;
813 } else if (Mode == 2) {
814 Speed = RT(Speed);
815 } else {
816 if (Speed > 5) Speed = 5;
817 Speed = GOOD(Speed);
818 }
819
820 sf->thresh_mult[THR_ZERO1] = sf->thresh_mult[THR_NEAREST1] =
821 sf->thresh_mult[THR_NEAR1] = sf->thresh_mult[THR_DC] = 0; /* always */
822
823 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO3] =
824 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST3] =
825 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR3] =
826 speed_map(Speed, thresh_mult_map_znn);
827
828 sf->thresh_mult[THR_V_PRED] = sf->thresh_mult[THR_H_PRED] =
829 speed_map(Speed, thresh_mult_map_vhpred);
830 sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
831 sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
832 sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
833 sf->thresh_mult[THR_NEW2] = sf->thresh_mult[THR_NEW3] =
834 speed_map(Speed, thresh_mult_map_new2);
835 sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
836 sf->thresh_mult[THR_SPLIT2] = sf->thresh_mult[THR_SPLIT3] =
837 speed_map(Speed, thresh_mult_map_split2);
838
839 // Special case for temporal layers.
840 // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
841 // used as second reference. We don't modify thresholds for ALTREF case
842 // since ALTREF is usually used as long-term reference in temporal layers.
843 if ((cpi->Speed <= 6) && (cpi->oxcf.number_of_layers > 1) &&
844 (cpi->ref_frame_flags & VP8_LAST_FRAME) &&
845 (cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
846 if (cpi->closest_reference_frame == GOLDEN_FRAME) {
847 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
848 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
849 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
850 } else {
851 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
852 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
853 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;
854 }
855 }
856
857 cpi->mode_check_freq[THR_ZERO1] = cpi->mode_check_freq[THR_NEAREST1] =
858 cpi->mode_check_freq[THR_NEAR1] = cpi->mode_check_freq[THR_TM] =
859 cpi->mode_check_freq[THR_DC] = 0; /* always */
860
861 cpi->mode_check_freq[THR_ZERO2] = cpi->mode_check_freq[THR_ZERO3] =
862 cpi->mode_check_freq[THR_NEAREST2] = cpi->mode_check_freq[THR_NEAREST3] =
863 speed_map(Speed, mode_check_freq_map_zn2);
864
865 cpi->mode_check_freq[THR_NEAR2] = cpi->mode_check_freq[THR_NEAR3] =
866 speed_map(Speed, mode_check_freq_map_near2);
867
868 cpi->mode_check_freq[THR_V_PRED] = cpi->mode_check_freq[THR_H_PRED] =
869 cpi->mode_check_freq[THR_B_PRED] =
870 speed_map(Speed, mode_check_freq_map_vhbpred);
871
872 // For real-time mode at speed 10 keep the mode_check_freq threshold
873 // for NEW1 similar to that of speed 9.
874 Speed2 = Speed;
875 if (cpi->Speed == 10 && Mode == 2) Speed2 = RT(9);
876 cpi->mode_check_freq[THR_NEW1] = speed_map(Speed2, mode_check_freq_map_new1);
877
878 cpi->mode_check_freq[THR_NEW2] = cpi->mode_check_freq[THR_NEW3] =
879 speed_map(Speed, mode_check_freq_map_new2);
880
881 cpi->mode_check_freq[THR_SPLIT1] =
882 speed_map(Speed, mode_check_freq_map_split1);
883 cpi->mode_check_freq[THR_SPLIT2] = cpi->mode_check_freq[THR_SPLIT3] =
884 speed_map(Speed, mode_check_freq_map_split2);
885 Speed = cpi->Speed;
886 switch (Mode) {
887 #if !CONFIG_REALTIME_ONLY
888 case 0: /* best quality mode */
889 sf->first_step = 0;
890 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
891 break;
892 case 1:
893 case 3:
894 if (Speed > 0) {
895 /* Disable coefficient optimization above speed 0 */
896 sf->optimize_coefficients = 0;
897 sf->use_fastquant_for_pick = 1;
898 sf->no_skip_block4x4_search = 0;
899
900 sf->first_step = 1;
901 }
902
903 if (Speed > 2) {
904 sf->improved_quant = 0;
905 sf->improved_dct = 0;
906
907 /* Only do recode loop on key frames, golden frames and
908 * alt ref frames
909 */
910 sf->recode_loop = 2;
911 }
912
913 if (Speed > 3) {
914 sf->auto_filter = 1;
915 sf->recode_loop = 0; /* recode loop off */
916 sf->RD = 0; /* Turn rd off */
917 }
918
919 if (Speed > 4) {
920 sf->auto_filter = 0; /* Faster selection of loop filter */
921 }
922
923 break;
924 #endif
925 case 2:
926 sf->optimize_coefficients = 0;
927 sf->recode_loop = 0;
928 sf->auto_filter = 1;
929 sf->iterative_sub_pixel = 1;
930 sf->search_method = NSTEP;
931
932 if (Speed > 0) {
933 sf->improved_quant = 0;
934 sf->improved_dct = 0;
935
936 sf->use_fastquant_for_pick = 1;
937 sf->no_skip_block4x4_search = 0;
938 sf->first_step = 1;
939 }
940
941 if (Speed > 2) sf->auto_filter = 0; /* Faster selection of loop filter */
942
943 if (Speed > 3) {
944 sf->RD = 0;
945 sf->auto_filter = 1;
946 }
947
948 if (Speed > 4) {
949 sf->auto_filter = 0; /* Faster selection of loop filter */
950 sf->search_method = HEX;
951 sf->iterative_sub_pixel = 0;
952 }
953
954 if (Speed > 6) {
955 unsigned int sum = 0;
956 unsigned int total_mbs = cm->MBs;
957 int thresh;
958 unsigned int total_skip;
959
960 int min = 2000;
961
962 if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout;
963
964 min >>= 7;
965
966 for (i = 0; i < min; ++i) {
967 sum += cpi->mb.error_bins[i];
968 }
969
970 total_skip = sum;
971 sum = 0;
972
973 /* i starts from 2 to make sure thresh starts from 2048 */
974 for (; i < 1024; ++i) {
975 sum += cpi->mb.error_bins[i];
976
977 if (10 * sum >=
978 (unsigned int)(cpi->Speed - 6) * (total_mbs - total_skip)) {
979 break;
980 }
981 }
982
983 i--;
984 thresh = (i << 7);
985
986 if (thresh < 2000) thresh = 2000;
987
988 if (ref_frames > 1) {
989 sf->thresh_mult[THR_NEW1] = thresh;
990 sf->thresh_mult[THR_NEAREST1] = thresh >> 1;
991 sf->thresh_mult[THR_NEAR1] = thresh >> 1;
992 }
993
994 if (ref_frames > 2) {
995 sf->thresh_mult[THR_NEW2] = thresh << 1;
996 sf->thresh_mult[THR_NEAREST2] = thresh;
997 sf->thresh_mult[THR_NEAR2] = thresh;
998 }
999
1000 if (ref_frames > 3) {
1001 sf->thresh_mult[THR_NEW3] = thresh << 1;
1002 sf->thresh_mult[THR_NEAREST3] = thresh;
1003 sf->thresh_mult[THR_NEAR3] = thresh;
1004 }
1005
1006 sf->improved_mv_pred = 0;
1007 }
1008
1009 if (Speed > 8) sf->quarter_pixel_search = 0;
1010
1011 if (cm->version == 0) {
1012 cm->filter_type = NORMAL_LOOPFILTER;
1013
1014 if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER;
1015 } else {
1016 cm->filter_type = SIMPLE_LOOPFILTER;
1017 }
1018
1019 /* This has a big hit on quality. Last resort */
1020 if (Speed >= 15) sf->half_pixel_search = 0;
1021
1022 memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
1023
1024 } /* switch */
1025
1026 /* Slow quant, dct and trellis not worthwhile for first pass
1027 * so make sure they are always turned off.
1028 */
1029 if (cpi->pass == 1) {
1030 sf->improved_quant = 0;
1031 sf->optimize_coefficients = 0;
1032 sf->improved_dct = 0;
1033 }
1034
1035 if (cpi->sf.search_method == NSTEP) {
1036 vp8_init3smotion_compensation(&cpi->mb,
1037 cm->yv12_fb[cm->lst_fb_idx].y_stride);
1038 } else if (cpi->sf.search_method == DIAMOND) {
1039 vp8_init_dsmotion_compensation(&cpi->mb,
1040 cm->yv12_fb[cm->lst_fb_idx].y_stride);
1041 }
1042
1043 if (cpi->sf.improved_dct) {
1044 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1045 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1046 } else {
1047 /* No fast FDCT defined for any platform at this time. */
1048 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1049 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1050 }
1051
1052 cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
1053
1054 if (cpi->sf.improved_quant) {
1055 cpi->mb.quantize_b = vp8_regular_quantize_b;
1056 } else {
1057 cpi->mb.quantize_b = vp8_fast_quantize_b;
1058 }
1059 if (cpi->sf.improved_quant != last_improved_quant) vp8cx_init_quantizer(cpi);
1060
1061 if (cpi->sf.iterative_sub_pixel == 1) {
1062 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
1063 } else if (cpi->sf.quarter_pixel_search) {
1064 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
1065 } else if (cpi->sf.half_pixel_search) {
1066 cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
1067 } else {
1068 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1069 }
1070
1071 if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1) {
1072 cpi->mb.optimize = 1;
1073 } else {
1074 cpi->mb.optimize = 0;
1075 }
1076
1077 if (cpi->common.full_pixel) {
1078 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1079 }
1080
1081 #ifdef SPEEDSTATS
1082 frames_at_speed[cpi->Speed]++;
1083 #endif
1084 }
1085 #undef GOOD
1086 #undef RT
1087
1088 static void alloc_raw_frame_buffers(VP8_COMP *cpi) {
1089 #if VP8_TEMPORAL_ALT_REF
1090 int width = (cpi->oxcf.Width + 15) & ~15;
1091 int height = (cpi->oxcf.Height + 15) & ~15;
1092 #endif
1093
1094 cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
1095 cpi->oxcf.lag_in_frames);
1096 if (!cpi->lookahead) {
1097 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1098 "Failed to allocate lag buffers");
1099 }
1100
1101 #if VP8_TEMPORAL_ALT_REF
1102
1103 if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, width, height,
1104 VP8BORDERINPIXELS)) {
1105 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1106 "Failed to allocate altref buffer");
1107 }
1108
1109 #endif
1110 }
1111
1112 static void dealloc_raw_frame_buffers(VP8_COMP *cpi) {
1113 #if VP8_TEMPORAL_ALT_REF
1114 vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
1115 #endif
1116 vp8_lookahead_destroy(cpi->lookahead);
1117 }
1118
1119 static int vp8_alloc_partition_data(VP8_COMP *cpi) {
1120 vpx_free(cpi->mb.pip);
1121
1122 cpi->mb.pip =
1123 vpx_calloc((cpi->common.mb_cols + 1) * (cpi->common.mb_rows + 1),
1124 sizeof(PARTITION_INFO));
1125 if (!cpi->mb.pip) return 1;
1126
1127 cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
1128
1129 return 0;
1130 }
1131
1132 void vp8_alloc_compressor_data(VP8_COMP *cpi) {
1133 VP8_COMMON *cm = &cpi->common;
1134
1135 int width = cm->Width;
1136 int height = cm->Height;
1137
1138 if (vp8_alloc_frame_buffers(cm, width, height)) {
1139 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1140 "Failed to allocate frame buffers");
1141 }
1142
1143 if (vp8_alloc_partition_data(cpi)) {
1144 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1145 "Failed to allocate partition data");
1146 }
1147
1148 if ((width & 0xf) != 0) width += 16 - (width & 0xf);
1149
1150 if ((height & 0xf) != 0) height += 16 - (height & 0xf);
1151
1152 if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame, width, height,
1153 VP8BORDERINPIXELS)) {
1154 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1155 "Failed to allocate last frame buffer");
1156 }
1157
1158 if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height,
1159 VP8BORDERINPIXELS)) {
1160 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1161 "Failed to allocate scaled source buffer");
1162 }
1163
1164 vpx_free(cpi->tok);
1165
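/* Token buffer sizing: 24 * 16 = 384 is the number of coefficients in one
 * macroblock (a 16x16 luma block plus two 8x8 chroma blocks), giving a
 * worst case of roughly one token per coefficient, for either one MB per
 * thread (on-the-fly bitpacking) or the whole frame. */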
1166 {
1167 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
1168 unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
1169 #else
1170 unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
1171 #endif
1172 CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
1173 }
1174
1175 /* Data used for real time vc mode to see if gf needs refreshing */
1176 cpi->zeromv_count = 0;
1177
1178 /* Structures used to monitor GF usage */
1179 vpx_free(cpi->gf_active_flags);
1180 CHECK_MEM_ERROR(
1181 cpi->gf_active_flags,
1182 vpx_calloc(sizeof(*cpi->gf_active_flags), cm->mb_rows * cm->mb_cols));
1183 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
1184
1185 vpx_free(cpi->mb_activity_map);
1186 CHECK_MEM_ERROR(
1187 cpi->mb_activity_map,
1188 vpx_calloc(sizeof(*cpi->mb_activity_map), cm->mb_rows * cm->mb_cols));
1189
1190 /* allocate memory for storing last frame's MVs for MV prediction. */
1191 vpx_free(cpi->lfmv);
1192 CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1193 sizeof(*cpi->lfmv)));
1194 vpx_free(cpi->lf_ref_frame_sign_bias);
1195 CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias,
1196 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1197 sizeof(*cpi->lf_ref_frame_sign_bias)));
1198 vpx_free(cpi->lf_ref_frame);
1199 CHECK_MEM_ERROR(cpi->lf_ref_frame,
1200 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1201 sizeof(*cpi->lf_ref_frame)));
1202
1203 /* Create the encoder segmentation map and set all entries to 0 */
1204 vpx_free(cpi->segmentation_map);
1205 CHECK_MEM_ERROR(
1206 cpi->segmentation_map,
1207 vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->segmentation_map)));
1208 cpi->cyclic_refresh_mode_index = 0;
1209 vpx_free(cpi->active_map);
1210 CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cm->mb_rows * cm->mb_cols,
1211 sizeof(*cpi->active_map)));
1212 memset(cpi->active_map, 1, (cm->mb_rows * cm->mb_cols));
1213
1214 #if CONFIG_MULTITHREAD
1215 if (width < 640) {
1216 cpi->mt_sync_range = 1;
1217 } else if (width <= 1280) {
1218 cpi->mt_sync_range = 4;
1219 } else if (width <= 2560) {
1220 cpi->mt_sync_range = 8;
1221 } else {
1222 cpi->mt_sync_range = 16;
1223 }
1224
1225 if (cpi->oxcf.multi_threaded > 1) {
1226 int i;
1227
1228 vpx_free(cpi->mt_current_mb_col);
1229 CHECK_MEM_ERROR(cpi->mt_current_mb_col,
1230 vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));
1231 for (i = 0; i < cm->mb_rows; ++i)
1232 vpx_atomic_init(&cpi->mt_current_mb_col[i], 0);
1233 }
1234
1235 #endif
1236
1237 vpx_free(cpi->tplist);
1238 CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));
1239
1240 #if CONFIG_TEMPORAL_DENOISING
1241 if (cpi->oxcf.noise_sensitivity > 0) {
1242 vp8_denoiser_free(&cpi->denoiser);
1243 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1244 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1245 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1246 "Failed to allocate denoiser");
1247 }
1248 }
1249 #endif
1250 }
1251
1252 /* Quant MOD */
1253 static const int q_trans[] = {
1254 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 17, 18, 19,
1255 20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41,
1256 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79,
1257 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127,
1258 };
1259
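/* q_trans[] maps the 0-63 quantizer exposed in the public API onto the
 * internal 0-127 qindex scale; vp8_reverse_trans() performs the inverse
 * lookup, returning the smallest external value whose mapping is at least x.
 * Illustrative example: vp8_reverse_trans(7) == 6, since q_trans[6] == 7. */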
1260 int vp8_reverse_trans(int x) {
1261 int i;
1262
1263 for (i = 0; i < 64; ++i) {
1264 if (q_trans[i] >= x) return i;
1265 }
1266
1267 return 63;
1268 }
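/* Re-derives the per-frame bandwidth, the minimum frame bandwidth and the
 * maximum golden/altref interval whenever the observed or configured frame
 * rate changes. */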
1269 void vp8_new_framerate(VP8_COMP *cpi, double framerate) {
1270 if (framerate < .1) framerate = 30;
1271
1272 cpi->framerate = framerate;
1273 cpi->output_framerate = framerate;
1274 cpi->per_frame_bandwidth =
1275 (int)round(cpi->oxcf.target_bandwidth / cpi->output_framerate);
1276 cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
1277 cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
1278 cpi->oxcf.two_pass_vbrmin_section / 100);
1279
1280 /* Set Maximum gf/arf interval */
1281 cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
1282
1283 if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12;
1284
1285 /* Extended interval for genuinely static scenes */
1286 cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
1287
1288 /* Special conditions when altr ref frame enabled in lagged compress mode */
1289 if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
1290 if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) {
1291 cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1292 }
1293
1294 if (cpi->twopass.static_scene_max_gf_interval >
1295 cpi->oxcf.lag_in_frames - 1) {
1296 cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1297 }
1298 }
1299
1300 if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval) {
1301 cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
1302 }
1303 }
1304
1305 static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1306 VP8_COMMON *cm = &cpi->common;
1307
1308 cpi->oxcf = *oxcf;
1309
1310 cpi->auto_gold = 1;
1311 cpi->auto_adjust_gold_quantizer = 1;
1312
1313 cm->version = oxcf->Version;
1314 vp8_setup_version(cm);
1315
1316 /* Frame rate is not available on the first frame, as it's derived from
1317 * the observed timestamps. The actual value used here doesn't matter
1318 * too much, as it will adapt quickly.
1319 */
1320 if (oxcf->timebase.num > 0) {
1321 cpi->framerate =
1322 (double)(oxcf->timebase.den) / (double)(oxcf->timebase.num);
1323 } else {
1324 cpi->framerate = 30;
1325 }
1326
1327 /* If the reciprocal of the timebase seems like a reasonable framerate,
1328 * then use that as a guess, otherwise use 30.
1329 */
1330 if (cpi->framerate > 180) cpi->framerate = 30;
1331
1332 cpi->ref_framerate = cpi->framerate;
1333
1334 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
1335
1336 cm->refresh_golden_frame = 0;
1337 cm->refresh_last_frame = 1;
1338 cm->refresh_entropy_probs = 1;
1339
1340 /* change includes all joint functionality */
1341 vp8_change_config(cpi, oxcf);
1342
1343 /* Initialize active best and worst q and average q values. */
1344 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1345 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1346 cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
1347
1348 /* Initialise the starting buffer levels */
1349 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
1350 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
1351
1352 cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
1353 cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
1354 cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
1355 cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
1356
1357 cpi->total_actual_bits = 0;
1358 cpi->total_target_vs_actual = 0;
1359
1360 /* Temporal scalability */
1361 if (cpi->oxcf.number_of_layers > 1) {
1362 unsigned int i;
1363 double prev_layer_framerate = 0;
1364
1365 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
1366 vp8_init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
1367 prev_layer_framerate =
1368 cpi->output_framerate / cpi->oxcf.rate_decimator[i];
1369 }
1370 }
1371
1372 #if VP8_TEMPORAL_ALT_REF
1373 {
1374 int i;
1375
1376 cpi->fixed_divide[0] = 0;
1377
1378 for (i = 1; i < 512; ++i) cpi->fixed_divide[i] = 0x80000 / i;
1379 }
1380 #endif
1381 }
1382
1383 void vp8_update_layer_contexts(VP8_COMP *cpi) {
1384 VP8_CONFIG *oxcf = &cpi->oxcf;
1385
1386 /* Update snapshots of the layer contexts to reflect new parameters */
1387 if (oxcf->number_of_layers > 1) {
1388 unsigned int i;
1389 double prev_layer_framerate = 0;
1390
1391 assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
1392 for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i) {
1393 LAYER_CONTEXT *lc = &cpi->layer_context[i];
1394
1395 lc->framerate = cpi->ref_framerate / oxcf->rate_decimator[i];
1396 lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
1397
1398 lc->starting_buffer_level = rescale(
1399 (int)oxcf->starting_buffer_level_in_ms, lc->target_bandwidth, 1000);
1400
1401 if (oxcf->optimal_buffer_level == 0) {
1402 lc->optimal_buffer_level = lc->target_bandwidth / 8;
1403 } else {
1404 lc->optimal_buffer_level = rescale(
1405 (int)oxcf->optimal_buffer_level_in_ms, lc->target_bandwidth, 1000);
1406 }
1407
1408 if (oxcf->maximum_buffer_size == 0) {
1409 lc->maximum_buffer_size = lc->target_bandwidth / 8;
1410 } else {
1411 lc->maximum_buffer_size = rescale((int)oxcf->maximum_buffer_size_in_ms,
1412 lc->target_bandwidth, 1000);
1413 }
1414
1415 /* Work out the average size of a frame within this layer */
1416 if (i > 0) {
1417 lc->avg_frame_size_for_layer =
1418 (int)round((oxcf->target_bitrate[i] - oxcf->target_bitrate[i - 1]) *
1419 1000 / (lc->framerate - prev_layer_framerate));
1420 }
1421
1422 prev_layer_framerate = lc->framerate;
1423 }
1424 }
1425 }
1426
1427 void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1428 VP8_COMMON *cm = &cpi->common;
1429 int last_w, last_h;
1430 unsigned int prev_number_of_layers;
1431 unsigned int raw_target_rate;
1432
1433 if (!cpi) return;
1434
1435 if (!oxcf) return;
1436
1437 if (cm->version != oxcf->Version) {
1438 cm->version = oxcf->Version;
1439 vp8_setup_version(cm);
1440 }
1441
1442 last_w = cpi->oxcf.Width;
1443 last_h = cpi->oxcf.Height;
1444 prev_number_of_layers = cpi->oxcf.number_of_layers;
1445
1446 cpi->oxcf = *oxcf;
1447
1448 switch (cpi->oxcf.Mode) {
1449 case MODE_REALTIME:
1450 cpi->pass = 0;
1451 cpi->compressor_speed = 2;
1452
1453 if (cpi->oxcf.cpu_used < -16) {
1454 cpi->oxcf.cpu_used = -16;
1455 }
1456
1457 if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16;
1458
1459 break;
1460
1461 case MODE_GOODQUALITY:
1462 cpi->pass = 0;
1463 cpi->compressor_speed = 1;
1464
1465 if (cpi->oxcf.cpu_used < -5) {
1466 cpi->oxcf.cpu_used = -5;
1467 }
1468
1469 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1470
1471 break;
1472
1473 case MODE_BESTQUALITY:
1474 cpi->pass = 0;
1475 cpi->compressor_speed = 0;
1476 break;
1477
1478 case MODE_FIRSTPASS:
1479 cpi->pass = 1;
1480 cpi->compressor_speed = 1;
1481 break;
1482 case MODE_SECONDPASS:
1483 cpi->pass = 2;
1484 cpi->compressor_speed = 1;
1485
1486 if (cpi->oxcf.cpu_used < -5) {
1487 cpi->oxcf.cpu_used = -5;
1488 }
1489
1490 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1491
1492 break;
1493 case MODE_SECONDPASS_BEST:
1494 cpi->pass = 2;
1495 cpi->compressor_speed = 0;
1496 break;
1497 }
1498
1499 if (cpi->pass == 0) cpi->auto_worst_q = 1;
1500
1501 cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
1502 cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
1503 cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
1504
1505 if (oxcf->fixed_q >= 0) {
1506 if (oxcf->worst_allowed_q < 0) {
1507 cpi->oxcf.fixed_q = q_trans[0];
1508 } else {
1509 cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];
1510 }
1511
1512 if (oxcf->alt_q < 0) {
1513 cpi->oxcf.alt_q = q_trans[0];
1514 } else {
1515 cpi->oxcf.alt_q = q_trans[oxcf->alt_q];
1516 }
1517
1518 if (oxcf->key_q < 0) {
1519 cpi->oxcf.key_q = q_trans[0];
1520 } else {
1521 cpi->oxcf.key_q = q_trans[oxcf->key_q];
1522 }
1523
1524 if (oxcf->gold_q < 0) {
1525 cpi->oxcf.gold_q = q_trans[0];
1526 } else {
1527 cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
1528 }
1529 }
1530
1531 cpi->ext_refresh_frame_flags_pending = 0;
1532
1533 cpi->baseline_gf_interval =
1534 cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
1535
1536 // GF behavior for 1 pass CBR, used when error_resilience is off.
1537 if (!cpi->oxcf.error_resilient_mode &&
1538 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1539 cpi->oxcf.Mode == MODE_REALTIME)
1540 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1541
1542 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
1543 cpi->oxcf.token_partitions = 3;
1544 #endif
1545
1546 if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) {
1547 cm->multi_token_partition = (TOKEN_PARTITION)cpi->oxcf.token_partitions;
1548 }
1549
1550 setup_features(cpi);
1551
1552 if (!cpi->use_roi_static_threshold) {
1553 int i;
1554 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
1555 cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
1556 }
1557 }
1558
1559 /* At the moment the first order values may not be > MAXQ */
1560 if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ;
1561
1562 /* local file playback mode == really big buffer */
1563 if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
1564 cpi->oxcf.starting_buffer_level = 60000;
1565 cpi->oxcf.optimal_buffer_level = 60000;
1566 cpi->oxcf.maximum_buffer_size = 240000;
1567 cpi->oxcf.starting_buffer_level_in_ms = 60000;
1568 cpi->oxcf.optimal_buffer_level_in_ms = 60000;
1569 cpi->oxcf.maximum_buffer_size_in_ms = 240000;
1570 }
1571
1572 raw_target_rate = (unsigned int)((int64_t)cpi->oxcf.Width * cpi->oxcf.Height *
1573 8 * 3 * cpi->framerate / 1000);
1574 if (cpi->oxcf.target_bandwidth > raw_target_rate)
1575 cpi->oxcf.target_bandwidth = raw_target_rate;
1576 /* Convert target bandwidth from Kbit/s to Bit/s */
1577 cpi->oxcf.target_bandwidth *= 1000;
1578
1579 cpi->oxcf.starting_buffer_level = rescale(
1580 (int)cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1581
1582 /* Set or reset optimal and maximum buffer levels. */
1583 if (cpi->oxcf.optimal_buffer_level == 0) {
1584 cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
1585 } else {
1586 cpi->oxcf.optimal_buffer_level = rescale(
1587 (int)cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1588 }
1589
1590 if (cpi->oxcf.maximum_buffer_size == 0) {
1591 cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
1592 } else {
1593 cpi->oxcf.maximum_buffer_size = rescale((int)cpi->oxcf.maximum_buffer_size,
1594 cpi->oxcf.target_bandwidth, 1000);
1595 }
1596 // Under a configuration change, where maximum_buffer_size may change,
1597 // keep buffer level clipped to the maximum allowed buffer size.
1598 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
1599 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
1600 cpi->buffer_level = cpi->bits_off_target;
1601 }
1602
1603 /* Set up frame rate and related parameters rate control values. */
1604 vp8_new_framerate(cpi, cpi->framerate);
1605
1606 /* Set absolute upper and lower quality limits */
1607 cpi->worst_quality = cpi->oxcf.worst_allowed_q;
1608 cpi->best_quality = cpi->oxcf.best_allowed_q;
1609
1610 /* active values should only be modified if out of new range */
1611 if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) {
1612 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1613 }
1614 /* less likely */
1615 else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) {
1616 cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
1617 }
1618 if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) {
1619 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1620 }
1621 /* less likely */
1622 else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) {
1623 cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
1624 }
1625
1626 cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
1627
1628 cpi->cq_target_quality = cpi->oxcf.cq_level;
1629
1630 /* Only allow dropped frames in buffered mode */
1631 cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
1632
1633 cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
1634
1635 // Check if the number of temporal layers has changed, and if so reset the
1636 // pattern counter and set/initialize the temporal layer context for the
1637 // new layer configuration.
1638 if (cpi->oxcf.number_of_layers != prev_number_of_layers) {
1639 // If the number of temporal layers are changed we must start at the
1640 // base of the pattern cycle, so set the layer id to 0 and reset
1641 // the temporal pattern counter.
1642 if (cpi->temporal_layer_id > 0) {
1643 cpi->temporal_layer_id = 0;
1644 }
1645 cpi->temporal_pattern_counter = 0;
1646 vp8_reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
1647 }
1648
1649 if (!cpi->initial_width) {
1650 cpi->initial_width = cpi->oxcf.Width;
1651 cpi->initial_height = cpi->oxcf.Height;
1652 }
1653
1654 cm->Width = cpi->oxcf.Width;
1655 cm->Height = cpi->oxcf.Height;
1656 assert(cm->Width <= cpi->initial_width);
1657 assert(cm->Height <= cpi->initial_height);
1658
1659 /* TODO(jkoleszar): if an internal spatial resampling is active,
1660 * and we downsize the input image, maybe we should clear the
1661 * internal scale immediately rather than waiting for it to
1662 * correct.
1663 */
1664
1665 /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
1666 if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7;
1667
1668 cm->sharpness_level = cpi->oxcf.Sharpness;
1669
1670 if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL) {
1671 int hr, hs, vr, vs;
1672
1673 Scale2Ratio(cm->horiz_scale, &hr, &hs);
1674 Scale2Ratio(cm->vert_scale, &vr, &vs);
1675
1676 /* always go to the next whole number */
1677 cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
1678 cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
1679 }
1680
1681 if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height) {
1682 cpi->force_next_frame_intra = 1;
1683 }
1684
1685 if (((cm->Width + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
1686 ((cm->Height + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
1687 cm->yv12_fb[cm->lst_fb_idx].y_width == 0) {
1688 dealloc_raw_frame_buffers(cpi);
1689 alloc_raw_frame_buffers(cpi);
1690 vp8_alloc_compressor_data(cpi);
1691 }
1692
1693 if (cpi->oxcf.fixed_q >= 0) {
1694 cpi->last_q[0] = cpi->oxcf.fixed_q;
1695 cpi->last_q[1] = cpi->oxcf.fixed_q;
1696 }
1697
1698 cpi->Speed = cpi->oxcf.cpu_used;
1699
1700   /* Force allow_lag to 0 if lag_in_frames is 0. */
1701 if (cpi->oxcf.lag_in_frames == 0) {
1702 cpi->oxcf.allow_lag = 0;
1703 }
1704 /* Limit on lag buffers as these are not currently dynamically allocated */
1705 else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
1706 cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
1707 }
1708
1709 /* YX Temp */
1710 cpi->alt_ref_source = NULL;
1711 cpi->is_src_frame_alt_ref = 0;
1712
1713 #if CONFIG_TEMPORAL_DENOISING
1714 if (cpi->oxcf.noise_sensitivity) {
1715 if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc) {
1716 int width = (cpi->oxcf.Width + 15) & ~15;
1717 int height = (cpi->oxcf.Height + 15) & ~15;
1718 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1719 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1720 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1721 "Failed to allocate denoiser");
1722 }
1723 }
1724 }
1725 #endif
1726
1727 #if 0
1728 /* Experimental RD Code */
1729 cpi->frame_distortion = 0;
1730 cpi->last_frame_distortion = 0;
1731 #endif
1732 }
1733
1734 #ifndef M_LOG2_E
1735 #define M_LOG2_E 0.693147180559945309417
1736 #endif
1737 #define log2f(x) (log(x) / (float)M_LOG2_E)
1738
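/* Build the motion-vector SAD cost tables: index 0 gets a fixed cost, and
 * costs for +/-i (full-pel units) grow with the log of the vector magnitude,
 * symmetrically about zero.
 */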
1739 static void cal_mvsadcosts(int *mvsadcost[2]) {
1740 int i = 1;
1741
1742 mvsadcost[0][0] = 300;
1743 mvsadcost[1][0] = 300;
1744
1745 do {
1746 double z = 256 * (2 * (log2f(8 * i) + .6));
1747 mvsadcost[0][i] = (int)z;
1748 mvsadcost[1][i] = (int)z;
1749 mvsadcost[0][-i] = (int)z;
1750 mvsadcost[1][-i] = (int)z;
1751 } while (++i <= mvfp_max);
1752 }
1753
1754 struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
1755 int i;
1756
1757 VP8_COMP *cpi;
1758 VP8_COMMON *cm;
1759
1760 cpi = vpx_memalign(32, sizeof(VP8_COMP));
1761 /* Check that the CPI instance is valid */
1762 if (!cpi) return 0;
1763
1764 cm = &cpi->common;
1765
1766 memset(cpi, 0, sizeof(VP8_COMP));
1767
1768 if (setjmp(cm->error.jmp)) {
1769 cpi->common.error.setjmp = 0;
1770 vp8_remove_compressor(&cpi);
1771 return 0;
1772 }
1773
1774 cpi->common.error.setjmp = 1;
1775
1776 CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site),
1777 (MAX_MVSEARCH_STEPS * 8) + 1));
1778
1779 vp8_create_common(&cpi->common);
1780
1781 init_config(cpi, oxcf);
1782
1783 memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob,
1784 sizeof(vp8cx_base_skip_false_prob));
1785 cpi->common.current_video_frame = 0;
1786 cpi->temporal_pattern_counter = 0;
1787 cpi->temporal_layer_id = -1;
1788 cpi->kf_overspend_bits = 0;
1789 cpi->kf_bitrate_adjustment = 0;
1790 cpi->frames_till_gf_update_due = 0;
1791 cpi->gf_overspend_bits = 0;
1792 cpi->non_gf_bitrate_adjustment = 0;
1793 cpi->prob_last_coded = 128;
1794 cpi->prob_gf_coded = 128;
1795 cpi->prob_intra_coded = 63;
1796
1797 /* Prime the recent reference frame usage counters.
1798 * Hereafter they will be maintained as a sort of moving average
1799 */
1800 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
1801 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
1802 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
1803 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
1804
1805 /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
1806 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
1807
1808 cpi->twopass.gf_decay_rate = 0;
1809 cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
1810
1811 cpi->gold_is_last = 0;
1812 cpi->alt_is_last = 0;
1813 cpi->gold_is_alt = 0;
1814
1815 cpi->active_map_enabled = 0;
1816
1817 cpi->use_roi_static_threshold = 0;
1818
1819 #if 0
1820 /* Experimental code for lagged and one pass */
1821 /* Initialise one_pass GF frames stats */
1822 /* Update stats used for GF selection */
1823 if (cpi->pass == 0)
1824 {
1825 cpi->one_pass_frame_index = 0;
1826
1827 for (i = 0; i < MAX_LAG_BUFFERS; ++i)
1828 {
1829 cpi->one_pass_frame_stats[i].frames_so_far = 0;
1830 cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
1831 cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
1832 cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
1833 cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
1834 cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
1835 cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
1836 cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
1837 cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
1838 }
1839 }
1840 #endif
1841
1842 cpi->mse_source_denoised = 0;
1843
1844 /* Should we use the cyclic refresh method.
1845 * Currently there is no external control for this.
1846 * Enable it for error_resilient_mode, or for 1 pass CBR mode.
1847 */
1848 cpi->cyclic_refresh_mode_enabled =
1849 (cpi->oxcf.error_resilient_mode ||
1850 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1851 cpi->oxcf.Mode <= 2));
1852 cpi->cyclic_refresh_mode_max_mbs_perframe =
1853 (cpi->common.mb_rows * cpi->common.mb_cols) / 7;
1854 if (cpi->oxcf.number_of_layers == 1) {
1855 cpi->cyclic_refresh_mode_max_mbs_perframe =
1856 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
1857 } else if (cpi->oxcf.number_of_layers == 2) {
1858 cpi->cyclic_refresh_mode_max_mbs_perframe =
1859 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
1860 }
1861 cpi->cyclic_refresh_mode_index = 0;
1862 cpi->cyclic_refresh_q = 32;
1863
1864 // GF behavior for 1 pass CBR, used when error_resilience is off.
1865 cpi->gf_update_onepass_cbr = 0;
1866 cpi->gf_noboost_onepass_cbr = 0;
1867 if (!cpi->oxcf.error_resilient_mode &&
1868 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER && cpi->oxcf.Mode <= 2) {
1869 cpi->gf_update_onepass_cbr = 1;
1870 cpi->gf_noboost_onepass_cbr = 1;
1871 cpi->gf_interval_onepass_cbr =
1872 cpi->cyclic_refresh_mode_max_mbs_perframe > 0
1873 ? (2 * (cpi->common.mb_rows * cpi->common.mb_cols) /
1874 cpi->cyclic_refresh_mode_max_mbs_perframe)
1875 : 10;
1876 cpi->gf_interval_onepass_cbr =
1877 VPXMIN(40, VPXMAX(6, cpi->gf_interval_onepass_cbr));
1878 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1879 }
1880
1881 if (cpi->cyclic_refresh_mode_enabled) {
1882 CHECK_MEM_ERROR(cpi->cyclic_refresh_map,
1883 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1884 } else {
1885 cpi->cyclic_refresh_map = (signed char *)NULL;
1886 }
1887
1888 CHECK_MEM_ERROR(cpi->skin_map, vpx_calloc(cm->mb_rows * cm->mb_cols,
1889 sizeof(cpi->skin_map[0])));
1890
1891 CHECK_MEM_ERROR(cpi->consec_zero_last,
1892 vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
1893 CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias,
1894 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1895
1896   /* Initialize the feed-forward activity masking. */
1897 cpi->activity_avg = 90 << 12;
1898
1899 /* Give a sensible default for the first frame. */
1900 cpi->frames_since_key = 8;
1901 cpi->key_frame_frequency = cpi->oxcf.key_freq;
1902 cpi->this_key_frame_forced = 0;
1903 cpi->next_key_frame_forced = 0;
1904
1905 cpi->source_alt_ref_pending = 0;
1906 cpi->source_alt_ref_active = 0;
1907 cpi->common.refresh_alt_ref_frame = 0;
1908
1909 cpi->force_maxqp = 0;
1910 cpi->frames_since_last_drop_overshoot = 0;
1911 cpi->rt_always_update_correction_factor = 0;
1912
1913 cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
1914 #if CONFIG_INTERNAL_STATS
1915 cpi->b_calculate_ssimg = 0;
1916
1917 cpi->count = 0;
1918 cpi->bytes = 0;
1919
1920 if (cpi->b_calculate_psnr) {
1921 cpi->total_sq_error = 0.0;
1922 cpi->total_sq_error2 = 0.0;
1923 cpi->total_y = 0.0;
1924 cpi->total_u = 0.0;
1925 cpi->total_v = 0.0;
1926 cpi->total = 0.0;
1927 cpi->totalp_y = 0.0;
1928 cpi->totalp_u = 0.0;
1929 cpi->totalp_v = 0.0;
1930 cpi->totalp = 0.0;
1931 cpi->tot_recode_hits = 0;
1932 cpi->summed_quality = 0;
1933 cpi->summed_weights = 0;
1934 }
1935
1936 #endif
1937
1938 cpi->first_time_stamp_ever = 0x7FFFFFFF;
1939
1940 cpi->frames_till_gf_update_due = 0;
1941 cpi->key_frame_count = 1;
1942
1943 cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
1944 cpi->ni_tot_qi = 0;
1945 cpi->ni_frames = 0;
1946 cpi->total_byte_count = 0;
1947
1948 cpi->drop_frame = 0;
1949
1950 cpi->rate_correction_factor = 1.0;
1951 cpi->key_frame_rate_correction_factor = 1.0;
1952 cpi->gf_rate_correction_factor = 1.0;
1953 cpi->twopass.est_max_qcorrection_factor = 1.0;
1954
1955 for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
1956 cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
1957 }
1958
1959 #ifdef OUTPUT_YUV_SRC
1960 yuv_file = fopen("bd.yuv", "ab");
1961 #endif
1962 #ifdef OUTPUT_YUV_DENOISED
1963 yuv_denoised_file = fopen("denoised.yuv", "ab");
1964 #endif
1965 #ifdef OUTPUT_YUV_SKINMAP
1966 yuv_skinmap_file = fopen("skinmap.yuv", "wb");
1967 #endif
1968
1969 #if 0
1970 framepsnr = fopen("framepsnr.stt", "a");
1971 kf_list = fopen("kf_list.stt", "w");
1972 #endif
1973
1974 cpi->output_pkt_list = oxcf->output_pkt_list;
1975
1976 #if !CONFIG_REALTIME_ONLY
1977
1978 if (cpi->pass == 1) {
1979 vp8_init_first_pass(cpi);
1980 } else if (cpi->pass == 2) {
1981 size_t packet_sz = sizeof(FIRSTPASS_STATS);
1982 int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
1983
1984 cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
1985 cpi->twopass.stats_in = cpi->twopass.stats_in_start;
1986 cpi->twopass.stats_in_end =
1987 (void *)((char *)cpi->twopass.stats_in + (packets - 1) * packet_sz);
1988 vp8_init_second_pass(cpi);
1989 }
1990
1991 #endif
1992
1993 if (cpi->compressor_speed == 2) {
1994 cpi->avg_encode_time = 0;
1995 cpi->avg_pick_mode_time = 0;
1996 }
1997
1998 vp8_set_speed_features(cpi);
1999
2000 /* Set starting values of RD threshold multipliers (128 = *1) */
2001 for (i = 0; i < MAX_MODES; ++i) {
2002 cpi->mb.rd_thresh_mult[i] = 128;
2003 }
2004
2005 #if CONFIG_MULTITHREAD
2006 if (vp8cx_create_encoder_threads(cpi)) {
2007 vp8_remove_compressor(&cpi);
2008 return 0;
2009 }
2010 #endif
2011
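  /* Hook up the SAD, variance, sub-pixel variance and 4-way SAD function
   * pointers used by motion search for each supported block size.
   */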
2012 cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
2013 cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
2014 cpi->fn_ptr[BLOCK_16X16].svf = vpx_sub_pixel_variance16x16;
2015 cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;
2016
2017 cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
2018 cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
2019 cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8;
2020 cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;
2021
2022 cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
2023 cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
2024 cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16;
2025 cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;
2026
2027 cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
2028 cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
2029 cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8;
2030 cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;
2031
2032 cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
2033 cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
2034 cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4;
2035 cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;
2036
2037 #if VPX_ARCH_X86 || VPX_ARCH_X86_64
2038 cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
2039 cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
2040 cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
2041 cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
2042 cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
2043 #endif
2044
2045 cpi->diamond_search_sad = vp8_diamond_search_sad;
2046 cpi->refining_search_sad = vp8_refining_search_sad;
2047
2048 /* make sure frame 1 is okay */
2049 cpi->mb.error_bins[0] = cpi->common.MBs;
2050
2051 /* vp8cx_init_quantizer() is first called here. Add check in
2052 * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
2053 * called later when needed. This will avoid unnecessary calls of
2054 * vp8cx_init_quantizer() for every frame.
2055 */
2056 vp8cx_init_quantizer(cpi);
2057
2058 vp8_loop_filter_init(cm);
2059
2060 cpi->common.error.setjmp = 0;
2061
2062 #if CONFIG_MULTI_RES_ENCODING
2063
2064 /* Calculate # of MBs in a row in lower-resolution level image. */
2065 if (cpi->oxcf.mr_encoder_id > 0) vp8_cal_low_res_mb_cols(cpi);
2066
2067 #endif
2068
2069   /* Set up RD cost pointers in the MACROBLOCK struct */
2070
2071 cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max + 1];
2072 cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max + 1];
2073 cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max + 1];
2074 cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max + 1];
2075
2076 cal_mvsadcosts(cpi->mb.mvsadcost);
2077
2078 cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
2079 cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
2080 cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
2081 cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
2082 cpi->mb.token_costs = cpi->rd_costs.token_costs;
2083
2084 /* setup block ptrs & offsets */
2085 vp8_setup_block_ptrs(&cpi->mb);
2086 vp8_setup_block_dptrs(&cpi->mb.e_mbd);
2087
2088 return cpi;
2089 }
2090
2091 void vp8_remove_compressor(VP8_COMP **comp) {
2092 VP8_COMP *cpi = *comp;
2093
2094 if (!cpi) return;
2095
2096 if (cpi && (cpi->common.current_video_frame > 0)) {
2097 #if !CONFIG_REALTIME_ONLY
2098
2099 if (cpi->pass == 2) {
2100 vp8_end_second_pass(cpi);
2101 }
2102
2103 #endif
2104
2105 #if CONFIG_INTERNAL_STATS
2106
2107 if (cpi->pass != 1) {
2108 FILE *f = fopen("opsnr.stt", "a");
2109 double time_encoded =
2110 (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
2111 10000000.000;
2112 double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
2113
2114 if (cpi->b_calculate_psnr) {
2115 if (cpi->oxcf.number_of_layers > 1) {
2116 int i;
2117
2118 fprintf(f,
2119 "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2120 "GLPsnrP\tVPXSSIM\n");
2121 for (i = 0; i < (int)cpi->oxcf.number_of_layers; ++i) {
2122 double dr =
2123 (double)cpi->bytes_in_layer[i] * 8.0 / 1000.0 / time_encoded;
2124 double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
2125 cpi->common.Width * cpi->common.Height;
2126 double total_psnr =
2127 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2[i]);
2128 double total_psnr2 =
2129 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2_p[i]);
2130 double total_ssim =
2131 100 * pow(cpi->sum_ssim[i] / cpi->sum_weights[i], 8.0);
2132
2133 fprintf(f,
2134 "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2135 "%7.3f\t%7.3f\n",
2136 i, dr, cpi->sum_psnr[i] / cpi->frames_in_layer[i],
2137 total_psnr, cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
2138 total_psnr2, total_ssim);
2139 }
2140 } else {
2141 double samples =
2142 3.0 / 2 * cpi->count * cpi->common.Width * cpi->common.Height;
2143 double total_psnr =
2144 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error);
2145 double total_psnr2 =
2146 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error2);
2147 double total_ssim =
2148 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
2149
2150 fprintf(f,
2151 "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2152 "GLPsnrP\tVPXSSIM\n");
2153 fprintf(f,
2154 "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2155 "%7.3f\n",
2156 dr, cpi->total / cpi->count, total_psnr,
2157 cpi->totalp / cpi->count, total_psnr2, total_ssim);
2158 }
2159 }
2160 fclose(f);
2161 #if 0
2162 f = fopen("qskip.stt", "a");
2163 fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
2164 fclose(f);
2165 #endif
2166 }
2167
2168 #endif
2169
2170 #ifdef SPEEDSTATS
2171
2172 if (cpi->compressor_speed == 2) {
2173 int i;
2174 FILE *f = fopen("cxspeed.stt", "a");
2175 cnt_pm /= cpi->common.MBs;
2176
2177 for (i = 0; i < 16; ++i) fprintf(f, "%5d", frames_at_speed[i]);
2178
2179 fprintf(f, "\n");
2180 fclose(f);
2181 }
2182
2183 #endif
2184
2185 #ifdef MODE_STATS
2186 {
2187 extern int count_mb_seg[4];
2188 FILE *f = fopen("modes.stt", "a");
2189 double dr = (double)cpi->framerate * (double)bytes * (double)8 /
2190 (double)count / (double)1000;
2191 fprintf(f, "intra_mode in Intra Frames:\n");
2192 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1],
2193 y_modes[2], y_modes[3], y_modes[4]);
2194 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1],
2195 uv_modes[2], uv_modes[3]);
2196 fprintf(f, "B: ");
2197 {
2198 int i;
2199
2200 for (i = 0; i < 10; ++i) fprintf(f, "%8d, ", b_modes[i]);
2201
2202 fprintf(f, "\n");
2203 }
2204
2205 fprintf(f, "Modes in Inter Frames:\n");
2206 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
2207 inter_y_modes[0], inter_y_modes[1], inter_y_modes[2],
2208 inter_y_modes[3], inter_y_modes[4], inter_y_modes[5],
2209 inter_y_modes[6], inter_y_modes[7], inter_y_modes[8],
2210 inter_y_modes[9]);
2211 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0],
2212 inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
2213 fprintf(f, "B: ");
2214 {
2215 int i;
2216
2217 for (i = 0; i < 15; ++i) fprintf(f, "%8d, ", inter_b_modes[i]);
2218
2219 fprintf(f, "\n");
2220 }
2221 fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1],
2222 count_mb_seg[2], count_mb_seg[3]);
2223 fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4],
2224 inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4],
2225 inter_b_modes[NEW4X4]);
2226
2227 fclose(f);
2228 }
2229 #endif
2230
2231 #if defined(SECTIONBITS_OUTPUT)
2232
2233 if (0) {
2234 int i;
2235 FILE *f = fopen("tokenbits.stt", "a");
2236
2237 for (i = 0; i < 28; ++i) fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
2238
2239 fprintf(f, "\n");
2240 fclose(f);
2241 }
2242
2243 #endif
2244
2245 #if 0
2246 {
2247 printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
2248 printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
2249 printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
2250 }
2251 #endif
2252 }
2253
2254 #if CONFIG_MULTITHREAD
2255 vp8cx_remove_encoder_threads(cpi);
2256 #endif
2257
2258 #if CONFIG_TEMPORAL_DENOISING
2259 vp8_denoiser_free(&cpi->denoiser);
2260 #endif
2261 dealloc_compressor_data(cpi);
2262 vpx_free(cpi->mb.ss);
2263 vpx_free(cpi->tok);
2264 vpx_free(cpi->skin_map);
2265 vpx_free(cpi->cyclic_refresh_map);
2266 vpx_free(cpi->consec_zero_last);
2267 vpx_free(cpi->consec_zero_last_mvbias);
2268
2269 vp8_remove_common(&cpi->common);
2270 vpx_free(cpi);
2271 *comp = 0;
2272
2273 #ifdef OUTPUT_YUV_SRC
2274 fclose(yuv_file);
2275 #endif
2276 #ifdef OUTPUT_YUV_DENOISED
2277 fclose(yuv_denoised_file);
2278 #endif
2279 #ifdef OUTPUT_YUV_SKINMAP
2280 fclose(yuv_skinmap_file);
2281 #endif
2282
2283 #if 0
2284
2285 if (keyfile)
2286 fclose(keyfile);
2287
2288 if (framepsnr)
2289 fclose(framepsnr);
2290
2291 if (kf_list)
2292 fclose(kf_list);
2293
2294 #endif
2295 }
2296
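/* Sum of squared differences over a plane: full 16x16 blocks go through
 * vpx_mse16x16, while any right/bottom remainder (when dimensions are not
 * multiples of 16) is accumulated with a scalar loop.
 */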
2297 static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
2298 unsigned char *recon, int recon_stride,
2299 unsigned int cols, unsigned int rows) {
2300 unsigned int row, col;
2301 uint64_t total_sse = 0;
2302 int diff;
2303
2304 for (row = 0; row + 16 <= rows; row += 16) {
2305 for (col = 0; col + 16 <= cols; col += 16) {
2306 unsigned int sse;
2307
2308 vpx_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
2309 total_sse += sse;
2310 }
2311
2312 /* Handle odd-sized width */
2313 if (col < cols) {
2314 unsigned int border_row, border_col;
2315 unsigned char *border_orig = orig;
2316 unsigned char *border_recon = recon;
2317
2318 for (border_row = 0; border_row < 16; ++border_row) {
2319 for (border_col = col; border_col < cols; ++border_col) {
2320 diff = border_orig[border_col] - border_recon[border_col];
2321 total_sse += diff * diff;
2322 }
2323
2324 border_orig += orig_stride;
2325 border_recon += recon_stride;
2326 }
2327 }
2328
2329 orig += orig_stride * 16;
2330 recon += recon_stride * 16;
2331 }
2332
2333 /* Handle odd-sized height */
2334 for (; row < rows; ++row) {
2335 for (col = 0; col < cols; ++col) {
2336 diff = orig[col] - recon[col];
2337 total_sse += diff * diff;
2338 }
2339
2340 orig += orig_stride;
2341 recon += recon_stride;
2342 }
2343
2344 vpx_clear_system_state();
2345 return total_sse;
2346 }
2347
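/* Build a VPX_CODEC_PSNR_PKT for the frame just encoded: per-plane SSE and
 * sample counts (with the combined totals in index 0) are converted to PSNR
 * and appended to the output packet list.
 */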
2348 static void generate_psnr_packet(VP8_COMP *cpi) {
2349 YV12_BUFFER_CONFIG *orig = cpi->Source;
2350 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
2351 struct vpx_codec_cx_pkt pkt;
2352 uint64_t sse;
2353 int i;
2354 unsigned int width = cpi->common.Width;
2355 unsigned int height = cpi->common.Height;
2356
2357 pkt.kind = VPX_CODEC_PSNR_PKT;
2358 sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
2359 recon->y_stride, width, height);
2360 pkt.data.psnr.sse[0] = sse;
2361 pkt.data.psnr.sse[1] = sse;
2362 pkt.data.psnr.samples[0] = width * height;
2363 pkt.data.psnr.samples[1] = width * height;
2364
2365 width = (width + 1) / 2;
2366 height = (height + 1) / 2;
2367
2368 sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
2369 recon->uv_stride, width, height);
2370 pkt.data.psnr.sse[0] += sse;
2371 pkt.data.psnr.sse[2] = sse;
2372 pkt.data.psnr.samples[0] += width * height;
2373 pkt.data.psnr.samples[2] = width * height;
2374
2375 sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
2376 recon->uv_stride, width, height);
2377 pkt.data.psnr.sse[0] += sse;
2378 pkt.data.psnr.sse[3] = sse;
2379 pkt.data.psnr.samples[0] += width * height;
2380 pkt.data.psnr.samples[3] = width * height;
2381
2382 for (i = 0; i < 4; ++i) {
2383 pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
2384 (double)(pkt.data.psnr.sse[i]));
2385 }
2386
2387 vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
2388 }
2389
2390 int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags) {
2391 if (ref_frame_flags > 7) return -1;
2392
2393 cpi->ref_frame_flags = ref_frame_flags;
2394 return 0;
2395 }
2396 int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) {
2397 if (ref_frame_flags > 7) return -1;
2398
2399 cpi->common.refresh_golden_frame = 0;
2400 cpi->common.refresh_alt_ref_frame = 0;
2401 cpi->common.refresh_last_frame = 0;
2402
2403 if (ref_frame_flags & VP8_LAST_FRAME) cpi->common.refresh_last_frame = 1;
2404
2405 if (ref_frame_flags & VP8_GOLD_FRAME) cpi->common.refresh_golden_frame = 1;
2406
2407 if (ref_frame_flags & VP8_ALTR_FRAME) cpi->common.refresh_alt_ref_frame = 1;
2408
2409 cpi->ext_refresh_frame_flags_pending = 1;
2410 return 0;
2411 }
2412
2413 int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2414 YV12_BUFFER_CONFIG *sd) {
2415 VP8_COMMON *cm = &cpi->common;
2416 int ref_fb_idx;
2417
2418 if (ref_frame_flag == VP8_LAST_FRAME) {
2419 ref_fb_idx = cm->lst_fb_idx;
2420 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2421 ref_fb_idx = cm->gld_fb_idx;
2422 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2423 ref_fb_idx = cm->alt_fb_idx;
2424 } else {
2425 return -1;
2426 }
2427
2428 vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
2429
2430 return 0;
2431 }
2432 int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2433 YV12_BUFFER_CONFIG *sd) {
2434 VP8_COMMON *cm = &cpi->common;
2435
2436 int ref_fb_idx;
2437
2438 if (ref_frame_flag == VP8_LAST_FRAME) {
2439 ref_fb_idx = cm->lst_fb_idx;
2440 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2441 ref_fb_idx = cm->gld_fb_idx;
2442 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2443 ref_fb_idx = cm->alt_fb_idx;
2444 } else {
2445 return -1;
2446 }
2447
2448 vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
2449
2450 return 0;
2451 }
2452 int vp8_update_entropy(VP8_COMP *cpi, int update) {
2453 VP8_COMMON *cm = &cpi->common;
2454 cm->refresh_entropy_probs = update;
2455
2456 return 0;
2457 }
2458
2459 static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
2460 VP8_COMMON *cm = &cpi->common;
2461
2462 /* are we resizing the image */
2463 if (cm->horiz_scale != 0 || cm->vert_scale != 0) {
2464 #if CONFIG_SPATIAL_RESAMPLING
2465 int hr, hs, vr, vs;
2466 int tmp_height;
2467
2468 if (cm->vert_scale == 3) {
2469 tmp_height = 9;
2470 } else {
2471 tmp_height = 11;
2472 }
2473
2474 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2475 Scale2Ratio(cm->vert_scale, &vr, &vs);
2476
2477 vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
2478 tmp_height, hs, hr, vs, vr, 0);
2479
2480 vp8_yv12_extend_frame_borders(&cpi->scaled_source);
2481 cpi->Source = &cpi->scaled_source;
2482 #endif
2483 } else {
2484 cpi->Source = sd;
2485 }
2486 }
2487
2488 static int resize_key_frame(VP8_COMP *cpi) {
2489 #if CONFIG_SPATIAL_RESAMPLING
2490 VP8_COMMON *cm = &cpi->common;
2491
2492 /* Do we need to apply resampling for one pass cbr.
2493 * In one pass this is more limited than in two pass cbr.
2494 * The test and any change is only made once per key frame sequence.
2495 */
2496 if (cpi->oxcf.allow_spatial_resampling &&
2497 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) {
2498 int hr, hs, vr, vs;
2499 int new_width, new_height;
2500
2501 /* If we are below the resample DOWN watermark then scale down a
2502 * notch.
2503 */
2504 if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark *
2505 cpi->oxcf.optimal_buffer_level / 100)) {
2506 cm->horiz_scale =
2507 (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
2508 cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
2509 }
2510 /* Should we now start scaling back up */
2511 else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark *
2512 cpi->oxcf.optimal_buffer_level / 100)) {
2513 cm->horiz_scale =
2514 (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
2515 cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
2516 }
2517
2518 /* Get the new height and width */
2519 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2520 Scale2Ratio(cm->vert_scale, &vr, &vs);
2521 new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
2522 new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
2523
2524 /* If the image size has changed we need to reallocate the buffers
2525 * and resample the source image
2526 */
2527 if ((cm->Width != new_width) || (cm->Height != new_height)) {
2528 cm->Width = new_width;
2529 cm->Height = new_height;
2530 vp8_alloc_compressor_data(cpi);
2531 scale_and_extend_source(cpi->un_scaled_source, cpi);
2532 return 1;
2533 }
2534 }
2535
2536 #endif
2537 return 0;
2538 }
2539
2540 static void update_alt_ref_frame_stats(VP8_COMP *cpi) {
2541 VP8_COMMON *cm = &cpi->common;
2542
2543 /* Select an interval before next GF or altref */
2544 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2545
2546 if ((cpi->pass != 2) && cpi->frames_till_gf_update_due) {
2547 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2548
2549 /* Set the bits per frame that we should try and recover in
2550 * subsequent inter frames to account for the extra GF spend...
2551      * note that this does not apply for GF updates that occur
2552 * coincident with a key frame as the extra cost of key frames is
2553 * dealt with elsewhere.
2554 */
2555 cpi->gf_overspend_bits += cpi->projected_frame_size;
2556 cpi->non_gf_bitrate_adjustment =
2557 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2558 }
2559
2560 /* Update data structure that monitors level of reference to last GF */
2561 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2562 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2563
2564   /* This frame refreshing means later frames don't, unless specified by the user */
2565 cpi->frames_since_golden = 0;
2566
2567 /* Clear the alternate reference update pending flag. */
2568 cpi->source_alt_ref_pending = 0;
2569
2570 /* Set the alternate reference frame active flag */
2571 cpi->source_alt_ref_active = 1;
2572 }
2573 static void update_golden_frame_stats(VP8_COMP *cpi) {
2574 VP8_COMMON *cm = &cpi->common;
2575
2576 /* Update the Golden frame usage counts. */
2577 if (cm->refresh_golden_frame) {
2578 /* Select an interval before next GF */
2579 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2580
2581 if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0)) {
2582 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2583
2584 /* Set the bits per frame that we should try and recover in
2585 * subsequent inter frames to account for the extra GF spend...
2586        * note that this does not apply for GF updates that occur
2587 * coincident with a key frame as the extra cost of key frames
2588 * is dealt with elsewhere.
2589 */
2590 if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active) {
2591        /* Calculate GF bits to be recovered
2592 * Projected size - av frame bits available for inter
2593 * frames for clip as a whole
2594 */
2595 cpi->gf_overspend_bits +=
2596 (cpi->projected_frame_size - cpi->inter_frame_target);
2597 }
2598
2599 cpi->non_gf_bitrate_adjustment =
2600 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2601 }
2602
2603 /* Update data structure that monitors level of reference to last GF */
2604 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2605 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2606
2607     /* This frame refreshing means later frames don't, unless specified by
2608      * the user
2609 */
2610 cm->refresh_golden_frame = 0;
2611 cpi->frames_since_golden = 0;
2612
2613 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
2614 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
2615 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
2616 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
2617
2618 /* ******** Fixed Q test code only ************ */
2619 /* If we are going to use the ALT reference for the next group of
2620 * frames set a flag to say so.
2621 */
2622 if (cpi->oxcf.fixed_q >= 0 && cpi->oxcf.play_alternate &&
2623 !cpi->common.refresh_alt_ref_frame) {
2624 cpi->source_alt_ref_pending = 1;
2625 cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
2626 }
2627
2628 if (!cpi->source_alt_ref_pending) cpi->source_alt_ref_active = 0;
2629
2630 /* Decrement count down till next gf */
2631 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2632
2633 } else if (!cpi->common.refresh_alt_ref_frame) {
2634 /* Decrement count down till next gf */
2635 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2636
2637 if (cpi->frames_till_alt_ref_frame) cpi->frames_till_alt_ref_frame--;
2638
2639 cpi->frames_since_golden++;
2640
2641 if (cpi->frames_since_golden > 1) {
2642 cpi->recent_ref_frame_usage[INTRA_FRAME] +=
2643 cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
2644 cpi->recent_ref_frame_usage[LAST_FRAME] +=
2645 cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
2646 cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
2647 cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
2648 cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
2649 cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
2650 }
2651 }
2652 }
2653
2654 /* This function updates the reference frame probability estimates that
2655 * will be used during mode selection
2656 */
2657 static void update_rd_ref_frame_probs(VP8_COMP *cpi) {
2658 VP8_COMMON *cm = &cpi->common;
2659
2660 const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
2661 const int rf_intra = rfct[INTRA_FRAME];
2662 const int rf_inter =
2663 rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
2664
2665 if (cm->frame_type == KEY_FRAME) {
2666 cpi->prob_intra_coded = 255;
2667 cpi->prob_last_coded = 128;
2668 cpi->prob_gf_coded = 128;
2669 } else if (!(rf_intra + rf_inter)) {
2670 cpi->prob_intra_coded = 63;
2671 cpi->prob_last_coded = 128;
2672 cpi->prob_gf_coded = 128;
2673 }
2674
2675 /* update reference frame costs since we can do better than what we got
2676 * last frame.
2677 */
2678 if (cpi->oxcf.number_of_layers == 1) {
2679 if (cpi->common.refresh_alt_ref_frame) {
2680 cpi->prob_intra_coded += 40;
2681 if (cpi->prob_intra_coded > 255) cpi->prob_intra_coded = 255;
2682 cpi->prob_last_coded = 200;
2683 cpi->prob_gf_coded = 1;
2684 } else if (cpi->frames_since_golden == 0) {
2685 cpi->prob_last_coded = 214;
2686 } else if (cpi->frames_since_golden == 1) {
2687 cpi->prob_last_coded = 192;
2688 cpi->prob_gf_coded = 220;
2689 } else if (cpi->source_alt_ref_active) {
2690 cpi->prob_gf_coded -= 20;
2691
2692 if (cpi->prob_gf_coded < 10) cpi->prob_gf_coded = 10;
2693 }
2694 if (!cpi->source_alt_ref_active) cpi->prob_gf_coded = 255;
2695 }
2696 }
2697
2698 #if !CONFIG_REALTIME_ONLY
2699 /* 1 = key, 0 = inter */
2700 static int decide_key_frame(VP8_COMP *cpi) {
2701 VP8_COMMON *cm = &cpi->common;
2702
2703 int code_key_frame = 0;
2704
2705 cpi->kf_boost = 0;
2706
2707 if (cpi->Speed > 11) return 0;
2708
2709 /* Clear down mmx registers */
2710 vpx_clear_system_state();
2711
2712 if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0)) {
2713 double change = 1.0 *
2714 abs((int)(cpi->mb.intra_error - cpi->last_intra_error)) /
2715 (1 + cpi->last_intra_error);
2716 double change2 =
2717 1.0 *
2718 abs((int)(cpi->mb.prediction_error - cpi->last_prediction_error)) /
2719 (1 + cpi->last_prediction_error);
2720 double minerror = cm->MBs * 256;
2721
2722 cpi->last_intra_error = cpi->mb.intra_error;
2723 cpi->last_prediction_error = cpi->mb.prediction_error;
2724
2725 if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15 &&
2726 cpi->mb.prediction_error > minerror &&
2727 (change > .25 || change2 > .25)) {
2728 /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra >
2729 * cpi->last_frame_percent_intra + 3*/
2730 return 1;
2731 }
2732
2733 return 0;
2734 }
2735
2736 /* If the following are true we might as well code a key frame */
2737 if (((cpi->this_frame_percent_intra == 100) &&
2738 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
2739 ((cpi->this_frame_percent_intra > 95) &&
2740 (cpi->this_frame_percent_intra >=
2741 (cpi->last_frame_percent_intra + 5)))) {
2742 code_key_frame = 1;
2743 }
2744   /* In addition, if the following are true and this is not a golden frame,
2745    * then code a key frame. Note that on golden frames there often seems
2746    * to be a pop in intra usage anyway, hence this restriction is
2747    * designed to prevent spurious key frames. The intra pop needs to be
2748    * investigated.
2749 */
2750 else if (((cpi->this_frame_percent_intra > 60) &&
2751 (cpi->this_frame_percent_intra >
2752 (cpi->last_frame_percent_intra * 2))) ||
2753 ((cpi->this_frame_percent_intra > 75) &&
2754 (cpi->this_frame_percent_intra >
2755 (cpi->last_frame_percent_intra * 3 / 2))) ||
2756 ((cpi->this_frame_percent_intra > 90) &&
2757 (cpi->this_frame_percent_intra >
2758 (cpi->last_frame_percent_intra + 10)))) {
2759 if (!cm->refresh_golden_frame) code_key_frame = 1;
2760 }
2761
2762 return code_key_frame;
2763 }
2764
2765 static void Pass1Encode(VP8_COMP *cpi) {
2766 vp8_set_quantizer(cpi, 26);
2767 vp8_first_pass(cpi);
2768 }
2769 #endif
2770
2771 #if 0
2772 void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
2773 {
2774
2775 /* write the frame */
2776 FILE *yframe;
2777 int i;
2778 char filename[255];
2779
2780 sprintf(filename, "cx\\y%04d.raw", this_frame);
2781 yframe = fopen(filename, "wb");
2782
2783 for (i = 0; i < frame->y_height; ++i)
2784 fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
2785
2786 fclose(yframe);
2787 sprintf(filename, "cx\\u%04d.raw", this_frame);
2788 yframe = fopen(filename, "wb");
2789
2790 for (i = 0; i < frame->uv_height; ++i)
2791 fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2792
2793 fclose(yframe);
2794 sprintf(filename, "cx\\v%04d.raw", this_frame);
2795 yframe = fopen(filename, "wb");
2796
2797 for (i = 0; i < frame->uv_height; ++i)
2798 fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2799
2800 fclose(yframe);
2801 }
2802 #endif
2803
2804 #if !CONFIG_REALTIME_ONLY
2805 /* Function to test for conditions that indicate we should loop
2806 * back and recode a frame.
2807 */
2808 static int recode_loop_test(VP8_COMP *cpi, int high_limit, int low_limit, int q,
2809 int maxq, int minq) {
2810 int force_recode = 0;
2811 VP8_COMMON *cm = &cpi->common;
2812
2813   /* Is frame recode allowed at all?
2814    * Yes if either recode mode 1 is selected, or mode 2 is selected
2815    * and the frame is a key frame, golden frame or alt_ref_frame
2816 */
2817 if ((cpi->sf.recode_loop == 1) ||
2818 ((cpi->sf.recode_loop == 2) &&
2819 ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
2820 cm->refresh_alt_ref_frame))) {
2821 /* General over and under shoot tests */
2822 if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
2823 ((cpi->projected_frame_size < low_limit) && (q > minq))) {
2824 force_recode = 1;
2825 }
2826 /* Special Constrained quality tests */
2827 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
2828 /* Undershoot and below auto cq level */
2829 if ((q > cpi->cq_target_quality) &&
2830 (cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3))) {
2831 force_recode = 1;
2832 }
2833 /* Severe undershoot and between auto and user cq level */
2834 else if ((q > cpi->oxcf.cq_level) &&
2835 (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
2836 (cpi->active_best_quality > cpi->oxcf.cq_level)) {
2837 force_recode = 1;
2838 cpi->active_best_quality = cpi->oxcf.cq_level;
2839 }
2840 }
2841 }
2842
2843 return force_recode;
2844 }
2845 #endif // !CONFIG_REALTIME_ONLY
2846
2847 static void update_reference_frames(VP8_COMP *cpi) {
2848 VP8_COMMON *cm = &cpi->common;
2849 YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
2850
2851 /* At this point the new frame has been encoded.
2852 * If any buffer copy / swapping is signaled it should be done here.
2853 */
2854
2855 if (cm->frame_type == KEY_FRAME) {
2856 yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME;
2857
2858 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2859 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2860
2861 cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
2862
2863 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2864 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2865 } else {
2866 if (cm->refresh_alt_ref_frame) {
2867 assert(!cm->copy_buffer_to_arf);
2868
2869 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
2870 cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2871 cm->alt_fb_idx = cm->new_fb_idx;
2872
2873 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2874 } else if (cm->copy_buffer_to_arf) {
2875 assert(!(cm->copy_buffer_to_arf & ~0x3));
2876
2877 if (cm->copy_buffer_to_arf == 1) {
2878 if (cm->alt_fb_idx != cm->lst_fb_idx) {
2879 yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
2880 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2881 cm->alt_fb_idx = cm->lst_fb_idx;
2882
2883 cpi->current_ref_frames[ALTREF_FRAME] =
2884 cpi->current_ref_frames[LAST_FRAME];
2885 }
2886 } else {
2887 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2888 yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
2889 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2890 cm->alt_fb_idx = cm->gld_fb_idx;
2891
2892 cpi->current_ref_frames[ALTREF_FRAME] =
2893 cpi->current_ref_frames[GOLDEN_FRAME];
2894 }
2895 }
2896 }
2897
2898 if (cm->refresh_golden_frame) {
2899 assert(!cm->copy_buffer_to_gf);
2900
2901 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
2902 cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2903 cm->gld_fb_idx = cm->new_fb_idx;
2904
2905 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2906 } else if (cm->copy_buffer_to_gf) {
2907       assert(!(cm->copy_buffer_to_gf & ~0x3));
2908
2909 if (cm->copy_buffer_to_gf == 1) {
2910 if (cm->gld_fb_idx != cm->lst_fb_idx) {
2911 yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
2912 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2913 cm->gld_fb_idx = cm->lst_fb_idx;
2914
2915 cpi->current_ref_frames[GOLDEN_FRAME] =
2916 cpi->current_ref_frames[LAST_FRAME];
2917 }
2918 } else {
2919 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2920 yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
2921 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2922 cm->gld_fb_idx = cm->alt_fb_idx;
2923
2924 cpi->current_ref_frames[GOLDEN_FRAME] =
2925 cpi->current_ref_frames[ALTREF_FRAME];
2926 }
2927 }
2928 }
2929 }
2930
2931 if (cm->refresh_last_frame) {
2932 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
2933 cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
2934 cm->lst_fb_idx = cm->new_fb_idx;
2935
2936 cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
2937 }
2938
2939 #if CONFIG_TEMPORAL_DENOISING
2940 if (cpi->oxcf.noise_sensitivity) {
2941     /* We shouldn't have to keep multiple copies as we know in advance which
2942      * buffer we should start from - for now, to get something up and running,
2943      * I've chosen to copy the buffers
2944 */
2945 if (cm->frame_type == KEY_FRAME) {
2946 int i;
2947 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
2948 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_running_avg[i]);
2949 } else {
2950 vp8_yv12_extend_frame_borders(
2951 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
2952
2953 if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf) {
2954 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
2955 &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
2956 }
2957 if (cm->refresh_golden_frame || cm->copy_buffer_to_gf) {
2958 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
2959 &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
2960 }
2961 if (cm->refresh_last_frame) {
2962 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
2963 &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
2964 }
2965 }
2966 if (cpi->oxcf.noise_sensitivity == 4)
2967 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source);
2968 }
2969 #endif
2970 }
2971
2972 static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source,
2973 YV12_BUFFER_CONFIG *dest,
2974 VP8_COMP *cpi) {
2975 int i, j;
2976 int Total = 0;
2977 int num_blocks = 0;
2978 int skip = 2;
2979 int min_consec_zero_last = 10;
2980 int tot_num_blocks = (source->y_height * source->y_width) >> 8;
2981 unsigned char *src = source->y_buffer;
2982 unsigned char *dst = dest->y_buffer;
2983
2984   /* Loop through the Y plane, every |skip| blocks along rows and columns,
2985    * summing the square differences, but only for blocks that have been in
2986    * zero_last mode for at least |min_consec_zero_last| frames in a row.
2987 */
2988 for (i = 0; i < source->y_height; i += 16 * skip) {
2989 int block_index_row = (i >> 4) * cpi->common.mb_cols;
2990 for (j = 0; j < source->y_width; j += 16 * skip) {
2991 int index = block_index_row + (j >> 4);
2992 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
2993 unsigned int sse;
2994 Total += vpx_mse16x16(src + j, source->y_stride, dst + j,
2995 dest->y_stride, &sse);
2996 num_blocks++;
2997 }
2998 }
2999 src += 16 * skip * source->y_stride;
3000 dst += 16 * skip * dest->y_stride;
3001 }
3002 // Only return non-zero if we have at least ~1/16 samples for estimate.
3003 if (num_blocks > (tot_num_blocks >> 4)) {
3004 assert(num_blocks != 0);
3005 return (Total / num_blocks);
3006 } else {
3007 return 0;
3008 }
3009 }
3010
3011 #if CONFIG_TEMPORAL_DENOISING
3012 static void process_denoiser_mode_change(VP8_COMP *cpi) {
3013 const VP8_COMMON *const cm = &cpi->common;
3014 int i, j;
3015 int total = 0;
3016 int num_blocks = 0;
3017 // Number of blocks skipped along row/column in computing the
3018 // nmse (normalized mean square error) of source.
3019 int skip = 2;
3020 // Only select blocks for computing nmse that have been encoded
3021 // as ZERO LAST min_consec_zero_last frames in a row.
3022 // Scale with number of temporal layers.
3023 int min_consec_zero_last = 12 / cpi->oxcf.number_of_layers;
3024 // Decision is tested for changing the denoising mode every
3025   // num_mode_change times this function is called. Note that this
3026   // function is called every 8 frames, so (8 * num_mode_change) is the number
3027   // of frames over which a change of denoising mode is tested.
3028 int num_mode_change = 20;
3029 // Framerate factor, to compensate for larger mse at lower framerates.
3030 // Use ref_framerate, which is full source framerate for temporal layers.
3031 // TODO(marpan): Adjust this factor.
3032 int fac_framerate = cpi->ref_framerate < 25.0f ? 80 : 100;
3033 int tot_num_blocks = cm->mb_rows * cm->mb_cols;
3034 int ystride = cpi->Source->y_stride;
3035 unsigned char *src = cpi->Source->y_buffer;
3036 unsigned char *dst = cpi->denoiser.yv12_last_source.y_buffer;
3037 static const unsigned char const_source[16] = { 128, 128, 128, 128, 128, 128,
3038 128, 128, 128, 128, 128, 128,
3039 128, 128, 128, 128 };
3040 int bandwidth = (int)(cpi->target_bandwidth);
3041 // For temporal layers, use full bandwidth (top layer).
3042 if (cpi->oxcf.number_of_layers > 1) {
3043 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->oxcf.number_of_layers - 1];
3044 bandwidth = (int)(lc->target_bandwidth);
3045 }
3046 // Loop through the Y plane, every skip blocks along rows and columns,
3047 // summing the normalized mean square error, only for blocks that have
3048   // been encoded as ZEROMV LAST for at least min_consec_zero_last frames in
3049 // a row and have small sum difference between current and previous frame.
3050 // Normalization here is by the contrast of the current frame block.
3051 for (i = 0; i < cm->Height; i += 16 * skip) {
3052 int block_index_row = (i >> 4) * cm->mb_cols;
3053 for (j = 0; j < cm->Width; j += 16 * skip) {
3054 int index = block_index_row + (j >> 4);
3055 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3056 unsigned int sse;
3057 const unsigned int var =
3058 vpx_variance16x16(src + j, ystride, dst + j, ystride, &sse);
3059 // Only consider this block as valid for noise measurement
3060 // if the sum_diff average of the current and previous frame
3061 // is small (to avoid effects from lighting change).
3062 if ((sse - var) < 128) {
3063 unsigned int sse2;
3064 const unsigned int act =
3065 vpx_variance16x16(src + j, ystride, const_source, 0, &sse2);
3066 if (act > 0) total += sse / act;
3067 num_blocks++;
3068 }
3069 }
3070 }
3071 src += 16 * skip * ystride;
3072 dst += 16 * skip * ystride;
3073 }
3074 total = total * fac_framerate / 100;
3075
3076 // Only consider this frame as valid sample if we have computed nmse over
3077 // at least ~1/16 blocks, and Total > 0 (Total == 0 can happen if the
3078 // application inputs duplicate frames, or contrast is all zero).
3079 if (total > 0 && (num_blocks > (tot_num_blocks >> 4))) {
3080 // Update the recursive mean square source_diff.
3081 total = (total << 8) / num_blocks;
3082 if (cpi->denoiser.nmse_source_diff_count == 0) {
3083 // First sample in new interval.
3084 cpi->denoiser.nmse_source_diff = total;
3085 cpi->denoiser.qp_avg = cm->base_qindex;
3086 } else {
3087 // For subsequent samples, use average with weight ~1/4 for new sample.
3088 cpi->denoiser.nmse_source_diff =
3089 (int)((total + 3 * cpi->denoiser.nmse_source_diff) >> 2);
3090 cpi->denoiser.qp_avg =
3091 (int)((cm->base_qindex + 3 * cpi->denoiser.qp_avg) >> 2);
3092 }
3093 cpi->denoiser.nmse_source_diff_count++;
3094 }
3095 // Check for changing the denoiser mode, when we have obtained #samples =
3096 // num_mode_change. Condition the change also on the bitrate and QP.
3097 if (cpi->denoiser.nmse_source_diff_count == num_mode_change) {
3098 // Check for going up: from normal to aggressive mode.
3099 if ((cpi->denoiser.denoiser_mode == kDenoiserOnYUV) &&
3100 (cpi->denoiser.nmse_source_diff >
3101 cpi->denoiser.threshold_aggressive_mode) &&
3102 (cpi->denoiser.qp_avg < cpi->denoiser.qp_threshold_up &&
3103 bandwidth > cpi->denoiser.bitrate_threshold)) {
3104 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUVAggressive);
3105 } else {
3106 // Check for going down: from aggressive to normal mode.
3107 if (((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3108 (cpi->denoiser.nmse_source_diff <
3109 cpi->denoiser.threshold_aggressive_mode)) ||
3110 ((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3111 (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down ||
3112 bandwidth < cpi->denoiser.bitrate_threshold))) {
3113 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3114 }
3115 }
3116 // Reset metric and counter for next interval.
3117 cpi->denoiser.nmse_source_diff = 0;
3118 cpi->denoiser.qp_avg = 0;
3119 cpi->denoiser.nmse_source_diff_count = 0;
3120 }
3121 }
3122 #endif
3123
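/* Pick a loop filter level (fast or full search, using the denoised buffer
 * when temporal denoising is active on inter frames), apply the loop filter
 * only if this frame updates at least one reference buffer, then extend the
 * borders of the frame to show.
 */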
3124 void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) {
3125 const FRAME_TYPE frame_type = cm->frame_type;
3126
3127 int update_any_ref_buffers = 1;
3128 if (cpi->common.refresh_last_frame == 0 &&
3129 cpi->common.refresh_golden_frame == 0 &&
3130 cpi->common.refresh_alt_ref_frame == 0) {
3131 update_any_ref_buffers = 0;
3132 }
3133
3134 if (cm->no_lpf) {
3135 cm->filter_level = 0;
3136 } else {
3137 struct vpx_usec_timer timer;
3138
3139 vpx_clear_system_state();
3140
3141 vpx_usec_timer_start(&timer);
3142 if (cpi->sf.auto_filter == 0) {
3143 #if CONFIG_TEMPORAL_DENOISING
3144 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3145 // Use the denoised buffer for selecting base loop filter level.
3146 // Denoised signal for current frame is stored in INTRA_FRAME.
3147 // No denoising on key frames.
3148 vp8cx_pick_filter_level_fast(
3149 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
3150 } else {
3151 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3152 }
3153 #else
3154 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3155 #endif
3156 } else {
3157 #if CONFIG_TEMPORAL_DENOISING
3158 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3159 // Use the denoised buffer for selecting base loop filter level.
3160 // Denoised signal for current frame is stored in INTRA_FRAME.
3161 // No denoising on key frames.
3162 vp8cx_pick_filter_level(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3163 cpi);
3164 } else {
3165 vp8cx_pick_filter_level(cpi->Source, cpi);
3166 }
3167 #else
3168 vp8cx_pick_filter_level(cpi->Source, cpi);
3169 #endif
3170 }
3171
3172 if (cm->filter_level > 0) {
3173 vp8cx_set_alt_lf_level(cpi, cm->filter_level);
3174 }
3175
3176 vpx_usec_timer_mark(&timer);
3177 cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
3178 }
3179
3180 #if CONFIG_MULTITHREAD
3181 if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
3182 sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
3183 }
3184 #endif
3185
3186 // No need to apply loop-filter if the encoded frame does not update
3187 // any reference buffers.
3188 if (cm->filter_level > 0 && update_any_ref_buffers) {
3189 vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
3190 }
3191
3192 vp8_yv12_extend_frame_borders(cm->frame_to_show);
3193 }
3194
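/* Core per-frame encoding routine: decides the frame type, sets the frame's
 * rate target and quantizer limits, and (outside realtime-only builds) drives
 * the recode loop used to hit the target size.
 */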
3195 static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
3196 unsigned char *dest,
3197 unsigned char *dest_end,
3198 unsigned int *frame_flags) {
3199 int Q;
3200 int frame_over_shoot_limit;
3201 int frame_under_shoot_limit;
3202
3203 int Loop = 0;
3204 int loop_count;
3205
3206 VP8_COMMON *cm = &cpi->common;
3207 int active_worst_qchanged = 0;
3208
3209 #if !CONFIG_REALTIME_ONLY
3210 int q_low;
3211 int q_high;
3212 int zbin_oq_high;
3213 int zbin_oq_low = 0;
3214 int top_index;
3215 int bottom_index;
3216 int overshoot_seen = 0;
3217 int undershoot_seen = 0;
3218 #endif
3219
3220 int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
3221 cpi->oxcf.optimal_buffer_level / 100);
3222 int drop_mark75 = drop_mark * 2 / 3;
3223 int drop_mark50 = drop_mark / 4;
3224 int drop_mark25 = drop_mark / 8;
3225
3226 /* Clear down mmx registers to allow floating point in what follows */
3227 vpx_clear_system_state();
3228
3229 if (cpi->force_next_frame_intra) {
3230 cm->frame_type = KEY_FRAME; /* delayed intra frame */
3231 cpi->force_next_frame_intra = 0;
3232 }
3233
3234 /* For an alt ref frame in 2 pass we skip the call to the second pass
3235 * function that sets the target bandwidth
3236 */
3237 switch (cpi->pass) {
3238 #if !CONFIG_REALTIME_ONLY
3239 case 2:
3240 if (cpi->common.refresh_alt_ref_frame) {
3241 /* Per frame bit target for the alt ref frame */
3242 cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
3243 /* per second target bitrate */
3244 cpi->target_bandwidth =
3245 (int)(cpi->twopass.gf_bits * cpi->output_framerate);
3246 }
3247 break;
3248 #endif // !CONFIG_REALTIME_ONLY
3249 default:
3250 cpi->per_frame_bandwidth =
3251 (int)round(cpi->target_bandwidth / cpi->output_framerate);
3252 break;
3253 }
3254
3255 /* Default turn off buffer to buffer copying */
3256 cm->copy_buffer_to_gf = 0;
3257 cm->copy_buffer_to_arf = 0;
3258
3259 /* Clear zbin over-quant value and mode boost values. */
3260 cpi->mb.zbin_over_quant = 0;
3261 cpi->mb.zbin_mode_boost = 0;
3262
3263   /* Enable or disable mode based tweaking of the zbin.
3264    * For 2 pass this is only used where GF/ARF prediction quality
3265    * is above a threshold.
3266 */
3267 cpi->mb.zbin_mode_boost_enabled = 1;
3268 if (cpi->pass == 2) {
3269 if (cpi->gfu_boost <= 400) {
3270 cpi->mb.zbin_mode_boost_enabled = 0;
3271 }
3272 }
3273
3274 /* Current default encoder behaviour for the altref sign bias */
3275 if (cpi->source_alt_ref_active) {
3276 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
3277 } else {
3278 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
3279 }
3280
3281 /* Check to see if a key frame is signaled
3282 * For two pass with auto key frame enabled cm->frame_type may already
3283 * be set, but not for one pass.
3284 */
3285 if ((cm->current_video_frame == 0) || (cm->frame_flags & FRAMEFLAGS_KEY) ||
3286 (cpi->oxcf.auto_key &&
3287 (cpi->frames_since_key % cpi->key_frame_frequency == 0))) {
3288 /* Key frame from VFW/auto-keyframe/first frame */
3289 cm->frame_type = KEY_FRAME;
3290 #if CONFIG_TEMPORAL_DENOISING
3291 if (cpi->oxcf.noise_sensitivity == 4) {
3292 // For adaptive mode, reset denoiser to normal mode on key frame.
3293 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3294 }
3295 #endif
3296 }
3297
3298 #if CONFIG_MULTI_RES_ENCODING
3299 if (cpi->oxcf.mr_total_resolutions > 1) {
3300 LOWER_RES_FRAME_INFO *low_res_frame_info =
3301 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
3302
3303 if (cpi->oxcf.mr_encoder_id) {
3304 // Check if lower resolution is available for motion vector reuse.
3305 if (cm->frame_type != KEY_FRAME) {
3306 cpi->mr_low_res_mv_avail = 1;
3307 cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
3308
3309 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
3310 cpi->mr_low_res_mv_avail &=
3311 (cpi->current_ref_frames[LAST_FRAME] ==
3312 low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
3313
3314 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
3315 cpi->mr_low_res_mv_avail &=
3316 (cpi->current_ref_frames[GOLDEN_FRAME] ==
3317 low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
3318
3319 // Don't use altref to determine whether low res is available.
3320 // TODO (marpan): Should we make this type of condition on a
3321 // per-reference frame basis?
3322 /*
3323 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
3324 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
3325 == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
3326 */
3327 }
3328 // Disable motion vector reuse (i.e., disable any usage of the low_res)
3329 // if the previous lower stream is skipped/disabled.
3330 if (low_res_frame_info->skip_encoding_prev_stream) {
3331 cpi->mr_low_res_mv_avail = 0;
3332 }
3333 }
3334 // This stream is not skipped (i.e., it's being encoded), so set this skip
3335     // flag to 0. This is needed for the next stream (i.e., the next frame
3336     // to be encoded).
3337 low_res_frame_info->skip_encoding_prev_stream = 0;
3338
3339 // On a key frame: For the lowest resolution, keep track of the key frame
3340 // counter value. For the higher resolutions, reset the current video
3341 // frame counter to that of the lowest resolution.
3342     // This is done to handle the case where we may stop/start encoding
3343     // higher layer(s). The restart-encoding of a higher layer is only signaled
3344 // by a key frame for now.
3345 // TODO (marpan): Add flag to indicate restart-encoding of higher layer.
3346 if (cm->frame_type == KEY_FRAME) {
3347 if (cpi->oxcf.mr_encoder_id) {
3348 // If the initial starting value of the buffer level is zero (this can
3349         // happen because we may not have started encoding this higher stream),
3350         // then reset it to a non-zero value based on |starting_buffer_level|.
3351 if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) {
3352 unsigned int i;
3353 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
3354 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
3355 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
3356 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3357 lc->bits_off_target = lc->starting_buffer_level;
3358 lc->buffer_level = lc->starting_buffer_level;
3359 }
3360 }
3361 cpi->common.current_video_frame =
3362 low_res_frame_info->key_frame_counter_value;
3363 } else {
3364 low_res_frame_info->key_frame_counter_value =
3365 cpi->common.current_video_frame;
3366 }
3367 }
3368 }
3369 #endif
3370
3371 // Find the reference frame closest to the current frame.
3372 cpi->closest_reference_frame = LAST_FRAME;
3373 if (cm->frame_type != KEY_FRAME) {
3374 int i;
3375 MV_REFERENCE_FRAME closest_ref = INTRA_FRAME;
3376 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
3377 closest_ref = LAST_FRAME;
3378 } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
3379 closest_ref = GOLDEN_FRAME;
3380 } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
3381 closest_ref = ALTREF_FRAME;
3382 }
3383 for (i = 1; i <= 3; ++i) {
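      /* Map the reference index (LAST_FRAME = 1, GOLDEN_FRAME = 2,
       * ALTREF_FRAME = 3) to its flag bit (VP8_LAST_FRAME = 1,
       * VP8_GOLD_FRAME = 2, VP8_ALTR_FRAME = 4), hence the i == 3 case. */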
3384 vpx_ref_frame_type_t ref_frame_type =
3385 (vpx_ref_frame_type_t)((i == 3) ? 4 : i);
3386 if (cpi->ref_frame_flags & ref_frame_type) {
3387 if ((cm->current_video_frame - cpi->current_ref_frames[i]) <
3388 (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
3389 closest_ref = i;
3390 }
3391 }
3392 }
3393 cpi->closest_reference_frame = closest_ref;
3394 }
3395
3396 /* Set various flags etc to special state if it is a key frame */
3397 if (cm->frame_type == KEY_FRAME) {
3398 int i;
3399
3400 // Set the loop filter deltas and segmentation map update
3401 setup_features(cpi);
3402
3403 /* The alternate reference frame cannot be active for a key frame */
3404 cpi->source_alt_ref_active = 0;
3405
3406     /* Reset the RD threshold multipliers to the default of 1x (128) */
3407 for (i = 0; i < MAX_MODES; ++i) {
3408 cpi->mb.rd_thresh_mult[i] = 128;
3409 }
3410
3411 // Reset the zero_last counter to 0 on key frame.
3412 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3413 memset(cpi->consec_zero_last_mvbias, 0,
3414 (cpi->common.mb_rows * cpi->common.mb_cols));
3415 }
3416
3417 #if 0
3418 /* Experimental code for lagged compress and one pass
3419 * Initialise one_pass GF frames stats
3420 * Update stats used for GF selection
3421 */
3422 {
3423 cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
3424
3425 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
3426 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
3427 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
3428 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
3429 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
3430 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
3431 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
3432 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
3433 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
3434 }
3435 #endif
3436
3437 update_rd_ref_frame_probs(cpi);
3438
3439 if (cpi->drop_frames_allowed) {
3440 /* The reset to decimation 0 is only done here for one pass.
3441 * Once it is set two pass leaves decimation on till the next kf.
3442 */
3443 if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0)) {
3444 cpi->decimation_factor--;
3445 }
3446
3447 if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0) {
3448 cpi->decimation_factor = 1;
3449
3450 } else if (cpi->buffer_level < drop_mark25 &&
3451 (cpi->decimation_factor == 2 || cpi->decimation_factor == 3)) {
3452 cpi->decimation_factor = 3;
3453 } else if (cpi->buffer_level < drop_mark50 &&
3454 (cpi->decimation_factor == 1 || cpi->decimation_factor == 2)) {
3455 cpi->decimation_factor = 2;
3456 } else if (cpi->buffer_level < drop_mark75 &&
3457 (cpi->decimation_factor == 0 || cpi->decimation_factor == 1)) {
3458 cpi->decimation_factor = 1;
3459 }
3460 }
3461
3462 /* The following decimates the frame rate according to a regular
3463    * pattern (i.e. to 1/2 or 2/3 frame rate). This can be used to help
3464 * prevent buffer under-run in CBR mode. Alternatively it might be
3465 * desirable in some situations to drop frame rate but throw more bits
3466 * at each frame.
3467 *
3468 * Note that dropping a key frame can be problematic if spatial
3469 * resampling is also active
3470 */
3471 if (cpi->decimation_factor > 0 && cpi->drop_frames_allowed) {
3472 switch (cpi->decimation_factor) {
3473 case 1:
3474 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
3475 break;
3476 case 2:
3477 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3478 break;
3479 case 3:
3480 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3481 break;
3482 }
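    /* For example, decimation_factor == 1 drops every other non-key frame
     * below while scaling each coded frame's target by 3/2; factors 2 and 3
     * drop 2 of 3 and 3 of 4 frames with a 5/4 scaling. */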
3483
3484 /* Note that we should not throw out a key frame (especially when
3485 * spatial resampling is enabled).
3486 */
3487 if (cm->frame_type == KEY_FRAME) {
3488 cpi->decimation_count = cpi->decimation_factor;
3489 } else if (cpi->decimation_count > 0) {
3490 cpi->decimation_count--;
3491
3492 cpi->bits_off_target += cpi->av_per_frame_bandwidth;
3493 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
3494 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
3495 }
3496
3497 #if CONFIG_MULTI_RES_ENCODING
3498 vp8_store_drop_frame_info(cpi);
3499 #endif
3500
3501 cm->current_video_frame++;
3502 cpi->frames_since_key++;
3503 cpi->ext_refresh_frame_flags_pending = 0;
3504 // We advance the temporal pattern for dropped frames.
3505 cpi->temporal_pattern_counter++;
3506
3507 #if CONFIG_INTERNAL_STATS
3508 cpi->count++;
3509 #endif
3510
3511 cpi->buffer_level = cpi->bits_off_target;
3512
3513 if (cpi->oxcf.number_of_layers > 1) {
3514 unsigned int i;
3515
3516 /* Propagate bits saved by dropping the frame to higher
3517 * layers
3518 */
3519 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
3520 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3521 lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
3522 if (lc->bits_off_target > lc->maximum_buffer_size) {
3523 lc->bits_off_target = lc->maximum_buffer_size;
3524 }
3525 lc->buffer_level = lc->bits_off_target;
3526 }
3527 }
3528
3529 return;
3530 } else {
3531 cpi->decimation_count = cpi->decimation_factor;
3532 }
3533 } else {
3534 cpi->decimation_count = 0;
3535 }
3536
3537 /* Decide how big to make the frame */
3538 if (!vp8_pick_frame_size(cpi)) {
3539     /* TODO: the two drop_frame-and-return code paths could be put together. */
3540 #if CONFIG_MULTI_RES_ENCODING
3541 vp8_store_drop_frame_info(cpi);
3542 #endif
3543 cm->current_video_frame++;
3544 cpi->frames_since_key++;
3545 cpi->ext_refresh_frame_flags_pending = 0;
3546 // We advance the temporal pattern for dropped frames.
3547 cpi->temporal_pattern_counter++;
3548 return;
3549 }
3550
3551 /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
3552    * This has a knock-on effect on active best quality as well.
3553 * For CBR if the buffer reaches its maximum level then we can no longer
3554 * save up bits for later frames so we might as well use them up
3555 * on the current frame.
3556 */
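  /* The reduction below scales roughly linearly with how far buffer_level
   * sits between the optimal and maximum buffer levels, capped at
   * active_worst_quality / 4. */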
3557 if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
3558 (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) &&
3559 cpi->buffered_mode) {
3560 /* Max adjustment is 1/4 */
3561 int Adjustment = cpi->active_worst_quality / 4;
3562
3563 if (Adjustment) {
3564 int buff_lvl_step;
3565
3566 if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size) {
3567 buff_lvl_step = (int)((cpi->oxcf.maximum_buffer_size -
3568 cpi->oxcf.optimal_buffer_level) /
3569 Adjustment);
3570
3571 if (buff_lvl_step) {
3572 Adjustment =
3573 (int)((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) /
3574 buff_lvl_step);
3575 } else {
3576 Adjustment = 0;
3577 }
3578 }
3579
3580 cpi->active_worst_quality -= Adjustment;
3581
3582 if (cpi->active_worst_quality < cpi->active_best_quality) {
3583 cpi->active_worst_quality = cpi->active_best_quality;
3584 }
3585 }
3586 }
3587
3588 /* Set an active best quality and if necessary active worst quality
3589 * There is some odd behavior for one pass here that needs attention.
3590 */
3591 if ((cpi->pass == 2) || (cpi->ni_frames > 150)) {
3592 vpx_clear_system_state();
3593
3594 Q = cpi->active_worst_quality;
3595
3596 if (cm->frame_type == KEY_FRAME) {
3597 if (cpi->pass == 2) {
3598 if (cpi->gfu_boost > 600) {
3599 cpi->active_best_quality = kf_low_motion_minq[Q];
3600 } else {
3601 cpi->active_best_quality = kf_high_motion_minq[Q];
3602 }
3603
3604 /* Special case for key frames forced because we have reached
3605 * the maximum key frame interval. Here force the Q to a range
3606 * based on the ambient Q to reduce the risk of popping
3607 */
3608 if (cpi->this_key_frame_forced) {
3609 if (cpi->active_best_quality > cpi->avg_frame_qindex * 7 / 8) {
3610 cpi->active_best_quality = cpi->avg_frame_qindex * 7 / 8;
3611 } else if (cpi->active_best_quality < (cpi->avg_frame_qindex >> 2)) {
3612 cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
3613 }
3614 }
3615 }
3616 /* One pass more conservative */
3617 else {
3618 cpi->active_best_quality = kf_high_motion_minq[Q];
3619 }
3620 }
3621
3622 else if (cpi->oxcf.number_of_layers == 1 &&
3623 (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)) {
3624 /* Use the lower of cpi->active_worst_quality and recent
3625 * average Q as basis for GF/ARF Q limit unless last frame was
3626 * a key frame.
3627 */
3628 if ((cpi->frames_since_key > 1) &&
3629 (cpi->avg_frame_qindex < cpi->active_worst_quality)) {
3630 Q = cpi->avg_frame_qindex;
3631 }
3632
3633       /* For constrained quality don't allow Q less than the cq level */
3634 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3635 (Q < cpi->cq_target_quality)) {
3636 Q = cpi->cq_target_quality;
3637 }
3638
3639 if (cpi->pass == 2) {
3640 if (cpi->gfu_boost > 1000) {
3641 cpi->active_best_quality = gf_low_motion_minq[Q];
3642 } else if (cpi->gfu_boost < 400) {
3643 cpi->active_best_quality = gf_high_motion_minq[Q];
3644 } else {
3645 cpi->active_best_quality = gf_mid_motion_minq[Q];
3646 }
3647
3648 /* Constrained quality use slightly lower active best. */
3649 if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3650 cpi->active_best_quality = cpi->active_best_quality * 15 / 16;
3651 }
3652 }
3653 /* One pass more conservative */
3654 else {
3655 cpi->active_best_quality = gf_high_motion_minq[Q];
3656 }
3657 } else {
3658 cpi->active_best_quality = inter_minq[Q];
3659
3660       /* For the constant/constrained quality mode we don't want
3661 * q to fall below the cq level.
3662 */
3663 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3664 (cpi->active_best_quality < cpi->cq_target_quality)) {
3665 /* If we are strongly undershooting the target rate in the last
3666 * frames then use the user passed in cq value not the auto
3667 * cq value.
3668 */
3669 if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth) {
3670 cpi->active_best_quality = cpi->oxcf.cq_level;
3671 } else {
3672 cpi->active_best_quality = cpi->cq_target_quality;
3673 }
3674 }
3675 }
3676
3677     /* If CBR and the buffer is above its optimal level then it is reasonable
3678      * to allow higher quality on the frames to prevent bits just going to waste.
3679 */
3680 if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
3681       /* Note that the use of >= here eliminates the risk of a divide
3682        * by 0 error in the else if clause
3683 */
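      /* Fraction is a 0-128 (Q7) measure of how far the buffer has filled
       * beyond the optimal level; active_best_quality is pulled
       * proportionally toward best_quality. */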
3684 if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) {
3685 cpi->active_best_quality = cpi->best_quality;
3686
3687 } else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level) {
3688 int Fraction =
3689 (int)(((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) /
3690 (cpi->oxcf.maximum_buffer_size -
3691 cpi->oxcf.optimal_buffer_level));
3692 int min_qadjustment =
3693 ((cpi->active_best_quality - cpi->best_quality) * Fraction) / 128;
3694
3695 cpi->active_best_quality -= min_qadjustment;
3696 }
3697 }
3698 }
3699 /* Make sure constrained quality mode limits are adhered to for the first
3700 * few frames of one pass encodes
3701 */
3702 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3703 if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
3704 cpi->common.refresh_alt_ref_frame) {
3705 cpi->active_best_quality = cpi->best_quality;
3706 } else if (cpi->active_best_quality < cpi->cq_target_quality) {
3707 cpi->active_best_quality = cpi->cq_target_quality;
3708 }
3709 }
3710
3711 /* Clip the active best and worst quality values to limits */
3712 if (cpi->active_worst_quality > cpi->worst_quality) {
3713 cpi->active_worst_quality = cpi->worst_quality;
3714 }
3715
3716 if (cpi->active_best_quality < cpi->best_quality) {
3717 cpi->active_best_quality = cpi->best_quality;
3718 }
3719
3720 if (cpi->active_worst_quality < cpi->active_best_quality) {
3721 cpi->active_worst_quality = cpi->active_best_quality;
3722 }
3723
3724 /* Determine initial Q to try */
3725 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3726
3727 #if !CONFIG_REALTIME_ONLY
3728
3729 /* Set highest allowed value for Zbin over quant */
3730 if (cm->frame_type == KEY_FRAME) {
3731 zbin_oq_high = 0;
3732 } else if ((cpi->oxcf.number_of_layers == 1) &&
3733 ((cm->refresh_alt_ref_frame ||
3734 (cm->refresh_golden_frame && !cpi->source_alt_ref_active)))) {
3735 zbin_oq_high = 16;
3736 } else {
3737 zbin_oq_high = ZBIN_OQ_MAX;
3738 }
3739 #endif
3740
3741 compute_skin_map(cpi);
3742
3743 /* Setup background Q adjustment for error resilient mode.
3744 * For multi-layer encodes only enable this for the base layer.
3745 */
3746 if (cpi->cyclic_refresh_mode_enabled) {
3747 // Special case for screen_content_mode with golden frame updates.
3748 int disable_cr_gf =
3749 (cpi->oxcf.screen_content_mode == 2 && cm->refresh_golden_frame);
3750 if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf) {
3751 cyclic_background_refresh(cpi, Q, 0);
3752 } else {
3753 disable_segmentation(cpi);
3754 }
3755 }
3756
3757 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3758 &frame_over_shoot_limit);
3759
3760 #if !CONFIG_REALTIME_ONLY
3761 /* Limit Q range for the adaptive loop. */
3762 bottom_index = cpi->active_best_quality;
3763 top_index = cpi->active_worst_quality;
3764 q_low = cpi->active_best_quality;
3765 q_high = cpi->active_worst_quality;
3766 #endif
3767
3768 vp8_save_coding_context(cpi);
3769
3770 loop_count = 0;
3771
3772 scale_and_extend_source(cpi->un_scaled_source, cpi);
3773
3774 #if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC
3775 // Option to apply spatial blur under the aggressive or adaptive
3776 // (temporal denoising) mode.
3777 if (cpi->oxcf.noise_sensitivity >= 3) {
3778 if (cpi->denoiser.denoise_pars.spatial_blur != 0) {
3779 vp8_de_noise(cm, cpi->Source, cpi->denoiser.denoise_pars.spatial_blur, 1);
3780 }
3781 }
3782 #endif
3783
3784 #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)
3785
3786 if (cpi->oxcf.noise_sensitivity > 0) {
3787 unsigned char *src;
3788 int l = 0;
3789
3790 switch (cpi->oxcf.noise_sensitivity) {
3791 case 1: l = 20; break;
3792 case 2: l = 40; break;
3793 case 3: l = 60; break;
3794 case 4: l = 80; break;
3795 case 5: l = 100; break;
3796 case 6: l = 150; break;
3797 }
3798
3799 if (cm->frame_type == KEY_FRAME) {
3800 vp8_de_noise(cm, cpi->Source, l, 1);
3801 } else {
3802 vp8_de_noise(cm, cpi->Source, l, 1);
3803
3804 src = cpi->Source->y_buffer;
3805
3806 if (cpi->Source->y_stride < 0) {
3807 src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
3808 }
3809 }
3810 }
3811
3812 #endif
3813
3814 #ifdef OUTPUT_YUV_SRC
3815 vpx_write_yuv_frame(yuv_file, cpi->Source);
3816 #endif
3817
3818 do {
3819 vpx_clear_system_state();
3820
3821 vp8_set_quantizer(cpi, Q);
3822
3823 /* setup skip prob for costing in mode/mv decision */
3824 if (cpi->common.mb_no_coeff_skip) {
3825 cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
3826
3827 if (cm->frame_type != KEY_FRAME) {
3828 if (cpi->common.refresh_alt_ref_frame) {
3829 if (cpi->last_skip_false_probs[2] != 0) {
3830 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3831 }
3832
3833 /*
3834 if(cpi->last_skip_false_probs[2]!=0 && abs(Q-
3835 cpi->last_skip_probs_q[2])<=16 )
3836 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3837 else if (cpi->last_skip_false_probs[2]!=0)
3838 cpi->prob_skip_false = (cpi->last_skip_false_probs[2] +
3839 cpi->prob_skip_false ) / 2;
3840 */
3841 } else if (cpi->common.refresh_golden_frame) {
3842 if (cpi->last_skip_false_probs[1] != 0) {
3843 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3844 }
3845
3846 /*
3847 if(cpi->last_skip_false_probs[1]!=0 && abs(Q-
3848 cpi->last_skip_probs_q[1])<=16 )
3849 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3850 else if (cpi->last_skip_false_probs[1]!=0)
3851 cpi->prob_skip_false = (cpi->last_skip_false_probs[1] +
3852 cpi->prob_skip_false ) / 2;
3853 */
3854 } else {
3855 if (cpi->last_skip_false_probs[0] != 0) {
3856 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3857 }
3858
3859 /*
3860 if(cpi->last_skip_false_probs[0]!=0 && abs(Q-
3861 cpi->last_skip_probs_q[0])<=16 )
3862 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3863 else if(cpi->last_skip_false_probs[0]!=0)
3864 cpi->prob_skip_false = (cpi->last_skip_false_probs[0] +
3865 cpi->prob_skip_false ) / 2;
3866 */
3867 }
3868
3869         /* as this is for a cost estimate, let's make sure it does not
3870          * go to an extreme either way
3871 */
3872 if (cpi->prob_skip_false < 5) cpi->prob_skip_false = 5;
3873
3874 if (cpi->prob_skip_false > 250) cpi->prob_skip_false = 250;
3875
3876 if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref) {
3877 cpi->prob_skip_false = 1;
3878 }
3879 }
3880
3881 #if 0
3882
3883 if (cpi->pass != 1)
3884 {
3885 FILE *f = fopen("skip.stt", "a");
3886 fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
3887 fclose(f);
3888 }
3889
3890 #endif
3891 }
3892
3893 if (cm->frame_type == KEY_FRAME) {
3894 if (resize_key_frame(cpi)) {
3895 /* If the frame size has changed, need to reset Q, quantizer,
3896 * and background refresh.
3897 */
3898 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3899 if (cpi->cyclic_refresh_mode_enabled) {
3900 if (cpi->current_layer == 0) {
3901 cyclic_background_refresh(cpi, Q, 0);
3902 } else {
3903 disable_segmentation(cpi);
3904 }
3905 }
3906 // Reset the zero_last counter to 0 on key frame.
3907 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3908 memset(cpi->consec_zero_last_mvbias, 0,
3909 (cpi->common.mb_rows * cpi->common.mb_cols));
3910 vp8_set_quantizer(cpi, Q);
3911 }
3912
3913 vp8_setup_key_frame(cpi);
3914 }
3915
3916 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
3917 {
3918 if (cpi->oxcf.error_resilient_mode) cm->refresh_entropy_probs = 0;
3919
3920 if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
3921 if (cm->frame_type == KEY_FRAME) cm->refresh_entropy_probs = 1;
3922 }
3923
3924 if (cm->refresh_entropy_probs == 0) {
3925 /* save a copy for later refresh */
3926 memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
3927 }
3928
3929 vp8_update_coef_context(cpi);
3930
3931 vp8_update_coef_probs(cpi);
3932
3933 /* transform / motion compensation build reconstruction frame
3934 * +pack coef partitions
3935 */
3936 vp8_encode_frame(cpi);
3937
3938 /* cpi->projected_frame_size is not needed for RT mode */
3939 }
3940 #else
3941 /* transform / motion compensation build reconstruction frame */
3942 vp8_encode_frame(cpi);
3943
3944 if (cpi->pass == 0 && cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
3945 if (vp8_drop_encodedframe_overshoot(cpi, Q)) {
3946 vpx_clear_system_state();
3947 return;
3948 }
3949 if (cm->frame_type != KEY_FRAME)
3950 cpi->last_pred_err_mb =
3951 (int)(cpi->mb.prediction_error / cpi->common.MBs);
3952 }
3953
3954 cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
3955 cpi->projected_frame_size =
3956 (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
3957 #endif
3958 vpx_clear_system_state();
3959
3960 /* Test to see if the stats generated for this frame indicate that
3961 * we should have coded a key frame (assuming that we didn't)!
3962 */
3963
3964 if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME &&
3965 cpi->compressor_speed != 2) {
3966 #if !CONFIG_REALTIME_ONLY
3967 if (decide_key_frame(cpi)) {
3968 /* Reset all our sizing numbers and recode */
3969 cm->frame_type = KEY_FRAME;
3970
3971 vp8_pick_frame_size(cpi);
3972
3973 /* Clear the Alt reference frame active flag when we have
3974 * a key frame
3975 */
3976 cpi->source_alt_ref_active = 0;
3977
3978 // Set the loop filter deltas and segmentation map update
3979 setup_features(cpi);
3980
3981 vp8_restore_coding_context(cpi);
3982
3983 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3984
3985 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3986 &frame_over_shoot_limit);
3987
3988 /* Limit Q range for the adaptive loop. */
3989 bottom_index = cpi->active_best_quality;
3990 top_index = cpi->active_worst_quality;
3991 q_low = cpi->active_best_quality;
3992 q_high = cpi->active_worst_quality;
3993
3994 loop_count++;
3995 Loop = 1;
3996
3997 continue;
3998 }
3999 #endif
4000 }
4001
4002 vpx_clear_system_state();
4003
4004 if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
4005
4006     /* Are we overshooting and up against the limit of active max Q? */
4007 if (!cpi->rt_always_update_correction_factor &&
4008 ((cpi->pass != 2) ||
4009 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
4010 (Q == cpi->active_worst_quality) &&
4011 (cpi->active_worst_quality < cpi->worst_quality) &&
4012 (cpi->projected_frame_size > frame_over_shoot_limit)) {
4013 int over_size_percent =
4014 ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) /
4015 frame_over_shoot_limit;
4016
4017 /* If so is there any scope for relaxing it */
4018 while ((cpi->active_worst_quality < cpi->worst_quality) &&
4019 (over_size_percent > 0)) {
4020 cpi->active_worst_quality++;
4021 /* Assume 1 qstep = about 4% on frame size. */
4022 over_size_percent = (int)(over_size_percent * 0.96);
4023 }
4024 #if !CONFIG_REALTIME_ONLY
4025 top_index = cpi->active_worst_quality;
4026 #endif // !CONFIG_REALTIME_ONLY
4027 /* If we have updated the active max Q do not call
4028 * vp8_update_rate_correction_factors() this loop.
4029 */
4030 active_worst_qchanged = 1;
4031 } else {
4032 active_worst_qchanged = 0;
4033 }
4034
4035 #if CONFIG_REALTIME_ONLY
4036 Loop = 0;
4037 #else
4038 /* Special case handling for forced key frames */
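    /* This bisects Q between q_low and q_high so that the reconstructed key
     * frame error lands roughly between half and 7/8 of the ambient error. */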
4039 if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
4040 int last_q = Q;
4041 int kf_err = vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4042
4043 /* The key frame is not good enough */
4044 if (kf_err > ((cpi->ambient_err * 7) >> 3)) {
4045 /* Lower q_high */
4046 q_high = (Q > q_low) ? (Q - 1) : q_low;
4047
4048 /* Adjust Q */
4049 Q = (q_high + q_low) >> 1;
4050 }
4051 /* The key frame is much better than the previous frame */
4052 else if (kf_err < (cpi->ambient_err >> 1)) {
4053 /* Raise q_low */
4054 q_low = (Q < q_high) ? (Q + 1) : q_high;
4055
4056 /* Adjust Q */
4057 Q = (q_high + q_low + 1) >> 1;
4058 }
4059
4060 /* Clamp Q to upper and lower limits: */
4061 if (Q > q_high) {
4062 Q = q_high;
4063 } else if (Q < q_low) {
4064 Q = q_low;
4065 }
4066
4067 Loop = Q != last_q;
4068 }
4069
4070 /* Is the projected frame size out of range and are we allowed
4071 * to attempt to recode.
4072 */
4073 else if (recode_loop_test(cpi, frame_over_shoot_limit,
4074 frame_under_shoot_limit, Q, top_index,
4075 bottom_index)) {
4076 int last_q = Q;
4077 int Retries = 0;
4078
4079 /* Frame size out of permitted range. Update correction factor
4080 * & compute new Q to try...
4081 */
4082
4083 /* Frame is too large */
4084 if (cpi->projected_frame_size > cpi->this_frame_target) {
4085         /* Raise q_low to just above the current Q value */
4086 q_low = (Q < q_high) ? (Q + 1) : q_high;
4087
4088 /* If we are using over quant do the same for zbin_oq_low */
4089 if (cpi->mb.zbin_over_quant > 0) {
4090 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4091 ? (cpi->mb.zbin_over_quant + 1)
4092 : zbin_oq_high;
4093 }
4094
4095 if (undershoot_seen) {
4096 /* Update rate_correction_factor unless
4097 * cpi->active_worst_quality has changed.
4098 */
4099 if (!active_worst_qchanged) {
4100 vp8_update_rate_correction_factors(cpi, 1);
4101 }
4102
4103 Q = (q_high + q_low + 1) / 2;
4104
4105 /* Adjust cpi->zbin_over_quant (only allowed when Q
4106 * is max)
4107 */
4108 if (Q < MAXQ) {
4109 cpi->mb.zbin_over_quant = 0;
4110 } else {
4111 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4112 ? (cpi->mb.zbin_over_quant + 1)
4113 : zbin_oq_high;
4114 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4115 }
4116 } else {
4117 /* Update rate_correction_factor unless
4118 * cpi->active_worst_quality has changed.
4119 */
4120 if (!active_worst_qchanged) {
4121 vp8_update_rate_correction_factors(cpi, 0);
4122 }
4123
4124 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4125
4126 while (((Q < q_low) || (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
4127 (Retries < 10)) {
4128 vp8_update_rate_correction_factors(cpi, 0);
4129 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4130 Retries++;
4131 }
4132 }
4133
4134 overshoot_seen = 1;
4135 }
4136 /* Frame is too small */
4137 else {
4138 if (cpi->mb.zbin_over_quant == 0) {
4139 /* Lower q_high if not using over quant */
4140 q_high = (Q > q_low) ? (Q - 1) : q_low;
4141 } else {
4142 /* else lower zbin_oq_high */
4143 zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low)
4144 ? (cpi->mb.zbin_over_quant - 1)
4145 : zbin_oq_low;
4146 }
4147
4148 if (overshoot_seen) {
4149 /* Update rate_correction_factor unless
4150 * cpi->active_worst_quality has changed.
4151 */
4152 if (!active_worst_qchanged) {
4153 vp8_update_rate_correction_factors(cpi, 1);
4154 }
4155
4156 Q = (q_high + q_low) / 2;
4157
4158 /* Adjust cpi->zbin_over_quant (only allowed when Q
4159 * is max)
4160 */
4161 if (Q < MAXQ) {
4162 cpi->mb.zbin_over_quant = 0;
4163 } else {
4164 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4165 }
4166 } else {
4167 /* Update rate_correction_factor unless
4168 * cpi->active_worst_quality has changed.
4169 */
4170 if (!active_worst_qchanged) {
4171 vp8_update_rate_correction_factors(cpi, 0);
4172 }
4173
4174 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4175
4176 /* Special case reset for qlow for constrained quality.
4177 * This should only trigger where there is very substantial
4178 * undershoot on a frame and the auto cq level is above
4179            * the user passed in value.
4180 */
4181 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4182 (Q < q_low)) {
4183 q_low = Q;
4184 }
4185
4186 while (((Q > q_high) || (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
4187 (Retries < 10)) {
4188 vp8_update_rate_correction_factors(cpi, 0);
4189 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4190 Retries++;
4191 }
4192 }
4193
4194 undershoot_seen = 1;
4195 }
4196
4197 /* Clamp Q to upper and lower limits: */
4198 if (Q > q_high) {
4199 Q = q_high;
4200 } else if (Q < q_low) {
4201 Q = q_low;
4202 }
4203
4204 /* Clamp cpi->zbin_over_quant */
4205 cpi->mb.zbin_over_quant =
4206 (cpi->mb.zbin_over_quant < zbin_oq_low) ? zbin_oq_low
4207 : (cpi->mb.zbin_over_quant > zbin_oq_high) ? zbin_oq_high
4208 : cpi->mb.zbin_over_quant;
4209
4210 Loop = Q != last_q;
4211 } else {
4212 Loop = 0;
4213 }
4214 #endif // CONFIG_REALTIME_ONLY
4215
4216 if (cpi->is_src_frame_alt_ref) Loop = 0;
4217
4218 if (Loop == 1) {
4219 vp8_restore_coding_context(cpi);
4220 loop_count++;
4221 #if CONFIG_INTERNAL_STATS
4222 cpi->tot_recode_hits++;
4223 #endif
4224 }
4225 } while (Loop == 1);
4226
4227 #if defined(DROP_UNCODED_FRAMES)
4228 /* if there are no coded macroblocks at all drop this frame */
4229 if (cpi->common.MBs == cpi->mb.skip_true_count &&
4230 (cpi->drop_frame_count & 7) != 7 && cm->frame_type != KEY_FRAME) {
4231 cpi->common.current_video_frame++;
4232 cpi->frames_since_key++;
4233 cpi->drop_frame_count++;
4234 cpi->ext_refresh_frame_flags_pending = 0;
4235 // We advance the temporal pattern for dropped frames.
4236 cpi->temporal_pattern_counter++;
4237 return;
4238 }
4239 cpi->drop_frame_count = 0;
4240 #endif
4241
4242 #if 0
4243 /* Experimental code for lagged and one pass
4244 * Update stats used for one pass GF selection
4245 */
4246 {
4247 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
4248 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
4249 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
4250 }
4251 #endif
4252
4253 /* Special case code to reduce pulsing when key frames are forced at a
4254 * fixed interval. Note the reconstruction error if it is the frame before
4255    * the forced key frame.
4256 */
4257 if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
4258 cpi->ambient_err =
4259 vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4260 }
4261
4262 /* This frame's MVs are saved and will be used in next frame's MV predictor.
4263    * Last frame has one more line (added to the bottom) and one more column
4264    * (added to the right) than cm->mip. The edge elements are initialized to 0.
4265 */
4266 #if CONFIG_MULTI_RES_ENCODING
4267 if (!cpi->oxcf.mr_encoder_id && cm->show_frame)
4268 #else
4269 if (cm->show_frame) /* do not save for altref frame */
4270 #endif
4271 {
4272 int mb_row;
4273 int mb_col;
4274 /* Point to beginning of allocated MODE_INFO arrays. */
4275 MODE_INFO *tmp = cm->mip;
4276
4277 if (cm->frame_type != KEY_FRAME) {
4278 for (mb_row = 0; mb_row < cm->mb_rows + 1; ++mb_row) {
4279 for (mb_col = 0; mb_col < cm->mb_cols + 1; ++mb_col) {
4280 if (tmp->mbmi.ref_frame != INTRA_FRAME) {
4281 cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int =
4282 tmp->mbmi.mv.as_int;
4283 }
4284
4285 cpi->lf_ref_frame_sign_bias[mb_col +
4286 mb_row * (cm->mode_info_stride + 1)] =
4287 cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
4288 cpi->lf_ref_frame[mb_col + mb_row * (cm->mode_info_stride + 1)] =
4289 tmp->mbmi.ref_frame;
4290 tmp++;
4291 }
4292 }
4293 }
4294 }
4295
4296 /* Count last ref frame 0,0 usage on current encoded frame. */
4297 {
4298 int mb_row;
4299 int mb_col;
4300 /* Point to beginning of MODE_INFO arrays. */
4301 MODE_INFO *tmp = cm->mi;
4302
4303 cpi->zeromv_count = 0;
4304
4305 if (cm->frame_type != KEY_FRAME) {
4306 for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
4307 for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
4308 if (tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME) {
4309 cpi->zeromv_count++;
4310 }
4311 tmp++;
4312 }
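        /* The mode info arrays are allocated with a stride of mb_cols + 1,
         * so skip the extra element at the end of each row. */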
4313 tmp++;
4314 }
4315 }
4316 }
4317
4318 #if CONFIG_MULTI_RES_ENCODING
4319 vp8_cal_dissimilarity(cpi);
4320 #endif
4321
4322 /* Update the GF useage maps.
4323 * This is done after completing the compression of a frame when all
4324 * modes etc. are finalized but before loop filter
4325 */
4326 if (cpi->oxcf.number_of_layers == 1) {
4327 vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
4328 }
4329
4330 if (cm->frame_type == KEY_FRAME) cm->refresh_last_frame = 1;
4331
4332 #if 0
4333 {
4334 FILE *f = fopen("gfactive.stt", "a");
4335 fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
4336 fclose(f);
4337 }
4338 #endif
4339
4340 /* For inter frames the current default behavior is that when
4341 * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
4342 * This is purely an encoder decision at present.
4343 * Avoid this behavior when refresh flags are set by the user.
4344 */
4345 if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame &&
4346 !cpi->ext_refresh_frame_flags_pending) {
4347 cm->copy_buffer_to_arf = 2;
4348 } else {
4349 cm->copy_buffer_to_arf = 0;
4350 }
4351
4352 cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
4353
4354 #if CONFIG_TEMPORAL_DENOISING
4355   // Get some measure of the amount of noise, by measuring the (partial) mse
4356   // between source and denoised buffer, for the y channel. Partial refers to
4357   // computing the sse for a sub-sample of the frame (i.e., skip x blocks
4358   // along row/column), and only for blocks in that set that are in
4359   // consecutive ZEROMV_LAST mode.
4360   // Do this every ~8 frames, to further reduce complexity.
4361   // TODO(marpan): Keep this for now for the case
4362   // cpi->oxcf.noise_sensitivity < 4; it should be removed in favor of
4363   // the process_denoiser_mode_change() function
4364   // below.
4365 if (cpi->oxcf.noise_sensitivity > 0 && cpi->oxcf.noise_sensitivity < 4 &&
4366 !cpi->oxcf.screen_content_mode && cpi->frames_since_key % 8 == 0 &&
4367 cm->frame_type != KEY_FRAME) {
4368 cpi->mse_source_denoised = measure_square_diff_partial(
4369 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi);
4370 }
4371
4372 // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse
4373 // of source diff (between current and previous frame), and determine if we
4374 // should switch the denoiser mode. Sampling refers to computing the mse for
4375 // a sub-sample of the frame (i.e., skip x blocks along row/column), and
4376 // only for blocks in that set that have used ZEROMV LAST, along with some
4377 // constraint on the sum diff between blocks. This process is called every
4378 // ~8 frames, to further reduce complexity.
4379 if (cpi->oxcf.noise_sensitivity == 4 && !cpi->oxcf.screen_content_mode &&
4380 cpi->frames_since_key % 8 == 0 && cm->frame_type != KEY_FRAME) {
4381 process_denoiser_mode_change(cpi);
4382 }
4383 #endif
4384
4385 #ifdef OUTPUT_YUV_SKINMAP
4386 if (cpi->common.current_video_frame > 1) {
4387 vp8_compute_skin_map(cpi, yuv_skinmap_file);
4388 }
4389 #endif
4390
4391 #if CONFIG_MULTITHREAD
4392 if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
4393 /* start loopfilter in separate thread */
4394 sem_post(&cpi->h_event_start_lpf);
4395 cpi->b_lpf_running = 1;
4396 /* wait for the filter_level to be picked so that we can continue with
4397 * stream packing */
4398 sem_wait(&cpi->h_event_end_lpf);
4399 } else
4400 #endif
4401 {
4402 vp8_loopfilter_frame(cpi, cm);
4403 }
4404
4405 update_reference_frames(cpi);
4406
4407 #ifdef OUTPUT_YUV_DENOISED
4408 vpx_write_yuv_frame(yuv_denoised_file,
4409 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
4410 #endif
4411
4412 #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
4413 if (cpi->oxcf.error_resilient_mode) {
4414 cm->refresh_entropy_probs = 0;
4415 }
4416 #endif
4417
4418 /* build the bitstream */
4419 vp8_pack_bitstream(cpi, dest, dest_end, size);
4420
4421 /* Move storing frame_type out of the above loop since it is also
4422 * needed in motion search besides loopfilter */
4423 cm->last_frame_type = cm->frame_type;
4424
4425 /* Update rate control heuristics */
4426 cpi->total_byte_count += (*size);
4427 cpi->projected_frame_size = (int)(*size) << 3;
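  /* *size is in bytes; projected_frame_size is tracked in bits. */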
4428
4429 if (cpi->oxcf.number_of_layers > 1) {
4430 unsigned int i;
4431 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4432 cpi->layer_context[i].total_byte_count += (*size);
4433 }
4434 }
4435
4436 if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 2);
4437
4438 cpi->last_q[cm->frame_type] = cm->base_qindex;
4439
4440 if (cm->frame_type == KEY_FRAME) {
4441 vp8_adjust_key_frame_context(cpi);
4442 }
4443
4444 /* Keep a record of ambient average Q. */
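  /* Exponentially weighted: 3/4 previous average plus 1/4 current
   * base_qindex, rounded. E.g. an average of 40 and base_qindex of 60 gives
   * (2 + 120 + 60) >> 2 = 45. */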
4445 if (cm->frame_type != KEY_FRAME) {
4446 cpi->avg_frame_qindex =
4447 (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
4448 }
4449
4450 /* Keep a record from which we can calculate the average Q excluding
4451 * GF updates and key frames
4452 */
4453 if ((cm->frame_type != KEY_FRAME) &&
4454 ((cpi->oxcf.number_of_layers > 1) ||
4455 (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame))) {
4456 cpi->ni_frames++;
4457
4458 /* Calculate the average Q for normal inter frames (not key or GFU
4459 * frames).
4460 */
4461 if (cpi->pass == 2) {
4462 cpi->ni_tot_qi += Q;
4463 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4464 } else {
4465 /* Damp value for first few frames */
4466 if (cpi->ni_frames > 150) {
4467 cpi->ni_tot_qi += Q;
4468 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4469 }
4470 /* For one pass, early in the clip ... average the current frame Q
4471 * value with the worstq entered by the user as a dampening measure
4472 */
4473 else {
4474 cpi->ni_tot_qi += Q;
4475 cpi->ni_av_qi =
4476 ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
4477 }
4478
4479 /* If the average Q is higher than what was used in the last
4480 * frame (after going through the recode loop to keep the frame
4481 * size within range) then use the last frame value - 1. The -1
4482 * is designed to stop Q and hence the data rate, from
4483 * progressively falling away during difficult sections, but at
4484        * the same time reduce the number of iterations around the
4485 * recode loop.
4486 */
4487 if (Q > cpi->ni_av_qi) cpi->ni_av_qi = Q - 1;
4488 }
4489 }
4490
4491 /* Update the buffer level variable. */
4492 /* Non-viewable frames are a special case and are treated as pure overhead. */
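  /* A frame that is not shown (an altref buffer update) consumes bits
   * without being credited a frame interval's worth of bandwidth. */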
4493 if (!cm->show_frame) {
4494 cpi->bits_off_target -= cpi->projected_frame_size;
4495 } else {
4496 cpi->bits_off_target +=
4497 cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
4498 }
4499
4500 /* Clip the buffer level to the maximum specified buffer size */
4501 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
4502 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
4503 }
4504
4505 // Don't let the buffer level go below some threshold, given here
4506 // by -|maximum_buffer_size|. For now we only do this for
4507 // screen content input.
4508 if (cpi->oxcf.screen_content_mode &&
4509 cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size) {
4510 cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size;
4511 }
4512
4513   /* Rolling monitors of whether we are over- or under-spending, used to
4514    * help regulate min and max Q in two pass.
4515 */
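  /* The short rolling averages weight history 3:1 against the new value and
   * the long rolling averages weight it 31:1; ROUND64_POWER_OF_TWO rounds to
   * the nearest integer. */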
4516 cpi->rolling_target_bits = (int)ROUND64_POWER_OF_TWO(
4517 (int64_t)cpi->rolling_target_bits * 3 + cpi->this_frame_target, 2);
4518 cpi->rolling_actual_bits = (int)ROUND64_POWER_OF_TWO(
4519 (int64_t)cpi->rolling_actual_bits * 3 + cpi->projected_frame_size, 2);
4520 cpi->long_rolling_target_bits = (int)ROUND64_POWER_OF_TWO(
4521 (int64_t)cpi->long_rolling_target_bits * 31 + cpi->this_frame_target, 5);
4522 cpi->long_rolling_actual_bits = (int)ROUND64_POWER_OF_TWO(
4523 (int64_t)cpi->long_rolling_actual_bits * 31 + cpi->projected_frame_size,
4524 5);
4525
4526 /* Actual bits spent */
4527 cpi->total_actual_bits += cpi->projected_frame_size;
4528
4529 #if 0 && CONFIG_INTERNAL_STATS
4530 /* Debug stats */
4531 cpi->total_target_vs_actual +=
4532 (cpi->this_frame_target - cpi->projected_frame_size);
4533 #endif
4534
4535 cpi->buffer_level = cpi->bits_off_target;
4536
4537 /* Propagate values to higher temporal layers */
4538 if (cpi->oxcf.number_of_layers > 1) {
4539 unsigned int i;
4540
4541 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4542 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4543 int bits_off_for_this_layer = (int)round(
4544 lc->target_bandwidth / lc->framerate - cpi->projected_frame_size);
4545
4546 lc->bits_off_target += bits_off_for_this_layer;
4547
4548 /* Clip buffer level to maximum buffer size for the layer */
4549 if (lc->bits_off_target > lc->maximum_buffer_size) {
4550 lc->bits_off_target = lc->maximum_buffer_size;
4551 }
4552
4553 lc->total_actual_bits += cpi->projected_frame_size;
4554 lc->total_target_vs_actual += bits_off_for_this_layer;
4555 lc->buffer_level = lc->bits_off_target;
4556 }
4557 }
4558
4559 /* Update bits left to the kf and gf groups to account for overshoot
4560 * or undershoot on these frames
4561 */
4562 if (cm->frame_type == KEY_FRAME) {
4563 cpi->twopass.kf_group_bits +=
4564 cpi->this_frame_target - cpi->projected_frame_size;
4565
4566 if (cpi->twopass.kf_group_bits < 0) cpi->twopass.kf_group_bits = 0;
4567 } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) {
4568 cpi->twopass.gf_group_bits +=
4569 cpi->this_frame_target - cpi->projected_frame_size;
4570
4571 if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0;
4572 }
4573
4574 if (cm->frame_type != KEY_FRAME) {
4575 if (cpi->common.refresh_alt_ref_frame) {
4576 cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
4577 cpi->last_skip_probs_q[2] = cm->base_qindex;
4578 } else if (cpi->common.refresh_golden_frame) {
4579 cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
4580 cpi->last_skip_probs_q[1] = cm->base_qindex;
4581 } else {
4582 cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
4583 cpi->last_skip_probs_q[0] = cm->base_qindex;
4584
4585 /* update the baseline */
4586 cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
4587 }
4588 }
4589
4590 #if 0 && CONFIG_INTERNAL_STATS
4591 {
4592 FILE *f = fopen("tmp.stt", "a");
4593
4594 vpx_clear_system_state();
4595
4596 if (cpi->twopass.total_left_stats.coded_error != 0.0)
4597 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4598 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4599 "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
4600 cpi->common.current_video_frame, cpi->this_frame_target,
4601 cpi->projected_frame_size,
4602 (cpi->projected_frame_size - cpi->this_frame_target),
4603 cpi->total_target_vs_actual,
4604 cpi->buffer_level,
4605 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4606 cpi->total_actual_bits, cm->base_qindex,
4607 cpi->active_best_quality, cpi->active_worst_quality,
4608 cpi->ni_av_qi, cpi->cq_target_quality,
4609 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4610 cm->frame_type, cpi->gfu_boost,
4611 cpi->twopass.est_max_qcorrection_factor,
4612 cpi->twopass.bits_left,
4613 cpi->twopass.total_left_stats.coded_error,
4614 (double)cpi->twopass.bits_left /
4615 cpi->twopass.total_left_stats.coded_error,
4616 cpi->tot_recode_hits);
4617 else
4618 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4619 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4620 "%8.2lf %"PRId64" %10.3lf %8d\n",
4621 cpi->common.current_video_frame, cpi->this_frame_target,
4622 cpi->projected_frame_size,
4623 (cpi->projected_frame_size - cpi->this_frame_target),
4624 cpi->total_target_vs_actual,
4625 cpi->buffer_level,
4626 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4627 cpi->total_actual_bits, cm->base_qindex,
4628 cpi->active_best_quality, cpi->active_worst_quality,
4629 cpi->ni_av_qi, cpi->cq_target_quality,
4630 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4631 cm->frame_type, cpi->gfu_boost,
4632 cpi->twopass.est_max_qcorrection_factor,
4633 cpi->twopass.bits_left,
4634 cpi->twopass.total_left_stats.coded_error,
4635 cpi->tot_recode_hits);
4636
4637 fclose(f);
4638
4639 {
4640 FILE *fmodes = fopen("Modes.stt", "a");
4641
4642 fprintf(fmodes, "%6d:%1d:%1d:%1d ",
4643 cpi->common.current_video_frame,
4644 cm->frame_type, cm->refresh_golden_frame,
4645 cm->refresh_alt_ref_frame);
4646
4647 fprintf(fmodes, "\n");
4648
4649 fclose(fmodes);
4650 }
4651 }
4652
4653 #endif
4654
4655 cpi->ext_refresh_frame_flags_pending = 0;
4656
4657 if (cm->refresh_golden_frame == 1) {
4658 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
4659 } else {
4660 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_GOLDEN;
4661 }
4662
4663 if (cm->refresh_alt_ref_frame == 1) {
4664 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
4665 } else {
4666 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_ALTREF;
4667 }
4668
4669 if (cm->refresh_last_frame & cm->refresh_golden_frame) { /* both refreshed */
4670 cpi->gold_is_last = 1;
4671 } else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) {
4672 /* 1 refreshed but not the other */
4673 cpi->gold_is_last = 0;
4674 }
4675
4676 if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) { /* both refreshed */
4677 cpi->alt_is_last = 1;
4678 } else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) {
4679 /* 1 refreshed but not the other */
4680 cpi->alt_is_last = 0;
4681 }
4682
4683 if (cm->refresh_alt_ref_frame &
4684 cm->refresh_golden_frame) { /* both refreshed */
4685 cpi->gold_is_alt = 1;
4686 } else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) {
4687 /* 1 refreshed but not the other */
4688 cpi->gold_is_alt = 0;
4689 }
4690
4691 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
4692
4693 if (cpi->gold_is_last) cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
4694
4695 if (cpi->alt_is_last) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4696
4697 if (cpi->gold_is_alt) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4698
4699 if (!cpi->oxcf.error_resilient_mode) {
4700 if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame &&
4701 (cm->frame_type != KEY_FRAME)) {
4702 /* Update the alternate reference frame stats as appropriate. */
4703 update_alt_ref_frame_stats(cpi);
4704 } else {
4705 /* Update the Golden frame stats as appropriate. */
4706 update_golden_frame_stats(cpi);
4707 }
4708 }
4709
4710 if (cm->frame_type == KEY_FRAME) {
4711 /* Tell the caller that the frame was coded as a key frame */
4712 *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
4713
4714 /* As this frame is a key frame the next defaults to an inter frame. */
4715 cm->frame_type = INTER_FRAME;
4716
4717 cpi->last_frame_percent_intra = 100;
4718 } else {
4719 *frame_flags = cm->frame_flags & ~FRAMEFLAGS_KEY;
4720
4721 cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
4722 }
4723
4724 /* Clear the one shot update flags for segmentation map and mode/ref
4725 * loop filter deltas.
4726 */
4727 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
4728 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
4729 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
4730
4731   /* Don't increment frame counters if this was an altref buffer update
4732 * not a real frame
4733 */
4734 if (cm->show_frame) {
4735 cm->current_video_frame++;
4736 cpi->frames_since_key++;
4737 cpi->temporal_pattern_counter++;
4738 }
4739
4740 #if 0
4741 {
4742 char filename[512];
4743 FILE *recon_file;
4744 sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
4745 recon_file = fopen(filename, "wb");
4746 fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
4747 cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
4748 fclose(recon_file);
4749 }
4750 #endif
4751
4752 /* DEBUG */
4753 /* vpx_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
4754 }
4755 #if !CONFIG_REALTIME_ONLY
4756 static void Pass2Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
4757 unsigned char *dest_end, unsigned int *frame_flags) {
4758 if (!cpi->common.refresh_alt_ref_frame) vp8_second_pass(cpi);
4759
4760 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
4761 cpi->twopass.bits_left -= 8 * (int)(*size);
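  /* The coded frame size is charged against the two-pass bit budget in bits
   * (8 per byte). For non-altref frames a per-frame share of the VBR minimum
   * section rate is credited back below. */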
4762
4763 if (!cpi->common.refresh_alt_ref_frame) {
4764 double two_pass_min_rate =
4765 (double)(cpi->oxcf.target_bandwidth *
4766 cpi->oxcf.two_pass_vbrmin_section / 100);
4767 cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
4768 }
4769 }
4770 #endif
4771
4772 int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags,
4773 YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
4774 int64_t end_time) {
4775 struct vpx_usec_timer timer;
4776 int res = 0;
4777
4778 vpx_usec_timer_start(&timer);
4779
4780 /* Reinit the lookahead buffer if the frame size changes */
4781 if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height) {
4782 assert(cpi->oxcf.lag_in_frames < 2);
4783 dealloc_raw_frame_buffers(cpi);
4784 alloc_raw_frame_buffers(cpi);
4785 }
4786
4787 if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags,
4788 cpi->active_map_enabled ? cpi->active_map : NULL)) {
4789 res = -1;
4790 }
4791 vpx_usec_timer_mark(&timer);
4792 cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
4793
4794 return res;
4795 }
4796
4797 static int frame_is_reference(const VP8_COMP *cpi) {
4798 const VP8_COMMON *cm = &cpi->common;
4799 const MACROBLOCKD *xd = &cpi->mb.e_mbd;
4800
4801 return cm->frame_type == KEY_FRAME || cm->refresh_last_frame ||
4802 cm->refresh_golden_frame || cm->refresh_alt_ref_frame ||
4803 cm->copy_buffer_to_gf || cm->copy_buffer_to_arf ||
4804 cm->refresh_entropy_probs || xd->mode_ref_lf_delta_update ||
4805 xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
4806 }
4807
4808 int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
4809 size_t *size, unsigned char *dest,
4810 unsigned char *dest_end, int64_t *time_stamp,
4811 int64_t *time_end, int flush) {
4812 VP8_COMMON *cm;
4813 struct vpx_usec_timer tsctimer;
4814 struct vpx_usec_timer ticktimer;
4815 struct vpx_usec_timer cmptimer;
4816 YV12_BUFFER_CONFIG *force_src_buffer = NULL;
4817
4818 if (!cpi) return -1;
4819
4820 cm = &cpi->common;
4821
4822 vpx_usec_timer_start(&cmptimer);
4823
4824 cpi->source = NULL;
4825
4826 #if !CONFIG_REALTIME_ONLY
4827 /* Should we code an alternate reference frame */
4828 if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.play_alternate &&
4829 cpi->source_alt_ref_pending) {
4830 if ((cpi->source = vp8_lookahead_peek(
4831 cpi->lookahead, cpi->frames_till_gf_update_due, PEEK_FORWARD))) {
4832 cpi->alt_ref_source = cpi->source;
4833 if (cpi->oxcf.arnr_max_frames > 0) {
4834 vp8_temporal_filter_prepare_c(cpi, cpi->frames_till_gf_update_due);
4835 force_src_buffer = &cpi->alt_ref_buffer;
4836 }
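      /* With ARNR filtering enabled, the temporally filtered result in
       * alt_ref_buffer is used as the source for this alt-ref encode
       * (via force_src_buffer). */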
4837 cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
4838 cm->refresh_alt_ref_frame = 1;
4839 cm->refresh_golden_frame = 0;
4840 cm->refresh_last_frame = 0;
4841 cm->show_frame = 0;
4842 /* Clear Pending alt Ref flag. */
4843 cpi->source_alt_ref_pending = 0;
4844 cpi->is_src_frame_alt_ref = 0;
4845 }
4846 }
4847 #endif
4848
4849 if (!cpi->source) {
4850 /* Read last frame source if we are encoding first pass. */
4851 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4852 if ((cpi->last_source =
4853 vp8_lookahead_peek(cpi->lookahead, 1, PEEK_BACKWARD)) == NULL) {
4854 return -1;
4855 }
4856 }
4857
4858 if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) {
4859 cm->show_frame = 1;
4860
4861 cpi->is_src_frame_alt_ref =
4862 cpi->alt_ref_source && (cpi->source == cpi->alt_ref_source);
4863
4864 if (cpi->is_src_frame_alt_ref) cpi->alt_ref_source = NULL;
4865 }
4866 }
4867
4868 if (cpi->source) {
4869 cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
4870 cpi->un_scaled_source = cpi->Source;
4871 *time_stamp = cpi->source->ts_start;
4872 *time_end = cpi->source->ts_end;
4873 *frame_flags = cpi->source->flags;
4874
4875 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4876 cpi->last_frame_unscaled_source = &cpi->last_source->img;
4877 }
4878 } else {
4879 *size = 0;
4880 #if !CONFIG_REALTIME_ONLY
4881
4882 if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
4883 vp8_end_first_pass(cpi); /* get last stats packet */
4884 cpi->twopass.first_pass_done = 1;
4885 }
4886
4887 #endif
4888
4889 return -1;
4890 }
4891
4892 if (cpi->source->ts_start < cpi->first_time_stamp_ever) {
4893 cpi->first_time_stamp_ever = cpi->source->ts_start;
4894 cpi->last_end_time_stamp_seen = cpi->source->ts_start;
4895 }
4896
4897 /* adjust frame rates based on timestamps given */
4898 if (cm->show_frame) {
4899 int64_t this_duration;
4900 int step = 0;
4901
4902 if (cpi->source->ts_start == cpi->first_time_stamp_ever) {
4903 this_duration = cpi->source->ts_end - cpi->source->ts_start;
4904 step = 1;
4905 } else {
4906 int64_t last_duration;
4907
4908 this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
4909 last_duration = cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
4910 // Cap this to avoid overflow of (this_duration - last_duration) * 10
4911 this_duration = VPXMIN(this_duration, INT64_MAX / 10);
4912 /* do a step update if the duration changes by 10% */
4913 if (last_duration) {
4914 step = (int)(((this_duration - last_duration) * 10 / last_duration));
4915 }
4916 }
4917
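/* If the frame duration changed by roughly 10% or more (step != 0), reset the
 * reference frame rate directly from this frame's duration. Otherwise fold
 * the new duration into a running average over (up to) the last second:
 * avg' = avg * (interval - avg + this_duration) / interval, which replaces
 * one average-duration's worth of the window with the new sample.
 */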
4918 if (this_duration) {
4919 if (step) {
4920 cpi->ref_framerate = 10000000.0 / this_duration;
4921 } else {
4922 double avg_duration, interval;
4923
4924 /* Average this frame's rate into the last second's average
4925 * frame rate. If we haven't seen 1 second yet, then average
4926 * over the whole interval seen.
4927 */
4928 interval = (double)(cpi->source->ts_end - cpi->first_time_stamp_ever);
4929 if (interval > 10000000.0) interval = 10000000;
4930
4931 avg_duration = 10000000.0 / cpi->ref_framerate;
4932 avg_duration *= (interval - avg_duration + this_duration);
4933 avg_duration /= interval;
4934
4935 cpi->ref_framerate = 10000000.0 / avg_duration;
4936 }
4937 #if CONFIG_MULTI_RES_ENCODING
4938 if (cpi->oxcf.mr_total_resolutions > 1) {
4939 LOWER_RES_FRAME_INFO *low_res_frame_info =
4940 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
4941 // Frame rate should be the same for all spatial layers in
4942 // multi-res-encoding (simulcast), so we constrain the frame rate of
4943 // the higher layers to be that of the lowest resolution. This is
4944 // needed as the application may decide to skip encoding a high layer and
4945 // then start again, in which case a big jump in time-stamps will
4946 // be received for that high layer, which will yield an incorrect
4947 // frame rate (from time-stamp adjustment in above calculation).
4948 if (cpi->oxcf.mr_encoder_id) {
4949 if (!low_res_frame_info->skip_encoding_base_stream)
4950 cpi->ref_framerate = low_res_frame_info->low_res_framerate;
4951 } else {
4952 // Keep track of frame rate for lowest resolution.
4953 low_res_frame_info->low_res_framerate = cpi->ref_framerate;
4954 // The base stream is being encoded so set skip flag to 0.
4955 low_res_frame_info->skip_encoding_base_stream = 0;
4956 }
4957 }
4958 #endif
4959 if (cpi->oxcf.number_of_layers > 1) {
4960 unsigned int i;
4961
4962 /* Update frame rates for each layer */
4963 assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
4964 for (i = 0; i < cpi->oxcf.number_of_layers && i < VPX_TS_MAX_LAYERS;
4965 ++i) {
4966 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4967 lc->framerate = cpi->ref_framerate / cpi->oxcf.rate_decimator[i];
4968 }
4969 } else {
4970 vp8_new_framerate(cpi, cpi->ref_framerate);
4971 }
4972 }
4973
4974 cpi->last_time_stamp_seen = cpi->source->ts_start;
4975 cpi->last_end_time_stamp_seen = cpi->source->ts_end;
4976 }
4977
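/* For temporal layering, pick the layer either from the externally supplied
 * temporal_layer_id (if >= 0) or from the configured periodic layer pattern,
 * then restore that layer's rate-control state and frame rate.
 */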
4978 if (cpi->oxcf.number_of_layers > 1) {
4979 int layer;
4980
4981 vp8_update_layer_contexts(cpi);
4982
4983 /* Restore layer specific context & set frame rate */
4984 if (cpi->temporal_layer_id >= 0) {
4985 layer = cpi->temporal_layer_id;
4986 } else {
4987 layer =
4988 cpi->oxcf
4989 .layer_id[cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
4990 }
4991 vp8_restore_layer_context(cpi, layer);
4992 vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
4993 }
4994
4995 if (cpi->compressor_speed == 2) {
4996 vpx_usec_timer_start(&tsctimer);
4997 vpx_usec_timer_start(&ticktimer);
4998 }
4999
5000 cpi->lf_zeromv_pct = (cpi->zeromv_count * 100) / cm->MBs;
5001
5002 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
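/* Carve the output buffer up front for on-the-fly bitpacking: roughly 1/10 of
 * dest is reserved for the first (control) partition and the remaining ~9/10
 * is split evenly across the token partitions.
 */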
5003 {
5004 int i;
5005 const int num_part = (1 << cm->multi_token_partition);
5006 /* the available bytes in dest */
5007 const unsigned long dest_size = dest_end - dest;
5008 const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);
5009
5010 unsigned char *dp = dest;
5011
5012 cpi->partition_d[0] = dp;
5013 dp += dest_size / 10; /* reserve 1/10 for control partition */
5014 cpi->partition_d_end[0] = dp;
5015
5016 for (i = 0; i < num_part; ++i) {
5017 cpi->partition_d[i + 1] = dp;
5018 dp += tok_part_buff_size;
5019 cpi->partition_d_end[i + 1] = dp;
5020 }
5021 }
5022 #endif
5023
5024 /* start with a 0 size frame */
5025 *size = 0;
5026
5027 /* Clear down mmx registers */
5028 vpx_clear_system_state();
5029
5030 cm->frame_type = INTER_FRAME;
5031 cm->frame_flags = *frame_flags;
5032
5033 #if 0
5034
5035 if (cm->refresh_alt_ref_frame)
5036 {
5037 cm->refresh_golden_frame = 0;
5038 cm->refresh_last_frame = 0;
5039 }
5040 else
5041 {
5042 cm->refresh_golden_frame = 0;
5043 cm->refresh_last_frame = 1;
5044 }
5045
5046 #endif
5047 /* find a free buffer for the new frame */
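/* A buffer whose reference flags are all clear is not referenced by the last,
 * golden or alt-ref frame, so it can hold the new reconstruction. */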
5048 {
5049 int i = 0;
5050 for (; i < NUM_YV12_BUFFERS; ++i) {
5051 if (!cm->yv12_fb[i].flags) {
5052 cm->new_fb_idx = i;
5053 break;
5054 }
5055 }
5056
5057 assert(i < NUM_YV12_BUFFERS);
5058 }
5059 switch (cpi->pass) {
5060 #if !CONFIG_REALTIME_ONLY
5061 case 1: Pass1Encode(cpi); break;
5062 case 2: Pass2Encode(cpi, size, dest, dest_end, frame_flags); break;
5063 #endif // !CONFIG_REALTIME_ONLY
5064 default:
5065 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
5066 break;
5067 }
5068
5069 if (cpi->compressor_speed == 2) {
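/* Real-time mode (compressor_speed == 2): track per-frame timing with 7/8
 * exponential moving averages. duration2 (half the measured tick time) is
 * used as a rough proxy for the time spent in mode selection.
 */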
5070 unsigned int duration, duration2;
5071 vpx_usec_timer_mark(&tsctimer);
5072 vpx_usec_timer_mark(&ticktimer);
5073
5074 duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
5075 duration2 = (unsigned int)((double)duration / 2);
5076
5077 if (cm->frame_type != KEY_FRAME) {
5078 if (cpi->avg_encode_time == 0) {
5079 cpi->avg_encode_time = duration;
5080 } else {
5081 cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
5082 }
5083 }
5084
5085 if (duration2) {
5086 {
5087 if (cpi->avg_pick_mode_time == 0) {
5088 cpi->avg_pick_mode_time = duration2;
5089 } else {
5090 cpi->avg_pick_mode_time =
5091 (7 * cpi->avg_pick_mode_time + duration2) >> 3;
5092 }
5093 }
5094 }
5095 }
5096
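/* If the frame was coded without persisting entropy updates, restore the
 * frame context from the saved copy so the next frame starts from the
 * correct probabilities.
 */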
5097 if (cm->refresh_entropy_probs == 0) {
5098 memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
5099 }
5100
5101 /* Save the contexts separately for alt ref, gold and last. */
5102 /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
5103 if (cm->refresh_alt_ref_frame) memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));
5104
5105 if (cm->refresh_golden_frame) memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));
5106
5107 if (cm->refresh_last_frame) memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
5108
5109 /* If it's a dropped frame, honor the refresh requests on subsequent frames. */
5110 if (*size > 0) {
5111 cpi->droppable = !frame_is_reference(cpi);
5112
5113 /* return to normal state */
5114 cm->refresh_entropy_probs = 1;
5115 cm->refresh_alt_ref_frame = 0;
5116 cm->refresh_golden_frame = 0;
5117 cm->refresh_last_frame = 1;
5118 cm->frame_type = INTER_FRAME;
5119 }
5120
5121 /* Save layer specific state */
5122 if (cpi->oxcf.number_of_layers > 1) vp8_save_layer_context(cpi);
5123
5124 vpx_usec_timer_mark(&cmptimer);
5125 cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
5126
5127 if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) {
5128 generate_psnr_packet(cpi);
5129 }
5130
5131 #if CONFIG_INTERNAL_STATS
5132
5133 if (cpi->pass != 1) {
5134 cpi->bytes += *size;
5135
5136 if (cm->show_frame) {
5137 cpi->common.show_frame_mi = cpi->common.mi;
5138 cpi->count++;
5139
5140 if (cpi->b_calculate_psnr) {
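/* Accumulate per-plane and combined PSNR for the internal stats report:
 * sum squared errors per plane, convert with vpx_sse_to_psnr() (peak 255),
 * and add to the running totals. Chroma planes are 4:2:0, hence the
 * (width + 1) / 2 by (height + 1) / 2 sample counts.
 */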
5141 uint64_t ye, ue, ve;
5142 double frame_psnr;
5143 YV12_BUFFER_CONFIG *orig = cpi->Source;
5144 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
5145 unsigned int y_width = cpi->common.Width;
5146 unsigned int y_height = cpi->common.Height;
5147 unsigned int uv_width = (y_width + 1) / 2;
5148 unsigned int uv_height = (y_height + 1) / 2;
5149 int y_samples = y_height * y_width;
5150 int uv_samples = uv_height * uv_width;
5151 int t_samples = y_samples + 2 * uv_samples;
5152 double sq_error;
5153
5154 ye = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
5155 recon->y_stride, y_width, y_height);
5156
5157 ue = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
5158 recon->uv_stride, uv_width, uv_height);
5159
5160 ve = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
5161 recon->uv_stride, uv_width, uv_height);
5162
5163 sq_error = (double)(ye + ue + ve);
5164
5165 frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);
5166
5167 cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5168 cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5169 cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5170 cpi->total_sq_error += sq_error;
5171 cpi->total += frame_psnr;
5172 #if CONFIG_POSTPROC
5173 {
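/* Repeat the measurement on a deblocked copy of the frame so that
 * post-processed PSNR and SSIM can be reported alongside the raw values.
 */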
5174 YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
5175 double sq_error2;
5176 double frame_psnr2, frame_ssim2 = 0;
5177 double weight = 0;
5178
5179 vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer,
5180 cm->filter_level * 10 / 6);
5181 vpx_clear_system_state();
5182
5183 ye = calc_plane_error(orig->y_buffer, orig->y_stride, pp->y_buffer,
5184 pp->y_stride, y_width, y_height);
5185
5186 ue = calc_plane_error(orig->u_buffer, orig->uv_stride, pp->u_buffer,
5187 pp->uv_stride, uv_width, uv_height);
5188
5189 ve = calc_plane_error(orig->v_buffer, orig->uv_stride, pp->v_buffer,
5190 pp->uv_stride, uv_width, uv_height);
5191
5192 sq_error2 = (double)(ye + ue + ve);
5193
5194 frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);
5195
5196 cpi->totalp_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5197 cpi->totalp_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5198 cpi->totalp_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5199 cpi->total_sq_error2 += sq_error2;
5200 cpi->totalp += frame_psnr2;
5201
5202 frame_ssim2 =
5203 vpx_calc_ssim(cpi->Source, &cm->post_proc_buffer, &weight);
5204
5205 cpi->summed_quality += frame_ssim2 * weight;
5206 cpi->summed_weights += weight;
5207
5208 if (cpi->oxcf.number_of_layers > 1) {
5209 unsigned int i;
5210
5211 for (i = cpi->current_layer; i < cpi->oxcf.number_of_layers; ++i) {
5212 cpi->frames_in_layer[i]++;
5213
5214 cpi->bytes_in_layer[i] += *size;
5215 cpi->sum_psnr[i] += frame_psnr;
5216 cpi->sum_psnr_p[i] += frame_psnr2;
5217 cpi->total_error2[i] += sq_error;
5218 cpi->total_error2_p[i] += sq_error2;
5219 cpi->sum_ssim[i] += frame_ssim2 * weight;
5220 cpi->sum_weights[i] += weight;
5221 }
5222 }
5223 }
5224 #endif
5225 }
5226 }
5227 }
5228
5229 #if 0
5230
5231 if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
5232 {
5233 skiptruecount += cpi->skip_true_count;
5234 skipfalsecount += cpi->skip_false_count;
5235 }
5236
5237 #endif
5238 #if 0
5239
5240 if (cpi->pass != 1)
5241 {
5242 FILE *f = fopen("skip.stt", "a");
5243 fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);
5244
5245 if (cpi->is_src_frame_alt_ref == 1)
5246 fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size);
5247
5248 fclose(f);
5249 }
5250
5251 #endif
5252 #endif
5253
5254 cpi->common.error.setjmp = 0;
5255
5256 #if CONFIG_MULTITHREAD
5257 /* wait for the loop filter thread to finish */
5258 if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) && cpi->b_lpf_running) {
5259 sem_wait(&cpi->h_event_end_lpf);
5260 cpi->b_lpf_running = 0;
5261 }
5262 #endif
5263
5264 return 0;
5265 }
5266
5267 int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest,
5268 vp8_ppflags_t *flags) {
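/* Alt-ref frames are never displayed, so refuse to return a preview for
 * them. Without postproc the preview is a shallow copy of frame_to_show
 * with the display dimensions patched in.
 */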
5269 if (cpi->common.refresh_alt_ref_frame) {
5270 return -1;
5271 } else {
5272 int ret;
5273
5274 #if CONFIG_POSTPROC
5275 cpi->common.show_frame_mi = cpi->common.mi;
5276 ret = vp8_post_proc_frame(&cpi->common, dest, flags);
5277 #else
5278 (void)flags;
5279
5280 if (cpi->common.frame_to_show) {
5281 *dest = *cpi->common.frame_to_show;
5282 dest->y_width = cpi->common.Width;
5283 dest->y_height = cpi->common.Height;
5284 dest->uv_height = cpi->common.Height / 2;
5285 ret = 0;
5286 } else {
5287 ret = -1;
5288 }
5289
5290 #endif
5291 vpx_clear_system_state();
5292 return ret;
5293 }
5294 }
5295
5296 int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
5297 unsigned int cols, int delta_q[4], int delta_lf[4],
5298 unsigned int threshold[4]) {
5299 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
5300 int internal_delta_q[MAX_MB_SEGMENTS];
5301 const int range = 63;
5302 int i;
5303
5304 // Check that the number of rows and columns matches the frame size.
5305 if (cpi->common.mb_rows != (int)rows || cpi->common.mb_cols != (int)cols) {
5306 return -1;
5307 }
5308
5309 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
5310 // Note abs() alone can't be used as the behavior of abs(INT_MIN) is
5311 // undefined.
5312 if (delta_q[i] > range || delta_q[i] < -range || delta_lf[i] > range ||
5313 delta_lf[i] < -range) {
5314 return -1;
5315 }
5316 }
5317
5318 // Disable segmentation if no map is provided, or if no deltas or thresholds are set.
5319 if (!map || (delta_q[0] == 0 && delta_q[1] == 0 && delta_q[2] == 0 &&
5320 delta_q[3] == 0 && delta_lf[0] == 0 && delta_lf[1] == 0 &&
5321 delta_lf[2] == 0 && delta_lf[3] == 0 && threshold[0] == 0 &&
5322 threshold[1] == 0 && threshold[2] == 0 && threshold[3] == 0)) {
5323 disable_segmentation(cpi);
5324 return 0;
5325 }
5326
5327 // Translate the external delta q values to internal values.
5328 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
5329 internal_delta_q[i] =
5330 (delta_q[i] >= 0) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
5331 }
5332
5333 /* Set the segmentation Map */
5334 set_segmentation_map(cpi, map);
5335
5336 /* Activate segmentation. */
5337 enable_segmentation(cpi);
5338
5339 /* Set up the quant segment data */
5340 feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
5341 feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
5342 feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
5343 feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];
5344
5345 /* Set up the loop filter segment data */
5346 feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
5347 feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
5348 feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
5349 feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];
5350
5351 cpi->segment_encode_breakout[0] = threshold[0];
5352 cpi->segment_encode_breakout[1] = threshold[1];
5353 cpi->segment_encode_breakout[2] = threshold[2];
5354 cpi->segment_encode_breakout[3] = threshold[3];
5355
5356 /* Initialise the feature data structure */
5357 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
5358
5359 if (threshold[0] != 0 || threshold[1] != 0 || threshold[2] != 0 ||
5360 threshold[3] != 0)
5361 cpi->use_roi_static_threshold = 1;
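/* The ROI map owns the segment map from here on, so turn off cyclic
 * refresh, which would otherwise reuse segmentation for its own purposes. */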
5362 cpi->cyclic_refresh_mode_enabled = 0;
5363
5364 return 0;
5365 }
5366
5367 int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
5368 unsigned int cols) {
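/* The active map holds one byte per macroblock in raster order; it is
 * assumed here that a nonzero entry marks the macroblock as active
 * (codeable) and zero marks it as inactive.
 */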
5369 if ((int)rows == cpi->common.mb_rows && (int)cols == cpi->common.mb_cols) {
5370 if (map) {
5371 memcpy(cpi->active_map, map, rows * cols);
5372 cpi->active_map_enabled = 1;
5373 } else {
5374 cpi->active_map_enabled = 0;
5375 }
5376
5377 return 0;
5378 } else {
5379 return -1;
5380 }
5381 }
5382
5383 int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode,
5384 VPX_SCALING vert_mode) {
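/* Accept any internal scaling mode up to ONETWO (one-half); larger enum
 * values are rejected. */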
5385 if (horiz_mode <= ONETWO) {
5386 cpi->common.horiz_scale = horiz_mode;
5387 } else {
5388 return -1;
5389 }
5390
5391 if (vert_mode <= ONETWO) {
5392 cpi->common.vert_scale = vert_mode;
5393 } else {
5394 return -1;
5395 }
5396
5397 return 0;
5398 }
5399
5400 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
5401 int i, j;
5402 int Total = 0;
5403
5404 unsigned char *src = source->y_buffer;
5405 unsigned char *dst = dest->y_buffer;
5406
5407 /* Loop through the raw and reconstructed Y plane data, summing the
5408 * squared differences over 16x16 blocks.
5409 */
5410 for (i = 0; i < source->y_height; i += 16) {
5411 for (j = 0; j < source->y_width; j += 16) {
5412 unsigned int sse;
5413 Total += vpx_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
5414 &sse);
5415 }
5416
5417 src += 16 * source->y_stride;
5418 dst += 16 * dest->y_stride;
5419 }
5420
5421 return Total;
5422 }
5423
5424 int vp8_get_quantizer(VP8_COMP *cpi) { return cpi->common.base_qindex; }
5425