1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "vpx_config.h"
12 #include "./vpx_scale_rtcd.h"
13 #include "./vpx_dsp_rtcd.h"
14 #include "./vp8_rtcd.h"
15 #include "vp8/common/onyxc_int.h"
16 #include "vp8/common/blockd.h"
17 #include "onyx_int.h"
18 #include "vp8/common/systemdependent.h"
19 #include "vp8/encoder/quantize.h"
20 #include "vp8/common/alloccommon.h"
21 #include "mcomp.h"
22 #include "firstpass.h"
23 #include "vpx_dsp/psnr.h"
24 #include "vpx_scale/vpx_scale.h"
25 #include "vp8/common/extend.h"
26 #include "ratectrl.h"
27 #include "vp8/common/quant_common.h"
28 #include "segmentation.h"
29 #if CONFIG_POSTPROC
30 #include "vp8/common/postproc.h"
31 #endif
32 #include "vpx_mem/vpx_mem.h"
33 #include "vp8/common/reconintra.h"
34 #include "vp8/common/swapyv12buffer.h"
35 #include "vp8/common/threading.h"
36 #include "vpx_ports/system_state.h"
37 #include "vpx_ports/vpx_timer.h"
38 #if ARCH_ARM
39 #include "vpx_ports/arm.h"
40 #endif
41 #if CONFIG_MULTI_RES_ENCODING
42 #include "mr_dissim.h"
43 #endif
44 #include "encodeframe.h"
45
46 #include <assert.h>
47 #include <math.h>
48 #include <stdio.h>
49 #include <limits.h>
50
51 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
52 extern int vp8_update_coef_context(VP8_COMP *cpi);
53 extern void vp8_update_coef_probs(VP8_COMP *cpi);
54 #endif
55
56 extern void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
57 extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val);
58 extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
59
60 extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source,
61 YV12_BUFFER_CONFIG *post, int filt_lvl,
62 int low_var_thresh, int flag);
63 extern void print_parms(VP8_CONFIG *ocf, char *filenam);
64 extern unsigned int vp8_get_processor_freq();
65 extern void print_tree_update_probs();
66 extern int vp8cx_create_encoder_threads(VP8_COMP *cpi);
67 extern void vp8cx_remove_encoder_threads(VP8_COMP *cpi);
68
69 int vp8_estimate_entropy_savings(VP8_COMP *cpi);
70
71 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
72
73 extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
74
75 static void set_default_lf_deltas(VP8_COMP *cpi);
76
77 extern const int vp8_gf_interval_table[101];
78
79 #if CONFIG_INTERNAL_STATS
80 #include "math.h"
81 #include "vpx_dsp/ssim.h"
82 #endif
83
84 #ifdef OUTPUT_YUV_SRC
85 FILE *yuv_file;
86 #endif
87 #ifdef OUTPUT_YUV_DENOISED
88 FILE *yuv_denoised_file;
89 #endif
90
91 #if 0
92 FILE *framepsnr;
93 FILE *kf_list;
94 FILE *keyfile;
95 #endif
96
97 #if 0
98 extern int skip_true_count;
99 extern int skip_false_count;
100 #endif
101
102 #ifdef VP8_ENTROPY_STATS
103 extern int intra_mode_stats[10][10][10];
104 #endif
105
106 #ifdef SPEEDSTATS
107 unsigned int frames_at_speed[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0 };
109 unsigned int tot_pm = 0;
110 unsigned int cnt_pm = 0;
111 unsigned int tot_ef = 0;
112 unsigned int cnt_ef = 0;
113 #endif
114
115 #ifdef MODE_STATS
116 extern unsigned __int64 Sectionbits[50];
117 extern int y_modes[5];
118 extern int uv_modes[4];
119 extern int b_modes[10];
120
121 extern int inter_y_modes[10];
122 extern int inter_uv_modes[4];
123 extern unsigned int inter_b_modes[15];
124 #endif
125
126 extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
127
128 extern const int qrounding_factors[129];
129 extern const int qzbin_factors[129];
130 extern void vp8cx_init_quantizer(VP8_COMP *cpi);
131 extern const int vp8cx_base_skip_false_prob[128];
132
133 /* Tables relating active max Q to active min Q */
134 static const unsigned char kf_low_motion_minq[QINDEX_RANGE] = {
135 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
136 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
137 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
138 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
139 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 10, 10, 11,
140 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
141 17, 17, 18, 18, 18, 18, 19, 20, 20, 21, 21, 22, 23, 23
142 };
143 static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = {
144 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
145 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
146 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5,
147 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10,
148 10, 10, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16,
149 16, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
150 22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30
151 };
152 static const unsigned char gf_low_motion_minq[QINDEX_RANGE] = {
153 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3,
154 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8,
155 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
156 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
157 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34,
158 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 44,
159 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58
160 };
161 static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] = {
162 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5,
163 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11,
164 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18,
165 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
166 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37,
167 37, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 45, 46, 47, 48, 49, 50,
168 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
169 };
170 static const unsigned char gf_high_motion_minq[QINDEX_RANGE] = {
171 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5,
172 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11,
173 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21,
174 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30,
175 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40,
176 40, 41, 41, 42, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
177 57, 58, 59, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80
178 };
179 static const unsigned char inter_minq[QINDEX_RANGE] = {
180 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
181 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24,
182 24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38,
183 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53,
184 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69,
185 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86,
186 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
187 };
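/* Illustrative reading of these tables (not normative): the encoder indexes
 * them with the frame's active worst Q to cap the active best Q. For example,
 * at an active worst Q index of 127 (the table maximum),
 * kf_low_motion_minq[127] = 23 and inter_minq[127] = 100, so a low-motion
 * key frame may drop to Q index 23 while an inter frame is held at 100.
 */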
188
189 #ifdef PACKET_TESTING
190 extern FILE *vpxlogc;
191 #endif
192
193 static void save_layer_context(VP8_COMP *cpi) {
194 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
195
196 /* Save layer dependent coding state */
197 lc->target_bandwidth = cpi->target_bandwidth;
198 lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
199 lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
200 lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
201 lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
202 lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
203 lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
204 lc->buffer_level = cpi->buffer_level;
205 lc->bits_off_target = cpi->bits_off_target;
206 lc->total_actual_bits = cpi->total_actual_bits;
207 lc->worst_quality = cpi->worst_quality;
208 lc->active_worst_quality = cpi->active_worst_quality;
209 lc->best_quality = cpi->best_quality;
210 lc->active_best_quality = cpi->active_best_quality;
211 lc->ni_av_qi = cpi->ni_av_qi;
212 lc->ni_tot_qi = cpi->ni_tot_qi;
213 lc->ni_frames = cpi->ni_frames;
214 lc->avg_frame_qindex = cpi->avg_frame_qindex;
215 lc->rate_correction_factor = cpi->rate_correction_factor;
216 lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
217 lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
218 lc->zbin_over_quant = cpi->mb.zbin_over_quant;
219 lc->inter_frame_target = cpi->inter_frame_target;
220 lc->total_byte_count = cpi->total_byte_count;
221 lc->filter_level = cpi->common.filter_level;
222
223 lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
224
225 memcpy(lc->count_mb_ref_frame_usage, cpi->mb.count_mb_ref_frame_usage,
226 sizeof(cpi->mb.count_mb_ref_frame_usage));
227 }
228
229 static void restore_layer_context(VP8_COMP *cpi, const int layer) {
230 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
231
232 /* Restore layer dependent coding state */
233 cpi->current_layer = layer;
234 cpi->target_bandwidth = lc->target_bandwidth;
235 cpi->oxcf.target_bandwidth = lc->target_bandwidth;
236 cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
237 cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
238 cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
239 cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
240 cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
241 cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
242 cpi->buffer_level = lc->buffer_level;
243 cpi->bits_off_target = lc->bits_off_target;
244 cpi->total_actual_bits = lc->total_actual_bits;
245 cpi->active_worst_quality = lc->active_worst_quality;
246 cpi->active_best_quality = lc->active_best_quality;
247 cpi->ni_av_qi = lc->ni_av_qi;
248 cpi->ni_tot_qi = lc->ni_tot_qi;
249 cpi->ni_frames = lc->ni_frames;
250 cpi->avg_frame_qindex = lc->avg_frame_qindex;
251 cpi->rate_correction_factor = lc->rate_correction_factor;
252 cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
253 cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
254 cpi->mb.zbin_over_quant = lc->zbin_over_quant;
255 cpi->inter_frame_target = lc->inter_frame_target;
256 cpi->total_byte_count = lc->total_byte_count;
257 cpi->common.filter_level = lc->filter_level;
258
259 cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
260
261 memcpy(cpi->mb.count_mb_ref_frame_usage, lc->count_mb_ref_frame_usage,
262 sizeof(cpi->mb.count_mb_ref_frame_usage));
263 }
264
265 static int rescale(int val, int num, int denom) {
266 int64_t llnum = num;
267 int64_t llden = denom;
268 int64_t llval = val;
269
270 return (int)(llval * llnum / llden);
271 }
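/* Example (illustrative): rescale() does the multiply in 64-bit to avoid
 * overflow, e.g. rescale(4000, 800000, 1000) = 4000 * 800000 / 1000
 * = 3,200,000 -- a 4000 ms buffer target at 800 kbit/s is 3.2 Mbits, even
 * though 4000 * 800000 would overflow a 32-bit int.
 */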
272
273 static void init_temporal_layer_context(VP8_COMP *cpi, VP8_CONFIG *oxcf,
274 const int layer,
275 double prev_layer_framerate) {
276 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
277
278 lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
279 lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
280
281 lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
282 lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
283 lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
284
285 lc->starting_buffer_level =
286 rescale((int)(oxcf->starting_buffer_level), lc->target_bandwidth, 1000);
287
288 if (oxcf->optimal_buffer_level == 0) {
289 lc->optimal_buffer_level = lc->target_bandwidth / 8;
290 } else {
291 lc->optimal_buffer_level =
292 rescale((int)(oxcf->optimal_buffer_level), lc->target_bandwidth, 1000);
293 }
294
295 if (oxcf->maximum_buffer_size == 0) {
296 lc->maximum_buffer_size = lc->target_bandwidth / 8;
297 } else {
298 lc->maximum_buffer_size =
299 rescale((int)(oxcf->maximum_buffer_size), lc->target_bandwidth, 1000);
300 }
301
302 /* Work out the average size of a frame within this layer */
303 if (layer > 0) {
304 lc->avg_frame_size_for_layer =
305 (int)((cpi->oxcf.target_bitrate[layer] -
306 cpi->oxcf.target_bitrate[layer - 1]) *
307 1000 / (lc->framerate - prev_layer_framerate));
308 }
309
310 lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
311 lc->active_best_quality = cpi->oxcf.best_allowed_q;
312 lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
313
314 lc->buffer_level = lc->starting_buffer_level;
315 lc->bits_off_target = lc->starting_buffer_level;
316
317 lc->total_actual_bits = 0;
318 lc->ni_av_qi = 0;
319 lc->ni_tot_qi = 0;
320 lc->ni_frames = 0;
321 lc->rate_correction_factor = 1.0;
322 lc->key_frame_rate_correction_factor = 1.0;
323 lc->gf_rate_correction_factor = 1.0;
324 lc->inter_frame_target = 0;
325 }
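/* Example (illustrative): with target_bitrate[] = { 100, 250 } kbit/s and
 * layer framerates of 15 and 30 fps, layer 1's avg_frame_size_for_layer is
 * (250 - 100) * 1000 / (30 - 15) = 10000 bits, i.e. the budget for the extra
 * frames that layer 1 adds on top of layer 0.
 */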
326
327 // Upon a run-time change in temporal layers, reset the layer context parameters
328 // for any "new" layers. For "existing" layers, let them inherit the parameters
329 // from the previous layer state (at the same layer #). In future we may want
330 // to better map the previous layer state(s) to the "new" ones.
331 static void reset_temporal_layer_change(VP8_COMP *cpi, VP8_CONFIG *oxcf,
332 const int prev_num_layers) {
333 int i;
334 double prev_layer_framerate = 0;
335 const int curr_num_layers = cpi->oxcf.number_of_layers;
336 // If the previous state was 1 layer, get current layer context from cpi.
337 // We need this to set the layer context for the new layers below.
338 if (prev_num_layers == 1) {
339 cpi->current_layer = 0;
340 save_layer_context(cpi);
341 }
342 for (i = 0; i < curr_num_layers; ++i) {
343 LAYER_CONTEXT *lc = &cpi->layer_context[i];
344 if (i >= prev_num_layers) {
345 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
346 }
347 // The initial buffer levels are set based on their starting levels.
348 // We could set the buffer levels based on the previous state (normalized
349 // properly by the layer bandwidths) but we would need to keep track of
350 // the previous set of layer bandwidths (i.e., target_bitrate[i])
351 // before the layer change. For now, reset to the starting levels.
352 lc->buffer_level =
353 cpi->oxcf.starting_buffer_level_in_ms * cpi->oxcf.target_bitrate[i];
354 lc->bits_off_target = lc->buffer_level;
355 // TODO(marpan): Should we set the rate_correction_factor and
356 // active_worst/best_quality to values derived from the previous layer
357 // state (to smooth-out quality dips/rate fluctuation at transition)?
358
359 // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
360 // is not set for 1 layer, and the restore_layer_context/save_context()
361 // are not called in the encoding loop, so we need to call it here to
362 // pass the layer context state to |cpi|.
363 if (curr_num_layers == 1) {
364 lc->target_bandwidth = cpi->oxcf.target_bandwidth;
365 lc->buffer_level =
366 cpi->oxcf.starting_buffer_level_in_ms * lc->target_bandwidth / 1000;
367 lc->bits_off_target = lc->buffer_level;
368 restore_layer_context(cpi, 0);
369 }
370 prev_layer_framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[i];
371 }
372 }
373
374 static void setup_features(VP8_COMP *cpi) {
375 // If segmentation enabled set the update flags
376 if (cpi->mb.e_mbd.segmentation_enabled) {
377 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
378 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
379 } else {
380 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
381 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
382 }
383
384 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
385 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
386 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
387 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
388 memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0,
389 sizeof(cpi->mb.e_mbd.ref_lf_deltas));
390 memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0,
391 sizeof(cpi->mb.e_mbd.mode_lf_deltas));
392
393 set_default_lf_deltas(cpi);
394 }
395
396 static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
397
398 void vp8_initialize_enc(void) {
399 static volatile int init_done = 0;
400
401 if (!init_done) {
402 vpx_dsp_rtcd();
403 vp8_init_intra_predictors();
404 init_done = 1;
405 }
406 }
407
408 static void dealloc_compressor_data(VP8_COMP *cpi) {
409 vpx_free(cpi->tplist);
410 cpi->tplist = NULL;
411
412 /* Delete last frame MV storage buffers */
413 vpx_free(cpi->lfmv);
414 cpi->lfmv = 0;
415
416 vpx_free(cpi->lf_ref_frame_sign_bias);
417 cpi->lf_ref_frame_sign_bias = 0;
418
419 vpx_free(cpi->lf_ref_frame);
420 cpi->lf_ref_frame = 0;
421
422 /* Delete segmentation map */
423 vpx_free(cpi->segmentation_map);
424 cpi->segmentation_map = 0;
425
426 vpx_free(cpi->active_map);
427 cpi->active_map = 0;
428
429 vp8_de_alloc_frame_buffers(&cpi->common);
430
431 vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
432 vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
433 dealloc_raw_frame_buffers(cpi);
434
435 vpx_free(cpi->tok);
436 cpi->tok = 0;
437
438 /* Structure used to monitor GF usage */
439 vpx_free(cpi->gf_active_flags);
440 cpi->gf_active_flags = 0;
441
442 /* Activity mask based per mb zbin adjustments */
443 vpx_free(cpi->mb_activity_map);
444 cpi->mb_activity_map = 0;
445
446 vpx_free(cpi->mb.pip);
447 cpi->mb.pip = 0;
448
449 #if CONFIG_MULTITHREAD
450 /* De-allocate mutex */
451 if (cpi->pmutex != NULL) {
452 VP8_COMMON *const pc = &cpi->common;
453 int i;
454
455 for (i = 0; i < pc->mb_rows; ++i) {
456 pthread_mutex_destroy(&cpi->pmutex[i]);
457 }
458 vpx_free(cpi->pmutex);
459 cpi->pmutex = NULL;
460 }
461
462 vpx_free(cpi->mt_current_mb_col);
463 cpi->mt_current_mb_col = NULL;
464 #endif
465 }
466
467 static void enable_segmentation(VP8_COMP *cpi) {
468 /* Set the appropriate feature bit */
469 cpi->mb.e_mbd.segmentation_enabled = 1;
470 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
471 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
472 }
473 static void disable_segmentation(VP8_COMP *cpi) {
474 /* Clear the appropriate feature bit */
475 cpi->mb.e_mbd.segmentation_enabled = 0;
476 }
477
478 /* Valid values for a segment are 0 to 3
479 * Segmentation map is arranged as [Rows][Columns]
480 */
481 static void set_segmentation_map(VP8_COMP *cpi,
482 unsigned char *segmentation_map) {
483 /* Copy in the new segmentation map */
484 memcpy(cpi->segmentation_map, segmentation_map,
485 (cpi->common.mb_rows * cpi->common.mb_cols));
486
487 /* Signal that the map should be updated. */
488 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
489 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
490 }
491
492 /* The values given for each segment can be either deltas (from the default
493 * value chosen for the frame) or absolute values.
494 *
495 * Valid range for abs values is:
496 * (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
497 * Valid range for delta values are:
498 * (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
499 *
500 * abs_delta = SEGMENT_DELTADATA (deltas)
501 * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
502 *
503 */
504 static void set_segment_data(VP8_COMP *cpi, signed char *feature_data,
505 unsigned char abs_delta) {
506 cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
507 memcpy(cpi->segment_feature_data, feature_data,
508 sizeof(cpi->segment_feature_data));
509 }
510
511 /* A simple function to cyclically refresh the background at a lower Q */
512 static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) {
513 unsigned char *seg_map = cpi->segmentation_map;
514 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
515 int i;
516 int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
517 int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
518
519 cpi->cyclic_refresh_q = Q / 2;
520
521 if (cpi->oxcf.screen_content_mode) {
522 // Modify quality ramp-up based on Q. Above some Q level, increase the
523 // number of blocks to be refreshed, and reduce it below the threshold.
524 // Turn off under certain conditions (i.e., away from a key frame, and if
525 // we are at good quality (low Q) and most of the blocks were
526 // skip-encoded
527 // in the previous frame).
528 int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100;
529 if (Q >= qp_thresh) {
530 cpi->cyclic_refresh_mode_max_mbs_perframe =
531 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
532 } else if (cpi->frames_since_key > 250 && Q < 20 &&
533 cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
534 cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
535 } else {
536 cpi->cyclic_refresh_mode_max_mbs_perframe =
537 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
538 }
539 block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
540 }
541
542 // Set every macroblock to be eligible for update.
543 // For key frame this will reset seg map to 0.
544 memset(cpi->segmentation_map, 0, mbs_in_frame);
545
546 if (cpi->common.frame_type != KEY_FRAME && block_count > 0) {
547 /* Cycle through the macro_block rows */
548 /* MB loop to set local segmentation map */
549 i = cpi->cyclic_refresh_mode_index;
550 assert(i < mbs_in_frame);
551 do {
552 /* If the MB is a candidate for clean up then mark it for
553 * possible boost/refresh (segment 1). The segment id may get
554 * reset to 0 later if the MB gets coded as anything other than
555 * last frame 0,0, as only (last frame 0,0) MBs are eligible for
556 * refresh: that is to say MBs likely to be background blocks.
557 */
558 if (cpi->cyclic_refresh_map[i] == 0) {
559 seg_map[i] = 1;
560 block_count--;
561 } else if (cpi->cyclic_refresh_map[i] < 0) {
562 cpi->cyclic_refresh_map[i]++;
563 }
564
565 i++;
566 if (i == mbs_in_frame) i = 0;
567
568 } while (block_count && i != cpi->cyclic_refresh_mode_index);
569
570 cpi->cyclic_refresh_mode_index = i;
571
572 #if CONFIG_TEMPORAL_DENOISING
573 if (cpi->oxcf.noise_sensitivity > 0) {
574 if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
575 Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
576 (cpi->frames_since_key >
577 2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
578 // Under aggressive denoising, use segmentation to turn off loop
579 // filter below some qp thresh. The filter is reduced for all
580 // blocks that have been encoded as ZEROMV LAST x frames in a row,
581 // where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
582 // This is to avoid "dot" artifacts that can occur from repeated
583 // loop filtering on noisy input source.
584 cpi->cyclic_refresh_q = Q;
585 // lf_adjustment = -MAX_LOOP_FILTER;
586 lf_adjustment = -40;
587 for (i = 0; i < mbs_in_frame; ++i) {
588 seg_map[i] = (cpi->consec_zero_last[i] >
589 cpi->denoiser.denoise_pars.consec_zerolast)
590 ? 1
591 : 0;
592 }
593 }
594 }
595 #endif
596 }
597
598 /* Activate segmentation. */
599 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
600 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
601 enable_segmentation(cpi);
602
603 /* Set up the quant segment data */
604 feature_data[MB_LVL_ALT_Q][0] = 0;
605 feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
606 feature_data[MB_LVL_ALT_Q][2] = 0;
607 feature_data[MB_LVL_ALT_Q][3] = 0;
608
609 /* Set up the loop segment data */
610 feature_data[MB_LVL_ALT_LF][0] = 0;
611 feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
612 feature_data[MB_LVL_ALT_LF][2] = 0;
613 feature_data[MB_LVL_ALT_LF][3] = 0;
614
615 /* Initialise the feature data structure */
616 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
617 }
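/* Example (illustrative): for a frame at Q index 60 (and no screen-content or
 * denoiser override), cyclic_refresh_q = 30, so segment 1 gets an ALT_Q delta
 * of 30 - 60 = -30 and the marked "background" macroblocks are quantized at
 * roughly half the frame Q, plus whatever loop filter adjustment was passed
 * in via lf_adjustment.
 */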
618
619 static void set_default_lf_deltas(VP8_COMP *cpi) {
620 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
621 cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
622
623 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
624 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
625
626 /* Test of ref frame deltas */
627 cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
628 cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
629 cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
630 cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
631
632 cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
633
634 if (cpi->oxcf.Mode == MODE_REALTIME) {
635 cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
636 } else {
637 cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
638 }
639
640 cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
641 cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
642 }
643
644 /* Convenience macros for mapping speed and mode into a continuous
645 * range
646 */
647 #define GOOD(x) (x + 1)
648 #define RT(x) (x + 7)
649
650 static int speed_map(int speed, const int *map) {
651 int res;
652
653 do {
654 res = *map++;
655 } while (speed >= *map++);
656 return res;
657 }
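/* Worked example (illustrative): the maps below interleave values with
 * GOOD()/RT() speed breakpoints, and speed_map() returns the value that
 * precedes the first breakpoint greater than the requested speed. For
 * thresh_mult_map_znn:
 *   speed_map(GOOD(2), thresh_mult_map_znn) == 1500  (good-quality speed 2)
 *   speed_map(RT(0),  thresh_mult_map_znn) == 1000   (real-time speed 0)
 */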
658
659 static const int thresh_mult_map_znn[] = {
660 /* map common to zero, nearest, and near */
661 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
662 };
663
664 static const int thresh_mult_map_vhpred[] = { 1000, GOOD(2), 1500, GOOD(3),
665 2000, RT(0), 1000, RT(1),
666 2000, RT(7), INT_MAX, INT_MAX };
667
668 static const int thresh_mult_map_bpred[] = { 2000, GOOD(0), 2500, GOOD(2),
669 5000, GOOD(3), 7500, RT(0),
670 2500, RT(1), 5000, RT(6),
671 INT_MAX, INT_MAX };
672
673 static const int thresh_mult_map_tm[] = { 1000, GOOD(2), 1500, GOOD(3),
674 2000, RT(0), 0, RT(1),
675 1000, RT(2), 2000, RT(7),
676 INT_MAX, INT_MAX };
677
678 static const int thresh_mult_map_new1[] = { 1000, GOOD(2), 2000,
679 RT(0), 2000, INT_MAX };
680
681 static const int thresh_mult_map_new2[] = { 1000, GOOD(2), 2000, GOOD(3),
682 2500, GOOD(5), 4000, RT(0),
683 2000, RT(2), 2500, RT(5),
684 4000, INT_MAX };
685
686 static const int thresh_mult_map_split1[] = {
687 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
688 RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
689 };
690
691 static const int thresh_mult_map_split2[] = {
692 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
693 RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
694 };
695
696 static const int mode_check_freq_map_zn2[] = {
697 /* {zero,nearest}{2,3} */
698 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
699 };
700
701 static const int mode_check_freq_map_vhbpred[] = {
702 0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(5), 4, INT_MAX
703 };
704
705 static const int mode_check_freq_map_near2[] = {
706 0, GOOD(5), 2, RT(0), 0, RT(3), 2,
707 RT(10), 1 << 2, RT(11), 1 << 3, RT(12), 1 << 4, INT_MAX
708 };
709
710 static const int mode_check_freq_map_new1[] = {
711 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
712 };
713
714 static const int mode_check_freq_map_new2[] = { 0, GOOD(5), 4, RT(0),
715 0, RT(3), 4, RT(10),
716 1 << 3, RT(11), 1 << 4, RT(12),
717 1 << 5, INT_MAX };
718
719 static const int mode_check_freq_map_split1[] = {
720 0, GOOD(2), 2, GOOD(3), 7, RT(1), 2, RT(2), 7, INT_MAX
721 };
722
723 static const int mode_check_freq_map_split2[] = {
724 0, GOOD(1), 2, GOOD(2), 4, GOOD(3), 15, RT(1), 4, RT(2), 15, INT_MAX
725 };
726
727 void vp8_set_speed_features(VP8_COMP *cpi) {
728 SPEED_FEATURES *sf = &cpi->sf;
729 int Mode = cpi->compressor_speed;
730 int Speed = cpi->Speed;
731 int Speed2;
732 int i;
733 VP8_COMMON *cm = &cpi->common;
734 int last_improved_quant = sf->improved_quant;
735 int ref_frames;
736
737 /* Initialise default mode frequency sampling variables */
738 for (i = 0; i < MAX_MODES; ++i) {
739 cpi->mode_check_freq[i] = 0;
740 }
741
742 cpi->mb.mbs_tested_so_far = 0;
743 cpi->mb.mbs_zero_last_dot_suppress = 0;
744
745 /* best quality defaults */
746 sf->RD = 1;
747 sf->search_method = NSTEP;
748 sf->improved_quant = 1;
749 sf->improved_dct = 1;
750 sf->auto_filter = 1;
751 sf->recode_loop = 1;
752 sf->quarter_pixel_search = 1;
753 sf->half_pixel_search = 1;
754 sf->iterative_sub_pixel = 1;
755 sf->optimize_coefficients = 1;
756 sf->use_fastquant_for_pick = 0;
757 sf->no_skip_block4x4_search = 1;
758
759 sf->first_step = 0;
760 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
761 sf->improved_mv_pred = 1;
762
763 /* default thresholds to 0 */
764 for (i = 0; i < MAX_MODES; ++i) sf->thresh_mult[i] = 0;
765
766 /* Count enabled references */
767 ref_frames = 1;
768 if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frames++;
769 if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frames++;
770 if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frames++;
771
772 /* Convert speed to continuous range, with clamping */
773 if (Mode == 0) {
774 Speed = 0;
775 } else if (Mode == 2) {
776 Speed = RT(Speed);
777 } else {
778 if (Speed > 5) Speed = 5;
779 Speed = GOOD(Speed);
780 }
781
782 sf->thresh_mult[THR_ZERO1] = sf->thresh_mult[THR_NEAREST1] =
783 sf->thresh_mult[THR_NEAR1] = sf->thresh_mult[THR_DC] = 0; /* always */
784
785 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO3] =
786 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST3] =
787 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR3] =
788 speed_map(Speed, thresh_mult_map_znn);
789
790 sf->thresh_mult[THR_V_PRED] = sf->thresh_mult[THR_H_PRED] =
791 speed_map(Speed, thresh_mult_map_vhpred);
792 sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
793 sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
794 sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
795 sf->thresh_mult[THR_NEW2] = sf->thresh_mult[THR_NEW3] =
796 speed_map(Speed, thresh_mult_map_new2);
797 sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
798 sf->thresh_mult[THR_SPLIT2] = sf->thresh_mult[THR_SPLIT3] =
799 speed_map(Speed, thresh_mult_map_split2);
800
801 // Special case for temporal layers.
802 // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
803 // used as second reference. We don't modify thresholds for ALTREF case
804 // since ALTREF is usually used as long-term reference in temporal layers.
805 if ((cpi->Speed <= 6) && (cpi->oxcf.number_of_layers > 1) &&
806 (cpi->ref_frame_flags & VP8_LAST_FRAME) &&
807 (cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
808 if (cpi->closest_reference_frame == GOLDEN_FRAME) {
809 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
810 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
811 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
812 } else {
813 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
814 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
815 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;
816 }
817 }
818
819 cpi->mode_check_freq[THR_ZERO1] = cpi->mode_check_freq[THR_NEAREST1] =
820 cpi->mode_check_freq[THR_NEAR1] = cpi->mode_check_freq[THR_TM] =
821 cpi->mode_check_freq[THR_DC] = 0; /* always */
822
823 cpi->mode_check_freq[THR_ZERO2] = cpi->mode_check_freq[THR_ZERO3] =
824 cpi->mode_check_freq[THR_NEAREST2] = cpi->mode_check_freq[THR_NEAREST3] =
825 speed_map(Speed, mode_check_freq_map_zn2);
826
827 cpi->mode_check_freq[THR_NEAR2] = cpi->mode_check_freq[THR_NEAR3] =
828 speed_map(Speed, mode_check_freq_map_near2);
829
830 cpi->mode_check_freq[THR_V_PRED] = cpi->mode_check_freq[THR_H_PRED] =
831 cpi->mode_check_freq[THR_B_PRED] =
832 speed_map(Speed, mode_check_freq_map_vhbpred);
833
834 // For real-time mode at speed 10 keep the mode_check_freq threshold
835 // for NEW1 similar to that of speed 9.
836 Speed2 = Speed;
837 if (cpi->Speed == 10 && Mode == 2) Speed2 = RT(9);
838 cpi->mode_check_freq[THR_NEW1] = speed_map(Speed2, mode_check_freq_map_new1);
839
840 cpi->mode_check_freq[THR_NEW2] = cpi->mode_check_freq[THR_NEW3] =
841 speed_map(Speed, mode_check_freq_map_new2);
842
843 cpi->mode_check_freq[THR_SPLIT1] =
844 speed_map(Speed, mode_check_freq_map_split1);
845 cpi->mode_check_freq[THR_SPLIT2] = cpi->mode_check_freq[THR_SPLIT3] =
846 speed_map(Speed, mode_check_freq_map_split2);
847 Speed = cpi->Speed;
848 switch (Mode) {
849 #if !CONFIG_REALTIME_ONLY
850 case 0: /* best quality mode */
851 sf->first_step = 0;
852 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
853 break;
854 case 1:
855 case 3:
856 if (Speed > 0) {
857 /* Disable coefficient optimization above speed 0 */
858 sf->optimize_coefficients = 0;
859 sf->use_fastquant_for_pick = 1;
860 sf->no_skip_block4x4_search = 0;
861
862 sf->first_step = 1;
863 }
864
865 if (Speed > 2) {
866 sf->improved_quant = 0;
867 sf->improved_dct = 0;
868
869 /* Only do recode loop on key frames, golden frames and
870 * alt ref frames
871 */
872 sf->recode_loop = 2;
873 }
874
875 if (Speed > 3) {
876 sf->auto_filter = 1;
877 sf->recode_loop = 0; /* recode loop off */
878 sf->RD = 0; /* Turn rd off */
879 }
880
881 if (Speed > 4) {
882 sf->auto_filter = 0; /* Faster selection of loop filter */
883 }
884
885 break;
886 #endif
887 case 2:
888 sf->optimize_coefficients = 0;
889 sf->recode_loop = 0;
890 sf->auto_filter = 1;
891 sf->iterative_sub_pixel = 1;
892 sf->search_method = NSTEP;
893
894 if (Speed > 0) {
895 sf->improved_quant = 0;
896 sf->improved_dct = 0;
897
898 sf->use_fastquant_for_pick = 1;
899 sf->no_skip_block4x4_search = 0;
900 sf->first_step = 1;
901 }
902
903 if (Speed > 2) sf->auto_filter = 0; /* Faster selection of loop filter */
904
905 if (Speed > 3) {
906 sf->RD = 0;
907 sf->auto_filter = 1;
908 }
909
910 if (Speed > 4) {
911 sf->auto_filter = 0; /* Faster selection of loop filter */
912 sf->search_method = HEX;
913 sf->iterative_sub_pixel = 0;
914 }
915
916 if (Speed > 6) {
917 unsigned int sum = 0;
918 unsigned int total_mbs = cm->MBs;
919 int thresh;
920 unsigned int total_skip;
921
922 int min = 2000;
923
924 if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout;
925
926 min >>= 7;
927
928 for (i = 0; i < min; ++i) {
929 sum += cpi->mb.error_bins[i];
930 }
931
932 total_skip = sum;
933 sum = 0;
934
935 /* i starts from 2 to make sure thresh started from 2048 */
936 for (; i < 1024; ++i) {
937 sum += cpi->mb.error_bins[i];
938
939 if (10 * sum >=
940 (unsigned int)(cpi->Speed - 6) * (total_mbs - total_skip)) {
941 break;
942 }
943 }
944
945 i--;
946 thresh = (i << 7);
947
948 if (thresh < 2000) thresh = 2000;
949
950 if (ref_frames > 1) {
951 sf->thresh_mult[THR_NEW1] = thresh;
952 sf->thresh_mult[THR_NEAREST1] = thresh >> 1;
953 sf->thresh_mult[THR_NEAR1] = thresh >> 1;
954 }
955
956 if (ref_frames > 2) {
957 sf->thresh_mult[THR_NEW2] = thresh << 1;
958 sf->thresh_mult[THR_NEAREST2] = thresh;
959 sf->thresh_mult[THR_NEAR2] = thresh;
960 }
961
962 if (ref_frames > 3) {
963 sf->thresh_mult[THR_NEW3] = thresh << 1;
964 sf->thresh_mult[THR_NEAREST3] = thresh;
965 sf->thresh_mult[THR_NEAR3] = thresh;
966 }
967
968 sf->improved_mv_pred = 0;
969 }
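/* Rough reading of the block above (illustrative, as inferred from the loop):
 * error_bins[] appears to be a histogram of per-MB error in steps of 128
 * (hence the << 7 / >> 7). Bins below the encode-breakout level count as
 * skips; thresh is then raised until the cumulative count covers roughly
 * (Speed - 6) * 10% of the non-skipped MBs, and that thresh scales the
 * NEW/NEAREST/NEAR mode thresholds for each enabled reference frame.
 */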
970
971 if (Speed > 8) sf->quarter_pixel_search = 0;
972
973 if (cm->version == 0) {
974 cm->filter_type = NORMAL_LOOPFILTER;
975
976 if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER;
977 } else {
978 cm->filter_type = SIMPLE_LOOPFILTER;
979 }
980
981 /* This has a big hit on quality. Last resort */
982 if (Speed >= 15) sf->half_pixel_search = 0;
983
984 memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
985
986 }; /* switch */
987
988 /* Slow quant, dct and trellis not worthwhile for first pass
989 * so make sure they are always turned off.
990 */
991 if (cpi->pass == 1) {
992 sf->improved_quant = 0;
993 sf->optimize_coefficients = 0;
994 sf->improved_dct = 0;
995 }
996
997 if (cpi->sf.search_method == NSTEP) {
998 vp8_init3smotion_compensation(&cpi->mb,
999 cm->yv12_fb[cm->lst_fb_idx].y_stride);
1000 } else if (cpi->sf.search_method == DIAMOND) {
1001 vp8_init_dsmotion_compensation(&cpi->mb,
1002 cm->yv12_fb[cm->lst_fb_idx].y_stride);
1003 }
1004
1005 if (cpi->sf.improved_dct) {
1006 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1007 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1008 } else {
1009 /* No fast FDCT defined for any platform at this time. */
1010 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1011 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1012 }
1013
1014 cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
1015
1016 if (cpi->sf.improved_quant) {
1017 cpi->mb.quantize_b = vp8_regular_quantize_b;
1018 } else {
1019 cpi->mb.quantize_b = vp8_fast_quantize_b;
1020 }
1021 if (cpi->sf.improved_quant != last_improved_quant) vp8cx_init_quantizer(cpi);
1022
1023 if (cpi->sf.iterative_sub_pixel == 1) {
1024 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
1025 } else if (cpi->sf.quarter_pixel_search) {
1026 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
1027 } else if (cpi->sf.half_pixel_search) {
1028 cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
1029 } else {
1030 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1031 }
1032
1033 if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1) {
1034 cpi->mb.optimize = 1;
1035 } else {
1036 cpi->mb.optimize = 0;
1037 }
1038
1039 if (cpi->common.full_pixel) {
1040 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1041 }
1042
1043 #ifdef SPEEDSTATS
1044 frames_at_speed[cpi->Speed]++;
1045 #endif
1046 }
1047 #undef GOOD
1048 #undef RT
1049
1050 static void alloc_raw_frame_buffers(VP8_COMP *cpi) {
1051 #if VP8_TEMPORAL_ALT_REF
1052 int width = (cpi->oxcf.Width + 15) & ~15;
1053 int height = (cpi->oxcf.Height + 15) & ~15;
1054 #endif
1055
1056 cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
1057 cpi->oxcf.lag_in_frames);
1058 if (!cpi->lookahead) {
1059 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1060 "Failed to allocate lag buffers");
1061 }
1062
1063 #if VP8_TEMPORAL_ALT_REF
1064
1065 if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, width, height,
1066 VP8BORDERINPIXELS)) {
1067 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1068 "Failed to allocate altref buffer");
1069 }
1070
1071 #endif
1072 }
1073
1074 static void dealloc_raw_frame_buffers(VP8_COMP *cpi) {
1075 #if VP8_TEMPORAL_ALT_REF
1076 vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
1077 #endif
1078 vp8_lookahead_destroy(cpi->lookahead);
1079 }
1080
1081 static int vp8_alloc_partition_data(VP8_COMP *cpi) {
1082 vpx_free(cpi->mb.pip);
1083
1084 cpi->mb.pip =
1085 vpx_calloc((cpi->common.mb_cols + 1) * (cpi->common.mb_rows + 1),
1086 sizeof(PARTITION_INFO));
1087 if (!cpi->mb.pip) return 1;
1088
1089 cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
1090
1091 return 0;
1092 }
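/* Note (assumption): pip is sized (mb_cols + 1) * (mb_rows + 1) and pi is
 * offset by mode_info_stride + 1 so that, like the common mode-info array,
 * there is a one-entry border above and to the left of the first real
 * macroblock's partition info.
 */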
1093
1094 void vp8_alloc_compressor_data(VP8_COMP *cpi) {
1095 VP8_COMMON *cm = &cpi->common;
1096
1097 int width = cm->Width;
1098 int height = cm->Height;
1099 #if CONFIG_MULTITHREAD
1100 int prev_mb_rows = cm->mb_rows;
1101 #endif
1102
1103 if (vp8_alloc_frame_buffers(cm, width, height)) {
1104 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1105 "Failed to allocate frame buffers");
1106 }
1107
1108 if (vp8_alloc_partition_data(cpi)) {
1109 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1110 "Failed to allocate partition data");
1111 }
1112
1113 if ((width & 0xf) != 0) width += 16 - (width & 0xf);
1114
1115 if ((height & 0xf) != 0) height += 16 - (height & 0xf);
1116
1117 if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame, width, height,
1118 VP8BORDERINPIXELS)) {
1119 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1120 "Failed to allocate last frame buffer");
1121 }
1122
1123 if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height,
1124 VP8BORDERINPIXELS)) {
1125 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1126 "Failed to allocate scaled source buffer");
1127 }
1128
1129 vpx_free(cpi->tok);
1130
1131 {
1132 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
1133 unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
1134 #else
1135 unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
1136 #endif
1137 CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
1138 }
1139
1140 /* Data used for real time vc mode to see if gf needs refreshing */
1141 cpi->zeromv_count = 0;
1142
1143 /* Structures used to monitor GF usage */
1144 vpx_free(cpi->gf_active_flags);
1145 CHECK_MEM_ERROR(
1146 cpi->gf_active_flags,
1147 vpx_calloc(sizeof(*cpi->gf_active_flags), cm->mb_rows * cm->mb_cols));
1148 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
1149
1150 vpx_free(cpi->mb_activity_map);
1151 CHECK_MEM_ERROR(
1152 cpi->mb_activity_map,
1153 vpx_calloc(sizeof(*cpi->mb_activity_map), cm->mb_rows * cm->mb_cols));
1154
1155 /* allocate memory for storing last frame's MVs for MV prediction. */
1156 vpx_free(cpi->lfmv);
1157 CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1158 sizeof(*cpi->lfmv)));
1159 vpx_free(cpi->lf_ref_frame_sign_bias);
1160 CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias,
1161 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1162 sizeof(*cpi->lf_ref_frame_sign_bias)));
1163 vpx_free(cpi->lf_ref_frame);
1164 CHECK_MEM_ERROR(cpi->lf_ref_frame,
1165 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1166 sizeof(*cpi->lf_ref_frame)));
1167
1168 /* Create the encoder segmentation map and set all entries to 0 */
1169 vpx_free(cpi->segmentation_map);
1170 CHECK_MEM_ERROR(
1171 cpi->segmentation_map,
1172 vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->segmentation_map)));
1173 cpi->cyclic_refresh_mode_index = 0;
1174 vpx_free(cpi->active_map);
1175 CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cm->mb_rows * cm->mb_cols,
1176 sizeof(*cpi->active_map)));
1177 memset(cpi->active_map, 1, (cm->mb_rows * cm->mb_cols));
1178
1179 #if CONFIG_MULTITHREAD
1180 if (width < 640) {
1181 cpi->mt_sync_range = 1;
1182 } else if (width <= 1280) {
1183 cpi->mt_sync_range = 4;
1184 } else if (width <= 2560) {
1185 cpi->mt_sync_range = 8;
1186 } else {
1187 cpi->mt_sync_range = 16;
1188 }
1189
1190 if (cpi->oxcf.multi_threaded > 1) {
1191 int i;
1192
1193 /* De-allocate and re-allocate mutex */
1194 if (cpi->pmutex != NULL) {
1195 for (i = 0; i < prev_mb_rows; ++i) {
1196 pthread_mutex_destroy(&cpi->pmutex[i]);
1197 }
1198 vpx_free(cpi->pmutex);
1199 cpi->pmutex = NULL;
1200 }
1201
1202 CHECK_MEM_ERROR(cpi->pmutex,
1203 vpx_malloc(sizeof(*cpi->pmutex) * cm->mb_rows));
1204 if (cpi->pmutex) {
1205 for (i = 0; i < cm->mb_rows; ++i) {
1206 pthread_mutex_init(&cpi->pmutex[i], NULL);
1207 }
1208 }
1209
1210 vpx_free(cpi->mt_current_mb_col);
1211 CHECK_MEM_ERROR(cpi->mt_current_mb_col,
1212 vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));
1213 }
1214
1215 #endif
1216
1217 vpx_free(cpi->tplist);
1218 CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));
1219
1220 #if CONFIG_TEMPORAL_DENOISING
1221 if (cpi->oxcf.noise_sensitivity > 0) {
1222 vp8_denoiser_free(&cpi->denoiser);
1223 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1224 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1225 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1226 "Failed to allocate denoiser");
1227 }
1228 }
1229 #endif
1230 }
1231
1232 /* Quant MOD */
1233 static const int q_trans[] = {
1234 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 17, 18, 19,
1235 20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41,
1236 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79,
1237 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127,
1238 };
1239
1240 int vp8_reverse_trans(int x) {
1241 int i;
1242
1243 for (i = 0; i < 64; ++i) {
1244 if (q_trans[i] >= x) return i;
1245 }
1246
1247 return 63;
1248 }
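/* Example (illustrative): q_trans[] maps the external 0-63 quantizer scale to
 * the internal 0-127 Q index, and vp8_reverse_trans() inverts it by returning
 * the first external value whose entry reaches x: vp8_reverse_trans(12) == 10
 * (q_trans[10] == 12), and vp8_reverse_trans(14) == 12 since q_trans[12] == 15
 * is the first entry >= 14.
 */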
1249 void vp8_new_framerate(VP8_COMP *cpi, double framerate) {
1250 if (framerate < .1) framerate = 30;
1251
1252 cpi->framerate = framerate;
1253 cpi->output_framerate = framerate;
1254 cpi->per_frame_bandwidth =
1255 (int)(cpi->oxcf.target_bandwidth / cpi->output_framerate);
1256 cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
1257 cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
1258 cpi->oxcf.two_pass_vbrmin_section / 100);
1259
1260 /* Set Maximum gf/arf interval */
1261 cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
1262
1263 if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12;
1264
1265 /* Extended interval for genuinely static scenes */
1266 cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
1267
1268 /* Special conditions when alt ref frame enabled in lagged compress mode */
1269 if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
1270 if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) {
1271 cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1272 }
1273
1274 if (cpi->twopass.static_scene_max_gf_interval >
1275 cpi->oxcf.lag_in_frames - 1) {
1276 cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1277 }
1278 }
1279
1280 if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval) {
1281 cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
1282 }
1283 }
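/* Example (illustrative): at 30 fps the baseline max_gf_interval is
 * (30 / 2) + 2 = 17 frames; at 15 fps it would be 9 and is clamped up to the
 * minimum of 12, before the lag-in-frames and static-scene caps above are
 * applied.
 */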
1284
1285 static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1286 VP8_COMMON *cm = &cpi->common;
1287
1288 cpi->oxcf = *oxcf;
1289
1290 cpi->auto_gold = 1;
1291 cpi->auto_adjust_gold_quantizer = 1;
1292
1293 cm->version = oxcf->Version;
1294 vp8_setup_version(cm);
1295
1296 /* Frame rate is not available on the first frame, as it's derived from
1297 * the observed timestamps. The actual value used here doesn't matter
1298 * too much, as it will adapt quickly.
1299 */
1300 if (oxcf->timebase.num > 0) {
1301 cpi->framerate =
1302 (double)(oxcf->timebase.den) / (double)(oxcf->timebase.num);
1303 } else {
1304 cpi->framerate = 30;
1305 }
1306
1307 /* If the reciprocal of the timebase seems like a reasonable framerate,
1308 * then use that as a guess, otherwise use 30.
1309 */
1310 if (cpi->framerate > 180) cpi->framerate = 30;
1311
1312 cpi->ref_framerate = cpi->framerate;
1313
1314 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
1315
1316 cm->refresh_golden_frame = 0;
1317 cm->refresh_last_frame = 1;
1318 cm->refresh_entropy_probs = 1;
1319
1320 /* change includes all joint functionality */
1321 vp8_change_config(cpi, oxcf);
1322
1323 /* Initialize active best and worst q and average q values. */
1324 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1325 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1326 cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
1327
1328 /* Initialise the starting buffer levels */
1329 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
1330 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
1331
1332 cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
1333 cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
1334 cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
1335 cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
1336
1337 cpi->total_actual_bits = 0;
1338 cpi->total_target_vs_actual = 0;
1339
1340 /* Temporal scalability */
1341 if (cpi->oxcf.number_of_layers > 1) {
1342 unsigned int i;
1343 double prev_layer_framerate = 0;
1344
1345 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
1346 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
1347 prev_layer_framerate =
1348 cpi->output_framerate / cpi->oxcf.rate_decimator[i];
1349 }
1350 }
1351
1352 #if VP8_TEMPORAL_ALT_REF
1353 {
1354 int i;
1355
1356 cpi->fixed_divide[0] = 0;
1357
1358 for (i = 1; i < 512; ++i) cpi->fixed_divide[i] = 0x80000 / i;
1359 }
1360 #endif
1361 }
1362
1363 static void update_layer_contexts(VP8_COMP *cpi) {
1364 VP8_CONFIG *oxcf = &cpi->oxcf;
1365
1366 /* Update snapshots of the layer contexts to reflect new parameters */
1367 if (oxcf->number_of_layers > 1) {
1368 unsigned int i;
1369 double prev_layer_framerate = 0;
1370
1371 assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
1372 for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i) {
1373 LAYER_CONTEXT *lc = &cpi->layer_context[i];
1374
1375 lc->framerate = cpi->ref_framerate / oxcf->rate_decimator[i];
1376 lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
1377
1378 lc->starting_buffer_level = rescale(
1379 (int)oxcf->starting_buffer_level_in_ms, lc->target_bandwidth, 1000);
1380
1381 if (oxcf->optimal_buffer_level == 0) {
1382 lc->optimal_buffer_level = lc->target_bandwidth / 8;
1383 } else {
1384 lc->optimal_buffer_level = rescale(
1385 (int)oxcf->optimal_buffer_level_in_ms, lc->target_bandwidth, 1000);
1386 }
1387
1388 if (oxcf->maximum_buffer_size == 0) {
1389 lc->maximum_buffer_size = lc->target_bandwidth / 8;
1390 } else {
1391 lc->maximum_buffer_size = rescale((int)oxcf->maximum_buffer_size_in_ms,
1392 lc->target_bandwidth, 1000);
1393 }
1394
1395 /* Work out the average size of a frame within this layer */
1396 if (i > 0) {
1397 lc->avg_frame_size_for_layer =
1398 (int)((oxcf->target_bitrate[i] - oxcf->target_bitrate[i - 1]) *
1399 1000 / (lc->framerate - prev_layer_framerate));
1400 }
1401
1402 prev_layer_framerate = lc->framerate;
1403 }
1404 }
1405 }
1406
1407 void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1408 VP8_COMMON *cm = &cpi->common;
1409 int last_w, last_h;
1410 unsigned int prev_number_of_layers;
1411
1412 if (!cpi) return;
1413
1414 if (!oxcf) return;
1415
1416 if (cm->version != oxcf->Version) {
1417 cm->version = oxcf->Version;
1418 vp8_setup_version(cm);
1419 }
1420
1421 last_w = cpi->oxcf.Width;
1422 last_h = cpi->oxcf.Height;
1423 prev_number_of_layers = cpi->oxcf.number_of_layers;
1424
1425 cpi->oxcf = *oxcf;
1426
1427 switch (cpi->oxcf.Mode) {
1428 case MODE_REALTIME:
1429 cpi->pass = 0;
1430 cpi->compressor_speed = 2;
1431
1432 if (cpi->oxcf.cpu_used < -16) {
1433 cpi->oxcf.cpu_used = -16;
1434 }
1435
1436 if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16;
1437
1438 break;
1439
1440 case MODE_GOODQUALITY:
1441 cpi->pass = 0;
1442 cpi->compressor_speed = 1;
1443
1444 if (cpi->oxcf.cpu_used < -5) {
1445 cpi->oxcf.cpu_used = -5;
1446 }
1447
1448 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1449
1450 break;
1451
1452 case MODE_BESTQUALITY:
1453 cpi->pass = 0;
1454 cpi->compressor_speed = 0;
1455 break;
1456
1457 case MODE_FIRSTPASS:
1458 cpi->pass = 1;
1459 cpi->compressor_speed = 1;
1460 break;
1461 case MODE_SECONDPASS:
1462 cpi->pass = 2;
1463 cpi->compressor_speed = 1;
1464
1465 if (cpi->oxcf.cpu_used < -5) {
1466 cpi->oxcf.cpu_used = -5;
1467 }
1468
1469 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1470
1471 break;
1472 case MODE_SECONDPASS_BEST:
1473 cpi->pass = 2;
1474 cpi->compressor_speed = 0;
1475 break;
1476 }
1477
1478 if (cpi->pass == 0) cpi->auto_worst_q = 1;
1479
1480 cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
1481 cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
1482 cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
1483
1484 if (oxcf->fixed_q >= 0) {
1485 if (oxcf->worst_allowed_q < 0) {
1486 cpi->oxcf.fixed_q = q_trans[0];
1487 } else {
1488 cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];
1489 }
1490
1491 if (oxcf->alt_q < 0) {
1492 cpi->oxcf.alt_q = q_trans[0];
1493 } else {
1494 cpi->oxcf.alt_q = q_trans[oxcf->alt_q];
1495 }
1496
1497 if (oxcf->key_q < 0) {
1498 cpi->oxcf.key_q = q_trans[0];
1499 } else {
1500 cpi->oxcf.key_q = q_trans[oxcf->key_q];
1501 }
1502
1503 if (oxcf->gold_q < 0) {
1504 cpi->oxcf.gold_q = q_trans[0];
1505 } else {
1506 cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
1507 }
1508 }
1509
1510 cpi->baseline_gf_interval =
1511 cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
1512
1513 // GF behavior for 1 pass CBR, used when error_resilience is off.
1514 if (!cpi->oxcf.error_resilient_mode &&
1515 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1516 cpi->oxcf.Mode == MODE_REALTIME)
1517 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1518
1519 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
1520 cpi->oxcf.token_partitions = 3;
1521 #endif
1522
1523 if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) {
1524 cm->multi_token_partition = (TOKEN_PARTITION)cpi->oxcf.token_partitions;
1525 }
1526
1527 setup_features(cpi);
1528
1529 {
1530 int i;
1531
1532 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
1533 cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
1534 }
1535 }
1536
1537 /* At the moment the first order values may not be > MAXQ */
1538 if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ;
1539
1540 /* local file playback mode == really big buffer */
1541 if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
1542 cpi->oxcf.starting_buffer_level = 60000;
1543 cpi->oxcf.optimal_buffer_level = 60000;
1544 cpi->oxcf.maximum_buffer_size = 240000;
1545 cpi->oxcf.starting_buffer_level_in_ms = 60000;
1546 cpi->oxcf.optimal_buffer_level_in_ms = 60000;
1547 cpi->oxcf.maximum_buffer_size_in_ms = 240000;
1548 }
1549
1550 /* Convert target bandwidth from Kbit/s to Bit/s */
1551 cpi->oxcf.target_bandwidth *= 1000;
1552
1553 cpi->oxcf.starting_buffer_level = rescale(
1554 (int)cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1555
1556 /* Set or reset optimal and maximum buffer levels. */
1557 if (cpi->oxcf.optimal_buffer_level == 0) {
1558 cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
1559 } else {
1560 cpi->oxcf.optimal_buffer_level = rescale(
1561 (int)cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1562 }
1563
1564 if (cpi->oxcf.maximum_buffer_size == 0) {
1565 cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
1566 } else {
1567 cpi->oxcf.maximum_buffer_size = rescale((int)cpi->oxcf.maximum_buffer_size,
1568 cpi->oxcf.target_bandwidth, 1000);
1569 }
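/* Example (illustrative): for a 500 kbit/s stream, target_bandwidth becomes
 * 500000 bit/s above, and a starting_buffer_level given as 4000 ms becomes
 * rescale(4000, 500000, 1000) = 2,000,000 bits here.
 */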
1570 // Under a configuration change, where maximum_buffer_size may change,
1571 // keep buffer level clipped to the maximum allowed buffer size.
1572 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
1573 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
1574 cpi->buffer_level = cpi->bits_off_target;
1575 }
1576
1577 /* Set up frame rate and related parameters rate control values. */
1578 vp8_new_framerate(cpi, cpi->framerate);
1579
1580 /* Set absolute upper and lower quality limits */
1581 cpi->worst_quality = cpi->oxcf.worst_allowed_q;
1582 cpi->best_quality = cpi->oxcf.best_allowed_q;
1583
1584 /* active values should only be modified if out of new range */
1585 if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) {
1586 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1587 }
1588 /* less likely */
1589 else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) {
1590 cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
1591 }
1592 if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) {
1593 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1594 }
1595 /* less likely */
1596 else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) {
1597 cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
1598 }
1599
1600 cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
1601
1602 cpi->cq_target_quality = cpi->oxcf.cq_level;
1603
1604 /* Only allow dropped frames in buffered mode */
1605 cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
1606
1607 cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
1608
1609 // Check if the number of temporal layers has changed, and if so reset the
1610 // pattern counter and set/initialize the temporal layer context for the
1611 // new layer configuration.
1612 if (cpi->oxcf.number_of_layers != prev_number_of_layers) {
1613 // If the number of temporal layers are changed we must start at the
1614 // base of the pattern cycle, so set the layer id to 0 and reset
1615 // the temporal pattern counter.
1616 if (cpi->temporal_layer_id > 0) {
1617 cpi->temporal_layer_id = 0;
1618 }
1619 cpi->temporal_pattern_counter = 0;
1620 reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
1621 }
1622
1623 if (!cpi->initial_width) {
1624 cpi->initial_width = cpi->oxcf.Width;
1625 cpi->initial_height = cpi->oxcf.Height;
1626 }
1627
1628 cm->Width = cpi->oxcf.Width;
1629 cm->Height = cpi->oxcf.Height;
1630 assert(cm->Width <= cpi->initial_width);
1631 assert(cm->Height <= cpi->initial_height);
1632
1633 /* TODO(jkoleszar): if an internal spatial resampling is active,
1634 * and we downsize the input image, maybe we should clear the
1635 * internal scale immediately rather than waiting for it to
1636 * correct.
1637 */
1638
1639 /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
1640 if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7;
1641
1642 cm->sharpness_level = cpi->oxcf.Sharpness;
1643
1644 if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL) {
1645 int hr, hs, vr, vs;
1646
1647 Scale2Ratio(cm->horiz_scale, &hr, &hs);
1648 Scale2Ratio(cm->vert_scale, &vr, &vs);
1649
1650     /* Always round up to the next whole number. */
1651 cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
1652 cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
1653 }
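  /* Illustrative example (added): the expressions above are ceiling
   * divisions, so any fractional pixel rounds up.  Assuming Scale2Ratio()
   * maps ONETWO to hr = 1, hs = 2, an input width of 641 scales to
   * (2 - 1 + 641 * 1) / 2 = 321 rather than 320.
   */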
1654
1655 if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height) {
1656 cpi->force_next_frame_intra = 1;
1657 }
1658
1659 if (((cm->Width + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
1660 ((cm->Height + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
1661 cm->yv12_fb[cm->lst_fb_idx].y_width == 0) {
1662 dealloc_raw_frame_buffers(cpi);
1663 alloc_raw_frame_buffers(cpi);
1664 vp8_alloc_compressor_data(cpi);
1665 }
1666
1667 if (cpi->oxcf.fixed_q >= 0) {
1668 cpi->last_q[0] = cpi->oxcf.fixed_q;
1669 cpi->last_q[1] = cpi->oxcf.fixed_q;
1670 }
1671
1672 cpi->Speed = cpi->oxcf.cpu_used;
1673
1674   /* Force allow_lag to 0 if lag_in_frames is 0. */
1675 if (cpi->oxcf.lag_in_frames == 0) {
1676 cpi->oxcf.allow_lag = 0;
1677 }
1678 /* Limit on lag buffers as these are not currently dynamically allocated */
1679 else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
1680 cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
1681 }
1682
1683 /* YX Temp */
1684 cpi->alt_ref_source = NULL;
1685 cpi->is_src_frame_alt_ref = 0;
1686
1687 #if CONFIG_TEMPORAL_DENOISING
1688 if (cpi->oxcf.noise_sensitivity) {
1689 if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc) {
1690 int width = (cpi->oxcf.Width + 15) & ~15;
1691 int height = (cpi->oxcf.Height + 15) & ~15;
1692 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1693 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1694 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1695 "Failed to allocate denoiser");
1696 }
1697 }
1698 }
1699 #endif
1700
1701 #if 0
1702 /* Experimental RD Code */
1703 cpi->frame_distortion = 0;
1704 cpi->last_frame_distortion = 0;
1705 #endif
1706 }
1707
1708 #ifndef M_LOG2_E
1709 #define M_LOG2_E 0.693147180559945309417
1710 #endif
1711 #define log2f(x) (log(x) / (float)M_LOG2_E)
1712
1713 static void cal_mvsadcosts(int *mvsadcost[2]) {
1714 int i = 1;
1715
1716 mvsadcost[0][0] = 300;
1717 mvsadcost[1][0] = 300;
1718
1719 do {
1720 double z = 256 * (2 * (log2f(8 * i) + .6));
1721 mvsadcost[0][i] = (int)z;
1722 mvsadcost[1][i] = (int)z;
1723 mvsadcost[0][-i] = (int)z;
1724 mvsadcost[1][-i] = (int)z;
1725 } while (++i <= mvfp_max);
1726 }
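/* Worked example (added): the table built above is symmetric about zero and
 * follows cost(i) = 256 * (2 * (log2(8 * i) + 0.6)) for i >= 1, with the
 * zero entry pinned at 300.  For i = 1 this truncates to 1843 and for i = 2
 * to 2355, so the SAD cost of a motion vector component grows roughly
 * logarithmically with its magnitude.
 */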
1727
1728 struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
1729 int i;
1730
1731 VP8_COMP *cpi;
1732 VP8_COMMON *cm;
1733
1734 cpi = vpx_memalign(32, sizeof(VP8_COMP));
1735 /* Check that the CPI instance is valid */
1736 if (!cpi) return 0;
1737
1738 cm = &cpi->common;
1739
1740 memset(cpi, 0, sizeof(VP8_COMP));
1741
1742 if (setjmp(cm->error.jmp)) {
1743 cpi->common.error.setjmp = 0;
1744 vp8_remove_compressor(&cpi);
1745 return 0;
1746 }
1747
1748 cpi->common.error.setjmp = 1;
1749
1750 CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site),
1751 (MAX_MVSEARCH_STEPS * 8) + 1));
1752
1753 vp8_create_common(&cpi->common);
1754
1755 init_config(cpi, oxcf);
1756
1757 memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob,
1758 sizeof(vp8cx_base_skip_false_prob));
1759 cpi->common.current_video_frame = 0;
1760 cpi->temporal_pattern_counter = 0;
1761 cpi->temporal_layer_id = -1;
1762 cpi->kf_overspend_bits = 0;
1763 cpi->kf_bitrate_adjustment = 0;
1764 cpi->frames_till_gf_update_due = 0;
1765 cpi->gf_overspend_bits = 0;
1766 cpi->non_gf_bitrate_adjustment = 0;
1767 cpi->prob_last_coded = 128;
1768 cpi->prob_gf_coded = 128;
1769 cpi->prob_intra_coded = 63;
1770
1771 /* Prime the recent reference frame usage counters.
1772 * Hereafter they will be maintained as a sort of moving average
1773 */
1774 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
1775 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
1776 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
1777 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
1778
1779 /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
1780 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
1781
1782 cpi->twopass.gf_decay_rate = 0;
1783 cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
1784
1785 cpi->gold_is_last = 0;
1786 cpi->alt_is_last = 0;
1787 cpi->gold_is_alt = 0;
1788
1789 cpi->active_map_enabled = 0;
1790
1791 #if 0
1792 /* Experimental code for lagged and one pass */
1793 /* Initialise one_pass GF frames stats */
1794 /* Update stats used for GF selection */
1795 if (cpi->pass == 0)
1796 {
1797 cpi->one_pass_frame_index = 0;
1798
1799 for (i = 0; i < MAX_LAG_BUFFERS; ++i)
1800 {
1801 cpi->one_pass_frame_stats[i].frames_so_far = 0;
1802 cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
1803 cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
1804 cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
1805 cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
1806 cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
1807 cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
1808 cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
1809 cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
1810 }
1811 }
1812 #endif
1813
1814 cpi->mse_source_denoised = 0;
1815
1816 /* Should we use the cyclic refresh method.
1817 * Currently there is no external control for this.
1818 * Enable it for error_resilient_mode, or for 1 pass CBR mode.
1819 */
1820 cpi->cyclic_refresh_mode_enabled =
1821 (cpi->oxcf.error_resilient_mode ||
1822 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1823 cpi->oxcf.Mode <= 2));
1824 cpi->cyclic_refresh_mode_max_mbs_perframe =
1825 (cpi->common.mb_rows * cpi->common.mb_cols) / 7;
1826 if (cpi->oxcf.number_of_layers == 1) {
1827 cpi->cyclic_refresh_mode_max_mbs_perframe =
1828 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
1829 } else if (cpi->oxcf.number_of_layers == 2) {
1830 cpi->cyclic_refresh_mode_max_mbs_perframe =
1831 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
1832 }
1833 cpi->cyclic_refresh_mode_index = 0;
1834 cpi->cyclic_refresh_q = 32;
1835
1836 // GF behavior for 1 pass CBR, used when error_resilience is off.
1837 cpi->gf_update_onepass_cbr = 0;
1838 cpi->gf_noboost_onepass_cbr = 0;
1839 if (!cpi->oxcf.error_resilient_mode &&
1840 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER && cpi->oxcf.Mode <= 2) {
1841 cpi->gf_update_onepass_cbr = 1;
1842 cpi->gf_noboost_onepass_cbr = 1;
1843 cpi->gf_interval_onepass_cbr =
1844 cpi->cyclic_refresh_mode_max_mbs_perframe > 0
1845 ? (2 * (cpi->common.mb_rows * cpi->common.mb_cols) /
1846 cpi->cyclic_refresh_mode_max_mbs_perframe)
1847 : 10;
1848 cpi->gf_interval_onepass_cbr =
1849 VPXMIN(40, VPXMAX(6, cpi->gf_interval_onepass_cbr));
1850 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1851 }
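  /* Illustrative example (added): with the single-layer setting above of
   * max_mbs_perframe = MBs / 20, the interval is roughly
   * 2 * MBs / (MBs / 20) = 40 frames, which the VPXMIN/VPXMAX clamp keeps
   * within the allowed range of [6, 40].
   */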
1852
1853 if (cpi->cyclic_refresh_mode_enabled) {
1854 CHECK_MEM_ERROR(cpi->cyclic_refresh_map,
1855 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1856 } else {
1857 cpi->cyclic_refresh_map = (signed char *)NULL;
1858 }
1859
1860 CHECK_MEM_ERROR(cpi->consec_zero_last,
1861 vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
1862 CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias,
1863 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1864
1865 #ifdef VP8_ENTROPY_STATS
1866 init_context_counters();
1867 #endif
1868
1869   /* Initialize the feed-forward activity masking. */
1870 cpi->activity_avg = 90 << 12;
1871
1872 /* Give a sensible default for the first frame. */
1873 cpi->frames_since_key = 8;
1874 cpi->key_frame_frequency = cpi->oxcf.key_freq;
1875 cpi->this_key_frame_forced = 0;
1876 cpi->next_key_frame_forced = 0;
1877
1878 cpi->source_alt_ref_pending = 0;
1879 cpi->source_alt_ref_active = 0;
1880 cpi->common.refresh_alt_ref_frame = 0;
1881
1882 cpi->force_maxqp = 0;
1883
1884 cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
1885 #if CONFIG_INTERNAL_STATS
1886 cpi->b_calculate_ssimg = 0;
1887
1888 cpi->count = 0;
1889 cpi->bytes = 0;
1890
1891 if (cpi->b_calculate_psnr) {
1892 cpi->total_sq_error = 0.0;
1893 cpi->total_sq_error2 = 0.0;
1894 cpi->total_y = 0.0;
1895 cpi->total_u = 0.0;
1896 cpi->total_v = 0.0;
1897 cpi->total = 0.0;
1898 cpi->totalp_y = 0.0;
1899 cpi->totalp_u = 0.0;
1900 cpi->totalp_v = 0.0;
1901 cpi->totalp = 0.0;
1902 cpi->tot_recode_hits = 0;
1903 cpi->summed_quality = 0;
1904 cpi->summed_weights = 0;
1905 }
1906
1907 #endif
1908
1909 cpi->first_time_stamp_ever = 0x7FFFFFFF;
1910
1911 cpi->frames_till_gf_update_due = 0;
1912 cpi->key_frame_count = 1;
1913
1914 cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
1915 cpi->ni_tot_qi = 0;
1916 cpi->ni_frames = 0;
1917 cpi->total_byte_count = 0;
1918
1919 cpi->drop_frame = 0;
1920
1921 cpi->rate_correction_factor = 1.0;
1922 cpi->key_frame_rate_correction_factor = 1.0;
1923 cpi->gf_rate_correction_factor = 1.0;
1924 cpi->twopass.est_max_qcorrection_factor = 1.0;
1925
1926 for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
1927 cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
1928 }
1929
1930 #ifdef OUTPUT_YUV_SRC
1931 yuv_file = fopen("bd.yuv", "ab");
1932 #endif
1933 #ifdef OUTPUT_YUV_DENOISED
1934 yuv_denoised_file = fopen("denoised.yuv", "ab");
1935 #endif
1936
1937 #if 0
1938 framepsnr = fopen("framepsnr.stt", "a");
1939 kf_list = fopen("kf_list.stt", "w");
1940 #endif
1941
1942 cpi->output_pkt_list = oxcf->output_pkt_list;
1943
1944 #if !CONFIG_REALTIME_ONLY
1945
1946 if (cpi->pass == 1) {
1947 vp8_init_first_pass(cpi);
1948 } else if (cpi->pass == 2) {
1949 size_t packet_sz = sizeof(FIRSTPASS_STATS);
1950 int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
1951
1952 cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
1953 cpi->twopass.stats_in = cpi->twopass.stats_in_start;
1954 cpi->twopass.stats_in_end =
1955 (void *)((char *)cpi->twopass.stats_in + (packets - 1) * packet_sz);
1956 vp8_init_second_pass(cpi);
1957 }
1958
1959 #endif
1960
1961 if (cpi->compressor_speed == 2) {
1962 cpi->avg_encode_time = 0;
1963 cpi->avg_pick_mode_time = 0;
1964 }
1965
1966 vp8_set_speed_features(cpi);
1967
1968 /* Set starting values of RD threshold multipliers (128 = *1) */
1969 for (i = 0; i < MAX_MODES; ++i) {
1970 cpi->mb.rd_thresh_mult[i] = 128;
1971 }
1972
1973 #ifdef VP8_ENTROPY_STATS
1974 init_mv_ref_counts();
1975 #endif
1976
1977 #if CONFIG_MULTITHREAD
1978 if (vp8cx_create_encoder_threads(cpi)) {
1979 vp8_remove_compressor(&cpi);
1980 return 0;
1981 }
1982 #endif
1983
1984 cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
1985 cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
1986 cpi->fn_ptr[BLOCK_16X16].svf = vpx_sub_pixel_variance16x16;
1987 cpi->fn_ptr[BLOCK_16X16].sdx3f = vpx_sad16x16x3;
1988 cpi->fn_ptr[BLOCK_16X16].sdx8f = vpx_sad16x16x8;
1989 cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;
1990
1991 cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
1992 cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
1993 cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8;
1994 cpi->fn_ptr[BLOCK_16X8].sdx3f = vpx_sad16x8x3;
1995 cpi->fn_ptr[BLOCK_16X8].sdx8f = vpx_sad16x8x8;
1996 cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;
1997
1998 cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
1999 cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
2000 cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16;
2001 cpi->fn_ptr[BLOCK_8X16].sdx3f = vpx_sad8x16x3;
2002 cpi->fn_ptr[BLOCK_8X16].sdx8f = vpx_sad8x16x8;
2003 cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;
2004
2005 cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
2006 cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
2007 cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8;
2008 cpi->fn_ptr[BLOCK_8X8].sdx3f = vpx_sad8x8x3;
2009 cpi->fn_ptr[BLOCK_8X8].sdx8f = vpx_sad8x8x8;
2010 cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;
2011
2012 cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
2013 cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
2014 cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4;
2015 cpi->fn_ptr[BLOCK_4X4].sdx3f = vpx_sad4x4x3;
2016 cpi->fn_ptr[BLOCK_4X4].sdx8f = vpx_sad4x4x8;
2017 cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;
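  /* Note (added): these per-block-size tables wire up the SAD/variance
   * helpers used by motion search: sdf is the plain SAD, vf the variance,
   * svf the sub-pixel variance, while sdx3f/sdx8f evaluate the SAD at 3 or
   * 8 consecutive positions and sdx4df against 4 candidate reference blocks
   * in a single call.
   */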
2018
2019 #if ARCH_X86 || ARCH_X86_64
2020 cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
2021 cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
2022 cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
2023 cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
2024 cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
2025 #endif
2026
2027 cpi->full_search_sad = vp8_full_search_sad;
2028 cpi->diamond_search_sad = vp8_diamond_search_sad;
2029 cpi->refining_search_sad = vp8_refining_search_sad;
2030
2031 /* make sure frame 1 is okay */
2032 cpi->mb.error_bins[0] = cpi->common.MBs;
2033
2034 /* vp8cx_init_quantizer() is first called here. Add check in
2035 * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
2036 * called later when needed. This will avoid unnecessary calls of
2037 * vp8cx_init_quantizer() for every frame.
2038 */
2039 vp8cx_init_quantizer(cpi);
2040
2041 vp8_loop_filter_init(cm);
2042
2043 cpi->common.error.setjmp = 0;
2044
2045 #if CONFIG_MULTI_RES_ENCODING
2046
2047 /* Calculate # of MBs in a row in lower-resolution level image. */
2048 if (cpi->oxcf.mr_encoder_id > 0) vp8_cal_low_res_mb_cols(cpi);
2049
2050 #endif
2051
2052   /* Set up RD cost pointers in the MACROBLOCK struct. */
2053
2054 cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max + 1];
2055 cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max + 1];
2056 cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max + 1];
2057 cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max + 1];
2058
2059 cal_mvsadcosts(cpi->mb.mvsadcost);
2060
2061 cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
2062 cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
2063 cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
2064 cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
2065 cpi->mb.token_costs = cpi->rd_costs.token_costs;
2066
2067 /* setup block ptrs & offsets */
2068 vp8_setup_block_ptrs(&cpi->mb);
2069 vp8_setup_block_dptrs(&cpi->mb.e_mbd);
2070
2071 return cpi;
2072 }
2073
2074 void vp8_remove_compressor(VP8_COMP **ptr) {
2075 VP8_COMP *cpi = *ptr;
2076
2077 if (!cpi) return;
2078
2079 if (cpi && (cpi->common.current_video_frame > 0)) {
2080 #if !CONFIG_REALTIME_ONLY
2081
2082 if (cpi->pass == 2) {
2083 vp8_end_second_pass(cpi);
2084 }
2085
2086 #endif
2087
2088 #ifdef VP8_ENTROPY_STATS
2089 print_context_counters();
2090 print_tree_update_probs();
2091 print_mode_context();
2092 #endif
2093
2094 #if CONFIG_INTERNAL_STATS
2095
2096 if (cpi->pass != 1) {
2097 FILE *f = fopen("opsnr.stt", "a");
2098 double time_encoded =
2099 (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
2100 10000000.000;
2101 double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
2102
2103 if (cpi->b_calculate_psnr) {
2104 if (cpi->oxcf.number_of_layers > 1) {
2105 int i;
2106
2107 fprintf(f,
2108 "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2109 "GLPsnrP\tVPXSSIM\n");
2110 for (i = 0; i < (int)cpi->oxcf.number_of_layers; ++i) {
2111 double dr =
2112 (double)cpi->bytes_in_layer[i] * 8.0 / 1000.0 / time_encoded;
2113 double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
2114 cpi->common.Width * cpi->common.Height;
2115 double total_psnr =
2116 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2[i]);
2117 double total_psnr2 =
2118 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2_p[i]);
2119 double total_ssim =
2120 100 * pow(cpi->sum_ssim[i] / cpi->sum_weights[i], 8.0);
2121
2122 fprintf(f,
2123 "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2124 "%7.3f\t%7.3f\n",
2125 i, dr, cpi->sum_psnr[i] / cpi->frames_in_layer[i],
2126 total_psnr, cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
2127 total_psnr2, total_ssim);
2128 }
2129 } else {
2130 double samples =
2131 3.0 / 2 * cpi->count * cpi->common.Width * cpi->common.Height;
2132 double total_psnr =
2133 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error);
2134 double total_psnr2 =
2135 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error2);
2136 double total_ssim =
2137 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
2138
2139 fprintf(f,
2140 "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2141 "GLPsnrP\tVPXSSIM\n");
2142 fprintf(f,
2143 "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2144 "%7.3f\n",
2145 dr, cpi->total / cpi->count, total_psnr,
2146 cpi->totalp / cpi->count, total_psnr2, total_ssim);
2147 }
2148 }
2149 fclose(f);
2150 #if 0
2151 f = fopen("qskip.stt", "a");
2152 fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
2153 fclose(f);
2154 #endif
2155 }
2156
2157 #endif
2158
2159 #ifdef SPEEDSTATS
2160
2161 if (cpi->compressor_speed == 2) {
2162 int i;
2163 FILE *f = fopen("cxspeed.stt", "a");
2164 cnt_pm /= cpi->common.MBs;
2165
2166 for (i = 0; i < 16; ++i) fprintf(f, "%5d", frames_at_speed[i]);
2167
2168 fprintf(f, "\n");
2169 fclose(f);
2170 }
2171
2172 #endif
2173
2174 #ifdef MODE_STATS
2175 {
2176 extern int count_mb_seg[4];
2177 FILE *f = fopen("modes.stt", "a");
2178 double dr = (double)cpi->framerate * (double)bytes * (double)8 /
2179 (double)count / (double)1000;
2180 fprintf(f, "intra_mode in Intra Frames:\n");
2181 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1],
2182 y_modes[2], y_modes[3], y_modes[4]);
2183 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1],
2184 uv_modes[2], uv_modes[3]);
2185 fprintf(f, "B: ");
2186 {
2187 int i;
2188
2189 for (i = 0; i < 10; ++i) fprintf(f, "%8d, ", b_modes[i]);
2190
2191 fprintf(f, "\n");
2192 }
2193
2194 fprintf(f, "Modes in Inter Frames:\n");
2195 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
2196 inter_y_modes[0], inter_y_modes[1], inter_y_modes[2],
2197 inter_y_modes[3], inter_y_modes[4], inter_y_modes[5],
2198 inter_y_modes[6], inter_y_modes[7], inter_y_modes[8],
2199 inter_y_modes[9]);
2200 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0],
2201 inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
2202 fprintf(f, "B: ");
2203 {
2204 int i;
2205
2206 for (i = 0; i < 15; ++i) fprintf(f, "%8d, ", inter_b_modes[i]);
2207
2208 fprintf(f, "\n");
2209 }
2210 fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1],
2211 count_mb_seg[2], count_mb_seg[3]);
2212 fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4],
2213 inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4],
2214 inter_b_modes[NEW4X4]);
2215
2216 fclose(f);
2217 }
2218 #endif
2219
2220 #ifdef VP8_ENTROPY_STATS
2221 {
2222 int i, j, k;
2223 FILE *fmode = fopen("modecontext.c", "w");
2224
2225 fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
2226 fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
2227 fprintf(fmode,
2228 "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");
2229
2230 for (i = 0; i < 10; ++i) {
2231 fprintf(fmode, " { /* Above Mode : %d */\n", i);
2232
2233 for (j = 0; j < 10; ++j) {
2234 fprintf(fmode, " {");
2235
2236 for (k = 0; k < 10; ++k) {
2237 if (!intra_mode_stats[i][j][k])
2238 fprintf(fmode, " %5d, ", 1);
2239 else
2240 fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
2241 }
2242
2243 fprintf(fmode, "}, /* left_mode %d */\n", j);
2244 }
2245
2246 fprintf(fmode, " },\n");
2247 }
2248
2249 fprintf(fmode, "};\n");
2250 fclose(fmode);
2251 }
2252 #endif
2253
2254 #if defined(SECTIONBITS_OUTPUT)
2255
2256 if (0) {
2257 int i;
2258 FILE *f = fopen("tokenbits.stt", "a");
2259
2260 for (i = 0; i < 28; ++i) fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
2261
2262 fprintf(f, "\n");
2263 fclose(f);
2264 }
2265
2266 #endif
2267
2268 #if 0
2269 {
2270 printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
2271 printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
2272 printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
2273 }
2274 #endif
2275 }
2276
2277 #if CONFIG_MULTITHREAD
2278 vp8cx_remove_encoder_threads(cpi);
2279 #endif
2280
2281 #if CONFIG_TEMPORAL_DENOISING
2282 vp8_denoiser_free(&cpi->denoiser);
2283 #endif
2284 dealloc_compressor_data(cpi);
2285 vpx_free(cpi->mb.ss);
2286 vpx_free(cpi->tok);
2287 vpx_free(cpi->cyclic_refresh_map);
2288 vpx_free(cpi->consec_zero_last);
2289 vpx_free(cpi->consec_zero_last_mvbias);
2290
2291 vp8_remove_common(&cpi->common);
2292 vpx_free(cpi);
2293 *ptr = 0;
2294
2295 #ifdef OUTPUT_YUV_SRC
2296 fclose(yuv_file);
2297 #endif
2298 #ifdef OUTPUT_YUV_DENOISED
2299 fclose(yuv_denoised_file);
2300 #endif
2301
2302 #if 0
2303
2304 if (keyfile)
2305 fclose(keyfile);
2306
2307 if (framepsnr)
2308 fclose(framepsnr);
2309
2310 if (kf_list)
2311 fclose(kf_list);
2312
2313 #endif
2314 }
2315
2316 static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
2317 unsigned char *recon, int recon_stride,
2318 unsigned int cols, unsigned int rows) {
2319 unsigned int row, col;
2320 uint64_t total_sse = 0;
2321 int diff;
2322
2323 for (row = 0; row + 16 <= rows; row += 16) {
2324 for (col = 0; col + 16 <= cols; col += 16) {
2325 unsigned int sse;
2326
2327 vpx_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
2328 total_sse += sse;
2329 }
2330
2331 /* Handle odd-sized width */
2332 if (col < cols) {
2333 unsigned int border_row, border_col;
2334 unsigned char *border_orig = orig;
2335 unsigned char *border_recon = recon;
2336
2337 for (border_row = 0; border_row < 16; ++border_row) {
2338 for (border_col = col; border_col < cols; ++border_col) {
2339 diff = border_orig[border_col] - border_recon[border_col];
2340 total_sse += diff * diff;
2341 }
2342
2343 border_orig += orig_stride;
2344 border_recon += recon_stride;
2345 }
2346 }
2347
2348 orig += orig_stride * 16;
2349 recon += recon_stride * 16;
2350 }
2351
2352 /* Handle odd-sized height */
2353 for (; row < rows; ++row) {
2354 for (col = 0; col < cols; ++col) {
2355 diff = orig[col] - recon[col];
2356 total_sse += diff * diff;
2357 }
2358
2359 orig += orig_stride;
2360 recon += recon_stride;
2361 }
2362
2363 vpx_clear_system_state();
2364 return total_sse;
2365 }
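/* Note (added): the plane error above is a plain sum of squared differences;
 * full 16x16 blocks go through vpx_mse16x16 (whose last argument returns the
 * block SSE), and any right/bottom remainder that does not fill a 16x16
 * block is accumulated with the scalar loops.
 */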
2366
2367 static void generate_psnr_packet(VP8_COMP *cpi) {
2368 YV12_BUFFER_CONFIG *orig = cpi->Source;
2369 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
2370 struct vpx_codec_cx_pkt pkt;
2371 uint64_t sse;
2372 int i;
2373 unsigned int width = cpi->common.Width;
2374 unsigned int height = cpi->common.Height;
2375
2376 pkt.kind = VPX_CODEC_PSNR_PKT;
2377 sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
2378 recon->y_stride, width, height);
2379 pkt.data.psnr.sse[0] = sse;
2380 pkt.data.psnr.sse[1] = sse;
2381 pkt.data.psnr.samples[0] = width * height;
2382 pkt.data.psnr.samples[1] = width * height;
2383
2384 width = (width + 1) / 2;
2385 height = (height + 1) / 2;
2386
2387 sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
2388 recon->uv_stride, width, height);
2389 pkt.data.psnr.sse[0] += sse;
2390 pkt.data.psnr.sse[2] = sse;
2391 pkt.data.psnr.samples[0] += width * height;
2392 pkt.data.psnr.samples[2] = width * height;
2393
2394 sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
2395 recon->uv_stride, width, height);
2396 pkt.data.psnr.sse[0] += sse;
2397 pkt.data.psnr.sse[3] = sse;
2398 pkt.data.psnr.samples[0] += width * height;
2399 pkt.data.psnr.samples[3] = width * height;
2400
2401 for (i = 0; i < 4; ++i) {
2402 pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
2403 (double)(pkt.data.psnr.sse[i]));
2404 }
2405
2406 vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
2407 }
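/* Worked example (added): sse[0]/samples[0] accumulate the combined Y+U+V
 * totals while indices 1..3 hold the per-plane values, and each PSNR entry
 * is vpx_sse_to_psnr(samples, 255, sse), i.e.
 * 10 * log10(samples * 255^2 / sse).  For instance a 320x240 luma plane
 * (76800 samples) with an SSE of 500000 comes out at roughly 40.0 dB.
 */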
2408
2409 int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags) {
2410 if (ref_frame_flags > 7) return -1;
2411
2412 cpi->ref_frame_flags = ref_frame_flags;
2413 return 0;
2414 }
2415 int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) {
2416 if (ref_frame_flags > 7) return -1;
2417
2418 cpi->common.refresh_golden_frame = 0;
2419 cpi->common.refresh_alt_ref_frame = 0;
2420 cpi->common.refresh_last_frame = 0;
2421
2422 if (ref_frame_flags & VP8_LAST_FRAME) cpi->common.refresh_last_frame = 1;
2423
2424 if (ref_frame_flags & VP8_GOLD_FRAME) cpi->common.refresh_golden_frame = 1;
2425
2426 if (ref_frame_flags & VP8_ALTR_FRAME) cpi->common.refresh_alt_ref_frame = 1;
2427
2428 return 0;
2429 }
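/* Note (added): ref_frame_flags is a bitmask of VP8_LAST_FRAME,
 * VP8_GOLD_FRAME and VP8_ALTR_FRAME (1, 2 and 4 in vpx/vp8.h), so the "> 7"
 * checks above reject any value with other bits set.  For example passing
 * VP8_LAST_FRAME | VP8_GOLD_FRAME (3) refreshes the last and golden buffers
 * but leaves the alt-ref buffer untouched.
 */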
2430
2431 int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2432 YV12_BUFFER_CONFIG *sd) {
2433 VP8_COMMON *cm = &cpi->common;
2434 int ref_fb_idx;
2435
2436 if (ref_frame_flag == VP8_LAST_FRAME) {
2437 ref_fb_idx = cm->lst_fb_idx;
2438 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2439 ref_fb_idx = cm->gld_fb_idx;
2440 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2441 ref_fb_idx = cm->alt_fb_idx;
2442 } else {
2443 return -1;
2444 }
2445
2446 vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
2447
2448 return 0;
2449 }
2450 int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2451 YV12_BUFFER_CONFIG *sd) {
2452 VP8_COMMON *cm = &cpi->common;
2453
2454 int ref_fb_idx;
2455
2456 if (ref_frame_flag == VP8_LAST_FRAME) {
2457 ref_fb_idx = cm->lst_fb_idx;
2458 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2459 ref_fb_idx = cm->gld_fb_idx;
2460 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2461 ref_fb_idx = cm->alt_fb_idx;
2462 } else {
2463 return -1;
2464 }
2465
2466 vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
2467
2468 return 0;
2469 }
2470 int vp8_update_entropy(VP8_COMP *cpi, int update) {
2471 VP8_COMMON *cm = &cpi->common;
2472 cm->refresh_entropy_probs = update;
2473
2474 return 0;
2475 }
2476
2477 #if defined(OUTPUT_YUV_SRC) || defined(OUTPUT_YUV_DENOISED)
2478 void vp8_write_yuv_frame(FILE *yuv_file, YV12_BUFFER_CONFIG *s) {
2479 unsigned char *src = s->y_buffer;
2480 int h = s->y_height;
2481
2482 do {
2483 fwrite(src, s->y_width, 1, yuv_file);
2484 src += s->y_stride;
2485 } while (--h);
2486
2487 src = s->u_buffer;
2488 h = s->uv_height;
2489
2490 do {
2491 fwrite(src, s->uv_width, 1, yuv_file);
2492 src += s->uv_stride;
2493 } while (--h);
2494
2495 src = s->v_buffer;
2496 h = s->uv_height;
2497
2498 do {
2499 fwrite(src, s->uv_width, 1, yuv_file);
2500 src += s->uv_stride;
2501 } while (--h);
2502 }
2503 #endif
2504
2505 static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
2506 VP8_COMMON *cm = &cpi->common;
2507
2508   /* Are we resizing the image? */
2509 if (cm->horiz_scale != 0 || cm->vert_scale != 0) {
2510 #if CONFIG_SPATIAL_RESAMPLING
2511 int hr, hs, vr, vs;
2512 int tmp_height;
2513
2514 if (cm->vert_scale == 3) {
2515 tmp_height = 9;
2516 } else {
2517 tmp_height = 11;
2518 }
2519
2520 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2521 Scale2Ratio(cm->vert_scale, &vr, &vs);
2522
2523 vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
2524 tmp_height, hs, hr, vs, vr, 0);
2525
2526 vp8_yv12_extend_frame_borders(&cpi->scaled_source);
2527 cpi->Source = &cpi->scaled_source;
2528 #endif
2529 } else {
2530 cpi->Source = sd;
2531 }
2532 }
2533
2534 static int resize_key_frame(VP8_COMP *cpi) {
2535 #if CONFIG_SPATIAL_RESAMPLING
2536 VP8_COMMON *cm = &cpi->common;
2537
2538   /* Do we need to apply resampling for one pass CBR?
2539    * In one pass this is more limited than in two pass CBR.
2540    * The test and any change is only made once per key frame sequence.
2541    */
2542 if (cpi->oxcf.allow_spatial_resampling &&
2543 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) {
2544 int hr, hs, vr, vs;
2545 int new_width, new_height;
2546
2547 /* If we are below the resample DOWN watermark then scale down a
2548 * notch.
2549 */
2550 if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark *
2551 cpi->oxcf.optimal_buffer_level / 100)) {
2552 cm->horiz_scale =
2553 (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
2554 cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
2555 }
2556 /* Should we now start scaling back up */
2557 else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark *
2558 cpi->oxcf.optimal_buffer_level / 100)) {
2559 cm->horiz_scale =
2560 (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
2561 cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
2562 }
2563
2564 /* Get the new height and width */
2565 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2566 Scale2Ratio(cm->vert_scale, &vr, &vs);
2567 new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
2568 new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
2569
2570 /* If the image size has changed we need to reallocate the buffers
2571 * and resample the source image
2572 */
2573 if ((cm->Width != new_width) || (cm->Height != new_height)) {
2574 cm->Width = new_width;
2575 cm->Height = new_height;
2576 vp8_alloc_compressor_data(cpi);
2577 scale_and_extend_source(cpi->un_scaled_source, cpi);
2578 return 1;
2579 }
2580 }
2581
2582 #endif
2583 return 0;
2584 }
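/* Note (added): the watermarks used above are percentages of the optimal
 * buffer level.  Illustrative example: with resample_down_water_mark = 20
 * and an optimal_buffer_level of 48,000,000 bits, the scale factors step
 * down one notch whenever the buffer falls below 9,600,000 bits, and step
 * back towards NORMAL once the buffer rises above the resample-up watermark.
 */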
2585
2586 static void update_alt_ref_frame_stats(VP8_COMP *cpi) {
2587 VP8_COMMON *cm = &cpi->common;
2588
2589 /* Select an interval before next GF or altref */
2590 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2591
2592 if ((cpi->pass != 2) && cpi->frames_till_gf_update_due) {
2593 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2594
2595 /* Set the bits per frame that we should try and recover in
2596 * subsequent inter frames to account for the extra GF spend...
2597      * note that this does not apply for GF updates that occur
2598 * coincident with a key frame as the extra cost of key frames is
2599 * dealt with elsewhere.
2600 */
2601 cpi->gf_overspend_bits += cpi->projected_frame_size;
2602 cpi->non_gf_bitrate_adjustment =
2603 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2604 }
2605
2606 /* Update data structure that monitors level of reference to last GF */
2607 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2608 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2609
2610   /* This frame does the refresh; subsequent frames do not unless the user requests it. */
2611 cpi->frames_since_golden = 0;
2612
2613 /* Clear the alternate reference update pending flag. */
2614 cpi->source_alt_ref_pending = 0;
2615
2616 /* Set the alternate reference frame active flag */
2617 cpi->source_alt_ref_active = 1;
2618 }
2619 static void update_golden_frame_stats(VP8_COMP *cpi) {
2620 VP8_COMMON *cm = &cpi->common;
2621
2622 /* Update the Golden frame usage counts. */
2623 if (cm->refresh_golden_frame) {
2624 /* Select an interval before next GF */
2625 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2626
2627 if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0)) {
2628 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2629
2630 /* Set the bits per frame that we should try and recover in
2631 * subsequent inter frames to account for the extra GF spend...
2632        * note that this does not apply for GF updates that occur
2633 * coincident with a key frame as the extra cost of key frames
2634 * is dealt with elsewhere.
2635 */
2636 if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active) {
2637         /* Calculate GF bits to be recovered
2638 * Projected size - av frame bits available for inter
2639 * frames for clip as a whole
2640 */
2641 cpi->gf_overspend_bits +=
2642 (cpi->projected_frame_size - cpi->inter_frame_target);
2643 }
2644
2645 cpi->non_gf_bitrate_adjustment =
2646 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2647 }
2648
2649 /* Update data structure that monitors level of reference to last GF */
2650 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2651 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2652
2653       /* This frame does the refresh; subsequent frames do not unless the
2654        * user explicitly requests it.
2655        */
2656 cm->refresh_golden_frame = 0;
2657 cpi->frames_since_golden = 0;
2658
2659 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
2660 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
2661 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
2662 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
2663
2664 /* ******** Fixed Q test code only ************ */
2665 /* If we are going to use the ALT reference for the next group of
2666 * frames set a flag to say so.
2667 */
2668 if (cpi->oxcf.fixed_q >= 0 && cpi->oxcf.play_alternate &&
2669 !cpi->common.refresh_alt_ref_frame) {
2670 cpi->source_alt_ref_pending = 1;
2671 cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
2672 }
2673
2674 if (!cpi->source_alt_ref_pending) cpi->source_alt_ref_active = 0;
2675
2676 /* Decrement count down till next gf */
2677 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2678
2679 } else if (!cpi->common.refresh_alt_ref_frame) {
2680 /* Decrement count down till next gf */
2681 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2682
2683 if (cpi->frames_till_alt_ref_frame) cpi->frames_till_alt_ref_frame--;
2684
2685 cpi->frames_since_golden++;
2686
2687 if (cpi->frames_since_golden > 1) {
2688 cpi->recent_ref_frame_usage[INTRA_FRAME] +=
2689 cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
2690 cpi->recent_ref_frame_usage[LAST_FRAME] +=
2691 cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
2692 cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
2693 cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
2694 cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
2695 cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
2696 }
2697 }
2698 }
2699
2700 /* This function updates the reference frame probability estimates that
2701 * will be used during mode selection
2702 */
2703 static void update_rd_ref_frame_probs(VP8_COMP *cpi) {
2704 VP8_COMMON *cm = &cpi->common;
2705
2706 const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
2707 const int rf_intra = rfct[INTRA_FRAME];
2708 const int rf_inter =
2709 rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
2710
2711 if (cm->frame_type == KEY_FRAME) {
2712 cpi->prob_intra_coded = 255;
2713 cpi->prob_last_coded = 128;
2714 cpi->prob_gf_coded = 128;
2715 } else if (!(rf_intra + rf_inter)) {
2716 cpi->prob_intra_coded = 63;
2717 cpi->prob_last_coded = 128;
2718 cpi->prob_gf_coded = 128;
2719 }
2720
2721 /* update reference frame costs since we can do better than what we got
2722 * last frame.
2723 */
2724 if (cpi->oxcf.number_of_layers == 1) {
2725 if (cpi->common.refresh_alt_ref_frame) {
2726 cpi->prob_intra_coded += 40;
2727 if (cpi->prob_intra_coded > 255) cpi->prob_intra_coded = 255;
2728 cpi->prob_last_coded = 200;
2729 cpi->prob_gf_coded = 1;
2730 } else if (cpi->frames_since_golden == 0) {
2731 cpi->prob_last_coded = 214;
2732 } else if (cpi->frames_since_golden == 1) {
2733 cpi->prob_last_coded = 192;
2734 cpi->prob_gf_coded = 220;
2735 } else if (cpi->source_alt_ref_active) {
2736 cpi->prob_gf_coded -= 20;
2737
2738 if (cpi->prob_gf_coded < 10) cpi->prob_gf_coded = 10;
2739 }
2740 if (!cpi->source_alt_ref_active) cpi->prob_gf_coded = 255;
2741 }
2742 }
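/* Note (added): the prob_* values above are 8-bit probabilities on VP8's
 * 1..255 scale, with 128 acting as a neutral 50/50 prior.  They are updated
 * here from the previous frame's reference-frame usage counts and the
 * current refresh pattern so that mode selection can cost the
 * LAST/GOLDEN/ALTREF choice more accurately on the next frame.
 */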
2743
2744 #if !CONFIG_REALTIME_ONLY
2745 /* 1 = key, 0 = inter */
2746 static int decide_key_frame(VP8_COMP *cpi) {
2747 VP8_COMMON *cm = &cpi->common;
2748
2749 int code_key_frame = 0;
2750
2751 cpi->kf_boost = 0;
2752
2753 if (cpi->Speed > 11) return 0;
2754
2755 /* Clear down mmx registers */
2756 vpx_clear_system_state();
2757
2758 if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0)) {
2759 double change = 1.0 *
2760 abs((int)(cpi->mb.intra_error - cpi->last_intra_error)) /
2761 (1 + cpi->last_intra_error);
2762 double change2 =
2763 1.0 *
2764 abs((int)(cpi->mb.prediction_error - cpi->last_prediction_error)) /
2765 (1 + cpi->last_prediction_error);
2766 double minerror = cm->MBs * 256;
2767
2768 cpi->last_intra_error = cpi->mb.intra_error;
2769 cpi->last_prediction_error = cpi->mb.prediction_error;
2770
2771 if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15 &&
2772 cpi->mb.prediction_error > minerror &&
2773 (change > .25 || change2 > .25)) {
2774 /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra >
2775 * cpi->last_frame_percent_intra + 3*/
2776 return 1;
2777 }
2778
2779 return 0;
2780 }
2781
2782 /* If the following are true we might as well code a key frame */
2783 if (((cpi->this_frame_percent_intra == 100) &&
2784 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
2785 ((cpi->this_frame_percent_intra > 95) &&
2786 (cpi->this_frame_percent_intra >=
2787 (cpi->last_frame_percent_intra + 5)))) {
2788 code_key_frame = 1;
2789 }
2790   /* In addition, if the following are true and this is not a golden frame,
2791    * then code a key frame. Note that on golden frames there often seems
2792    * to be a pop in intra usage anyway, so this restriction is designed
2793    * to prevent spurious key frames. The intra pop needs to be
2794    * investigated.
2795    */
2796 else if (((cpi->this_frame_percent_intra > 60) &&
2797 (cpi->this_frame_percent_intra >
2798 (cpi->last_frame_percent_intra * 2))) ||
2799 ((cpi->this_frame_percent_intra > 75) &&
2800 (cpi->this_frame_percent_intra >
2801 (cpi->last_frame_percent_intra * 3 / 2))) ||
2802 ((cpi->this_frame_percent_intra > 90) &&
2803 (cpi->this_frame_percent_intra >
2804 (cpi->last_frame_percent_intra + 10)))) {
2805 if (!cm->refresh_golden_frame) code_key_frame = 1;
2806 }
2807
2808 return code_key_frame;
2809 }
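/* Illustrative example (added): with last_frame_percent_intra = 40, a frame
 * reporting this_frame_percent_intra = 85 satisfies the "> 60 and more than
 * double the previous value" branch above, so a key frame is coded provided
 * the frame is not also a golden-frame refresh.
 */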
2810
2811 static void Pass1Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
2812 unsigned int *frame_flags) {
2813 (void)size;
2814 (void)dest;
2815 (void)frame_flags;
2816 vp8_set_quantizer(cpi, 26);
2817
2818 vp8_first_pass(cpi);
2819 }
2820 #endif
2821
2822 #if 0
2823 void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
2824 {
2825
2826 /* write the frame */
2827 FILE *yframe;
2828 int i;
2829 char filename[255];
2830
2831 sprintf(filename, "cx\\y%04d.raw", this_frame);
2832 yframe = fopen(filename, "wb");
2833
2834 for (i = 0; i < frame->y_height; ++i)
2835 fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
2836
2837 fclose(yframe);
2838 sprintf(filename, "cx\\u%04d.raw", this_frame);
2839 yframe = fopen(filename, "wb");
2840
2841 for (i = 0; i < frame->uv_height; ++i)
2842 fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2843
2844 fclose(yframe);
2845 sprintf(filename, "cx\\v%04d.raw", this_frame);
2846 yframe = fopen(filename, "wb");
2847
2848 for (i = 0; i < frame->uv_height; ++i)
2849 fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2850
2851 fclose(yframe);
2852 }
2853 #endif
2854 /* return of 0 means drop frame */
2855
2856 #if !CONFIG_REALTIME_ONLY
2857 /* Function to test for conditions that indicate we should loop
2858 * back and recode a frame.
2859 */
2860 static int recode_loop_test(VP8_COMP *cpi, int high_limit, int low_limit, int q,
2861 int maxq, int minq) {
2862 int force_recode = 0;
2863 VP8_COMMON *cm = &cpi->common;
2864
2865   /* Is frame recode allowed at all?
2866    * Yes if either recode mode 1 is selected, or mode 2 is selected and
2867    * the frame is a key frame, golden frame or alt-ref frame.
2868    */
2869 if ((cpi->sf.recode_loop == 1) ||
2870 ((cpi->sf.recode_loop == 2) &&
2871 ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
2872 cm->refresh_alt_ref_frame))) {
2873 /* General over and under shoot tests */
2874 if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
2875 ((cpi->projected_frame_size < low_limit) && (q > minq))) {
2876 force_recode = 1;
2877 }
2878 /* Special Constrained quality tests */
2879 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
2880 /* Undershoot and below auto cq level */
2881 if ((q > cpi->cq_target_quality) &&
2882 (cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3))) {
2883 force_recode = 1;
2884 }
2885 /* Severe undershoot and between auto and user cq level */
2886 else if ((q > cpi->oxcf.cq_level) &&
2887 (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
2888 (cpi->active_best_quality > cpi->oxcf.cq_level)) {
2889 force_recode = 1;
2890 cpi->active_best_quality = cpi->oxcf.cq_level;
2891 }
2892 }
2893 }
2894
2895 return force_recode;
2896 }
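/* Illustrative example (added): with a high_limit of 24000 bits and a
 * low_limit of 16000 bits, a projected_frame_size of 30000 forces a recode
 * as long as q is still below maxq (so the frame can be re-quantized more
 * coarsely); symmetrically, a large undershoot only forces a recode while
 * q > minq.
 */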
2897 #endif // !CONFIG_REALTIME_ONLY
2898
2899 static void update_reference_frames(VP8_COMP *cpi) {
2900 VP8_COMMON *cm = &cpi->common;
2901 YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
2902
2903 /* At this point the new frame has been encoded.
2904 * If any buffer copy / swapping is signaled it should be done here.
2905 */
2906
2907 if (cm->frame_type == KEY_FRAME) {
2908 yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME;
2909
2910 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2911 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2912
2913 cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
2914
2915 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2916 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2917 } else /* For non key frames */
2918 {
2919 if (cm->refresh_alt_ref_frame) {
2920 assert(!cm->copy_buffer_to_arf);
2921
2922 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
2923 cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2924 cm->alt_fb_idx = cm->new_fb_idx;
2925
2926 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2927 } else if (cm->copy_buffer_to_arf) {
2928 assert(!(cm->copy_buffer_to_arf & ~0x3));
2929
2930 if (cm->copy_buffer_to_arf == 1) {
2931 if (cm->alt_fb_idx != cm->lst_fb_idx) {
2932 yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
2933 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2934 cm->alt_fb_idx = cm->lst_fb_idx;
2935
2936 cpi->current_ref_frames[ALTREF_FRAME] =
2937 cpi->current_ref_frames[LAST_FRAME];
2938 }
2939 } else /* if (cm->copy_buffer_to_arf == 2) */
2940 {
2941 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2942 yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
2943 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2944 cm->alt_fb_idx = cm->gld_fb_idx;
2945
2946 cpi->current_ref_frames[ALTREF_FRAME] =
2947 cpi->current_ref_frames[GOLDEN_FRAME];
2948 }
2949 }
2950 }
2951
2952 if (cm->refresh_golden_frame) {
2953 assert(!cm->copy_buffer_to_gf);
2954
2955 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
2956 cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2957 cm->gld_fb_idx = cm->new_fb_idx;
2958
2959 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2960 } else if (cm->copy_buffer_to_gf) {
2961 assert(!(cm->copy_buffer_to_arf & ~0x3));
2962
2963 if (cm->copy_buffer_to_gf == 1) {
2964 if (cm->gld_fb_idx != cm->lst_fb_idx) {
2965 yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
2966 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2967 cm->gld_fb_idx = cm->lst_fb_idx;
2968
2969 cpi->current_ref_frames[GOLDEN_FRAME] =
2970 cpi->current_ref_frames[LAST_FRAME];
2971 }
2972 } else /* if (cm->copy_buffer_to_gf == 2) */
2973 {
2974 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2975 yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
2976 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2977 cm->gld_fb_idx = cm->alt_fb_idx;
2978
2979 cpi->current_ref_frames[GOLDEN_FRAME] =
2980 cpi->current_ref_frames[ALTREF_FRAME];
2981 }
2982 }
2983 }
2984 }
2985
2986 if (cm->refresh_last_frame) {
2987 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
2988 cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
2989 cm->lst_fb_idx = cm->new_fb_idx;
2990
2991 cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
2992 }
2993
2994 #if CONFIG_TEMPORAL_DENOISING
2995 if (cpi->oxcf.noise_sensitivity) {
2996     /* We shouldn't have to keep multiple copies, as we know in advance
2997      * which buffer to start from; for now, to get something up and
2998      * running, the buffers are simply copied.
2999      */
3000 if (cm->frame_type == KEY_FRAME) {
3001 int i;
3002 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
3003 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_running_avg[i]);
3004 } else /* For non key frames */
3005 {
3006 vp8_yv12_extend_frame_borders(
3007 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
3008
3009 if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf) {
3010 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3011 &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
3012 }
3013 if (cm->refresh_golden_frame || cm->copy_buffer_to_gf) {
3014 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3015 &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
3016 }
3017 if (cm->refresh_last_frame) {
3018 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3019 &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
3020 }
3021 }
3022 if (cpi->oxcf.noise_sensitivity == 4)
3023 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source);
3024 }
3025 #endif
3026 }
3027
3028 static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source,
3029 YV12_BUFFER_CONFIG *dest,
3030 VP8_COMP *cpi) {
3031 int i, j;
3032 int Total = 0;
3033 int num_blocks = 0;
3034 int skip = 2;
3035 int min_consec_zero_last = 10;
3036 int tot_num_blocks = (source->y_height * source->y_width) >> 8;
3037 unsigned char *src = source->y_buffer;
3038 unsigned char *dst = dest->y_buffer;
3039
3040   /* Loop through the Y plane, every |skip| blocks along rows and columns,
3041    * summing the square differences, and only for blocks that have been
3042    * in zero_last mode for at least |min_consec_zero_last| frames in a row.
3043    */
3044 for (i = 0; i < source->y_height; i += 16 * skip) {
3045 int block_index_row = (i >> 4) * cpi->common.mb_cols;
3046 for (j = 0; j < source->y_width; j += 16 * skip) {
3047 int index = block_index_row + (j >> 4);
3048 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3049 unsigned int sse;
3050 Total += vpx_mse16x16(src + j, source->y_stride, dst + j,
3051 dest->y_stride, &sse);
3052 num_blocks++;
3053 }
3054 }
3055 src += 16 * skip * source->y_stride;
3056 dst += 16 * skip * dest->y_stride;
3057 }
3058 // Only return non-zero if we have at least ~1/16 samples for estimate.
3059 if (num_blocks > (tot_num_blocks >> 4)) {
3060 assert(num_blocks != 0);
3061 return (Total / num_blocks);
3062 } else {
3063 return 0;
3064 }
3065 }
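/* Note (added): with skip = 2 the loop above samples every other macroblock
 * row and column (roughly one block in four), and only blocks coded as
 * zero-motion-on-last for at least min_consec_zero_last consecutive frames
 * contribute.  The (tot_num_blocks >> 4) guard then requires about 1/16 of
 * all blocks to have qualified before the average is trusted.
 */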
3066
3067 #if CONFIG_TEMPORAL_DENOISING
3068 static void process_denoiser_mode_change(VP8_COMP *cpi) {
3069 const VP8_COMMON *const cm = &cpi->common;
3070 int i, j;
3071 int total = 0;
3072 int num_blocks = 0;
3073 // Number of blocks skipped along row/column in computing the
3074 // nmse (normalized mean square error) of source.
3075 int skip = 2;
3076 // Only select blocks for computing nmse that have been encoded
3077 // as ZERO LAST min_consec_zero_last frames in a row.
3078 // Scale with number of temporal layers.
3079 int min_consec_zero_last = 12 / cpi->oxcf.number_of_layers;
3080 // Decision is tested for changing the denoising mode every
3081 // num_mode_change times this function is called. Note that this
3082   // function is called every 8 frames, so (8 * num_mode_change) is the number
3083 // of frames where denoising mode change is tested for switch.
3084 int num_mode_change = 20;
3085 // Framerate factor, to compensate for larger mse at lower framerates.
3086 // Use ref_framerate, which is full source framerate for temporal layers.
3087 // TODO(marpan): Adjust this factor.
3088 int fac_framerate = cpi->ref_framerate < 25.0f ? 80 : 100;
3089 int tot_num_blocks = cm->mb_rows * cm->mb_cols;
3090 int ystride = cpi->Source->y_stride;
3091 unsigned char *src = cpi->Source->y_buffer;
3092 unsigned char *dst = cpi->denoiser.yv12_last_source.y_buffer;
3093 static const unsigned char const_source[16] = { 128, 128, 128, 128, 128, 128,
3094 128, 128, 128, 128, 128, 128,
3095 128, 128, 128, 128 };
3096 int bandwidth = (int)(cpi->target_bandwidth);
3097 // For temporal layers, use full bandwidth (top layer).
3098 if (cpi->oxcf.number_of_layers > 1) {
3099 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->oxcf.number_of_layers - 1];
3100 bandwidth = (int)(lc->target_bandwidth);
3101 }
3102 // Loop through the Y plane, every skip blocks along rows and columns,
3103 // summing the normalized mean square error, only for blocks that have
3104   // been encoded as ZEROMV LAST for at least min_consec_zero_last frames in
3105 // a row and have small sum difference between current and previous frame.
3106 // Normalization here is by the contrast of the current frame block.
3107 for (i = 0; i < cm->Height; i += 16 * skip) {
3108 int block_index_row = (i >> 4) * cm->mb_cols;
3109 for (j = 0; j < cm->Width; j += 16 * skip) {
3110 int index = block_index_row + (j >> 4);
3111 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3112 unsigned int sse;
3113 const unsigned int var =
3114 vpx_variance16x16(src + j, ystride, dst + j, ystride, &sse);
3115 // Only consider this block as valid for noise measurement
3116 // if the sum_diff average of the current and previous frame
3117 // is small (to avoid effects from lighting change).
3118 if ((sse - var) < 128) {
3119 unsigned int sse2;
3120 const unsigned int act =
3121 vpx_variance16x16(src + j, ystride, const_source, 0, &sse2);
3122 if (act > 0) total += sse / act;
3123 num_blocks++;
3124 }
3125 }
3126 }
3127 src += 16 * skip * ystride;
3128 dst += 16 * skip * ystride;
3129 }
3130 total = total * fac_framerate / 100;
3131
3132 // Only consider this frame as valid sample if we have computed nmse over
3133   // at least ~1/16 blocks, and total > 0 (total == 0 can happen if the
3134 // application inputs duplicate frames, or contrast is all zero).
3135 if (total > 0 && (num_blocks > (tot_num_blocks >> 4))) {
3136 // Update the recursive mean square source_diff.
3137 total = (total << 8) / num_blocks;
3138 if (cpi->denoiser.nmse_source_diff_count == 0) {
3139 // First sample in new interval.
3140 cpi->denoiser.nmse_source_diff = total;
3141 cpi->denoiser.qp_avg = cm->base_qindex;
3142 } else {
3143 // For subsequent samples, use average with weight ~1/4 for new sample.
3144 cpi->denoiser.nmse_source_diff =
3145 (int)((total + 3 * cpi->denoiser.nmse_source_diff) >> 2);
3146 cpi->denoiser.qp_avg =
3147 (int)((cm->base_qindex + 3 * cpi->denoiser.qp_avg) >> 2);
3148 }
3149 cpi->denoiser.nmse_source_diff_count++;
3150 }
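  /* Worked example (added): the update above is a recursive average with a
   * 1/4 weight on the new sample; e.g. a previous nmse_source_diff of 400
   * and a new per-frame total of 800 give (800 + 3 * 400) >> 2 = 500.
   */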
3151 // Check for changing the denoiser mode, when we have obtained #samples =
3152 // num_mode_change. Condition the change also on the bitrate and QP.
3153 if (cpi->denoiser.nmse_source_diff_count == num_mode_change) {
3154 // Check for going up: from normal to aggressive mode.
3155 if ((cpi->denoiser.denoiser_mode == kDenoiserOnYUV) &&
3156 (cpi->denoiser.nmse_source_diff >
3157 cpi->denoiser.threshold_aggressive_mode) &&
3158 (cpi->denoiser.qp_avg < cpi->denoiser.qp_threshold_up &&
3159 bandwidth > cpi->denoiser.bitrate_threshold)) {
3160 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUVAggressive);
3161 } else {
3162 // Check for going down: from aggressive to normal mode.
3163 if (((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3164 (cpi->denoiser.nmse_source_diff <
3165 cpi->denoiser.threshold_aggressive_mode)) ||
3166 ((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3167 (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down ||
3168 bandwidth < cpi->denoiser.bitrate_threshold))) {
3169 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3170 }
3171 }
3172 // Reset metric and counter for next interval.
3173 cpi->denoiser.nmse_source_diff = 0;
3174 cpi->denoiser.qp_avg = 0;
3175 cpi->denoiser.nmse_source_diff_count = 0;
3176 }
3177 }
3178 #endif
3179
3180 void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) {
3181 const FRAME_TYPE frame_type = cm->frame_type;
3182
3183 int update_any_ref_buffers = 1;
3184 if (cpi->common.refresh_last_frame == 0 &&
3185 cpi->common.refresh_golden_frame == 0 &&
3186 cpi->common.refresh_alt_ref_frame == 0) {
3187 update_any_ref_buffers = 0;
3188 }
3189
3190 if (cm->no_lpf) {
3191 cm->filter_level = 0;
3192 } else {
3193 struct vpx_usec_timer timer;
3194
3195 vpx_clear_system_state();
3196
3197 vpx_usec_timer_start(&timer);
3198 if (cpi->sf.auto_filter == 0) {
3199 #if CONFIG_TEMPORAL_DENOISING
3200 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3201 // Use the denoised buffer for selecting base loop filter level.
3202 // Denoised signal for current frame is stored in INTRA_FRAME.
3203 // No denoising on key frames.
3204 vp8cx_pick_filter_level_fast(
3205 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
3206 } else {
3207 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3208 }
3209 #else
3210 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3211 #endif
3212 } else {
3213 #if CONFIG_TEMPORAL_DENOISING
3214 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3215 // Use the denoised buffer for selecting base loop filter level.
3216 // Denoised signal for current frame is stored in INTRA_FRAME.
3217 // No denoising on key frames.
3218 vp8cx_pick_filter_level(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3219 cpi);
3220 } else {
3221 vp8cx_pick_filter_level(cpi->Source, cpi);
3222 }
3223 #else
3224 vp8cx_pick_filter_level(cpi->Source, cpi);
3225 #endif
3226 }
3227
3228 if (cm->filter_level > 0) {
3229 vp8cx_set_alt_lf_level(cpi, cm->filter_level);
3230 }
3231
3232 vpx_usec_timer_mark(&timer);
3233 cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
3234 }
3235
3236 #if CONFIG_MULTITHREAD
3237 if (cpi->b_multi_threaded) {
3238 sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
3239 }
3240 #endif
3241
3242 // No need to apply loop-filter if the encoded frame does not update
3243 // any reference buffers.
3244 if (cm->filter_level > 0 && update_any_ref_buffers) {
3245 vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
3246 }
3247
3248 vp8_yv12_extend_frame_borders(cm->frame_to_show);
3249 }
3250
3251 static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
3252 unsigned char *dest,
3253 unsigned char *dest_end,
3254 unsigned int *frame_flags) {
3255 int Q;
3256 int frame_over_shoot_limit;
3257 int frame_under_shoot_limit;
3258
3259 int Loop = 0;
3260 int loop_count;
3261
3262 VP8_COMMON *cm = &cpi->common;
3263 int active_worst_qchanged = 0;
3264
3265 #if !CONFIG_REALTIME_ONLY
3266 int q_low;
3267 int q_high;
3268 int zbin_oq_high;
3269 int zbin_oq_low = 0;
3270 int top_index;
3271 int bottom_index;
3272 int overshoot_seen = 0;
3273 int undershoot_seen = 0;
3274 #endif
3275
3276 int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
3277 cpi->oxcf.optimal_buffer_level / 100);
3278 int drop_mark75 = drop_mark * 2 / 3;
3279 int drop_mark50 = drop_mark / 4;
3280 int drop_mark25 = drop_mark / 8;
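  /* Note (added): despite their names, drop_mark75/50/25 are 2/3, 1/4 and
   * 1/8 of drop_mark.  Illustrative example: with drop_frames_water_mark =
   * 30 and an optimal_buffer_level of 1,000,000 bits, drop_mark is 300,000
   * bits and drop_mark75/50/25 are 200,000, 75,000 and 37,500 bits.
   */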
3281
3282 /* Clear down mmx registers to allow floating point in what follows */
3283 vpx_clear_system_state();
3284
3285 if (cpi->force_next_frame_intra) {
3286 cm->frame_type = KEY_FRAME; /* delayed intra frame */
3287 cpi->force_next_frame_intra = 0;
3288 }
3289
3290 /* For an alt ref frame in 2 pass we skip the call to the second pass
3291 * function that sets the target bandwidth
3292 */
3293 switch (cpi->pass) {
3294 #if !CONFIG_REALTIME_ONLY
3295 case 2:
3296 if (cpi->common.refresh_alt_ref_frame) {
3297 /* Per frame bit target for the alt ref frame */
3298 cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
3299 /* per second target bitrate */
3300 cpi->target_bandwidth =
3301 (int)(cpi->twopass.gf_bits * cpi->output_framerate);
3302 }
3303 break;
3304 #endif // !CONFIG_REALTIME_ONLY
3305 default:
3306 cpi->per_frame_bandwidth =
3307 (int)(cpi->target_bandwidth / cpi->output_framerate);
3308 break;
3309 }
3310
3311 /* Default turn off buffer to buffer copying */
3312 cm->copy_buffer_to_gf = 0;
3313 cm->copy_buffer_to_arf = 0;
3314
3315 /* Clear zbin over-quant value and mode boost values. */
3316 cpi->mb.zbin_over_quant = 0;
3317 cpi->mb.zbin_mode_boost = 0;
3318
3319 /* Enable or disable mode based tweaking of the zbin.
3320 * For 2 pass it is only used where GF/ARF prediction quality
3321 * is above a threshold.
3322 */
3323 cpi->mb.zbin_mode_boost_enabled = 1;
3324 if (cpi->pass == 2) {
3325 if (cpi->gfu_boost <= 400) {
3326 cpi->mb.zbin_mode_boost_enabled = 0;
3327 }
3328 }
3329
3330 /* Current default encoder behaviour for the altref sign bias */
3331 if (cpi->source_alt_ref_active) {
3332 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
3333 } else {
3334 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
3335 }
3336
3337 /* Check to see if a key frame is signaled
3338 * For two pass with auto key frame enabled cm->frame_type may already
3339 * be set, but not for one pass.
3340 */
3341 if ((cm->current_video_frame == 0) || (cm->frame_flags & FRAMEFLAGS_KEY) ||
3342 (cpi->oxcf.auto_key &&
3343 (cpi->frames_since_key % cpi->key_frame_frequency == 0))) {
3344 /* Key frame from VFW/auto-keyframe/first frame */
3345 cm->frame_type = KEY_FRAME;
3346 #if CONFIG_TEMPORAL_DENOISING
3347 if (cpi->oxcf.noise_sensitivity == 4) {
3348 // For adaptive mode, reset denoiser to normal mode on key frame.
3349 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3350 }
3351 #endif
3352 }
3353
3354 #if CONFIG_MULTI_RES_ENCODING
3355 if (cpi->oxcf.mr_total_resolutions > 1) {
3356 LOWER_RES_FRAME_INFO *low_res_frame_info =
3357 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
3358
3359 if (cpi->oxcf.mr_encoder_id) {
3360 // TODO(marpan): This constraint shouldn't be needed, as we would like
3361 // to allow for key frame setting (forced or periodic) defined per
3362 // spatial layer. For now, keep this in.
3363 cm->frame_type = low_res_frame_info->frame_type;
3364
3365 // Check if lower resolution is available for motion vector reuse.
3366 if (cm->frame_type != KEY_FRAME) {
3367 cpi->mr_low_res_mv_avail = 1;
3368 cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
3369
3370 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
3371 cpi->mr_low_res_mv_avail &=
3372 (cpi->current_ref_frames[LAST_FRAME] ==
3373 low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
3374
3375 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
3376 cpi->mr_low_res_mv_avail &=
3377 (cpi->current_ref_frames[GOLDEN_FRAME] ==
3378 low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
3379
3380 // Don't use altref to determine whether low res is available.
3381 // TODO (marpan): Should we make this type of condition on a
3382 // per-reference frame basis?
3383 /*
3384 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
3385 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
3386 == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
3387 */
3388 }
3389 }
3390
3391 // On a key frame: For the lowest resolution, keep track of the key frame
3392 // counter value. For the higher resolutions, reset the current video
3393 // frame counter to that of the lowest resolution.
3394 // This is done to handle the case where we may stop/start encoding
3395 // higher layer(s). The restart of encoding a higher layer is only
3396 // signaled by a key frame for now.
3397 // TODO (marpan): Add flag to indicate restart-encoding of higher layer.
3398 if (cm->frame_type == KEY_FRAME) {
3399 if (cpi->oxcf.mr_encoder_id) {
3400 // If the initial starting value of the buffer level is zero (this can
3401 // happen because we may not have started encoding this higher stream),
3402 // then reset it to non-zero value based on |starting_buffer_level|.
3403 if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) {
3404 unsigned int i;
3405 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
3406 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
3407 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
3408 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3409 lc->bits_off_target = lc->starting_buffer_level;
3410 lc->buffer_level = lc->starting_buffer_level;
3411 }
3412 }
3413 cpi->common.current_video_frame =
3414 low_res_frame_info->key_frame_counter_value;
3415 } else {
3416 low_res_frame_info->key_frame_counter_value =
3417 cpi->common.current_video_frame;
3418 }
3419 }
3420 }
3421 #endif
3422
3423 // Find the reference frame closest to the current frame.
3424 cpi->closest_reference_frame = LAST_FRAME;
3425 if (cm->frame_type != KEY_FRAME) {
3426 int i;
3427 MV_REFERENCE_FRAME closest_ref = INTRA_FRAME;
3428 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
3429 closest_ref = LAST_FRAME;
3430 } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
3431 closest_ref = GOLDEN_FRAME;
3432 } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
3433 closest_ref = ALTREF_FRAME;
3434 }
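/* Map each MV_REFERENCE_FRAME index (LAST=1, GOLDEN=2, ALTREF=3) to its
 * VP8_*_FRAME flag bit (1, 2, 4) and pick the most recently refreshed
 * reference that is available for this frame.
 */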
3435 for (i = 1; i <= 3; ++i) {
3436 vpx_ref_frame_type_t ref_frame_type =
3437 (vpx_ref_frame_type_t)((i == 3) ? 4 : i);
3438 if (cpi->ref_frame_flags & ref_frame_type) {
3439 if ((cm->current_video_frame - cpi->current_ref_frames[i]) <
3440 (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
3441 closest_ref = i;
3442 }
3443 }
3444 }
3445 cpi->closest_reference_frame = closest_ref;
3446 }
3447
3448 /* Set various flags etc to special state if it is a key frame */
3449 if (cm->frame_type == KEY_FRAME) {
3450 int i;
3451
3452 // Set the loop filter deltas and segmentation map update
3453 setup_features(cpi);
3454
3455 /* The alternate reference frame cannot be active for a key frame */
3456 cpi->source_alt_ref_active = 0;
3457
3458 /* Reset the RD threshold multipliers to default of * 1 (128) */
3459 for (i = 0; i < MAX_MODES; ++i) {
3460 cpi->mb.rd_thresh_mult[i] = 128;
3461 }
3462
3463 // Reset the zero_last counter to 0 on key frame.
3464 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3465 memset(cpi->consec_zero_last_mvbias, 0,
3466 (cpi->common.mb_rows * cpi->common.mb_cols));
3467 }
3468
3469 #if 0
3470 /* Experimental code for lagged compress and one pass
3471 * Initialise one_pass GF frames stats
3472 * Update stats used for GF selection
3473 */
3474 {
3475 cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
3476
3477 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
3478 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
3479 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
3480 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
3481 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
3482 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
3483 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
3484 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
3485 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
3486 }
3487 #endif
3488
3489 update_rd_ref_frame_probs(cpi);
3490
3491 if (cpi->drop_frames_allowed) {
3492 /* The reset to decimation 0 is only done here for one pass.
3493 * Once it is set two pass leaves decimation on till the next kf.
3494 */
3495 if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0)) {
3496 cpi->decimation_factor--;
3497 }
3498
3499 if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0) {
3500 cpi->decimation_factor = 1;
3501
3502 } else if (cpi->buffer_level < drop_mark25 &&
3503 (cpi->decimation_factor == 2 || cpi->decimation_factor == 3)) {
3504 cpi->decimation_factor = 3;
3505 } else if (cpi->buffer_level < drop_mark50 &&
3506 (cpi->decimation_factor == 1 || cpi->decimation_factor == 2)) {
3507 cpi->decimation_factor = 2;
3508 } else if (cpi->buffer_level < drop_mark75 &&
3509 (cpi->decimation_factor == 0 || cpi->decimation_factor == 1)) {
3510 cpi->decimation_factor = 1;
3511 }
3512 }
3513
3514 /* The following decimates the frame rate according to a regular
3515 * pattern (i.e. to 1/2 or 2/3 frame rate) This can be used to help
3516 * prevent buffer under-run in CBR mode. Alternatively it might be
3517 * desirable in some situations to drop frame rate but throw more bits
3518 * at each frame.
3519 *
3520 * Note that dropping a key frame can be problematic if spatial
3521 * resampling is also active
3522 */
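/* The multipliers below give the frames that are actually coded a modestly
 * larger per-frame budget; the remaining bits saved by dropping frames flow
 * back through the buffer model rather than being spent immediately.
 */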
3523 if (cpi->decimation_factor > 0) {
3524 switch (cpi->decimation_factor) {
3525 case 1:
3526 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
3527 break;
3528 case 2:
3529 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3530 break;
3531 case 3:
3532 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3533 break;
3534 }
3535
3536 /* Note that we should not throw out a key frame (especially when
3537 * spatial resampling is enabled).
3538 */
3539 if (cm->frame_type == KEY_FRAME) {
3540 cpi->decimation_count = cpi->decimation_factor;
3541 } else if (cpi->decimation_count > 0) {
3542 cpi->decimation_count--;
3543
3544 cpi->bits_off_target += cpi->av_per_frame_bandwidth;
3545 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
3546 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
3547 }
3548
3549 #if CONFIG_MULTI_RES_ENCODING
3550 vp8_store_drop_frame_info(cpi);
3551 #endif
3552
3553 cm->current_video_frame++;
3554 cpi->frames_since_key++;
3555 // We advance the temporal pattern for dropped frames.
3556 cpi->temporal_pattern_counter++;
3557
3558 #if CONFIG_INTERNAL_STATS
3559 cpi->count++;
3560 #endif
3561
3562 cpi->buffer_level = cpi->bits_off_target;
3563
3564 if (cpi->oxcf.number_of_layers > 1) {
3565 unsigned int i;
3566
3567 /* Propagate bits saved by dropping the frame to higher
3568 * layers
3569 */
3570 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
3571 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3572 lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
3573 if (lc->bits_off_target > lc->maximum_buffer_size) {
3574 lc->bits_off_target = lc->maximum_buffer_size;
3575 }
3576 lc->buffer_level = lc->bits_off_target;
3577 }
3578 }
3579
3580 return;
3581 } else {
3582 cpi->decimation_count = cpi->decimation_factor;
3583 }
3584 } else {
3585 cpi->decimation_count = 0;
3586 }
3587
3588 /* Decide how big to make the frame */
3589 if (!vp8_pick_frame_size(cpi)) {
3590 /* TODO: the 2 drop_frame-and-return code paths could be put together. */
3591 #if CONFIG_MULTI_RES_ENCODING
3592 vp8_store_drop_frame_info(cpi);
3593 #endif
3594 cm->current_video_frame++;
3595 cpi->frames_since_key++;
3596 // We advance the temporal pattern for dropped frames.
3597 cpi->temporal_pattern_counter++;
3598 return;
3599 }
3600
3601 /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
3602 * This has a knock-on effect on active best quality as well.
3603 * For CBR if the buffer reaches its maximum level then we can no longer
3604 * save up bits for later frames so we might as well use them up
3605 * on the current frame.
3606 */
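/* Roughly: allow up to a 1/4 reduction of active_worst_quality, scaled by
 * how far the buffer level sits between the optimal and maximum levels.
 */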
3607 if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
3608 (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) &&
3609 cpi->buffered_mode) {
3610 /* Max adjustment is 1/4 */
3611 int Adjustment = cpi->active_worst_quality / 4;
3612
3613 if (Adjustment) {
3614 int buff_lvl_step;
3615
3616 if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size) {
3617 buff_lvl_step = (int)((cpi->oxcf.maximum_buffer_size -
3618 cpi->oxcf.optimal_buffer_level) /
3619 Adjustment);
3620
3621 if (buff_lvl_step) {
3622 Adjustment =
3623 (int)((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) /
3624 buff_lvl_step);
3625 } else {
3626 Adjustment = 0;
3627 }
3628 }
3629
3630 cpi->active_worst_quality -= Adjustment;
3631
3632 if (cpi->active_worst_quality < cpi->active_best_quality) {
3633 cpi->active_worst_quality = cpi->active_best_quality;
3634 }
3635 }
3636 }
3637
3638 /* Set an active best quality and if necessary active worst quality
3639 * There is some odd behavior for one pass here that needs attention.
3640 */
3641 if ((cpi->pass == 2) || (cpi->ni_frames > 150)) {
3642 vpx_clear_system_state();
3643
3644 Q = cpi->active_worst_quality;
3645
3646 if (cm->frame_type == KEY_FRAME) {
3647 if (cpi->pass == 2) {
3648 if (cpi->gfu_boost > 600) {
3649 cpi->active_best_quality = kf_low_motion_minq[Q];
3650 } else {
3651 cpi->active_best_quality = kf_high_motion_minq[Q];
3652 }
3653
3654 /* Special case for key frames forced because we have reached
3655 * the maximum key frame interval. Here force the Q to a range
3656 * based on the ambient Q to reduce the risk of popping
3657 */
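/* i.e. clamp active_best_quality into the range
 * [avg_frame_qindex >> 2, avg_frame_qindex * 7 / 8].
 */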
3658 if (cpi->this_key_frame_forced) {
3659 if (cpi->active_best_quality > cpi->avg_frame_qindex * 7 / 8) {
3660 cpi->active_best_quality = cpi->avg_frame_qindex * 7 / 8;
3661 } else if (cpi->active_best_quality < (cpi->avg_frame_qindex >> 2)) {
3662 cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
3663 }
3664 }
3665 }
3666 /* One pass more conservative */
3667 else {
3668 cpi->active_best_quality = kf_high_motion_minq[Q];
3669 }
3670 }
3671
3672 else if (cpi->oxcf.number_of_layers == 1 &&
3673 (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)) {
3674 /* Use the lower of cpi->active_worst_quality and recent
3675 * average Q as basis for GF/ARF Q limit unless last frame was
3676 * a key frame.
3677 */
3678 if ((cpi->frames_since_key > 1) &&
3679 (cpi->avg_frame_qindex < cpi->active_worst_quality)) {
3680 Q = cpi->avg_frame_qindex;
3681 }
3682
3683 /* For constrained quality don't allow Q less than the cq level */
3684 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3685 (Q < cpi->cq_target_quality)) {
3686 Q = cpi->cq_target_quality;
3687 }
3688
3689 if (cpi->pass == 2) {
3690 if (cpi->gfu_boost > 1000) {
3691 cpi->active_best_quality = gf_low_motion_minq[Q];
3692 } else if (cpi->gfu_boost < 400) {
3693 cpi->active_best_quality = gf_high_motion_minq[Q];
3694 } else {
3695 cpi->active_best_quality = gf_mid_motion_minq[Q];
3696 }
3697
3698 /* Constrained quality use slightly lower active best. */
3699 if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3700 cpi->active_best_quality = cpi->active_best_quality * 15 / 16;
3701 }
3702 }
3703 /* One pass more conservative */
3704 else {
3705 cpi->active_best_quality = gf_high_motion_minq[Q];
3706 }
3707 } else {
3708 cpi->active_best_quality = inter_minq[Q];
3709
3710 /* For the constant/constrained quality mode we don't want
3711 * q to fall below the cq level.
3712 */
3713 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3714 (cpi->active_best_quality < cpi->cq_target_quality)) {
3715 /* If we are strongly undershooting the target rate in the last
3716 * frames then use the user passed in cq value not the auto
3717 * cq value.
3718 */
3719 if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth) {
3720 cpi->active_best_quality = cpi->oxcf.cq_level;
3721 } else {
3722 cpi->active_best_quality = cpi->cq_target_quality;
3723 }
3724 }
3725 }
3726
3727 /* If CBR and the buffer is relatively full then it is reasonable to allow
3728 * higher quality on these frames to prevent bits just going to waste.
3729 */
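/* Fraction below is a 0..128 measure of how far the buffer level sits
 * between the optimal and maximum buffer levels; active_best_quality is
 * pulled toward best_quality in proportion to it.
 */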
3730 if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
3731 /* Note that the use of >= here eliminates the risk of a divide
3732 * by 0 error in the else if clause
3733 */
3734 if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) {
3735 cpi->active_best_quality = cpi->best_quality;
3736
3737 } else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level) {
3738 int Fraction =
3739 (int)(((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) /
3740 (cpi->oxcf.maximum_buffer_size -
3741 cpi->oxcf.optimal_buffer_level));
3742 int min_qadjustment =
3743 ((cpi->active_best_quality - cpi->best_quality) * Fraction) / 128;
3744
3745 cpi->active_best_quality -= min_qadjustment;
3746 }
3747 }
3748 }
3749 /* Make sure constrained quality mode limits are adhered to for the first
3750 * few frames of one pass encodes
3751 */
3752 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3753 if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
3754 cpi->common.refresh_alt_ref_frame) {
3755 cpi->active_best_quality = cpi->best_quality;
3756 } else if (cpi->active_best_quality < cpi->cq_target_quality) {
3757 cpi->active_best_quality = cpi->cq_target_quality;
3758 }
3759 }
3760
3761 /* Clip the active best and worst quality values to limits */
3762 if (cpi->active_worst_quality > cpi->worst_quality) {
3763 cpi->active_worst_quality = cpi->worst_quality;
3764 }
3765
3766 if (cpi->active_best_quality < cpi->best_quality) {
3767 cpi->active_best_quality = cpi->best_quality;
3768 }
3769
3770 if (cpi->active_worst_quality < cpi->active_best_quality) {
3771 cpi->active_worst_quality = cpi->active_best_quality;
3772 }
3773
3774 /* Determine initial Q to try */
3775 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3776
3777 #if !CONFIG_REALTIME_ONLY
3778
3779 /* Set highest allowed value for Zbin over quant */
3780 if (cm->frame_type == KEY_FRAME) {
3781 zbin_oq_high = 0;
3782 } else if ((cpi->oxcf.number_of_layers == 1) &&
3783 ((cm->refresh_alt_ref_frame ||
3784 (cm->refresh_golden_frame && !cpi->source_alt_ref_active)))) {
3785 zbin_oq_high = 16;
3786 } else {
3787 zbin_oq_high = ZBIN_OQ_MAX;
3788 }
3789 #endif
3790
3791 /* Setup background Q adjustment for error resilient mode.
3792 * For multi-layer encodes only enable this for the base layer.
3793 */
3794 if (cpi->cyclic_refresh_mode_enabled) {
3795 // Special case for screen_content_mode with golden frame updates.
3796 int disable_cr_gf =
3797 (cpi->oxcf.screen_content_mode == 2 && cm->refresh_golden_frame);
3798 if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf) {
3799 cyclic_background_refresh(cpi, Q, 0);
3800 } else {
3801 disable_segmentation(cpi);
3802 }
3803 }
3804
3805 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3806 &frame_over_shoot_limit);
3807
3808 #if !CONFIG_REALTIME_ONLY
3809 /* Limit Q range for the adaptive loop. */
3810 bottom_index = cpi->active_best_quality;
3811 top_index = cpi->active_worst_quality;
3812 q_low = cpi->active_best_quality;
3813 q_high = cpi->active_worst_quality;
3814 #endif
3815
3816 vp8_save_coding_context(cpi);
3817
3818 loop_count = 0;
3819
3820 scale_and_extend_source(cpi->un_scaled_source, cpi);
3821
3822 #if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC
3823 // Option to apply spatial blur under the aggressive or adaptive
3824 // (temporal denoising) mode.
3825 if (cpi->oxcf.noise_sensitivity >= 3) {
3826 if (cpi->denoiser.denoise_pars.spatial_blur != 0) {
3827 vp8_de_noise(cm, cpi->Source, cpi->Source,
3828 cpi->denoiser.denoise_pars.spatial_blur, 1, 0, 0);
3829 }
3830 }
3831 #endif
3832
3833 #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)
3834
3835 if (cpi->oxcf.noise_sensitivity > 0) {
3836 unsigned char *src;
3837 int l = 0;
3838
3839 switch (cpi->oxcf.noise_sensitivity) {
3840 case 1: l = 20; break;
3841 case 2: l = 40; break;
3842 case 3: l = 60; break;
3843 case 4: l = 80; break;
3844 case 5: l = 100; break;
3845 case 6: l = 150; break;
3846 }
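/* l is the filter strength handed to vp8_de_noise(); larger values apply
 * stronger pre-encode denoising.
 */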
3847
3848 if (cm->frame_type == KEY_FRAME) {
3849 vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1);
3850 } else {
3851 vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1);
3852
3853 src = cpi->Source->y_buffer;
3854
3855 if (cpi->Source->y_stride < 0) {
3856 src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
3857 }
3858 }
3859 }
3860
3861 #endif
3862
3863 #ifdef OUTPUT_YUV_SRC
3864 vp8_write_yuv_frame(yuv_file, cpi->Source);
3865 #endif
3866
3867 do {
3868 vpx_clear_system_state();
3869
3870 vp8_set_quantizer(cpi, Q);
3871
3872 /* setup skip prob for costing in mode/mv decision */
3873 if (cpi->common.mb_no_coeff_skip) {
3874 cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
3875
3876 if (cm->frame_type != KEY_FRAME) {
3877 if (cpi->common.refresh_alt_ref_frame) {
3878 if (cpi->last_skip_false_probs[2] != 0) {
3879 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3880 }
3881
3882 /*
3883 if(cpi->last_skip_false_probs[2]!=0 && abs(Q-
3884 cpi->last_skip_probs_q[2])<=16 )
3885 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3886 else if (cpi->last_skip_false_probs[2]!=0)
3887 cpi->prob_skip_false = (cpi->last_skip_false_probs[2] +
3888 cpi->prob_skip_false ) / 2;
3889 */
3890 } else if (cpi->common.refresh_golden_frame) {
3891 if (cpi->last_skip_false_probs[1] != 0) {
3892 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3893 }
3894
3895 /*
3896 if(cpi->last_skip_false_probs[1]!=0 && abs(Q-
3897 cpi->last_skip_probs_q[1])<=16 )
3898 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3899 else if (cpi->last_skip_false_probs[1]!=0)
3900 cpi->prob_skip_false = (cpi->last_skip_false_probs[1] +
3901 cpi->prob_skip_false ) / 2;
3902 */
3903 } else {
3904 if (cpi->last_skip_false_probs[0] != 0) {
3905 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3906 }
3907
3908 /*
3909 if(cpi->last_skip_false_probs[0]!=0 && abs(Q-
3910 cpi->last_skip_probs_q[0])<=16 )
3911 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3912 else if(cpi->last_skip_false_probs[0]!=0)
3913 cpi->prob_skip_false = (cpi->last_skip_false_probs[0] +
3914 cpi->prob_skip_false ) / 2;
3915 */
3916 }
3917
3918 /* as this is for cost estimation, make sure it does not
3919 * go to either extreme
3920 */
3921 if (cpi->prob_skip_false < 5) cpi->prob_skip_false = 5;
3922
3923 if (cpi->prob_skip_false > 250) cpi->prob_skip_false = 250;
3924
3925 if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref) {
3926 cpi->prob_skip_false = 1;
3927 }
3928 }
3929
3930 #if 0
3931
3932 if (cpi->pass != 1)
3933 {
3934 FILE *f = fopen("skip.stt", "a");
3935 fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
3936 fclose(f);
3937 }
3938
3939 #endif
3940 }
3941
3942 if (cm->frame_type == KEY_FRAME) {
3943 if (resize_key_frame(cpi)) {
3944 /* If the frame size has changed, need to reset Q, quantizer,
3945 * and background refresh.
3946 */
3947 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3948 if (cpi->cyclic_refresh_mode_enabled) {
3949 if (cpi->current_layer == 0) {
3950 cyclic_background_refresh(cpi, Q, 0);
3951 } else {
3952 disable_segmentation(cpi);
3953 }
3954 }
3955 // Reset the zero_last counter to 0 on key frame.
3956 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3957 memset(cpi->consec_zero_last_mvbias, 0,
3958 (cpi->common.mb_rows * cpi->common.mb_cols));
3959 vp8_set_quantizer(cpi, Q);
3960 }
3961
3962 vp8_setup_key_frame(cpi);
3963 }
3964
3965 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
3966 {
3967 if (cpi->oxcf.error_resilient_mode) cm->refresh_entropy_probs = 0;
3968
3969 if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
3970 if (cm->frame_type == KEY_FRAME) cm->refresh_entropy_probs = 1;
3971 }
3972
3973 if (cm->refresh_entropy_probs == 0) {
3974 /* save a copy for later refresh */
3975 memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
3976 }
3977
3978 vp8_update_coef_context(cpi);
3979
3980 vp8_update_coef_probs(cpi);
3981
3982 /* transform / motion compensation build reconstruction frame
3983 * +pack coef partitions
3984 */
3985 vp8_encode_frame(cpi);
3986
3987 /* cpi->projected_frame_size is not needed for RT mode */
3988 }
3989 #else
3990 /* transform / motion compensation build reconstruction frame */
3991 vp8_encode_frame(cpi);
3992 if (cpi->oxcf.screen_content_mode == 2) {
3993 if (vp8_drop_encodedframe_overshoot(cpi, Q)) return;
3994 }
3995
3996 cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
3997 cpi->projected_frame_size =
3998 (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
3999 #endif
4000 vpx_clear_system_state();
4001
4002 /* Test to see if the stats generated for this frame indicate that
4003 * we should have coded a key frame (assuming that we didn't)!
4004 */
4005
4006 if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME &&
4007 cpi->compressor_speed != 2) {
4008 #if !CONFIG_REALTIME_ONLY
4009 if (decide_key_frame(cpi)) {
4010 /* Reset all our sizing numbers and recode */
4011 cm->frame_type = KEY_FRAME;
4012
4013 vp8_pick_frame_size(cpi);
4014
4015 /* Clear the Alt reference frame active flag when we have
4016 * a key frame
4017 */
4018 cpi->source_alt_ref_active = 0;
4019
4020 // Set the loop filter deltas and segmentation map update
4021 setup_features(cpi);
4022
4023 vp8_restore_coding_context(cpi);
4024
4025 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4026
4027 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
4028 &frame_over_shoot_limit);
4029
4030 /* Limit Q range for the adaptive loop. */
4031 bottom_index = cpi->active_best_quality;
4032 top_index = cpi->active_worst_quality;
4033 q_low = cpi->active_best_quality;
4034 q_high = cpi->active_worst_quality;
4035
4036 loop_count++;
4037 Loop = 1;
4038
4039 continue;
4040 }
4041 #endif
4042 }
4043
4044 vpx_clear_system_state();
4045
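/* Guard against a zero limit; it is used as a divisor just below. */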
4046 if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
4047
4048 /* Are we overshooting and up against the limit of active max Q? */
4049 if (((cpi->pass != 2) ||
4050 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
4051 (Q == cpi->active_worst_quality) &&
4052 (cpi->active_worst_quality < cpi->worst_quality) &&
4053 (cpi->projected_frame_size > frame_over_shoot_limit)) {
4054 int over_size_percent =
4055 ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) /
4056 frame_over_shoot_limit;
4057
4058 /* If so is there any scope for relaxing it */
4059 while ((cpi->active_worst_quality < cpi->worst_quality) &&
4060 (over_size_percent > 0)) {
4061 cpi->active_worst_quality++;
4062 /* Assume 1 qstep = about 4% on frame size. */
4063 over_size_percent = (int)(over_size_percent * 0.96);
4064 }
4065 #if !CONFIG_REALTIME_ONLY
4066 top_index = cpi->active_worst_quality;
4067 #endif // !CONFIG_REALTIME_ONLY
4068 /* If we have updated the active max Q do not call
4069 * vp8_update_rate_correction_factors() this loop.
4070 */
4071 active_worst_qchanged = 1;
4072 } else {
4073 active_worst_qchanged = 0;
4074 }
4075
4076 #if CONFIG_REALTIME_ONLY
4077 Loop = 0;
4078 #else
4079 /* Special case handling for forced key frames */
4080 if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
4081 int last_q = Q;
4082 int kf_err = vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
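/* kf_err is the reconstruction error of this forced key frame; it is
 * compared against ambient_err (recorded for the frame preceding the
 * forced key frame) and Q is bisected between q_low and q_high to bring
 * kf_err roughly into the range [ambient_err / 2, ambient_err * 7 / 8].
 */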
4083
4084 /* The key frame is not good enough */
4085 if (kf_err > ((cpi->ambient_err * 7) >> 3)) {
4086 /* Lower q_high */
4087 q_high = (Q > q_low) ? (Q - 1) : q_low;
4088
4089 /* Adjust Q */
4090 Q = (q_high + q_low) >> 1;
4091 }
4092 /* The key frame is much better than the previous frame */
4093 else if (kf_err < (cpi->ambient_err >> 1)) {
4094 /* Raise q_low */
4095 q_low = (Q < q_high) ? (Q + 1) : q_high;
4096
4097 /* Adjust Q */
4098 Q = (q_high + q_low + 1) >> 1;
4099 }
4100
4101 /* Clamp Q to upper and lower limits: */
4102 if (Q > q_high) {
4103 Q = q_high;
4104 } else if (Q < q_low) {
4105 Q = q_low;
4106 }
4107
4108 Loop = Q != last_q;
4109 }
4110
4111 /* Is the projected frame size out of range and are we allowed
4112 * to attempt a recode?
4113 */
4114 else if (recode_loop_test(cpi, frame_over_shoot_limit,
4115 frame_under_shoot_limit, Q, top_index,
4116 bottom_index)) {
4117 int last_q = Q;
4118 int Retries = 0;
4119
4120 /* Frame size out of permitted range. Update correction factor
4121 * & compute new Q to try...
4122 */
4123
4124 /* Frame is too large */
4125 if (cpi->projected_frame_size > cpi->this_frame_target) {
4126 /* Raise q_low to at least the current value */
4127 q_low = (Q < q_high) ? (Q + 1) : q_high;
4128
4129 /* If we are using over quant do the same for zbin_oq_low */
4130 if (cpi->mb.zbin_over_quant > 0) {
4131 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4132 ? (cpi->mb.zbin_over_quant + 1)
4133 : zbin_oq_high;
4134 }
4135
4136 if (undershoot_seen) {
4137 /* Update rate_correction_factor unless
4138 * cpi->active_worst_quality has changed.
4139 */
4140 if (!active_worst_qchanged) {
4141 vp8_update_rate_correction_factors(cpi, 1);
4142 }
4143
4144 Q = (q_high + q_low + 1) / 2;
4145
4146 /* Adjust cpi->zbin_over_quant (only allowed when Q
4147 * is max)
4148 */
4149 if (Q < MAXQ) {
4150 cpi->mb.zbin_over_quant = 0;
4151 } else {
4152 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4153 ? (cpi->mb.zbin_over_quant + 1)
4154 : zbin_oq_high;
4155 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4156 }
4157 } else {
4158 /* Update rate_correction_factor unless
4159 * cpi->active_worst_quality has changed.
4160 */
4161 if (!active_worst_qchanged) {
4162 vp8_update_rate_correction_factors(cpi, 0);
4163 }
4164
4165 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4166
4167 while (((Q < q_low) || (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
4168 (Retries < 10)) {
4169 vp8_update_rate_correction_factors(cpi, 0);
4170 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4171 Retries++;
4172 }
4173 }
4174
4175 overshoot_seen = 1;
4176 }
4177 /* Frame is too small */
4178 else {
4179 if (cpi->mb.zbin_over_quant == 0) {
4180 /* Lower q_high if not using over quant */
4181 q_high = (Q > q_low) ? (Q - 1) : q_low;
4182 } else {
4183 /* else lower zbin_oq_high */
4184 zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low)
4185 ? (cpi->mb.zbin_over_quant - 1)
4186 : zbin_oq_low;
4187 }
4188
4189 if (overshoot_seen) {
4190 /* Update rate_correction_factor unless
4191 * cpi->active_worst_quality has changed.
4192 */
4193 if (!active_worst_qchanged) {
4194 vp8_update_rate_correction_factors(cpi, 1);
4195 }
4196
4197 Q = (q_high + q_low) / 2;
4198
4199 /* Adjust cpi->zbin_over_quant (only allowed when Q
4200 * is max)
4201 */
4202 if (Q < MAXQ) {
4203 cpi->mb.zbin_over_quant = 0;
4204 } else {
4205 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4206 }
4207 } else {
4208 /* Update rate_correction_factor unless
4209 * cpi->active_worst_quality has changed.
4210 */
4211 if (!active_worst_qchanged) {
4212 vp8_update_rate_correction_factors(cpi, 0);
4213 }
4214
4215 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4216
4217 /* Special case reset for qlow for constrained quality.
4218 * This should only trigger where there is very substantial
4219 * undershoot on a frame and the auto cq level is above
4220 * the user passed in value.
4221 */
4222 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4223 (Q < q_low)) {
4224 q_low = Q;
4225 }
4226
4227 while (((Q > q_high) || (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
4228 (Retries < 10)) {
4229 vp8_update_rate_correction_factors(cpi, 0);
4230 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4231 Retries++;
4232 }
4233 }
4234
4235 undershoot_seen = 1;
4236 }
4237
4238 /* Clamp Q to upper and lower limits: */
4239 if (Q > q_high) {
4240 Q = q_high;
4241 } else if (Q < q_low) {
4242 Q = q_low;
4243 }
4244
4245 /* Clamp cpi->zbin_over_quant */
4246 cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low)
4247 ? zbin_oq_low
4248 : (cpi->mb.zbin_over_quant > zbin_oq_high)
4249 ? zbin_oq_high
4250 : cpi->mb.zbin_over_quant;
4251
4252 Loop = Q != last_q;
4253 } else {
4254 Loop = 0;
4255 }
4256 #endif // CONFIG_REALTIME_ONLY
4257
4258 if (cpi->is_src_frame_alt_ref) Loop = 0;
4259
4260 if (Loop == 1) {
4261 vp8_restore_coding_context(cpi);
4262 loop_count++;
4263 #if CONFIG_INTERNAL_STATS
4264 cpi->tot_recode_hits++;
4265 #endif
4266 }
4267 } while (Loop == 1);
4268
4269 #if defined(DROP_UNCODED_FRAMES)
4270 /* if there are no coded macroblocks at all drop this frame */
4271 if (cpi->common.MBs == cpi->mb.skip_true_count &&
4272 (cpi->drop_frame_count & 7) != 7 && cm->frame_type != KEY_FRAME) {
4273 cpi->common.current_video_frame++;
4274 cpi->frames_since_key++;
4275 cpi->drop_frame_count++;
4276 // We advance the temporal pattern for dropped frames.
4277 cpi->temporal_pattern_counter++;
4278 return;
4279 }
4280 cpi->drop_frame_count = 0;
4281 #endif
4282
4283 #if 0
4284 /* Experimental code for lagged and one pass
4285 * Update stats used for one pass GF selection
4286 */
4287 {
4288 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
4289 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
4290 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
4291 }
4292 #endif
4293
4294 /* Special case code to reduce pulsing when key frames are forced at a
4295 * fixed interval. Note the reconstruction error if it is the frame before
4296 * the force key frame
4297 */
4298 if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
4299 cpi->ambient_err =
4300 vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4301 }
4302
4303 /* This frame's MVs are saved and will be used in next frame's MV predictor.
4304 * The last frame has one more line (added to the bottom) and one more column
4305 * (added to the right) than cm->mip. The edge elements are initialized to 0.
4306 */
4307 #if CONFIG_MULTI_RES_ENCODING
4308 if (!cpi->oxcf.mr_encoder_id && cm->show_frame)
4309 #else
4310 if (cm->show_frame) /* do not save for altref frame */
4311 #endif
4312 {
4313 int mb_row;
4314 int mb_col;
4315 /* Point to beginning of allocated MODE_INFO arrays. */
4316 MODE_INFO *tmp = cm->mip;
4317
4318 if (cm->frame_type != KEY_FRAME) {
4319 for (mb_row = 0; mb_row < cm->mb_rows + 1; ++mb_row) {
4320 for (mb_col = 0; mb_col < cm->mb_cols + 1; ++mb_col) {
4321 if (tmp->mbmi.ref_frame != INTRA_FRAME) {
4322 cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int =
4323 tmp->mbmi.mv.as_int;
4324 }
4325
4326 cpi->lf_ref_frame_sign_bias[mb_col +
4327 mb_row * (cm->mode_info_stride + 1)] =
4328 cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
4329 cpi->lf_ref_frame[mb_col + mb_row * (cm->mode_info_stride + 1)] =
4330 tmp->mbmi.ref_frame;
4331 tmp++;
4332 }
4333 }
4334 }
4335 }
4336
4337 /* Count last ref frame 0,0 usage on current encoded frame. */
4338 {
4339 int mb_row;
4340 int mb_col;
4341 /* Point to beginning of MODE_INFO arrays. */
4342 MODE_INFO *tmp = cm->mi;
4343
4344 cpi->zeromv_count = 0;
4345
4346 if (cm->frame_type != KEY_FRAME) {
4347 for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
4348 for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
4349 if (tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME) {
4350 cpi->zeromv_count++;
4351 }
4352 tmp++;
4353 }
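/* Step over the border element at the end of each macroblock row. */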
4354 tmp++;
4355 }
4356 }
4357 }
4358
4359 #if CONFIG_MULTI_RES_ENCODING
4360 vp8_cal_dissimilarity(cpi);
4361 #endif
4362
4363 /* Update the GF useage maps.
4364 * This is done after completing the compression of a frame when all
4365 * modes etc. are finalized but before loop filter
4366 */
4367 if (cpi->oxcf.number_of_layers == 1) {
4368 vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
4369 }
4370
4371 if (cm->frame_type == KEY_FRAME) cm->refresh_last_frame = 1;
4372
4373 #if 0
4374 {
4375 FILE *f = fopen("gfactive.stt", "a");
4376 fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
4377 fclose(f);
4378 }
4379 #endif
4380
4381 /* For inter frames the current default behavior is that when
4382 * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
4383 * This is purely an encoder decision at present.
4384 */
4385 if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame) {
4386 cm->copy_buffer_to_arf = 2;
4387 } else {
4388 cm->copy_buffer_to_arf = 0;
4389 }
4390
4391 cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
4392
4393 #if CONFIG_TEMPORAL_DENOISING
4394 // Get some measure of the amount of noise, by measuring the (partial) mse
4395 // between source and denoised buffer, for y channel. Partial refers to
4396 // computing the sse for a sub-sample of the frame (i.e., skip x blocks
4397 // along row/column), and only for blocks in that set that are in
4398 // consecutive ZEROMV_LAST mode.
4399 // Do this every ~8 frames, to further reduce complexity.
4400 // TODO(marpan): Keep this for now for the case where
4401 // cpi->oxcf.noise_sensitivity < 4; it should be removed in favor of
4402 // the process_denoiser_mode_change() function below.
4404 if (cpi->oxcf.noise_sensitivity > 0 && cpi->oxcf.noise_sensitivity < 4 &&
4405 !cpi->oxcf.screen_content_mode && cpi->frames_since_key % 8 == 0 &&
4406 cm->frame_type != KEY_FRAME) {
4407 cpi->mse_source_denoised = measure_square_diff_partial(
4408 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi);
4409 }
4410
4411 // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse
4412 // of source diff (between current and previous frame), and determine if we
4413 // should switch the denoiser mode. Sampling refers to computing the mse for
4414 // a sub-sample of the frame (i.e., skip x blocks along row/column), and
4415 // only for blocks in that set that have used ZEROMV LAST, along with some
4416 // constraint on the sum diff between blocks. This process is called every
4417 // ~8 frames, to further reduce complexity.
4418 if (cpi->oxcf.noise_sensitivity == 4 && !cpi->oxcf.screen_content_mode &&
4419 cpi->frames_since_key % 8 == 0 && cm->frame_type != KEY_FRAME) {
4420 process_denoiser_mode_change(cpi);
4421 }
4422 #endif
4423
4424 #if CONFIG_MULTITHREAD
4425 if (cpi->b_multi_threaded) {
4426 /* start loopfilter in separate thread */
4427 sem_post(&cpi->h_event_start_lpf);
4428 cpi->b_lpf_running = 1;
4429 } else
4430 #endif
4431 {
4432 vp8_loopfilter_frame(cpi, cm);
4433 }
4434
4435 update_reference_frames(cpi);
4436
4437 #ifdef OUTPUT_YUV_DENOISED
4438 vp8_write_yuv_frame(yuv_denoised_file,
4439 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
4440 #endif
4441
4442 #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
4443 if (cpi->oxcf.error_resilient_mode) {
4444 cm->refresh_entropy_probs = 0;
4445 }
4446 #endif
4447
4448 #if CONFIG_MULTITHREAD
4449 /* wait until filter_level is picked so that we can continue with stream
4450 * packing */
4451 if (cpi->b_multi_threaded) sem_wait(&cpi->h_event_end_lpf);
4452 #endif
4453
4454 /* build the bitstream */
4455 vp8_pack_bitstream(cpi, dest, dest_end, size);
4456
4457 /* Move storing frame_type out of the above loop since it is also
4458 * needed in motion search besides loopfilter */
4459 cm->last_frame_type = cm->frame_type;
4460
4461 /* Update rate control heuristics */
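/* *size is in bytes; projected_frame_size is tracked in bits. */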
4462 cpi->total_byte_count += (*size);
4463 cpi->projected_frame_size = (int)(*size) << 3;
4464
4465 if (cpi->oxcf.number_of_layers > 1) {
4466 unsigned int i;
4467 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4468 cpi->layer_context[i].total_byte_count += (*size);
4469 }
4470 }
4471
4472 if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 2);
4473
4474 cpi->last_q[cm->frame_type] = cm->base_qindex;
4475
4476 if (cm->frame_type == KEY_FRAME) {
4477 vp8_adjust_key_frame_context(cpi);
4478 }
4479
4480 /* Keep a record of ambient average Q. */
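/* Running average with weight 3/4 on the previous average and 1/4 on the
 * current frame's base Q index (with rounding).
 */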
4481 if (cm->frame_type != KEY_FRAME) {
4482 cpi->avg_frame_qindex =
4483 (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
4484 }
4485
4486 /* Keep a record from which we can calculate the average Q excluding
4487 * GF updates and key frames
4488 */
4489 if ((cm->frame_type != KEY_FRAME) &&
4490 ((cpi->oxcf.number_of_layers > 1) ||
4491 (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame))) {
4492 cpi->ni_frames++;
4493
4494 /* Calculate the average Q for normal inter frames (not key or GFU
4495 * frames).
4496 */
4497 if (cpi->pass == 2) {
4498 cpi->ni_tot_qi += Q;
4499 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4500 } else {
4501 /* Damp value for first few frames */
4502 if (cpi->ni_frames > 150) {
4503 cpi->ni_tot_qi += Q;
4504 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4505 }
4506 /* For one pass, early in the clip ... average the current frame Q
4507 * value with the worstq entered by the user as a dampening measure
4508 */
4509 else {
4510 cpi->ni_tot_qi += Q;
4511 cpi->ni_av_qi =
4512 ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
4513 }
4514
4515 /* If the average Q is higher than what was used in the last
4516 * frame (after going through the recode loop to keep the frame
4517 * size within range) then use the last frame value - 1. The -1
4518 * is designed to stop Q and hence the data rate, from
4519 * progressively falling away during difficult sections, but at
4520 * the same time reduce the number of iterations around the
4521 * recode loop.
4522 */
4523 if (Q > cpi->ni_av_qi) cpi->ni_av_qi = Q - 1;
4524 }
4525 }
4526
4527 /* Update the buffer level variable. */
4528 /* Non-viewable frames are a special case and are treated as pure overhead. */
4529 if (!cm->show_frame) {
4530 cpi->bits_off_target -= cpi->projected_frame_size;
4531 } else {
4532 cpi->bits_off_target +=
4533 cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
4534 }
4535
4536 /* Clip the buffer level to the maximum specified buffer size */
4537 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
4538 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
4539 }
4540
4541 // If the frame dropper is not enabled, don't let the buffer level go below
4542 // some threshold, given here by -|maximum_buffer_size|. For now we only do
4543 // this for screen content input.
4544 if (cpi->drop_frames_allowed == 0 && cpi->oxcf.screen_content_mode &&
4545 cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size) {
4546 cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size;
4547 }
4548
4549 /* Rolling monitors of whether we are over or underspending used to
4550 * help regulate min and Max Q in two pass.
4551 */
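/* rolling_* use a short window (weight 3/4 on history) and long_rolling_*
 * a longer one (weight 31/32 on history), both with rounding.
 */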
4552 cpi->rolling_target_bits =
4553 ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
4554 cpi->rolling_actual_bits =
4555 ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
4556 cpi->long_rolling_target_bits =
4557 ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
4558 cpi->long_rolling_actual_bits =
4559 ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) /
4560 32;
4561
4562 /* Actual bits spent */
4563 cpi->total_actual_bits += cpi->projected_frame_size;
4564
4565 /* Debug stats */
4566 cpi->total_target_vs_actual +=
4567 (cpi->this_frame_target - cpi->projected_frame_size);
4568
4569 cpi->buffer_level = cpi->bits_off_target;
4570
4571 /* Propagate values to higher temporal layers */
4572 if (cpi->oxcf.number_of_layers > 1) {
4573 unsigned int i;
4574
4575 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4576 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4577 int bits_off_for_this_layer = (int)(lc->target_bandwidth / lc->framerate -
4578 cpi->projected_frame_size);
4579
4580 lc->bits_off_target += bits_off_for_this_layer;
4581
4582 /* Clip buffer level to maximum buffer size for the layer */
4583 if (lc->bits_off_target > lc->maximum_buffer_size) {
4584 lc->bits_off_target = lc->maximum_buffer_size;
4585 }
4586
4587 lc->total_actual_bits += cpi->projected_frame_size;
4588 lc->total_target_vs_actual += bits_off_for_this_layer;
4589 lc->buffer_level = lc->bits_off_target;
4590 }
4591 }
4592
4593 /* Update bits left to the kf and gf groups to account for overshoot
4594 * or undershoot on these frames
4595 */
4596 if (cm->frame_type == KEY_FRAME) {
4597 cpi->twopass.kf_group_bits +=
4598 cpi->this_frame_target - cpi->projected_frame_size;
4599
4600 if (cpi->twopass.kf_group_bits < 0) cpi->twopass.kf_group_bits = 0;
4601 } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) {
4602 cpi->twopass.gf_group_bits +=
4603 cpi->this_frame_target - cpi->projected_frame_size;
4604
4605 if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0;
4606 }
4607
4608 if (cm->frame_type != KEY_FRAME) {
4609 if (cpi->common.refresh_alt_ref_frame) {
4610 cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
4611 cpi->last_skip_probs_q[2] = cm->base_qindex;
4612 } else if (cpi->common.refresh_golden_frame) {
4613 cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
4614 cpi->last_skip_probs_q[1] = cm->base_qindex;
4615 } else {
4616 cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
4617 cpi->last_skip_probs_q[0] = cm->base_qindex;
4618
4619 /* update the baseline */
4620 cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
4621 }
4622 }
4623
4624 #if 0 && CONFIG_INTERNAL_STATS
4625 {
4626 FILE *f = fopen("tmp.stt", "a");
4627
4628 vpx_clear_system_state();
4629
4630 if (cpi->twopass.total_left_stats.coded_error != 0.0)
4631 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4632 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4633 "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
4634 cpi->common.current_video_frame, cpi->this_frame_target,
4635 cpi->projected_frame_size,
4636 (cpi->projected_frame_size - cpi->this_frame_target),
4637 cpi->total_target_vs_actual,
4638 cpi->buffer_level,
4639 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4640 cpi->total_actual_bits, cm->base_qindex,
4641 cpi->active_best_quality, cpi->active_worst_quality,
4642 cpi->ni_av_qi, cpi->cq_target_quality,
4643 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4644 cm->frame_type, cpi->gfu_boost,
4645 cpi->twopass.est_max_qcorrection_factor,
4646 cpi->twopass.bits_left,
4647 cpi->twopass.total_left_stats.coded_error,
4648 (double)cpi->twopass.bits_left /
4649 cpi->twopass.total_left_stats.coded_error,
4650 cpi->tot_recode_hits);
4651 else
4652 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4653 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4654 "%8.2lf %"PRId64" %10.3lf %8d\n",
4655 cpi->common.current_video_frame, cpi->this_frame_target,
4656 cpi->projected_frame_size,
4657 (cpi->projected_frame_size - cpi->this_frame_target),
4658 cpi->total_target_vs_actual,
4659 cpi->buffer_level,
4660 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4661 cpi->total_actual_bits, cm->base_qindex,
4662 cpi->active_best_quality, cpi->active_worst_quality,
4663 cpi->ni_av_qi, cpi->cq_target_quality,
4664 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4665 cm->frame_type, cpi->gfu_boost,
4666 cpi->twopass.est_max_qcorrection_factor,
4667 cpi->twopass.bits_left,
4668 cpi->twopass.total_left_stats.coded_error,
4669 cpi->tot_recode_hits);
4670
4671 fclose(f);
4672
4673 {
4674 FILE *fmodes = fopen("Modes.stt", "a");
4675
4676 fprintf(fmodes, "%6d:%1d:%1d:%1d ",
4677 cpi->common.current_video_frame,
4678 cm->frame_type, cm->refresh_golden_frame,
4679 cm->refresh_alt_ref_frame);
4680
4681 fprintf(fmodes, "\n");
4682
4683 fclose(fmodes);
4684 }
4685 }
4686
4687 #endif
4688
4689 if (cm->refresh_golden_frame == 1) {
4690 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
4691 } else {
4692 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_GOLDEN;
4693 }
4694
4695 if (cm->refresh_alt_ref_frame == 1) {
4696 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
4697 } else {
4698 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_ALTREF;
4699 }
4700
4701 if (cm->refresh_last_frame & cm->refresh_golden_frame) { /* both refreshed */
4702 cpi->gold_is_last = 1;
4703 } else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) {
4704 /* 1 refreshed but not the other */
4705 cpi->gold_is_last = 0;
4706 }
4707
4708 if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) { /* both refreshed */
4709 cpi->alt_is_last = 1;
4710 } else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) {
4711 /* 1 refreshed but not the other */
4712 cpi->alt_is_last = 0;
4713 }
4714
4715 if (cm->refresh_alt_ref_frame &
4716 cm->refresh_golden_frame) { /* both refreshed */
4717 cpi->gold_is_alt = 1;
4718 } else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) {
4719 /* 1 refreshed but not the other */
4720 cpi->gold_is_alt = 0;
4721 }
4722
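/* Start by allowing all three references, then mask out the flag for any
 * buffer that currently duplicates another reference, so identical buffers
 * are not treated as distinct references.
 */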
4723 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
4724
4725 if (cpi->gold_is_last) cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
4726
4727 if (cpi->alt_is_last) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4728
4729 if (cpi->gold_is_alt) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4730
4731 if (!cpi->oxcf.error_resilient_mode) {
4732 if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame &&
4733 (cm->frame_type != KEY_FRAME)) {
4734 /* Update the alternate reference frame stats as appropriate. */
4735 update_alt_ref_frame_stats(cpi);
4736 } else {
4737 /* Update the Golden frame stats as appropriate. */
4738 update_golden_frame_stats(cpi);
4739 }
4740 }
4741
4742 if (cm->frame_type == KEY_FRAME) {
4743 /* Tell the caller that the frame was coded as a key frame */
4744 *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
4745
4746 /* As this frame is a key frame the next defaults to an inter frame. */
4747 cm->frame_type = INTER_FRAME;
4748
4749 cpi->last_frame_percent_intra = 100;
4750 } else {
4751 *frame_flags = cm->frame_flags & ~FRAMEFLAGS_KEY;
4752
4753 cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
4754 }
4755
4756 /* Clear the one shot update flags for segmentation map and mode/ref
4757 * loop filter deltas.
4758 */
4759 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
4760 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
4761 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
4762
4763 /* Don't increment frame counters if this was an altref buffer update
4764 * not a real frame
4765 */
4766 if (cm->show_frame) {
4767 cm->current_video_frame++;
4768 cpi->frames_since_key++;
4769 cpi->temporal_pattern_counter++;
4770 }
4771
4772 /* reset to normal state now that we are done. */
4773
4774 #if 0
4775 {
4776 char filename[512];
4777 FILE *recon_file;
4778 sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
4779 recon_file = fopen(filename, "wb");
4780 fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
4781 cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
4782 fclose(recon_file);
4783 }
4784 #endif
4785
4786 /* DEBUG */
4787 /* vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
4788 }
4789 #if !CONFIG_REALTIME_ONLY
4790 static void Pass2Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
4791 unsigned char *dest_end, unsigned int *frame_flags) {
4792 if (!cpi->common.refresh_alt_ref_frame) vp8_second_pass(cpi);
4793
4794 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
4795 cpi->twopass.bits_left -= 8 * (int)(*size);
4796
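/* Credit back the two-pass minimum per-frame rate so that bits_left tracks
 * the bits available above the guaranteed VBR minimum section rate; the
 * alt-ref frame is skipped since it does not add a displayed source frame.
 */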
4797 if (!cpi->common.refresh_alt_ref_frame) {
4798 double two_pass_min_rate =
4799 (double)(cpi->oxcf.target_bandwidth *
4800 cpi->oxcf.two_pass_vbrmin_section / 100);
4801 cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
4802 }
4803 }
4804 #endif
4805
4806 int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags,
4807 YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
4808 int64_t end_time) {
4809 struct vpx_usec_timer timer;
4810 int res = 0;
4811
4812 vpx_usec_timer_start(&timer);
4813
4814 /* Reinit the lookahead buffer if the frame size changes */
4815 if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height) {
4816 assert(cpi->oxcf.lag_in_frames < 2);
4817 dealloc_raw_frame_buffers(cpi);
4818 alloc_raw_frame_buffers(cpi);
4819 }
4820
4821 if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags,
4822 cpi->active_map_enabled ? cpi->active_map : NULL)) {
4823 res = -1;
4824 }
4825 vpx_usec_timer_mark(&timer);
4826 cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
4827
4828 return res;
4829 }
4830
4831 static int frame_is_reference(const VP8_COMP *cpi) {
4832 const VP8_COMMON *cm = &cpi->common;
4833 const MACROBLOCKD *xd = &cpi->mb.e_mbd;
4834
4835 return cm->frame_type == KEY_FRAME || cm->refresh_last_frame ||
4836 cm->refresh_golden_frame || cm->refresh_alt_ref_frame ||
4837 cm->copy_buffer_to_gf || cm->copy_buffer_to_arf ||
4838 cm->refresh_entropy_probs || xd->mode_ref_lf_delta_update ||
4839 xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
4840 }
4841
4842 int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
4843 size_t *size, unsigned char *dest,
4844 unsigned char *dest_end, int64_t *time_stamp,
4845 int64_t *time_end, int flush) {
4846 VP8_COMMON *cm;
4847 struct vpx_usec_timer tsctimer;
4848 struct vpx_usec_timer ticktimer;
4849 struct vpx_usec_timer cmptimer;
4850 YV12_BUFFER_CONFIG *force_src_buffer = NULL;
4851
4852 if (!cpi) return -1;
4853
4854 cm = &cpi->common;
4855
4856 if (setjmp(cpi->common.error.jmp)) {
4857 cpi->common.error.setjmp = 0;
4858 vpx_clear_system_state();
4859 return VPX_CODEC_CORRUPT_FRAME;
4860 }
4861
4862 cpi->common.error.setjmp = 1;
4863
4864 vpx_usec_timer_start(&cmptimer);
4865
4866 cpi->source = NULL;
4867
4868 #if !CONFIG_REALTIME_ONLY
4869 /* Should we code an alternate reference frame */
4870 if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.play_alternate &&
4871 cpi->source_alt_ref_pending) {
4872 if ((cpi->source = vp8_lookahead_peek(
4873 cpi->lookahead, cpi->frames_till_gf_update_due, PEEK_FORWARD))) {
4874 cpi->alt_ref_source = cpi->source;
4875 if (cpi->oxcf.arnr_max_frames > 0) {
4876 vp8_temporal_filter_prepare_c(cpi, cpi->frames_till_gf_update_due);
4877 force_src_buffer = &cpi->alt_ref_buffer;
4878 }
4879 cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
4880 cm->refresh_alt_ref_frame = 1;
4881 cm->refresh_golden_frame = 0;
4882 cm->refresh_last_frame = 0;
4883 cm->show_frame = 0;
4884 /* Clear Pending alt Ref flag. */
4885 cpi->source_alt_ref_pending = 0;
4886 cpi->is_src_frame_alt_ref = 0;
4887 }
4888 }
4889 #endif
4890
4891 if (!cpi->source) {
4892 /* Read last frame source if we are encoding first pass. */
4893 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4894 if ((cpi->last_source =
4895 vp8_lookahead_peek(cpi->lookahead, 1, PEEK_BACKWARD)) == NULL) {
4896 return -1;
4897 }
4898 }
4899
4900 if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) {
4901 cm->show_frame = 1;
4902
4903 cpi->is_src_frame_alt_ref =
4904 cpi->alt_ref_source && (cpi->source == cpi->alt_ref_source);
4905
4906 if (cpi->is_src_frame_alt_ref) cpi->alt_ref_source = NULL;
4907 }
4908 }
4909
4910 if (cpi->source) {
4911 cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
4912 cpi->un_scaled_source = cpi->Source;
4913 *time_stamp = cpi->source->ts_start;
4914 *time_end = cpi->source->ts_end;
4915 *frame_flags = cpi->source->flags;
4916
4917 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4918 cpi->last_frame_unscaled_source = &cpi->last_source->img;
4919 }
4920 } else {
4921 *size = 0;
4922 #if !CONFIG_REALTIME_ONLY
4923
4924 if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
4925 vp8_end_first_pass(cpi); /* get last stats packet */
4926 cpi->twopass.first_pass_done = 1;
4927 }
4928
4929 #endif
4930
4931 return -1;
4932 }
4933
4934 if (cpi->source->ts_start < cpi->first_time_stamp_ever) {
4935 cpi->first_time_stamp_ever = cpi->source->ts_start;
4936 cpi->last_end_time_stamp_seen = cpi->source->ts_start;
4937 }
4938
  /* adjust frame rates based on timestamps given */
  if (cm->show_frame) {
    int64_t this_duration;
    int step = 0;

    if (cpi->source->ts_start == cpi->first_time_stamp_ever) {
      this_duration = cpi->source->ts_end - cpi->source->ts_start;
      step = 1;
    } else {
      int64_t last_duration;

      this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
      last_duration = cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
      /* do a step update if the duration changes by 10% */
      if (last_duration) {
        step = (int)(((this_duration - last_duration) * 10 / last_duration));
      }
    }

    if (this_duration) {
      if (step) {
        cpi->ref_framerate = 10000000.0 / this_duration;
      } else {
        double avg_duration, interval;

        /* Average this frame's rate into the last second's average
         * frame rate. If we haven't seen 1 second yet, then average
         * over the whole interval seen.
         */
        interval = (double)(cpi->source->ts_end - cpi->first_time_stamp_ever);
        if (interval > 10000000.0) interval = 10000000;

        avg_duration = 10000000.0 / cpi->ref_framerate;
        avg_duration *= (interval - avg_duration + this_duration);
        avg_duration /= interval;

        cpi->ref_framerate = 10000000.0 / avg_duration;
      }
#if CONFIG_MULTI_RES_ENCODING
      if (cpi->oxcf.mr_total_resolutions > 1) {
        LOWER_RES_FRAME_INFO *low_res_frame_info =
            (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
        // The frame rate should be the same for all spatial layers in
        // multi-res encoding (simulcast), so we constrain the frame rate of
        // the higher layers to be that of the lowest resolution. This is
        // needed as the application may decide to skip encoding a high layer
        // and then start again, in which case a big jump in time-stamps will
        // be received for that high layer, which would yield an incorrect
        // frame rate (from the time-stamp adjustment in the calculation
        // above).
        if (cpi->oxcf.mr_encoder_id) {
          cpi->ref_framerate = low_res_frame_info->low_res_framerate;
        } else {
          // Keep track of the frame rate for the lowest resolution.
          low_res_frame_info->low_res_framerate = cpi->ref_framerate;
        }
      }
#endif
      if (cpi->oxcf.number_of_layers > 1) {
        unsigned int i;

        /* Update frame rates for each layer */
        assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
        for (i = 0; i < cpi->oxcf.number_of_layers && i < VPX_TS_MAX_LAYERS;
             ++i) {
          LAYER_CONTEXT *lc = &cpi->layer_context[i];
          lc->framerate = cpi->ref_framerate / cpi->oxcf.rate_decimator[i];
        }
      } else {
        vp8_new_framerate(cpi, cpi->ref_framerate);
      }
    }

    cpi->last_time_stamp_seen = cpi->source->ts_start;
    cpi->last_end_time_stamp_seen = cpi->source->ts_end;
  }

  if (cpi->oxcf.number_of_layers > 1) {
    int layer;

    update_layer_contexts(cpi);

    /* Restore layer specific context & set frame rate */
    if (cpi->temporal_layer_id >= 0) {
      layer = cpi->temporal_layer_id;
    } else {
      layer =
          cpi->oxcf
              .layer_id[cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
    }
    restore_layer_context(cpi, layer);
    vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
  }

  if (cpi->compressor_speed == 2) {
    vpx_usec_timer_start(&tsctimer);
    vpx_usec_timer_start(&ticktimer);
  }

  cpi->lf_zeromv_pct = (cpi->zeromv_count * 100) / cm->MBs;

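  /* Buffer layout used in the block below (numbers are illustrative only,
   * assuming for example a 1,000,000-byte output buffer and four token
   * partitions): the first dest_size / 10 bytes (100,000) are reserved for
   * the control partition, and each of the num_part token partitions gets
   * (dest_size * 9) / (10 * num_part) bytes (225,000 each), laid out
   * back-to-back after the control partition.
   */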
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  {
    int i;
    const int num_part = (1 << cm->multi_token_partition);
    /* the available bytes in dest */
    const unsigned long dest_size = dest_end - dest;
    const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);

    unsigned char *dp = dest;

    cpi->partition_d[0] = dp;
    dp += dest_size / 10; /* reserve 1/10 for control partition */
    cpi->partition_d_end[0] = dp;

    for (i = 0; i < num_part; ++i) {
      cpi->partition_d[i + 1] = dp;
      dp += tok_part_buff_size;
      cpi->partition_d_end[i + 1] = dp;
    }
  }
#endif

  /* start with a 0 size frame */
  *size = 0;

  /* Clear down mmx registers */
  vpx_clear_system_state();

  cm->frame_type = INTER_FRAME;
  cm->frame_flags = *frame_flags;

#if 0

    if (cm->refresh_alt_ref_frame)
    {
        cm->refresh_golden_frame = 0;
        cm->refresh_last_frame = 0;
    }
    else
    {
        cm->refresh_golden_frame = 0;
        cm->refresh_last_frame = 1;
    }

#endif
  /* find a free buffer for the new frame */
  {
    int i = 0;
    for (; i < NUM_YV12_BUFFERS; ++i) {
      if (!cm->yv12_fb[i].flags) {
        cm->new_fb_idx = i;
        break;
      }
    }

    assert(i < NUM_YV12_BUFFERS);
  }
  switch (cpi->pass) {
#if !CONFIG_REALTIME_ONLY
    case 1: Pass1Encode(cpi, size, dest, frame_flags); break;
    case 2: Pass2Encode(cpi, size, dest, dest_end, frame_flags); break;
#endif  // !CONFIG_REALTIME_ONLY
    default:
      encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
      break;
  }

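  /* The timing averages kept below are exponential moving averages of the
   * form avg = (7 * avg + new_sample) >> 3, i.e. each new sample carries a
   * weight of 1/8. For example (figures illustrative only), with
   * avg_encode_time = 8000 us and a new duration of 16000 us, the updated
   * average is (7 * 8000 + 16000) >> 3 = 9000 us.
   */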
  if (cpi->compressor_speed == 2) {
    unsigned int duration, duration2;
    vpx_usec_timer_mark(&tsctimer);
    vpx_usec_timer_mark(&ticktimer);

    duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
    duration2 = (unsigned int)((double)duration / 2);

    if (cm->frame_type != KEY_FRAME) {
      if (cpi->avg_encode_time == 0) {
        cpi->avg_encode_time = duration;
      } else {
        cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
      }
    }

    if (duration2) {
      {
        if (cpi->avg_pick_mode_time == 0) {
          cpi->avg_pick_mode_time = duration2;
        } else {
          cpi->avg_pick_mode_time =
              (7 * cpi->avg_pick_mode_time + duration2) >> 3;
        }
      }
    }
  }

  if (cm->refresh_entropy_probs == 0) {
    memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
  }

  /* Save the contexts separately for alt ref, gold and last. */
  /* (TODO jbb -> Optimize this with pointers to avoid extra copies.) */
  if (cm->refresh_alt_ref_frame) memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));

  if (cm->refresh_golden_frame) memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));

  if (cm->refresh_last_frame) memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));

  /* If it's a dropped frame, honor the requests on subsequent frames. */
  if (*size > 0) {
    cpi->droppable = !frame_is_reference(cpi);

    /* return to normal state */
    cm->refresh_entropy_probs = 1;
    cm->refresh_alt_ref_frame = 0;
    cm->refresh_golden_frame = 0;
    cm->refresh_last_frame = 1;
    cm->frame_type = INTER_FRAME;
  }

  /* Save layer specific state */
  if (cpi->oxcf.number_of_layers > 1) save_layer_context(cpi);

  vpx_usec_timer_mark(&cmptimer);
  cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);

  if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) {
    generate_psnr_packet(cpi);
  }

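  /* A note on the internal-stats block below: vpx_sse_to_psnr() converts an
   * accumulated sum of squared errors into PSNR, effectively
   * 10 * log10(samples * peak^2 / sse) with peak passed as 255.0 here. The
   * per-plane errors are summed into a whole-frame figure before being added
   * to the running totals.
   */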
#if CONFIG_INTERNAL_STATS

  if (cpi->pass != 1) {
    cpi->bytes += *size;

    if (cm->show_frame) {
      cpi->common.show_frame_mi = cpi->common.mi;
      cpi->count++;

      if (cpi->b_calculate_psnr) {
        uint64_t ye, ue, ve;
        double frame_psnr;
        YV12_BUFFER_CONFIG *orig = cpi->Source;
        YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
        unsigned int y_width = cpi->common.Width;
        unsigned int y_height = cpi->common.Height;
        unsigned int uv_width = (y_width + 1) / 2;
        unsigned int uv_height = (y_height + 1) / 2;
        int y_samples = y_height * y_width;
        int uv_samples = uv_height * uv_width;
        int t_samples = y_samples + 2 * uv_samples;
        double sq_error;

        ye = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
                              recon->y_stride, y_width, y_height);

        ue = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
                              recon->uv_stride, uv_width, uv_height);

        ve = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
                              recon->uv_stride, uv_width, uv_height);

        sq_error = (double)(ye + ue + ve);

        frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);

        cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
        cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
        cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
        cpi->total_sq_error += sq_error;
        cpi->total += frame_psnr;
#if CONFIG_POSTPROC
        {
          YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
          double sq_error2;
          double frame_psnr2, frame_ssim2 = 0;
          double weight = 0;

          vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer,
                      cm->filter_level * 10 / 6, 1, 0);
          vpx_clear_system_state();

          ye = calc_plane_error(orig->y_buffer, orig->y_stride, pp->y_buffer,
                                pp->y_stride, y_width, y_height);

          ue = calc_plane_error(orig->u_buffer, orig->uv_stride, pp->u_buffer,
                                pp->uv_stride, uv_width, uv_height);

          ve = calc_plane_error(orig->v_buffer, orig->uv_stride, pp->v_buffer,
                                pp->uv_stride, uv_width, uv_height);

          sq_error2 = (double)(ye + ue + ve);

          frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);

          cpi->totalp_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
          cpi->totalp_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
          cpi->totalp_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
          cpi->total_sq_error2 += sq_error2;
          cpi->totalp += frame_psnr2;

          frame_ssim2 =
              vpx_calc_ssim(cpi->Source, &cm->post_proc_buffer, &weight);

          cpi->summed_quality += frame_ssim2 * weight;
          cpi->summed_weights += weight;

          if (cpi->oxcf.number_of_layers > 1) {
            unsigned int i;

            for (i = cpi->current_layer; i < cpi->oxcf.number_of_layers; ++i) {
              cpi->frames_in_layer[i]++;

              cpi->bytes_in_layer[i] += *size;
              cpi->sum_psnr[i] += frame_psnr;
              cpi->sum_psnr_p[i] += frame_psnr2;
              cpi->total_error2[i] += sq_error;
              cpi->total_error2_p[i] += sq_error2;
              cpi->sum_ssim[i] += frame_ssim2 * weight;
              cpi->sum_weights[i] += weight;
            }
          }
        }
#endif
      }
    }
  }

#if 0

    if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
    {
        skiptruecount += cpi->skip_true_count;
        skipfalsecount += cpi->skip_false_count;
    }

#endif
#if 0

    if (cpi->pass != 1)
    {
        FILE *f = fopen("skip.stt", "a");
        fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);

        if (cpi->is_src_frame_alt_ref == 1)
            fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size);

        fclose(f);
    }

#endif
#endif

  cpi->common.error.setjmp = 0;

#if CONFIG_MULTITHREAD
  /* wait for the loopfilter thread to finish */
  if (cpi->b_multi_threaded && cpi->b_lpf_running) {
    sem_wait(&cpi->h_event_end_lpf);
    cpi->b_lpf_running = 0;
  }
#endif

  return 0;
}

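/* Return the frame to be shown to the caller for preview/display. When the
 * most recently coded frame was an (invisible) altref update there is nothing
 * new to show, so -1 is returned; otherwise the reconstructed frame is
 * post-processed (when CONFIG_POSTPROC is enabled) or copied out as-is.
 */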
int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest,
                              vp8_ppflags_t *flags) {
  if (cpi->common.refresh_alt_ref_frame) {
    return -1;
  } else {
    int ret;

#if CONFIG_POSTPROC
    cpi->common.show_frame_mi = cpi->common.mi;
    ret = vp8_post_proc_frame(&cpi->common, dest, flags);
#else
    (void)flags;

    if (cpi->common.frame_to_show) {
      *dest = *cpi->common.frame_to_show;
      dest->y_width = cpi->common.Width;
      dest->y_height = cpi->common.Height;
      dest->uv_height = cpi->common.Height / 2;
      ret = 0;
    } else {
      ret = -1;
    }

#endif
    vpx_clear_system_state();
    return ret;
  }
}

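/* Illustrative usage sketch for the ROI map below (hypothetical values, not
 * taken from any caller): boost the quality of macroblocks assigned to
 * segment 1 and leave the other segments untouched. The delta_q/delta_lf
 * entries are limited to +/-63 (external Q scale) and are translated to
 * internal values via q_trans[]; threshold[] sets the per-segment
 * encode-breakout.
 *
 *   unsigned char map[mb_rows * mb_cols];  // one byte per MB, segment id 0..3
 *   int delta_q[4] = { 0, -10, 0, 0 };     // segment 1 gets a lower Q
 *   int delta_lf[4] = { 0, 0, 0, 0 };
 *   unsigned int threshold[4] = { 0, 0, 0, 0 };
 *   // mark the desired macroblocks with 1 in map[], then:
 *   vp8_set_roimap(cpi, map, mb_rows, mb_cols, delta_q, delta_lf, threshold);
 */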
int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
                   unsigned int cols, int delta_q[4], int delta_lf[4],
                   unsigned int threshold[4]) {
  signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
  int internal_delta_q[MAX_MB_SEGMENTS];
  const int range = 63;
  int i;

  // This method is currently incompatible with the cyclic refresh method
  if (cpi->cyclic_refresh_mode_enabled) return -1;

  // Check that the number of rows and columns matches
  if (cpi->common.mb_rows != (int)rows || cpi->common.mb_cols != (int)cols) {
    return -1;
  }

  // Range check the delta Q values and convert the external Q range values
  // to internal ones.
  if ((abs(delta_q[0]) > range) || (abs(delta_q[1]) > range) ||
      (abs(delta_q[2]) > range) || (abs(delta_q[3]) > range)) {
    return -1;
  }

  // Range check the delta lf values
  if ((abs(delta_lf[0]) > range) || (abs(delta_lf[1]) > range) ||
      (abs(delta_lf[2]) > range) || (abs(delta_lf[3]) > range)) {
    return -1;
  }

  if (!map) {
    disable_segmentation(cpi);
    return 0;
  }

  // Translate the external delta q values to internal values.
  for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
    internal_delta_q[i] =
        (delta_q[i] >= 0) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
  }

  /* Set the segmentation Map */
  set_segmentation_map(cpi, map);

  /* Activate segmentation. */
  enable_segmentation(cpi);

  /* Set up the quant segment data */
  feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
  feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
  feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
  feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];

  /* Set up the loop filter segment data */
  feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
  feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
  feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
  feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];

  cpi->segment_encode_breakout[0] = threshold[0];
  cpi->segment_encode_breakout[1] = threshold[1];
  cpi->segment_encode_breakout[2] = threshold[2];
  cpi->segment_encode_breakout[3] = threshold[3];

  /* Initialise the feature data structure */
  set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);

  return 0;
}

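/* Set (or clear) the per-macroblock active map. The map, when supplied, must
 * match the encoder's macroblock dimensions exactly; elsewhere in the encoder
 * a nonzero entry is treated as an active macroblock. Passing NULL disables
 * the map.
 */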
int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
                       unsigned int cols) {
  if ((int)rows == cpi->common.mb_rows && (int)cols == cpi->common.mb_cols) {
    if (map) {
      memcpy(cpi->active_map, map, rows * cols);
      cpi->active_map_enabled = 1;
    } else {
      cpi->active_map_enabled = 0;
    }

    return 0;
  } else {
    return -1;
  }
}

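/* Select the internal (pre-encode) down-scaling applied in each dimension.
 * Any mode up to and including ONETWO (one-half scaling, the most aggressive
 * VPX_SCALING option) is accepted; anything beyond it is rejected.
 */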
int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode,
                          VPX_SCALING vert_mode) {
  if (horiz_mode <= ONETWO) {
    cpi->common.horiz_scale = horiz_mode;
  } else {
    return -1;
  }

  if (vert_mode <= ONETWO) {
    cpi->common.vert_scale = vert_mode;
  } else {
    return -1;
  }

  return 0;
}

int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
  int i, j;
  int Total = 0;

  unsigned char *src = source->y_buffer;
  unsigned char *dst = dest->y_buffer;

  /* Loop through the Y plane, raw and reconstructed, summing the squared
   * differences over 16x16 blocks.
   */
  for (i = 0; i < source->y_height; i += 16) {
    for (j = 0; j < source->y_width; j += 16) {
      unsigned int sse;
      Total += vpx_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
                            &sse);
    }

    src += 16 * source->y_stride;
    dst += 16 * dest->y_stride;
  }

  return Total;
}

int vp8_get_quantizer(VP8_COMP *cpi) { return cpi->common.base_qindex; }