1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "vpx_config.h"
12 #include "./vpx_scale_rtcd.h"
13 #include "./vpx_dsp_rtcd.h"
14 #include "./vp8_rtcd.h"
15 #include "bitstream.h"
16 #include "vp8/common/onyxc_int.h"
17 #include "vp8/common/blockd.h"
18 #include "onyx_int.h"
19 #include "vp8/common/systemdependent.h"
20 #include "vp8/common/vp8_skin_detection.h"
21 #include "vp8/encoder/quantize.h"
22 #include "vp8/common/alloccommon.h"
23 #include "mcomp.h"
24 #include "firstpass.h"
25 #include "vpx_dsp/psnr.h"
26 #include "vpx_scale/vpx_scale.h"
27 #include "vp8/common/extend.h"
28 #include "ratectrl.h"
29 #include "vp8/common/quant_common.h"
30 #include "segmentation.h"
31 #if CONFIG_POSTPROC
32 #include "vp8/common/postproc.h"
33 #endif
34 #include "vpx_mem/vpx_mem.h"
35 #include "vp8/common/reconintra.h"
36 #include "vp8/common/swapyv12buffer.h"
37 #include "vp8/common/threading.h"
38 #include "vpx_ports/system_state.h"
39 #include "vpx_ports/vpx_once.h"
40 #include "vpx_ports/vpx_timer.h"
41 #include "vpx_util/vpx_write_yuv_frame.h"
42 #if VPX_ARCH_ARM
43 #include "vpx_ports/arm.h"
44 #endif
45 #if CONFIG_MULTI_RES_ENCODING
46 #include "mr_dissim.h"
47 #endif
48 #include "encodeframe.h"
49 #if CONFIG_MULTITHREAD
50 #include "ethreading.h"
51 #endif
52 #include "picklpf.h"
53 #if !CONFIG_REALTIME_ONLY
54 #include "temporal_filter.h"
55 #endif
56
57 #include <assert.h>
58 #include <errno.h>
59 #include <math.h>
60 #include <stdio.h>
61 #include <limits.h>
62
63 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
64 extern int vp8_update_coef_context(VP8_COMP *cpi);
65 #endif
66
67 extern unsigned int vp8_get_processor_freq(void);
68
69 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
70
71 static void set_default_lf_deltas(VP8_COMP *cpi);
72
73 extern const int vp8_gf_interval_table[101];
74
75 #if CONFIG_INTERNAL_STATS
76 #include <math.h>
77 #include "vpx_dsp/ssim.h"
78 #endif
79
80 #ifdef OUTPUT_YUV_SRC
81 FILE *yuv_file;
82 #endif
83 #ifdef OUTPUT_YUV_DENOISED
84 FILE *yuv_denoised_file;
85 #endif
86 #ifdef OUTPUT_YUV_SKINMAP
87 static FILE *yuv_skinmap_file = NULL;
88 #endif
89
90 #if 0
91 FILE *framepsnr;
92 FILE *kf_list;
93 FILE *keyfile;
94 #endif
95
96 #if 0
97 extern int skip_true_count;
98 extern int skip_false_count;
99 #endif
100
101 #ifdef SPEEDSTATS
102 unsigned int frames_at_speed[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0 };
104 unsigned int tot_pm = 0;
105 unsigned int cnt_pm = 0;
106 unsigned int tot_ef = 0;
107 unsigned int cnt_ef = 0;
108 #endif
109
110 #ifdef MODE_STATS
111 extern unsigned __int64 Sectionbits[50];
112 extern int y_modes[5];
113 extern int uv_modes[4];
114 extern int b_modes[10];
115
116 extern int inter_y_modes[10];
117 extern int inter_uv_modes[4];
118 extern unsigned int inter_b_modes[15];
119 #endif
120
121 extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
122
123 extern const int qrounding_factors[129];
124 extern const int qzbin_factors[129];
125 extern void vp8cx_init_quantizer(VP8_COMP *cpi);
126 extern const int vp8cx_base_skip_false_prob[128];
127
128 /* Tables relating active max Q to active min Q */
129 static const unsigned char kf_low_motion_minq[QINDEX_RANGE] = {
130 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
131 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
132 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
133 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
134 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 10, 10, 11,
135 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
136 17, 17, 18, 18, 18, 18, 19, 20, 20, 21, 21, 22, 23, 23
137 };
138 static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = {
139 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
140 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
141 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5,
142 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10,
143 10, 10, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16,
144 16, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
145 22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30
146 };
147 static const unsigned char gf_low_motion_minq[QINDEX_RANGE] = {
148 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3,
149 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8,
150 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
151 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
152 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34,
153 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 44,
154 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58
155 };
156 static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] = {
157 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5,
158 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11,
159 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18,
160 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
161 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37,
162 37, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 45, 46, 47, 48, 49, 50,
163 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
164 };
165 static const unsigned char gf_high_motion_minq[QINDEX_RANGE] = {
166 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5,
167 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11,
168 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21,
169 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30,
170 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40,
171 40, 41, 41, 42, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
172 57, 58, 59, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80
173 };
174 static const unsigned char inter_minq[QINDEX_RANGE] = {
175 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
176 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24,
177 24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38,
178 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53,
179 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69,
180 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86,
181 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
182 };
183
184 #ifdef PACKET_TESTING
185 extern FILE *vpxlogc;
186 #endif
187
188 void vp8_save_layer_context(VP8_COMP *cpi) {
189 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
190
191 /* Save layer dependent coding state */
192 lc->target_bandwidth = cpi->target_bandwidth;
193 lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
194 lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
195 lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
196 lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
197 lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
198 lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
199 lc->buffer_level = cpi->buffer_level;
200 lc->bits_off_target = cpi->bits_off_target;
201 lc->total_actual_bits = cpi->total_actual_bits;
202 lc->worst_quality = cpi->worst_quality;
203 lc->active_worst_quality = cpi->active_worst_quality;
204 lc->best_quality = cpi->best_quality;
205 lc->active_best_quality = cpi->active_best_quality;
206 lc->ni_av_qi = cpi->ni_av_qi;
207 lc->ni_tot_qi = cpi->ni_tot_qi;
208 lc->ni_frames = cpi->ni_frames;
209 lc->avg_frame_qindex = cpi->avg_frame_qindex;
210 lc->rate_correction_factor = cpi->rate_correction_factor;
211 lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
212 lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
213 lc->zbin_over_quant = cpi->mb.zbin_over_quant;
214 lc->inter_frame_target = cpi->inter_frame_target;
215 lc->total_byte_count = cpi->total_byte_count;
216 lc->filter_level = cpi->common.filter_level;
217 lc->frames_since_last_drop_overshoot = cpi->frames_since_last_drop_overshoot;
218 lc->force_maxqp = cpi->force_maxqp;
219 lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
220 lc->last_q[0] = cpi->last_q[0];
221 lc->last_q[1] = cpi->last_q[1];
222
223 memcpy(lc->count_mb_ref_frame_usage, cpi->mb.count_mb_ref_frame_usage,
224 sizeof(cpi->mb.count_mb_ref_frame_usage));
225 }
226
227 void vp8_restore_layer_context(VP8_COMP *cpi, const int layer) {
228 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
229
230 /* Restore layer dependent coding state */
231 cpi->current_layer = layer;
232 cpi->target_bandwidth = lc->target_bandwidth;
233 cpi->oxcf.target_bandwidth = lc->target_bandwidth;
234 cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
235 cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
236 cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
237 cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
238 cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
239 cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
240 cpi->buffer_level = lc->buffer_level;
241 cpi->bits_off_target = lc->bits_off_target;
242 cpi->total_actual_bits = lc->total_actual_bits;
243 cpi->active_worst_quality = lc->active_worst_quality;
244 cpi->active_best_quality = lc->active_best_quality;
245 cpi->ni_av_qi = lc->ni_av_qi;
246 cpi->ni_tot_qi = lc->ni_tot_qi;
247 cpi->ni_frames = lc->ni_frames;
248 cpi->avg_frame_qindex = lc->avg_frame_qindex;
249 cpi->rate_correction_factor = lc->rate_correction_factor;
250 cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
251 cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
252 cpi->mb.zbin_over_quant = lc->zbin_over_quant;
253 cpi->inter_frame_target = lc->inter_frame_target;
254 cpi->total_byte_count = lc->total_byte_count;
255 cpi->common.filter_level = lc->filter_level;
256 cpi->frames_since_last_drop_overshoot = lc->frames_since_last_drop_overshoot;
257 cpi->force_maxqp = lc->force_maxqp;
258 cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
259 cpi->last_q[0] = lc->last_q[0];
260 cpi->last_q[1] = lc->last_q[1];
261
262 memcpy(cpi->mb.count_mb_ref_frame_usage, lc->count_mb_ref_frame_usage,
263 sizeof(cpi->mb.count_mb_ref_frame_usage));
264 }
265
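/* rescale() converts a quantity expressed against one rate into the
 * equivalent at another, using 64-bit intermediates and saturating to
 * INT_MAX. It is used below to turn buffer levels given in milliseconds
 * into bits at the target bandwidth; e.g. rescale(500, 800000, 1000)
 * = 500 * 800000 / 1000 = 400000 bits (values illustrative).
 */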
266 static int rescale(int val, int num, int denom) {
267 int64_t llnum = num;
268 int64_t llden = denom;
269 int64_t llval = val;
270
271 int64_t result = (llval * llnum / llden);
272 if (result <= INT_MAX)
273 return (int)result;
274 else
275 return INT_MAX;
276 }
277
278 void vp8_init_temporal_layer_context(VP8_COMP *cpi, const VP8_CONFIG *oxcf,
279 const int layer,
280 double prev_layer_framerate) {
281 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
282
283 lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
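  /* e.g. a 30 fps output with rate_decimator = {4, 2, 1} gives per-layer
   * framerates of 7.5, 15 and 30 fps (values illustrative). */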
284 if (cpi->oxcf.target_bitrate[layer] > INT_MAX / 1000)
285 lc->target_bandwidth = INT_MAX;
286 else
287 lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
288
289 lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
290 lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
291 lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
292
293 lc->starting_buffer_level =
294 rescale((int)(oxcf->starting_buffer_level), lc->target_bandwidth, 1000);
295
296 if (oxcf->optimal_buffer_level == 0) {
297 lc->optimal_buffer_level = lc->target_bandwidth / 8;
298 } else {
299 lc->optimal_buffer_level =
300 rescale((int)(oxcf->optimal_buffer_level), lc->target_bandwidth, 1000);
301 }
302
303 if (oxcf->maximum_buffer_size == 0) {
304 lc->maximum_buffer_size = lc->target_bandwidth / 8;
305 } else {
306 lc->maximum_buffer_size =
307 rescale((int)(oxcf->maximum_buffer_size), lc->target_bandwidth, 1000);
308 }
309
310 /* Work out the average size of a frame within this layer */
311 if (layer > 0) {
312 lc->avg_frame_size_for_layer =
313 (int)round((cpi->oxcf.target_bitrate[layer] -
314 cpi->oxcf.target_bitrate[layer - 1]) *
315 1000 / (lc->framerate - prev_layer_framerate));
316 }
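  /* e.g. layer bitrates of 200 and 400 kbit/s at 15 and 30 fps give
   * (400 - 200) * 1000 / (30 - 15) = 13333 bits per frame for the upper
   * layer (values illustrative). */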
317
318 lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
319 lc->active_best_quality = cpi->oxcf.best_allowed_q;
320 lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
321
322 lc->buffer_level = lc->starting_buffer_level;
323 lc->bits_off_target = lc->starting_buffer_level;
324
325 lc->total_actual_bits = 0;
326 lc->ni_av_qi = 0;
327 lc->ni_tot_qi = 0;
328 lc->ni_frames = 0;
329 lc->rate_correction_factor = 1.0;
330 lc->key_frame_rate_correction_factor = 1.0;
331 lc->gf_rate_correction_factor = 1.0;
332 lc->inter_frame_target = 0;
333 }
334
335 // Upon a run-time change in temporal layers, reset the layer context parameters
336 // for any "new" layers. For "existing" layers, let them inherit the parameters
337 // from the previous layer state (at the same layer #). In future we may want
338 // to better map the previous layer state(s) to the "new" ones.
339 void vp8_reset_temporal_layer_change(VP8_COMP *cpi, const VP8_CONFIG *oxcf,
340 const int prev_num_layers) {
341 int i;
342 double prev_layer_framerate = 0;
343 const int curr_num_layers = cpi->oxcf.number_of_layers;
344 // If the previous state was 1 layer, get current layer context from cpi.
345 // We need this to set the layer context for the new layers below.
346 if (prev_num_layers == 1) {
347 cpi->current_layer = 0;
348 vp8_save_layer_context(cpi);
349 }
350 for (i = 0; i < curr_num_layers; ++i) {
351 LAYER_CONTEXT *lc = &cpi->layer_context[i];
352 if (i >= prev_num_layers) {
353 vp8_init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
354 }
355 // The initial buffer levels are set based on their starting levels.
356 // We could set the buffer levels based on the previous state (normalized
357 // properly by the layer bandwidths) but we would need to keep track of
358 // the previous set of layer bandwidths (i.e., target_bitrate[i])
359 // before the layer change. For now, reset to the starting levels.
360 lc->buffer_level =
361 cpi->oxcf.starting_buffer_level_in_ms * cpi->oxcf.target_bitrate[i];
362 lc->bits_off_target = lc->buffer_level;
363     // TODO(marpan): Should we set the rate_correction_factor and
364 // active_worst/best_quality to values derived from the previous layer
365 // state (to smooth-out quality dips/rate fluctuation at transition)?
366
367 // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
368 // is not set for 1 layer, and the vp8_restore_layer_context/save_context()
369 // are not called in the encoding loop, so we need to call it here to
370 // pass the layer context state to |cpi|.
371 if (curr_num_layers == 1) {
372 lc->target_bandwidth = cpi->oxcf.target_bandwidth;
373 lc->buffer_level =
374 cpi->oxcf.starting_buffer_level_in_ms * lc->target_bandwidth / 1000;
375 lc->bits_off_target = lc->buffer_level;
376 vp8_restore_layer_context(cpi, 0);
377 }
378 prev_layer_framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[i];
379 }
380 }
381
382 static void setup_features(VP8_COMP *cpi) {
383 // If segmentation enabled set the update flags
384 if (cpi->mb.e_mbd.segmentation_enabled) {
385 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
386 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
387 } else {
388 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
389 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
390 }
391
392 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
393 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
394 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
395 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
396 memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0,
397 sizeof(cpi->mb.e_mbd.ref_lf_deltas));
398 memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0,
399 sizeof(cpi->mb.e_mbd.mode_lf_deltas));
400
401 set_default_lf_deltas(cpi);
402 }
403
404 static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
405
406 static void initialize_enc(void) {
407 vpx_dsp_rtcd();
408 vp8_init_intra_predictors();
409 }
410
411 void vp8_initialize_enc(void) { once(initialize_enc); }
412
413 static void dealloc_compressor_data(VP8_COMP *cpi) {
414 vpx_free(cpi->tplist);
415 cpi->tplist = NULL;
416
417 /* Delete last frame MV storage buffers */
418 vpx_free(cpi->lfmv);
419 cpi->lfmv = 0;
420
421 vpx_free(cpi->lf_ref_frame_sign_bias);
422 cpi->lf_ref_frame_sign_bias = 0;
423
424 vpx_free(cpi->lf_ref_frame);
425 cpi->lf_ref_frame = 0;
426
427   /* Delete segmentation map */
428 vpx_free(cpi->segmentation_map);
429 cpi->segmentation_map = 0;
430
431 vpx_free(cpi->active_map);
432 cpi->active_map = 0;
433
434 vp8_de_alloc_frame_buffers(&cpi->common);
435
436 vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
437 vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
438 dealloc_raw_frame_buffers(cpi);
439
440 vpx_free(cpi->tok);
441 cpi->tok = 0;
442
443 /* Structure used to monitor GF usage */
444 vpx_free(cpi->gf_active_flags);
445 cpi->gf_active_flags = 0;
446
447 /* Activity mask based per mb zbin adjustments */
448 vpx_free(cpi->mb_activity_map);
449 cpi->mb_activity_map = 0;
450
451 vpx_free(cpi->mb.pip);
452 cpi->mb.pip = 0;
453 }
454
455 static void enable_segmentation(VP8_COMP *cpi) {
456 /* Set the appropriate feature bit */
457 cpi->mb.e_mbd.segmentation_enabled = 1;
458 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
459 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
460 }
461 static void disable_segmentation(VP8_COMP *cpi) {
462 /* Clear the appropriate feature bit */
463 cpi->mb.e_mbd.segmentation_enabled = 0;
464 }
465
466 /* Valid values for a segment are 0 to 3
467  * Segmentation map is arranged as [Rows][Columns]
468 */
469 static void set_segmentation_map(VP8_COMP *cpi,
470 unsigned char *segmentation_map) {
471 /* Copy in the new segmentation map */
472 memcpy(cpi->segmentation_map, segmentation_map,
473 (cpi->common.mb_rows * cpi->common.mb_cols));
474
475 /* Signal that the map should be updated. */
476 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
477 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
478 }
479
480 /* The values given for each segment can be either deltas (from the default
481 * value chosen for the frame) or absolute values.
482 *
483 * Valid range for abs values is:
484 * (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
485  * Valid range for delta values is:
486 * (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
487 *
488 * abs_delta = SEGMENT_DELTADATA (deltas)
489 * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
490 *
491 */
492 static void set_segment_data(VP8_COMP *cpi, signed char *feature_data,
493 unsigned char abs_delta) {
494 cpi->mb.e_mbd.mb_segment_abs_delta = abs_delta;
495 memcpy(cpi->segment_feature_data, feature_data,
496 sizeof(cpi->segment_feature_data));
497 }
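/* Example usage (mirroring cyclic_background_refresh() below; values are
 * illustrative): feature_data is laid out as [MB_LVL_MAX][MAX_MB_SEGMENTS],
 * so a quantizer delta of -10 and a loop filter delta of -40 for segment 1
 * would be applied with
 *   feature_data[MB_LVL_ALT_Q][1] = -10;
 *   feature_data[MB_LVL_ALT_LF][1] = -40;
 *   set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
 */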
498
499 /* A simple function to cyclically refresh the background at a lower Q */
500 static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) {
501 unsigned char *seg_map = cpi->segmentation_map;
502 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
503 int i;
504 int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
505 int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
506
507 cpi->cyclic_refresh_q = Q / 2;
508
509 if (cpi->oxcf.screen_content_mode) {
510 // Modify quality ramp-up based on Q. Above some Q level, increase the
511     // number of blocks to be refreshed, and reduce it below the threshold.
512     // Turn it off under certain conditions (i.e., away from a key frame,
513     // and if we are at good quality (low Q) and most of the blocks were
514     // skip-encoded
515     // in the previous frame).
516 int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100;
517 if (Q >= qp_thresh) {
518 cpi->cyclic_refresh_mode_max_mbs_perframe =
519 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
520 } else if (cpi->frames_since_key > 250 && Q < 20 &&
521 cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
522 cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
523 } else {
524 cpi->cyclic_refresh_mode_max_mbs_perframe =
525 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
526 }
527 block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
528 }
529
530 // Set every macroblock to be eligible for update.
531 // For key frame this will reset seg map to 0.
532 memset(cpi->segmentation_map, 0, mbs_in_frame);
533
534 if (cpi->common.frame_type != KEY_FRAME && block_count > 0) {
535 /* Cycle through the macro_block rows */
536 /* MB loop to set local segmentation map */
537 i = cpi->cyclic_refresh_mode_index;
538 assert(i < mbs_in_frame);
539 do {
540       /* If the MB is a candidate for clean up then mark it for
541        * possible boost/refresh (segment 1). The segment id may get
542        * reset to 0 later if the MB gets coded as anything other than
543        * last frame 0,0, as only (last frame 0,0) MBs are eligible for
544        * refresh: that is to say, MBs likely to be background blocks.
545 */
546 if (cpi->cyclic_refresh_map[i] == 0) {
547 seg_map[i] = 1;
548 block_count--;
549 } else if (cpi->cyclic_refresh_map[i] < 0) {
550 cpi->cyclic_refresh_map[i]++;
551 }
552
553 i++;
554 if (i == mbs_in_frame) i = 0;
555
556 } while (block_count && i != cpi->cyclic_refresh_mode_index);
557
558 cpi->cyclic_refresh_mode_index = i;
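    /* The refresh window advances by at least block_count MBs per inter
     * frame and wraps around the picture, so a full pass over the frame
     * takes at most roughly mbs_in_frame / block_count frames. */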
559
560 #if CONFIG_TEMPORAL_DENOISING
561 if (cpi->oxcf.noise_sensitivity > 0) {
562 if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
563 Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
564 (cpi->frames_since_key >
565 2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
566 // Under aggressive denoising, use segmentation to turn off loop
567 // filter below some qp thresh. The filter is reduced for all
568 // blocks that have been encoded as ZEROMV LAST x frames in a row,
569 // where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
570 // This is to avoid "dot" artifacts that can occur from repeated
571 // loop filtering on noisy input source.
572 cpi->cyclic_refresh_q = Q;
573 // lf_adjustment = -MAX_LOOP_FILTER;
574 lf_adjustment = -40;
575 for (i = 0; i < mbs_in_frame; ++i) {
576 seg_map[i] = (cpi->consec_zero_last[i] >
577 cpi->denoiser.denoise_pars.consec_zerolast)
578 ? 1
579 : 0;
580 }
581 }
582 }
583 #endif
584 }
585
586 /* Activate segmentation. */
587 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
588 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
589 enable_segmentation(cpi);
590
591 /* Set up the quant segment data */
592 feature_data[MB_LVL_ALT_Q][0] = 0;
593 feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
594 feature_data[MB_LVL_ALT_Q][2] = 0;
595 feature_data[MB_LVL_ALT_Q][3] = 0;
596
597 /* Set up the loop segment data */
598 feature_data[MB_LVL_ALT_LF][0] = 0;
599 feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
600 feature_data[MB_LVL_ALT_LF][2] = 0;
601 feature_data[MB_LVL_ALT_LF][3] = 0;
602
603 /* Initialise the feature data structure */
604 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
605 }
606
607 static void compute_skin_map(VP8_COMP *cpi) {
608 int mb_row, mb_col, num_bl;
609 VP8_COMMON *cm = &cpi->common;
610 const uint8_t *src_y = cpi->Source->y_buffer;
611 const uint8_t *src_u = cpi->Source->u_buffer;
612 const uint8_t *src_v = cpi->Source->v_buffer;
613 const int src_ystride = cpi->Source->y_stride;
614 const int src_uvstride = cpi->Source->uv_stride;
615
616 const SKIN_DETECTION_BLOCK_SIZE bsize =
617 (cm->Width * cm->Height <= 352 * 288) ? SKIN_8X8 : SKIN_16X16;
618
619 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
620 num_bl = 0;
621 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
622 const int bl_index = mb_row * cm->mb_cols + mb_col;
623 cpi->skin_map[bl_index] =
624 vp8_compute_skin_block(src_y, src_u, src_v, src_ystride, src_uvstride,
625 bsize, cpi->consec_zero_last[bl_index], 0);
626 num_bl++;
627 src_y += 16;
628 src_u += 8;
629 src_v += 8;
630 }
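    /* Step the source pointers down to the next MB row (16 luma rows, 8
     * chroma rows), backing out the per-column advances made above. */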
631 src_y += (src_ystride << 4) - (num_bl << 4);
632 src_u += (src_uvstride << 3) - (num_bl << 3);
633 src_v += (src_uvstride << 3) - (num_bl << 3);
634 }
635
636   // Remove isolated skin blocks (none of their neighbors are skin) and
637   // isolated non-skin blocks (all of their neighbors are skin). Skip the boundary.
638 for (mb_row = 1; mb_row < cm->mb_rows - 1; mb_row++) {
639 for (mb_col = 1; mb_col < cm->mb_cols - 1; mb_col++) {
640 const int bl_index = mb_row * cm->mb_cols + mb_col;
641 int num_neighbor = 0;
642 int mi, mj;
643 int non_skin_threshold = 8;
644
645 for (mi = -1; mi <= 1; mi += 1) {
646 for (mj = -1; mj <= 1; mj += 1) {
647 int bl_neighbor_index = (mb_row + mi) * cm->mb_cols + mb_col + mj;
648 if (cpi->skin_map[bl_neighbor_index]) num_neighbor++;
649 }
650 }
651
652 if (cpi->skin_map[bl_index] && num_neighbor < 2)
653 cpi->skin_map[bl_index] = 0;
654 if (!cpi->skin_map[bl_index] && num_neighbor == non_skin_threshold)
655 cpi->skin_map[bl_index] = 1;
656 }
657 }
658 }
659
660 static void set_default_lf_deltas(VP8_COMP *cpi) {
661 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
662 cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
663
664 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
665 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
666
667 /* Test of ref frame deltas */
668 cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
669 cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
670 cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
671 cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
672
673 cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
674
675 if (cpi->oxcf.Mode == MODE_REALTIME) {
676 cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
677 } else {
678 cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
679 }
680
681 cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
682 cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
683 }
684
685 /* Convenience macros for mapping speed and mode into a continuous
686 * range
687 */
688 #define GOOD(x) ((x) + 1)
689 #define RT(x) ((x) + 7)
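/* Best-quality mode maps to speed 0, good-quality speeds 0-5 map to
 * GOOD(0)-GOOD(5) (1-6), and real-time speeds map to RT(0) (7) upward,
 * giving a single monotonic speed axis for the tables below. */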
690
691 static int speed_map(int speed, const int *map) {
692 int res;
693
694 do {
695 res = *map++;
696 } while (speed >= *map++);
697 return res;
698 }
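/* The *_map tables below are interleaved (value, speed-limit) pairs:
 * speed_map() returns the value paired with the first limit exceeding the
 * query speed. For example, thresh_mult_map_znn yields 0 below GOOD(2),
 * 1500 at GOOD(2), 2000 for GOOD(3) and above within good-quality mode,
 * 1000 at RT(0)-RT(1), and 2000 again from RT(2) onward. */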
699
700 static const int thresh_mult_map_znn[] = {
701 /* map common to zero, nearest, and near */
702 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
703 };
704
705 static const int thresh_mult_map_vhpred[] = { 1000, GOOD(2), 1500, GOOD(3),
706 2000, RT(0), 1000, RT(1),
707 2000, RT(7), INT_MAX, INT_MAX };
708
709 static const int thresh_mult_map_bpred[] = { 2000, GOOD(0), 2500, GOOD(2),
710 5000, GOOD(3), 7500, RT(0),
711 2500, RT(1), 5000, RT(6),
712 INT_MAX, INT_MAX };
713
714 static const int thresh_mult_map_tm[] = { 1000, GOOD(2), 1500, GOOD(3),
715 2000, RT(0), 0, RT(1),
716 1000, RT(2), 2000, RT(7),
717 INT_MAX, INT_MAX };
718
719 static const int thresh_mult_map_new1[] = { 1000, GOOD(2), 2000,
720 RT(0), 2000, INT_MAX };
721
722 static const int thresh_mult_map_new2[] = { 1000, GOOD(2), 2000, GOOD(3),
723 2500, GOOD(5), 4000, RT(0),
724 2000, RT(2), 2500, RT(5),
725 4000, INT_MAX };
726
727 static const int thresh_mult_map_split1[] = {
728 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
729 RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
730 };
731
732 static const int thresh_mult_map_split2[] = {
733 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
734 RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
735 };
736
737 static const int mode_check_freq_map_zn2[] = {
738 /* {zero,nearest}{2,3} */
739 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
740 };
741
742 static const int mode_check_freq_map_vhbpred[] = { 0, GOOD(5), 2, RT(0),
743 0, RT(3), 2, RT(5),
744 4, INT_MAX };
745
746 static const int mode_check_freq_map_near2[] = {
747 0, GOOD(5), 2, RT(0), 0, RT(3), 2,
748 RT(10), 1 << 2, RT(11), 1 << 3, RT(12), 1 << 4, INT_MAX
749 };
750
751 static const int mode_check_freq_map_new1[] = {
752 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
753 };
754
755 static const int mode_check_freq_map_new2[] = { 0, GOOD(5), 4, RT(0),
756 0, RT(3), 4, RT(10),
757 1 << 3, RT(11), 1 << 4, RT(12),
758 1 << 5, INT_MAX };
759
760 static const int mode_check_freq_map_split1[] = { 0, GOOD(2), 2, GOOD(3),
761 7, RT(1), 2, RT(2),
762 7, INT_MAX };
763
764 static const int mode_check_freq_map_split2[] = { 0, GOOD(1), 2, GOOD(2),
765 4, GOOD(3), 15, RT(1),
766 4, RT(2), 15, INT_MAX };
767
768 void vp8_set_speed_features(VP8_COMP *cpi) {
769 SPEED_FEATURES *sf = &cpi->sf;
770 int Mode = cpi->compressor_speed;
771 int Speed = cpi->Speed;
772 int Speed2;
773 int i;
774 VP8_COMMON *cm = &cpi->common;
775 int last_improved_quant = sf->improved_quant;
776 int ref_frames;
777
778 /* Initialise default mode frequency sampling variables */
779 for (i = 0; i < MAX_MODES; ++i) {
780 cpi->mode_check_freq[i] = 0;
781 }
782
783 cpi->mb.mbs_tested_so_far = 0;
784 cpi->mb.mbs_zero_last_dot_suppress = 0;
785
786 /* best quality defaults */
787 sf->RD = 1;
788 sf->search_method = NSTEP;
789 sf->improved_quant = 1;
790 sf->improved_dct = 1;
791 sf->auto_filter = 1;
792 sf->recode_loop = 1;
793 sf->quarter_pixel_search = 1;
794 sf->half_pixel_search = 1;
795 sf->iterative_sub_pixel = 1;
796 sf->optimize_coefficients = 1;
797 sf->use_fastquant_for_pick = 0;
798 sf->no_skip_block4x4_search = 1;
799
800 sf->first_step = 0;
801 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
802 sf->improved_mv_pred = 1;
803
804 /* default thresholds to 0 */
805 for (i = 0; i < MAX_MODES; ++i) sf->thresh_mult[i] = 0;
806
807 /* Count enabled references */
808 ref_frames = 1;
809 if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frames++;
810 if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frames++;
811 if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frames++;
812
813 /* Convert speed to continuous range, with clamping */
814 if (Mode == 0) {
815 Speed = 0;
816 } else if (Mode == 2) {
817 Speed = RT(Speed);
818 } else {
819 if (Speed > 5) Speed = 5;
820 Speed = GOOD(Speed);
821 }
822
823 sf->thresh_mult[THR_ZERO1] = sf->thresh_mult[THR_NEAREST1] =
824 sf->thresh_mult[THR_NEAR1] = sf->thresh_mult[THR_DC] = 0; /* always */
825
826 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO3] =
827 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST3] =
828 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR3] =
829 speed_map(Speed, thresh_mult_map_znn);
830
831 sf->thresh_mult[THR_V_PRED] = sf->thresh_mult[THR_H_PRED] =
832 speed_map(Speed, thresh_mult_map_vhpred);
833 sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
834 sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
835 sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
836 sf->thresh_mult[THR_NEW2] = sf->thresh_mult[THR_NEW3] =
837 speed_map(Speed, thresh_mult_map_new2);
838 sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
839 sf->thresh_mult[THR_SPLIT2] = sf->thresh_mult[THR_SPLIT3] =
840 speed_map(Speed, thresh_mult_map_split2);
841
842 // Special case for temporal layers.
843 // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
844 // used as second reference. We don't modify thresholds for ALTREF case
845 // since ALTREF is usually used as long-term reference in temporal layers.
846 if ((cpi->Speed <= 6) && (cpi->oxcf.number_of_layers > 1) &&
847 (cpi->ref_frame_flags & VP8_LAST_FRAME) &&
848 (cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
849 if (cpi->closest_reference_frame == GOLDEN_FRAME) {
850 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
851 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
852 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
853 } else {
854 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
855 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
856 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;
857 }
858 }
859
860 cpi->mode_check_freq[THR_ZERO1] = cpi->mode_check_freq[THR_NEAREST1] =
861 cpi->mode_check_freq[THR_NEAR1] = cpi->mode_check_freq[THR_TM] =
862 cpi->mode_check_freq[THR_DC] = 0; /* always */
863
864 cpi->mode_check_freq[THR_ZERO2] = cpi->mode_check_freq[THR_ZERO3] =
865 cpi->mode_check_freq[THR_NEAREST2] = cpi->mode_check_freq[THR_NEAREST3] =
866 speed_map(Speed, mode_check_freq_map_zn2);
867
868 cpi->mode_check_freq[THR_NEAR2] = cpi->mode_check_freq[THR_NEAR3] =
869 speed_map(Speed, mode_check_freq_map_near2);
870
871 cpi->mode_check_freq[THR_V_PRED] = cpi->mode_check_freq[THR_H_PRED] =
872 cpi->mode_check_freq[THR_B_PRED] =
873 speed_map(Speed, mode_check_freq_map_vhbpred);
874
875 // For real-time mode at speed 10 keep the mode_check_freq threshold
876 // for NEW1 similar to that of speed 9.
877 Speed2 = Speed;
878 if (cpi->Speed == 10 && Mode == 2) Speed2 = RT(9);
879 cpi->mode_check_freq[THR_NEW1] = speed_map(Speed2, mode_check_freq_map_new1);
880
881 cpi->mode_check_freq[THR_NEW2] = cpi->mode_check_freq[THR_NEW3] =
882 speed_map(Speed, mode_check_freq_map_new2);
883
884 cpi->mode_check_freq[THR_SPLIT1] =
885 speed_map(Speed, mode_check_freq_map_split1);
886 cpi->mode_check_freq[THR_SPLIT2] = cpi->mode_check_freq[THR_SPLIT3] =
887 speed_map(Speed, mode_check_freq_map_split2);
888 Speed = cpi->Speed;
889 switch (Mode) {
890 #if !CONFIG_REALTIME_ONLY
891 case 0: /* best quality mode */
892 sf->first_step = 0;
893 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
894 break;
895 case 1:
896 case 3:
897 if (Speed > 0) {
898 /* Disable coefficient optimization above speed 0 */
899 sf->optimize_coefficients = 0;
900 sf->use_fastquant_for_pick = 1;
901 sf->no_skip_block4x4_search = 0;
902
903 sf->first_step = 1;
904 }
905
906 if (Speed > 2) {
907 sf->improved_quant = 0;
908 sf->improved_dct = 0;
909
910 /* Only do recode loop on key frames, golden frames and
911 * alt ref frames
912 */
913 sf->recode_loop = 2;
914 }
915
916 if (Speed > 3) {
917 sf->auto_filter = 1;
918 sf->recode_loop = 0; /* recode loop off */
919 sf->RD = 0; /* Turn rd off */
920 }
921
922 if (Speed > 4) {
923 sf->auto_filter = 0; /* Faster selection of loop filter */
924 }
925
926 break;
927 #endif
928 case 2:
929 sf->optimize_coefficients = 0;
930 sf->recode_loop = 0;
931 sf->auto_filter = 1;
932 sf->iterative_sub_pixel = 1;
933 sf->search_method = NSTEP;
934
935 if (Speed > 0) {
936 sf->improved_quant = 0;
937 sf->improved_dct = 0;
938
939 sf->use_fastquant_for_pick = 1;
940 sf->no_skip_block4x4_search = 0;
941 sf->first_step = 1;
942 }
943
944 if (Speed > 2) sf->auto_filter = 0; /* Faster selection of loop filter */
945
946 if (Speed > 3) {
947 sf->RD = 0;
948 sf->auto_filter = 1;
949 }
950
951 if (Speed > 4) {
952 sf->auto_filter = 0; /* Faster selection of loop filter */
953 sf->search_method = HEX;
954 sf->iterative_sub_pixel = 0;
955 }
956
957 if (Speed > 6) {
958 unsigned int sum = 0;
959 unsigned int total_mbs = cm->MBs;
960 int thresh;
961 unsigned int total_skip;
962
963 int min = 2000;
964
965 if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout;
966
967 min >>= 7;
968
969 for (i = 0; i < min; ++i) {
970 sum += cpi->mb.error_bins[i];
971 }
972
973 total_skip = sum;
974 sum = 0;
975
976       /* Resume from i == min so that thresh (i << 7) starts out around 2000 */
977 for (; i < 1024; ++i) {
978 sum += cpi->mb.error_bins[i];
979
980 if (10 * sum >=
981 (unsigned int)(cpi->Speed - 6) * (total_mbs - total_skip)) {
982 break;
983 }
984 }
985
986 i--;
987 thresh = (i << 7);
988
989 if (thresh < 2000) thresh = 2000;
990
991 if (ref_frames > 1) {
992 sf->thresh_mult[THR_NEW1] = thresh;
993 sf->thresh_mult[THR_NEAREST1] = thresh >> 1;
994 sf->thresh_mult[THR_NEAR1] = thresh >> 1;
995 }
996
997 if (ref_frames > 2) {
998 sf->thresh_mult[THR_NEW2] = thresh << 1;
999 sf->thresh_mult[THR_NEAREST2] = thresh;
1000 sf->thresh_mult[THR_NEAR2] = thresh;
1001 }
1002
1003 if (ref_frames > 3) {
1004 sf->thresh_mult[THR_NEW3] = thresh << 1;
1005 sf->thresh_mult[THR_NEAREST3] = thresh;
1006 sf->thresh_mult[THR_NEAR3] = thresh;
1007 }
1008
1009 sf->improved_mv_pred = 0;
1010 }
1011
1012 if (Speed > 8) sf->quarter_pixel_search = 0;
1013
1014 if (cm->version == 0) {
1015 cm->filter_type = NORMAL_LOOPFILTER;
1016
1017 if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER;
1018 } else {
1019 cm->filter_type = SIMPLE_LOOPFILTER;
1020 }
1021
1022 /* This has a big hit on quality. Last resort */
1023 if (Speed >= 15) sf->half_pixel_search = 0;
1024
1025 memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
1026
1027 } /* switch */
1028
1029 /* Slow quant, dct and trellis not worthwhile for first pass
1030 * so make sure they are always turned off.
1031 */
1032 if (cpi->pass == 1) {
1033 sf->improved_quant = 0;
1034 sf->optimize_coefficients = 0;
1035 sf->improved_dct = 0;
1036 }
1037
1038 if (cpi->sf.search_method == NSTEP) {
1039 vp8_init3smotion_compensation(&cpi->mb,
1040 cm->yv12_fb[cm->lst_fb_idx].y_stride);
1041 } else if (cpi->sf.search_method == DIAMOND) {
1042 vp8_init_dsmotion_compensation(&cpi->mb,
1043 cm->yv12_fb[cm->lst_fb_idx].y_stride);
1044 }
1045
1046 if (cpi->sf.improved_dct) {
1047 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1048 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1049 } else {
1050 /* No fast FDCT defined for any platform at this time. */
1051 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1052 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1053 }
1054
1055 cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
1056
1057 if (cpi->sf.improved_quant) {
1058 cpi->mb.quantize_b = vp8_regular_quantize_b;
1059 } else {
1060 cpi->mb.quantize_b = vp8_fast_quantize_b;
1061 }
1062 if (cpi->sf.improved_quant != last_improved_quant) vp8cx_init_quantizer(cpi);
1063
1064 if (cpi->sf.iterative_sub_pixel == 1) {
1065 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
1066 } else if (cpi->sf.quarter_pixel_search) {
1067 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
1068 } else if (cpi->sf.half_pixel_search) {
1069 cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
1070 } else {
1071 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1072 }
1073
1074 if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1) {
1075 cpi->mb.optimize = 1;
1076 } else {
1077 cpi->mb.optimize = 0;
1078 }
1079
1080 if (cpi->common.full_pixel) {
1081 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1082 }
1083
1084 #ifdef SPEEDSTATS
1085 frames_at_speed[cpi->Speed]++;
1086 #endif
1087 }
1088 #undef GOOD
1089 #undef RT
1090
1091 static void alloc_raw_frame_buffers(VP8_COMP *cpi) {
1092 #if VP8_TEMPORAL_ALT_REF
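  /* Round the altref buffer dimensions up to whole 16x16 macroblocks. */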
1093 int width = (cpi->oxcf.Width + 15) & ~15;
1094 int height = (cpi->oxcf.Height + 15) & ~15;
1095 #endif
1096
1097 cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
1098 cpi->oxcf.lag_in_frames);
1099 if (!cpi->lookahead) {
1100 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1101 "Failed to allocate lag buffers");
1102 }
1103
1104 #if VP8_TEMPORAL_ALT_REF
1105
1106 if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, width, height,
1107 VP8BORDERINPIXELS)) {
1108 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1109 "Failed to allocate altref buffer");
1110 }
1111
1112 #endif
1113 }
1114
1115 static void dealloc_raw_frame_buffers(VP8_COMP *cpi) {
1116 #if VP8_TEMPORAL_ALT_REF
1117 vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
1118 #endif
1119 vp8_lookahead_destroy(cpi->lookahead);
1120 }
1121
1122 static int vp8_alloc_partition_data(VP8_COMP *cpi) {
1123 vpx_free(cpi->mb.pip);
1124
1125 cpi->mb.pip =
1126 vpx_calloc((cpi->common.mb_cols + 1) * (cpi->common.mb_rows + 1),
1127 sizeof(PARTITION_INFO));
1128 if (!cpi->mb.pip) return 1;
1129
1130 cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
1131
1132 return 0;
1133 }
1134
1135 void vp8_alloc_compressor_data(VP8_COMP *cpi) {
1136 VP8_COMMON *cm = &cpi->common;
1137
1138 int width = cm->Width;
1139 int height = cm->Height;
1140
1141 if (vp8_alloc_frame_buffers(cm, width, height)) {
1142 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1143 "Failed to allocate frame buffers");
1144 }
1145
1146 if (vp8_alloc_partition_data(cpi)) {
1147 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1148 "Failed to allocate partition data");
1149 }
1150
1151 if ((width & 0xf) != 0) width += 16 - (width & 0xf);
1152
1153 if ((height & 0xf) != 0) height += 16 - (height & 0xf);
1154
1155 if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame, width, height,
1156 VP8BORDERINPIXELS)) {
1157 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1158 "Failed to allocate last frame buffer");
1159 }
1160
1161 if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height,
1162 VP8BORDERINPIXELS)) {
1163 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1164 "Failed to allocate scaled source buffer");
1165 }
1166
1167 vpx_free(cpi->tok);
1168
1169 {
1170 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
1171 unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
1172 #else
1173 unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
1174 #endif
1175 CHECK_MEM_ERROR(&cpi->common.error, cpi->tok,
1176 vpx_calloc(tokens, sizeof(*cpi->tok)));
1177 }
1178
1179 /* Data used for real time vc mode to see if gf needs refreshing */
1180 cpi->zeromv_count = 0;
1181
1182 /* Structures used to monitor GF usage */
1183 vpx_free(cpi->gf_active_flags);
1184 CHECK_MEM_ERROR(
1185 &cpi->common.error, cpi->gf_active_flags,
1186 vpx_calloc(sizeof(*cpi->gf_active_flags), cm->mb_rows * cm->mb_cols));
1187 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
1188
1189 vpx_free(cpi->mb_activity_map);
1190 CHECK_MEM_ERROR(
1191 &cpi->common.error, cpi->mb_activity_map,
1192 vpx_calloc(sizeof(*cpi->mb_activity_map), cm->mb_rows * cm->mb_cols));
1193
1194 /* allocate memory for storing last frame's MVs for MV prediction. */
1195 vpx_free(cpi->lfmv);
1196 CHECK_MEM_ERROR(
1197 &cpi->common.error, cpi->lfmv,
1198 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2), sizeof(*cpi->lfmv)));
1199 vpx_free(cpi->lf_ref_frame_sign_bias);
1200 CHECK_MEM_ERROR(&cpi->common.error, cpi->lf_ref_frame_sign_bias,
1201 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1202 sizeof(*cpi->lf_ref_frame_sign_bias)));
1203 vpx_free(cpi->lf_ref_frame);
1204 CHECK_MEM_ERROR(&cpi->common.error, cpi->lf_ref_frame,
1205 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1206 sizeof(*cpi->lf_ref_frame)));
1207
1208 /* Create the encoder segmentation map and set all entries to 0 */
1209 vpx_free(cpi->segmentation_map);
1210 CHECK_MEM_ERROR(
1211 &cpi->common.error, cpi->segmentation_map,
1212 vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->segmentation_map)));
1213 cpi->cyclic_refresh_mode_index = 0;
1214 vpx_free(cpi->active_map);
1215 CHECK_MEM_ERROR(
1216 &cpi->common.error, cpi->active_map,
1217 vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->active_map)));
1218 memset(cpi->active_map, 1, (cm->mb_rows * cm->mb_cols));
1219
1220 #if CONFIG_MULTITHREAD
1221 if (width < 640) {
1222 cpi->mt_sync_range = 1;
1223 } else if (width <= 1280) {
1224 cpi->mt_sync_range = 4;
1225 } else if (width <= 2560) {
1226 cpi->mt_sync_range = 8;
1227 } else {
1228 cpi->mt_sync_range = 16;
1229 }
1230 #endif
1231
1232 vpx_free(cpi->tplist);
1233 CHECK_MEM_ERROR(&cpi->common.error, cpi->tplist,
1234 vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));
1235
1236 #if CONFIG_TEMPORAL_DENOISING
1237 if (cpi->oxcf.noise_sensitivity > 0) {
1238 vp8_denoiser_free(&cpi->denoiser);
1239 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1240 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1241 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1242 "Failed to allocate denoiser");
1243 }
1244 }
1245 #endif
1246 }
1247
1248 /* Quant MOD */
1249 static const int q_trans[] = {
1250 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 17, 18, 19,
1251 20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41,
1252 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79,
1253 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127,
1254 };
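/* q_trans[] maps the user-facing quantizer range (0-63) to the internal
 * qindex range (0-127); vp8_reverse_trans() below inverts the mapping,
 * e.g. q_trans[10] == 12 and vp8_reverse_trans(12) == 10. */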
1255
1256 int vp8_reverse_trans(int x) {
1257 int i;
1258
1259 for (i = 0; i < 64; ++i) {
1260 if (q_trans[i] >= x) return i;
1261 }
1262
1263 return 63;
1264 }
1265 void vp8_new_framerate(VP8_COMP *cpi, double framerate) {
1266 if (framerate < .1) framerate = 30;
1267
1268 cpi->framerate = framerate;
1269 cpi->output_framerate = framerate;
1270 const double per_frame_bandwidth =
1271 round(cpi->oxcf.target_bandwidth / cpi->output_framerate);
1272 cpi->per_frame_bandwidth = (int)VPXMIN(per_frame_bandwidth, INT_MAX);
1273 cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
1274 const int64_t vbr_min_bits = (int64_t)cpi->av_per_frame_bandwidth *
1275 cpi->oxcf.two_pass_vbrmin_section / 100;
1276 cpi->min_frame_bandwidth = (int)VPXMIN(vbr_min_bits, INT_MAX);
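  /* e.g. a 600000 bit/s target at 30 fps gives a 20000 bit per-frame budget;
   * with two_pass_vbrmin_section = 40 the VBR minimum per frame would be
   * 8000 bits (values illustrative). */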
1277
1278 /* Set Maximum gf/arf interval */
1279 cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
1280
1281 if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12;
1282
1283 /* Extended interval for genuinely static scenes */
1284 cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
1285
1286   /* Special conditions when alt ref frame enabled in lagged compress mode */
1287 if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
1288 if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) {
1289 cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1290 }
1291
1292 if (cpi->twopass.static_scene_max_gf_interval >
1293 cpi->oxcf.lag_in_frames - 1) {
1294 cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1295 }
1296 }
1297
1298 if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval) {
1299 cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
1300 }
1301 }
1302
1303 static void init_config(VP8_COMP *cpi, const VP8_CONFIG *oxcf) {
1304 VP8_COMMON *cm = &cpi->common;
1305
1306 cpi->oxcf = *oxcf;
1307
1308 cpi->auto_gold = 1;
1309 cpi->auto_adjust_gold_quantizer = 1;
1310
1311 cm->version = oxcf->Version;
1312 vp8_setup_version(cm);
1313
1314 /* Frame rate is not available on the first frame, as it's derived from
1315 * the observed timestamps. The actual value used here doesn't matter
1316 * too much, as it will adapt quickly.
1317 */
1318 if (oxcf->timebase.num > 0) {
1319 cpi->framerate =
1320 (double)(oxcf->timebase.den) / (double)(oxcf->timebase.num);
1321 } else {
1322 cpi->framerate = 30;
1323 }
1324
1325 /* If the reciprocal of the timebase seems like a reasonable framerate,
1326 * then use that as a guess, otherwise use 30.
1327 */
1328 if (cpi->framerate > 180) cpi->framerate = 30;
1329
1330 cpi->ref_framerate = cpi->framerate;
1331
1332 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
1333
1334 cm->refresh_golden_frame = 0;
1335 cm->refresh_last_frame = 1;
1336 cm->refresh_entropy_probs = 1;
1337
1338 /* change includes all joint functionality */
1339 vp8_change_config(cpi, oxcf);
1340
1341 /* Initialize active best and worst q and average q values. */
1342 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1343 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1344 cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
1345
1346 /* Initialise the starting buffer levels */
1347 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
1348 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
1349
1350 cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
1351 cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
1352 cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
1353 cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
1354
1355 cpi->total_actual_bits = 0;
1356 cpi->total_target_vs_actual = 0;
1357
1358   /* Temporal scalability */
1359 if (cpi->oxcf.number_of_layers > 1) {
1360 unsigned int i;
1361 double prev_layer_framerate = 0;
1362
1363 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
1364 vp8_init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
1365 prev_layer_framerate =
1366 cpi->output_framerate / cpi->oxcf.rate_decimator[i];
1367 }
1368 }
1369
1370 #if VP8_TEMPORAL_ALT_REF
1371 {
1372 int i;
1373
1374 cpi->fixed_divide[0] = 0;
1375
1376 for (i = 1; i < 512; ++i) cpi->fixed_divide[i] = 0x80000 / i;
1377 }
1378 #endif
1379 }
1380
1381 void vp8_update_layer_contexts(VP8_COMP *cpi) {
1382 VP8_CONFIG *oxcf = &cpi->oxcf;
1383
1384 /* Update snapshots of the layer contexts to reflect new parameters */
1385 if (oxcf->number_of_layers > 1) {
1386 unsigned int i;
1387 double prev_layer_framerate = 0;
1388
1389 assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
1390 for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i) {
1391 LAYER_CONTEXT *lc = &cpi->layer_context[i];
1392
1393 lc->framerate = cpi->ref_framerate / oxcf->rate_decimator[i];
1394 if (oxcf->target_bitrate[i] > INT_MAX / 1000)
1395 lc->target_bandwidth = INT_MAX;
1396 else
1397 lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
1398
1399 lc->starting_buffer_level = rescale(
1400 (int)oxcf->starting_buffer_level_in_ms, lc->target_bandwidth, 1000);
1401
1402 if (oxcf->optimal_buffer_level == 0) {
1403 lc->optimal_buffer_level = lc->target_bandwidth / 8;
1404 } else {
1405 lc->optimal_buffer_level = rescale(
1406 (int)oxcf->optimal_buffer_level_in_ms, lc->target_bandwidth, 1000);
1407 }
1408
1409 if (oxcf->maximum_buffer_size == 0) {
1410 lc->maximum_buffer_size = lc->target_bandwidth / 8;
1411 } else {
1412 lc->maximum_buffer_size = rescale((int)oxcf->maximum_buffer_size_in_ms,
1413 lc->target_bandwidth, 1000);
1414 }
1415
1416 /* Work out the average size of a frame within this layer */
1417 if (i > 0) {
1418 lc->avg_frame_size_for_layer =
1419 (int)round((oxcf->target_bitrate[i] - oxcf->target_bitrate[i - 1]) *
1420 1000 / (lc->framerate - prev_layer_framerate));
1421 }
1422
1423 prev_layer_framerate = lc->framerate;
1424 }
1425 }
1426 }
1427
1428 void vp8_change_config(VP8_COMP *cpi, const VP8_CONFIG *oxcf) {
1429 VP8_COMMON *cm = &cpi->common;
1430 int last_w, last_h;
1431 unsigned int prev_number_of_layers;
1432 double raw_target_rate;
1433
1434 if (!cpi) return;
1435
1436 if (!oxcf) return;
1437
1438 if (cm->version != oxcf->Version) {
1439 cm->version = oxcf->Version;
1440 vp8_setup_version(cm);
1441 }
1442
1443 last_w = cpi->oxcf.Width;
1444 last_h = cpi->oxcf.Height;
1445 prev_number_of_layers = cpi->oxcf.number_of_layers;
1446
1447 cpi->oxcf = *oxcf;
1448
1449 switch (cpi->oxcf.Mode) {
1450 case MODE_REALTIME:
1451 cpi->pass = 0;
1452 cpi->compressor_speed = 2;
1453
1454 if (cpi->oxcf.cpu_used < -16) {
1455 cpi->oxcf.cpu_used = -16;
1456 }
1457
1458 if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16;
1459
1460 break;
1461
1462 case MODE_GOODQUALITY:
1463 cpi->pass = 0;
1464 cpi->compressor_speed = 1;
1465
1466 if (cpi->oxcf.cpu_used < -5) {
1467 cpi->oxcf.cpu_used = -5;
1468 }
1469
1470 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1471
1472 break;
1473
1474 case MODE_BESTQUALITY:
1475 cpi->pass = 0;
1476 cpi->compressor_speed = 0;
1477 break;
1478
1479 case MODE_FIRSTPASS:
1480 cpi->pass = 1;
1481 cpi->compressor_speed = 1;
1482 break;
1483 case MODE_SECONDPASS:
1484 cpi->pass = 2;
1485 cpi->compressor_speed = 1;
1486
1487 if (cpi->oxcf.cpu_used < -5) {
1488 cpi->oxcf.cpu_used = -5;
1489 }
1490
1491 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1492
1493 break;
1494 case MODE_SECONDPASS_BEST:
1495 cpi->pass = 2;
1496 cpi->compressor_speed = 0;
1497 break;
1498 }
1499
1500 if (cpi->pass == 0) cpi->auto_worst_q = 1;
1501
1502 cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
1503 cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
1504 cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
1505
1506 if (oxcf->fixed_q >= 0) {
1507 if (oxcf->worst_allowed_q < 0) {
1508 cpi->oxcf.fixed_q = q_trans[0];
1509 } else {
1510 cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];
1511 }
1512
1513 if (oxcf->alt_q < 0) {
1514 cpi->oxcf.alt_q = q_trans[0];
1515 } else {
1516 cpi->oxcf.alt_q = q_trans[oxcf->alt_q];
1517 }
1518
1519 if (oxcf->key_q < 0) {
1520 cpi->oxcf.key_q = q_trans[0];
1521 } else {
1522 cpi->oxcf.key_q = q_trans[oxcf->key_q];
1523 }
1524
1525 if (oxcf->gold_q < 0) {
1526 cpi->oxcf.gold_q = q_trans[0];
1527 } else {
1528 cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
1529 }
1530 }
1531
1532 cpi->ext_refresh_frame_flags_pending = 0;
1533
1534 cpi->baseline_gf_interval =
1535 cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
1536
1537 // GF behavior for 1 pass CBR, used when error_resilience is off.
1538 if (!cpi->oxcf.error_resilient_mode &&
1539 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1540 cpi->oxcf.Mode == MODE_REALTIME)
1541 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1542
1543 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
1544 cpi->oxcf.token_partitions = 3;
1545 #endif
1546
1547 if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) {
1548 cm->multi_token_partition = (TOKEN_PARTITION)cpi->oxcf.token_partitions;
1549 }
1550
1551 setup_features(cpi);
1552
1553 if (!cpi->use_roi_static_threshold) {
1554 int i;
1555 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
1556 cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
1557 }
1558 }
1559
1560 /* At the moment the first order values may not be > MAXQ */
1561 if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ;
1562
1563 /* local file playback mode == really big buffer */
1564 if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
1565 cpi->oxcf.starting_buffer_level = 60000;
1566 cpi->oxcf.optimal_buffer_level = 60000;
1567 cpi->oxcf.maximum_buffer_size = 240000;
1568 cpi->oxcf.starting_buffer_level_in_ms = 60000;
1569 cpi->oxcf.optimal_buffer_level_in_ms = 60000;
1570 cpi->oxcf.maximum_buffer_size_in_ms = 240000;
1571 }
1572
1573 raw_target_rate = ((int64_t)cpi->oxcf.Width * cpi->oxcf.Height * 8 * 3 *
1574 cpi->framerate / 1000.0);
1575 if (cpi->oxcf.target_bandwidth > raw_target_rate)
1576 cpi->oxcf.target_bandwidth = (unsigned int)raw_target_rate;
1577 /* Convert target bandwidth from Kbit/s to Bit/s */
1578 cpi->oxcf.target_bandwidth *= 1000;
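  /* For reference: 640x480 at 30 fps gives a raw source rate of
   * 640 * 480 * 8 * 3 * 30 / 1000 = 221184 kbit/s, which caps target_bandwidth
   * before the conversion to bit/s just above (values illustrative). */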
1579
1580 cpi->oxcf.starting_buffer_level = rescale(
1581 (int)cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1582
1583 /* Set or reset optimal and maximum buffer levels. */
1584 if (cpi->oxcf.optimal_buffer_level == 0) {
1585 cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
1586 } else {
1587 cpi->oxcf.optimal_buffer_level = rescale(
1588 (int)cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1589 }
1590
1591 if (cpi->oxcf.maximum_buffer_size == 0) {
1592 cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
1593 } else {
1594 cpi->oxcf.maximum_buffer_size = rescale((int)cpi->oxcf.maximum_buffer_size,
1595 cpi->oxcf.target_bandwidth, 1000);
1596 }
1597 // Under a configuration change, where maximum_buffer_size may change,
1598 // keep buffer level clipped to the maximum allowed buffer size.
1599 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
1600 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
1601 cpi->buffer_level = cpi->bits_off_target;
1602 }
1603
1604   /* Set up frame rate and related rate control parameter values. */
1605 vp8_new_framerate(cpi, cpi->framerate);
1606
1607 /* Set absolute upper and lower quality limits */
1608 cpi->worst_quality = cpi->oxcf.worst_allowed_q;
1609 cpi->best_quality = cpi->oxcf.best_allowed_q;
1610
1611 /* active values should only be modified if out of new range */
1612 if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) {
1613 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1614 }
1615 /* less likely */
1616 else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) {
1617 cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
1618 }
1619 if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) {
1620 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1621 }
1622 /* less likely */
1623 else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) {
1624 cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
1625 }
1626
1627 cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
1628
1629 cpi->cq_target_quality = cpi->oxcf.cq_level;
1630
1631 /* Only allow dropped frames in buffered mode */
1632 cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
1633
1634 cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
1635
1636 // Check if the number of temporal layers has changed, and if so reset the
1637 // pattern counter and set/initialize the temporal layer context for the
1638 // new layer configuration.
1639 if (cpi->oxcf.number_of_layers != prev_number_of_layers) {
1640     // If the number of temporal layers is changed we must start at the
1641 // base of the pattern cycle, so set the layer id to 0 and reset
1642 // the temporal pattern counter.
1643 if (cpi->temporal_layer_id > 0) {
1644 cpi->temporal_layer_id = 0;
1645 }
1646 cpi->temporal_pattern_counter = 0;
1647 vp8_reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
1648 }
1649
1650 if (!cpi->initial_width) {
1651 cpi->initial_width = cpi->oxcf.Width;
1652 cpi->initial_height = cpi->oxcf.Height;
1653 }
1654
1655 cm->Width = cpi->oxcf.Width;
1656 cm->Height = cpi->oxcf.Height;
1657 assert(cm->Width <= cpi->initial_width);
1658 assert(cm->Height <= cpi->initial_height);
1659
1660 /* TODO(jkoleszar): if an internal spatial resampling is active,
1661 * and we downsize the input image, maybe we should clear the
1662 * internal scale immediately rather than waiting for it to
1663 * correct.
1664 */
1665
1666 /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
1667 if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7;
1668
1669 cm->sharpness_level = cpi->oxcf.Sharpness;
1670
1671 if (cm->horiz_scale != VP8E_NORMAL || cm->vert_scale != VP8E_NORMAL) {
1672 int hr, hs, vr, vs;
1673
1674 Scale2Ratio(cm->horiz_scale, &hr, &hs);
1675 Scale2Ratio(cm->vert_scale, &vr, &vs);
1676
1677 /* always go to the next whole number */
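    /* Illustrative example: with a 4/5 horizontal ratio (hr = 4, hs = 5) a
     * 641-pixel-wide input becomes (5 - 1 + 641 * 4) / 5 = 513, i.e. the
     * scaled dimension is rounded up rather than truncated.
     */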
1678 cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
1679 cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
1680 }
1681
1682 if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height) {
1683 cpi->force_next_frame_intra = 1;
1684 }
1685
1686 if (((cm->Width + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
1687 ((cm->Height + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
1688 cm->yv12_fb[cm->lst_fb_idx].y_width == 0) {
1689 dealloc_raw_frame_buffers(cpi);
1690 alloc_raw_frame_buffers(cpi);
1691 vp8_alloc_compressor_data(cpi);
1692 }
1693
1694 if (cpi->oxcf.fixed_q >= 0) {
1695 cpi->last_q[0] = cpi->oxcf.fixed_q;
1696 cpi->last_q[1] = cpi->oxcf.fixed_q;
1697 }
1698
1699 cpi->Speed = cpi->oxcf.cpu_used;
1700
1701   /* force allow_lag to 0 if lag_in_frames is 0 */
1702 if (cpi->oxcf.lag_in_frames == 0) {
1703 cpi->oxcf.allow_lag = 0;
1704 }
1705 /* Limit on lag buffers as these are not currently dynamically allocated */
1706 else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
1707 cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
1708 }
1709
1710 /* YX Temp */
1711 cpi->alt_ref_source = NULL;
1712 cpi->is_src_frame_alt_ref = 0;
1713
1714 #if CONFIG_TEMPORAL_DENOISING
1715 if (cpi->oxcf.noise_sensitivity) {
1716 if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc) {
1717 int width = (cpi->oxcf.Width + 15) & ~15;
1718 int height = (cpi->oxcf.Height + 15) & ~15;
1719 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1720 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1721 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1722 "Failed to allocate denoiser");
1723 }
1724 }
1725 }
1726 #endif
1727
1728 #if 0
1729 /* Experimental RD Code */
1730 cpi->frame_distortion = 0;
1731 cpi->last_frame_distortion = 0;
1732 #endif
1733 }
1734
1735 #ifndef M_LOG2_E
1736 #define M_LOG2_E 0.693147180559945309417
1737 #endif
1738 #define log2f(x) (log(x) / (float)M_LOG2_E)
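/* Despite the name, M_LOG2_E above is defined as ln(2), so
 * log(x) / M_LOG2_E evaluates log2(x); the macro just provides a log2f()
 * substitute where the C library lacks one.
 */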
1739
1740 static void cal_mvsadcosts(int *mvsadcost[2]) {
1741 int i = 1;
1742
1743 mvsadcost[0][0] = 300;
1744 mvsadcost[1][0] = 300;
1745
1746 do {
1747 double z = 256 * (2 * (log2f(8 * i) + .6));
1748 mvsadcost[0][i] = (int)z;
1749 mvsadcost[1][i] = (int)z;
1750 mvsadcost[0][-i] = (int)z;
1751 mvsadcost[1][-i] = (int)z;
1752 } while (++i <= mvfp_max);
1753 }
1754
1755 struct VP8_COMP *vp8_create_compressor(const VP8_CONFIG *oxcf) {
1756 int i;
1757
1758 VP8_COMP *cpi;
1759 VP8_COMMON *cm;
1760
1761 cpi = vpx_memalign(32, sizeof(VP8_COMP));
1762 /* Check that the CPI instance is valid */
1763 if (!cpi) return 0;
1764
1765 cm = &cpi->common;
1766
1767 memset(cpi, 0, sizeof(VP8_COMP));
1768
1769 if (setjmp(cm->error.jmp)) {
1770 cpi->common.error.setjmp = 0;
1771 vp8_remove_compressor(&cpi);
1772 return 0;
1773 }
1774
1775 cpi->common.error.setjmp = 1;
1776
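  /* The diamond motion search visits up to 8 candidate positions per step
   * plus the centre point, which is why (MAX_MVSEARCH_STEPS * 8) + 1
   * search_site entries are allocated below.
   */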
1777 CHECK_MEM_ERROR(
1778 &cpi->common.error, cpi->mb.ss,
1779 vpx_calloc(sizeof(search_site), (MAX_MVSEARCH_STEPS * 8) + 1));
1780
1781 vp8_create_common(&cpi->common);
1782
1783 init_config(cpi, oxcf);
1784
1785 memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob,
1786 sizeof(vp8cx_base_skip_false_prob));
1787 cpi->common.current_video_frame = 0;
1788 cpi->temporal_pattern_counter = 0;
1789 cpi->temporal_layer_id = -1;
1790 cpi->kf_overspend_bits = 0;
1791 cpi->kf_bitrate_adjustment = 0;
1792 cpi->frames_till_gf_update_due = 0;
1793 cpi->gf_overspend_bits = 0;
1794 cpi->non_gf_bitrate_adjustment = 0;
1795 cpi->prob_last_coded = 128;
1796 cpi->prob_gf_coded = 128;
1797 cpi->prob_intra_coded = 63;
1798
1799 /* Prime the recent reference frame usage counters.
1800 * Hereafter they will be maintained as a sort of moving average
1801 */
1802 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
1803 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
1804 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
1805 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
1806
1807 /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
1808 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
1809
1810 cpi->twopass.gf_decay_rate = 0;
1811 cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
1812
1813 cpi->gold_is_last = 0;
1814 cpi->alt_is_last = 0;
1815 cpi->gold_is_alt = 0;
1816
1817 cpi->active_map_enabled = 0;
1818
1819 cpi->use_roi_static_threshold = 0;
1820
1821 #if 0
1822 /* Experimental code for lagged and one pass */
1823 /* Initialise one_pass GF frames stats */
1824 /* Update stats used for GF selection */
1825 if (cpi->pass == 0)
1826 {
1827 cpi->one_pass_frame_index = 0;
1828
1829 for (i = 0; i < MAX_LAG_BUFFERS; ++i)
1830 {
1831 cpi->one_pass_frame_stats[i].frames_so_far = 0;
1832 cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
1833 cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
1834 cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
1835 cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
1836 cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
1837 cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
1838 cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
1839 cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
1840 }
1841 }
1842 #endif
1843
1844 cpi->mse_source_denoised = 0;
1845
1846 /* Should we use the cyclic refresh method.
1847 * Currently there is no external control for this.
1848 * Enable it for error_resilient_mode, or for 1 pass CBR mode.
1849 */
1850 cpi->cyclic_refresh_mode_enabled =
1851 (cpi->oxcf.error_resilient_mode ||
1852 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1853 cpi->oxcf.Mode <= 2));
1854 cpi->cyclic_refresh_mode_max_mbs_perframe =
1855 (cpi->common.mb_rows * cpi->common.mb_cols) / 7;
1856 if (cpi->oxcf.number_of_layers == 1) {
1857 cpi->cyclic_refresh_mode_max_mbs_perframe =
1858 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
1859 } else if (cpi->oxcf.number_of_layers == 2) {
1860 cpi->cyclic_refresh_mode_max_mbs_perframe =
1861 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
1862 }
1863 cpi->cyclic_refresh_mode_index = 0;
1864 cpi->cyclic_refresh_q = 32;
1865
1866 // GF behavior for 1 pass CBR, used when error_resilience is off.
1867 cpi->gf_update_onepass_cbr = 0;
1868 cpi->gf_noboost_onepass_cbr = 0;
1869 if (!cpi->oxcf.error_resilient_mode &&
1870 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER && cpi->oxcf.Mode <= 2) {
1871 cpi->gf_update_onepass_cbr = 1;
1872 cpi->gf_noboost_onepass_cbr = 1;
1873 cpi->gf_interval_onepass_cbr =
1874 cpi->cyclic_refresh_mode_max_mbs_perframe > 0
1875 ? (2 * (cpi->common.mb_rows * cpi->common.mb_cols) /
1876 cpi->cyclic_refresh_mode_max_mbs_perframe)
1877 : 10;
1878 cpi->gf_interval_onepass_cbr =
1879 VPXMIN(40, VPXMAX(6, cpi->gf_interval_onepass_cbr));
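    /* Illustrative numbers: a single-layer 640x480 clip has 1200 MBs, so
     * cyclic_refresh_mode_max_mbs_perframe is 1200 / 20 = 60 and the
     * interval works out to 2 * 1200 / 60 = 40 frames, the upper clamp.
     */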
1880 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1881 }
1882
1883 if (cpi->cyclic_refresh_mode_enabled) {
1884 CHECK_MEM_ERROR(&cpi->common.error, cpi->cyclic_refresh_map,
1885 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1886 } else {
1887 cpi->cyclic_refresh_map = (signed char *)NULL;
1888 }
1889
1890 CHECK_MEM_ERROR(
1891 &cpi->common.error, cpi->skin_map,
1892 vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(cpi->skin_map[0])));
1893
1894 CHECK_MEM_ERROR(&cpi->common.error, cpi->consec_zero_last,
1895 vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
1896 CHECK_MEM_ERROR(&cpi->common.error, cpi->consec_zero_last_mvbias,
1897 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1898
1899   /* Initialize the feed-forward activity masking. */
1900 cpi->activity_avg = 90 << 12;
1901
1902 /* Give a sensible default for the first frame. */
1903 cpi->frames_since_key = 8;
1904 cpi->key_frame_frequency = cpi->oxcf.key_freq;
1905 cpi->this_key_frame_forced = 0;
1906 cpi->next_key_frame_forced = 0;
1907
1908 cpi->source_alt_ref_pending = 0;
1909 cpi->source_alt_ref_active = 0;
1910 cpi->common.refresh_alt_ref_frame = 0;
1911
1912 cpi->force_maxqp = 0;
1913 cpi->frames_since_last_drop_overshoot = 0;
1914 cpi->rt_always_update_correction_factor = 0;
1915 cpi->rt_drop_recode_on_overshoot = 1;
1916
1917 cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
1918 #if CONFIG_INTERNAL_STATS
1919 cpi->b_calculate_ssimg = 0;
1920
1921 cpi->count = 0;
1922 cpi->bytes = 0;
1923
1924 if (cpi->b_calculate_psnr) {
1925 cpi->total_sq_error = 0.0;
1926 cpi->total_sq_error2 = 0.0;
1927 cpi->total_y = 0.0;
1928 cpi->total_u = 0.0;
1929 cpi->total_v = 0.0;
1930 cpi->total = 0.0;
1931 cpi->totalp_y = 0.0;
1932 cpi->totalp_u = 0.0;
1933 cpi->totalp_v = 0.0;
1934 cpi->totalp = 0.0;
1935 cpi->tot_recode_hits = 0;
1936 cpi->summed_quality = 0;
1937 cpi->summed_weights = 0;
1938 }
1939
1940 #endif
1941
1942 cpi->first_time_stamp_ever = 0x7FFFFFFF;
1943
1944 cpi->frames_till_gf_update_due = 0;
1945 cpi->key_frame_count = 1;
1946
1947 cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
1948 cpi->ni_tot_qi = 0;
1949 cpi->ni_frames = 0;
1950 cpi->total_byte_count = 0;
1951
1952 cpi->drop_frame = 0;
1953
1954 cpi->rate_correction_factor = 1.0;
1955 cpi->key_frame_rate_correction_factor = 1.0;
1956 cpi->gf_rate_correction_factor = 1.0;
1957 cpi->twopass.est_max_qcorrection_factor = 1.0;
1958
1959 for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
1960 cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
1961 }
1962
1963 #ifdef OUTPUT_YUV_SRC
1964 yuv_file = fopen("bd.yuv", "ab");
1965 #endif
1966 #ifdef OUTPUT_YUV_DENOISED
1967 yuv_denoised_file = fopen("denoised.yuv", "ab");
1968 #endif
1969 #ifdef OUTPUT_YUV_SKINMAP
1970 yuv_skinmap_file = fopen("skinmap.yuv", "wb");
1971 #endif
1972
1973 #if 0
1974 framepsnr = fopen("framepsnr.stt", "a");
1975 kf_list = fopen("kf_list.stt", "w");
1976 #endif
1977
1978 cpi->output_pkt_list = oxcf->output_pkt_list;
1979
1980 #if !CONFIG_REALTIME_ONLY
1981
1982 if (cpi->pass == 1) {
1983 vp8_init_first_pass(cpi);
1984 } else if (cpi->pass == 2) {
1985 size_t packet_sz = sizeof(FIRSTPASS_STATS);
1986 int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
1987
1988 cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
1989 cpi->twopass.stats_in = cpi->twopass.stats_in_start;
1990 cpi->twopass.stats_in_end =
1991 (void *)((char *)cpi->twopass.stats_in + (packets - 1) * packet_sz);
1992 vp8_init_second_pass(cpi);
1993 }
1994
1995 #endif
1996
1997 if (cpi->compressor_speed == 2) {
1998 cpi->avg_encode_time = 0;
1999 cpi->avg_pick_mode_time = 0;
2000 }
2001
2002 vp8_set_speed_features(cpi);
2003
2004 /* Set starting values of RD threshold multipliers (128 = *1) */
2005 for (i = 0; i < MAX_MODES; ++i) {
2006 cpi->mb.rd_thresh_mult[i] = 128;
2007 }
2008
2009 #if CONFIG_MULTITHREAD
2010 if (vp8cx_create_encoder_threads(cpi)) {
2011 vp8_remove_compressor(&cpi);
2012 return 0;
2013 }
2014 #endif
2015
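  /* Per-block-size function pointers used by the motion search and mode
   * selection: sdf is full-pel SAD, vf variance, svf sub-pixel variance and
   * sdx4df a SAD evaluated against four reference candidates at once.
   */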
2016 cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
2017 cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
2018 cpi->fn_ptr[BLOCK_16X16].svf = vpx_sub_pixel_variance16x16;
2019 cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;
2020
2021 cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
2022 cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
2023 cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8;
2024 cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;
2025
2026 cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
2027 cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
2028 cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16;
2029 cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;
2030
2031 cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
2032 cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
2033 cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8;
2034 cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;
2035
2036 cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
2037 cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
2038 cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4;
2039 cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;
2040
2041 #if VPX_ARCH_X86 || VPX_ARCH_X86_64
2042 cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
2043 cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
2044 cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
2045 cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
2046 cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
2047 #endif
2048
2049 cpi->diamond_search_sad = vp8_diamond_search_sad;
2050 cpi->refining_search_sad = vp8_refining_search_sad;
2051
2052 /* make sure frame 1 is okay */
2053 cpi->mb.error_bins[0] = cpi->common.MBs;
2054
2055 /* vp8cx_init_quantizer() is first called here. Add check in
2056 * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
2057 * called later when needed. This will avoid unnecessary calls of
2058 * vp8cx_init_quantizer() for every frame.
2059 */
2060 vp8cx_init_quantizer(cpi);
2061
2062 vp8_loop_filter_init(cm);
2063
2064 cpi->common.error.setjmp = 0;
2065
2066 #if CONFIG_MULTI_RES_ENCODING
2067
2068 /* Calculate # of MBs in a row in lower-resolution level image. */
2069 if (cpi->oxcf.mr_encoder_id > 0) vp8_cal_low_res_mb_cols(cpi);
2070
2071 #endif
2072
2073 /* setup RD costs to MACROBLOCK struct */
2074
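  /* The MV cost tables are indexed by signed vector components, so the
   * pointers below are offset to the middle of the underlying arrays
   * (mv_max + 1 / mvfp_max + 1), allowing negative indices such as
   * mvsadcost[0][-i] in cal_mvsadcosts().
   */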
2075 cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max + 1];
2076 cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max + 1];
2077 cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max + 1];
2078 cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max + 1];
2079
2080 cal_mvsadcosts(cpi->mb.mvsadcost);
2081
2082 cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
2083 cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
2084 cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
2085 cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
2086 cpi->mb.token_costs = cpi->rd_costs.token_costs;
2087
2088 /* setup block ptrs & offsets */
2089 vp8_setup_block_ptrs(&cpi->mb);
2090 vp8_setup_block_dptrs(&cpi->mb.e_mbd);
2091
2092 return cpi;
2093 }
2094
2095 void vp8_remove_compressor(VP8_COMP **comp) {
2096 VP8_COMP *cpi = *comp;
2097
2098 if (!cpi) return;
2099
2100 if (cpi && (cpi->common.current_video_frame > 0)) {
2101 #if !CONFIG_REALTIME_ONLY
2102
2103 if (cpi->pass == 2) {
2104 vp8_end_second_pass(cpi);
2105 }
2106
2107 #endif
2108
2109 #if CONFIG_INTERNAL_STATS
2110
2111 if (cpi->pass != 1) {
2112 FILE *f = fopen("opsnr.stt", "a");
2113 double time_encoded =
2114 (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
2115 10000000.000;
2116
2117 if (cpi->b_calculate_psnr) {
2118 if (cpi->oxcf.number_of_layers > 1) {
2119 int i;
2120
2121 fprintf(f,
2122 "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2123 "GLPsnrP\tVPXSSIM\n");
2124 for (i = 0; i < (int)cpi->oxcf.number_of_layers; ++i) {
2125 double dr =
2126 (double)cpi->bytes_in_layer[i] * 8.0 / 1000.0 / time_encoded;
2127 double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
2128 cpi->common.Width * cpi->common.Height;
2129 double total_psnr =
2130 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2[i]);
2131 double total_psnr2 =
2132 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2_p[i]);
2133 double total_ssim =
2134 100 * pow(cpi->sum_ssim[i] / cpi->sum_weights[i], 8.0);
2135
2136 fprintf(f,
2137 "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2138 "%7.3f\t%7.3f\n",
2139 i, dr, cpi->sum_psnr[i] / cpi->frames_in_layer[i],
2140 total_psnr, cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
2141 total_psnr2, total_ssim);
2142 }
2143 } else {
2144 double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
2145 double samples =
2146 3.0 / 2 * cpi->count * cpi->common.Width * cpi->common.Height;
2147 double total_psnr =
2148 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error);
2149 double total_psnr2 =
2150 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error2);
2151 double total_ssim =
2152 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
2153
2154 fprintf(f,
2155 "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2156 "GLPsnrP\tVPXSSIM\n");
2157 fprintf(f,
2158 "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2159 "%7.3f\n",
2160 dr, cpi->total / cpi->count, total_psnr,
2161 cpi->totalp / cpi->count, total_psnr2, total_ssim);
2162 }
2163 }
2164 fclose(f);
2165 #if 0
2166 f = fopen("qskip.stt", "a");
2167 fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
2168 fclose(f);
2169 #endif
2170 }
2171
2172 #endif
2173
2174 #ifdef SPEEDSTATS
2175
2176 if (cpi->compressor_speed == 2) {
2177 int i;
2178 FILE *f = fopen("cxspeed.stt", "a");
2179 cnt_pm /= cpi->common.MBs;
2180
2181 for (i = 0; i < 16; ++i) fprintf(f, "%5d", frames_at_speed[i]);
2182
2183 fprintf(f, "\n");
2184 fclose(f);
2185 }
2186
2187 #endif
2188
2189 #ifdef MODE_STATS
2190 {
2191 extern int count_mb_seg[4];
2192 FILE *f = fopen("modes.stt", "a");
2193 double dr = (double)cpi->framerate * (double)bytes * (double)8 /
2194 (double)count / (double)1000;
2195 fprintf(f, "intra_mode in Intra Frames:\n");
2196 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1],
2197 y_modes[2], y_modes[3], y_modes[4]);
2198 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1],
2199 uv_modes[2], uv_modes[3]);
2200 fprintf(f, "B: ");
2201 {
2202 int i;
2203
2204 for (i = 0; i < 10; ++i) fprintf(f, "%8d, ", b_modes[i]);
2205
2206 fprintf(f, "\n");
2207 }
2208
2209 fprintf(f, "Modes in Inter Frames:\n");
2210 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
2211 inter_y_modes[0], inter_y_modes[1], inter_y_modes[2],
2212 inter_y_modes[3], inter_y_modes[4], inter_y_modes[5],
2213 inter_y_modes[6], inter_y_modes[7], inter_y_modes[8],
2214 inter_y_modes[9]);
2215 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0],
2216 inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
2217 fprintf(f, "B: ");
2218 {
2219 int i;
2220
2221 for (i = 0; i < 15; ++i) fprintf(f, "%8d, ", inter_b_modes[i]);
2222
2223 fprintf(f, "\n");
2224 }
2225 fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1],
2226 count_mb_seg[2], count_mb_seg[3]);
2227 fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4],
2228 inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4],
2229 inter_b_modes[NEW4X4]);
2230
2231 fclose(f);
2232 }
2233 #endif
2234
2235 #if defined(SECTIONBITS_OUTPUT)
2236
2237 if (0) {
2238 int i;
2239 FILE *f = fopen("tokenbits.stt", "a");
2240
2241 for (i = 0; i < 28; ++i) fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
2242
2243 fprintf(f, "\n");
2244 fclose(f);
2245 }
2246
2247 #endif
2248
2249 #if 0
2250 {
2251 printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
2252 printf("\n_frames receive_data encod_mb_row compress_frame Total\n");
2253 printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
2254 }
2255 #endif
2256 }
2257
2258 #if CONFIG_MULTITHREAD
2259 vp8cx_remove_encoder_threads(cpi);
2260 #endif
2261
2262 #if CONFIG_TEMPORAL_DENOISING
2263 vp8_denoiser_free(&cpi->denoiser);
2264 #endif
2265 dealloc_compressor_data(cpi);
2266 vpx_free(cpi->mb.ss);
2267 vpx_free(cpi->tok);
2268 vpx_free(cpi->skin_map);
2269 vpx_free(cpi->cyclic_refresh_map);
2270 vpx_free(cpi->consec_zero_last);
2271 vpx_free(cpi->consec_zero_last_mvbias);
2272
2273 vp8_remove_common(&cpi->common);
2274 vpx_free(cpi);
2275 *comp = 0;
2276
2277 #ifdef OUTPUT_YUV_SRC
2278 fclose(yuv_file);
2279 #endif
2280 #ifdef OUTPUT_YUV_DENOISED
2281 fclose(yuv_denoised_file);
2282 #endif
2283 #ifdef OUTPUT_YUV_SKINMAP
2284 fclose(yuv_skinmap_file);
2285 #endif
2286
2287 #if 0
2288
2289 if (keyfile)
2290 fclose(keyfile);
2291
2292 if (framepsnr)
2293 fclose(framepsnr);
2294
2295 if (kf_list)
2296 fclose(kf_list);
2297
2298 #endif
2299 }
2300
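/* Sum of squared error over a plane: full 16x16 blocks are measured with
 * vpx_mse16x16(), and any right/bottom borders that are not a multiple of 16
 * are accumulated with the scalar loops below.
 */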
2301 static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
2302 unsigned char *recon, int recon_stride,
2303 unsigned int cols, unsigned int rows) {
2304 unsigned int row, col;
2305 uint64_t total_sse = 0;
2306 int diff;
2307
2308 for (row = 0; row + 16 <= rows; row += 16) {
2309 for (col = 0; col + 16 <= cols; col += 16) {
2310 unsigned int sse;
2311
2312 vpx_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
2313 total_sse += sse;
2314 }
2315
2316 /* Handle odd-sized width */
2317 if (col < cols) {
2318 unsigned int border_row, border_col;
2319 unsigned char *border_orig = orig;
2320 unsigned char *border_recon = recon;
2321
2322 for (border_row = 0; border_row < 16; ++border_row) {
2323 for (border_col = col; border_col < cols; ++border_col) {
2324 diff = border_orig[border_col] - border_recon[border_col];
2325 total_sse += diff * diff;
2326 }
2327
2328 border_orig += orig_stride;
2329 border_recon += recon_stride;
2330 }
2331 }
2332
2333 orig += orig_stride * 16;
2334 recon += recon_stride * 16;
2335 }
2336
2337 /* Handle odd-sized height */
2338 for (; row < rows; ++row) {
2339 for (col = 0; col < cols; ++col) {
2340 diff = orig[col] - recon[col];
2341 total_sse += diff * diff;
2342 }
2343
2344 orig += orig_stride;
2345 recon += recon_stride;
2346 }
2347
2348 vpx_clear_system_state();
2349 return total_sse;
2350 }
2351
2352 static void generate_psnr_packet(VP8_COMP *cpi) {
2353 YV12_BUFFER_CONFIG *orig = cpi->Source;
2354 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
2355 struct vpx_codec_cx_pkt pkt;
2356 uint64_t sse;
2357 int i;
2358 unsigned int width = cpi->common.Width;
2359 unsigned int height = cpi->common.Height;
2360
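  /* Index 0 of sse[]/samples[] accumulates the combined total across all
   * planes, while indices 1..3 hold Y, U and V separately; chroma dimensions
   * are halved (rounded up) for the 4:2:0 source.
   */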
2361 pkt.kind = VPX_CODEC_PSNR_PKT;
2362 sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
2363 recon->y_stride, width, height);
2364 pkt.data.psnr.sse[0] = sse;
2365 pkt.data.psnr.sse[1] = sse;
2366 pkt.data.psnr.samples[0] = width * height;
2367 pkt.data.psnr.samples[1] = width * height;
2368
2369 width = (width + 1) / 2;
2370 height = (height + 1) / 2;
2371
2372 sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
2373 recon->uv_stride, width, height);
2374 pkt.data.psnr.sse[0] += sse;
2375 pkt.data.psnr.sse[2] = sse;
2376 pkt.data.psnr.samples[0] += width * height;
2377 pkt.data.psnr.samples[2] = width * height;
2378
2379 sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
2380 recon->uv_stride, width, height);
2381 pkt.data.psnr.sse[0] += sse;
2382 pkt.data.psnr.sse[3] = sse;
2383 pkt.data.psnr.samples[0] += width * height;
2384 pkt.data.psnr.samples[3] = width * height;
2385
2386 for (i = 0; i < 4; ++i) {
2387 pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
2388 (double)(pkt.data.psnr.sse[i]));
2389 }
2390
2391 vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
2392 }
2393
2394 int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags) {
2395 if (ref_frame_flags > 7) return -1;
2396
2397 cpi->ref_frame_flags = ref_frame_flags;
2398 return 0;
2399 }
2400 int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) {
2401 if (ref_frame_flags > 7) return -1;
2402
2403 cpi->common.refresh_golden_frame = 0;
2404 cpi->common.refresh_alt_ref_frame = 0;
2405 cpi->common.refresh_last_frame = 0;
2406
2407 if (ref_frame_flags & VP8_LAST_FRAME) cpi->common.refresh_last_frame = 1;
2408
2409 if (ref_frame_flags & VP8_GOLD_FRAME) cpi->common.refresh_golden_frame = 1;
2410
2411 if (ref_frame_flags & VP8_ALTR_FRAME) cpi->common.refresh_alt_ref_frame = 1;
2412
2413 cpi->ext_refresh_frame_flags_pending = 1;
2414 return 0;
2415 }
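/* Usage sketch (illustrative, not from the original source): ref_frame_flags
 * is a bitmask of VP8_LAST_FRAME, VP8_GOLD_FRAME and VP8_ALTR_FRAME, so e.g.
 * vp8_update_reference(cpi, VP8_LAST_FRAME | VP8_GOLD_FRAME) asks the next
 * encoded frame to refresh the last and golden buffers but leave the alt-ref
 * buffer untouched; values above 7 are rejected.
 */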
2416
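/* The two helpers below expose the reference buffers to the API layer:
 * vp8_get_reference() copies the selected reference frame out into |sd|,
 * while vp8_set_reference() overwrites that reference frame from |sd|.
 */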
2417 int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2418 YV12_BUFFER_CONFIG *sd) {
2419 VP8_COMMON *cm = &cpi->common;
2420 int ref_fb_idx;
2421
2422 if (ref_frame_flag == VP8_LAST_FRAME) {
2423 ref_fb_idx = cm->lst_fb_idx;
2424 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2425 ref_fb_idx = cm->gld_fb_idx;
2426 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2427 ref_fb_idx = cm->alt_fb_idx;
2428 } else {
2429 return -1;
2430 }
2431
2432 vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
2433
2434 return 0;
2435 }
2436 int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2437 YV12_BUFFER_CONFIG *sd) {
2438 VP8_COMMON *cm = &cpi->common;
2439
2440 int ref_fb_idx;
2441
2442 if (ref_frame_flag == VP8_LAST_FRAME) {
2443 ref_fb_idx = cm->lst_fb_idx;
2444 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2445 ref_fb_idx = cm->gld_fb_idx;
2446 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2447 ref_fb_idx = cm->alt_fb_idx;
2448 } else {
2449 return -1;
2450 }
2451
2452 vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
2453
2454 return 0;
2455 }
2456 int vp8_update_entropy(VP8_COMP *cpi, int update) {
2457 VP8_COMMON *cm = &cpi->common;
2458 cm->refresh_entropy_probs = update;
2459
2460 return 0;
2461 }
2462
2463 static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
2464 VP8_COMMON *cm = &cpi->common;
2465
2466 /* are we resizing the image */
2467 if (cm->horiz_scale != 0 || cm->vert_scale != 0) {
2468 #if CONFIG_SPATIAL_RESAMPLING
2469 int hr, hs, vr, vs;
2470 int tmp_height;
2471
2472 if (cm->vert_scale == 3) {
2473 tmp_height = 9;
2474 } else {
2475 tmp_height = 11;
2476 }
2477
2478 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2479 Scale2Ratio(cm->vert_scale, &vr, &vs);
2480
2481 vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
2482 tmp_height, hs, hr, vs, vr, 0);
2483
2484 vp8_yv12_extend_frame_borders(&cpi->scaled_source);
2485 cpi->Source = &cpi->scaled_source;
2486 #endif
2487 } else {
2488 cpi->Source = sd;
2489 }
2490 }
2491
2492 static int resize_key_frame(VP8_COMP *cpi) {
2493 #if CONFIG_SPATIAL_RESAMPLING
2494 VP8_COMMON *cm = &cpi->common;
2495
2496 /* Do we need to apply resampling for one pass cbr.
2497 * In one pass this is more limited than in two pass cbr.
2498 * The test and any change is only made once per key frame sequence.
2499 */
2500 if (cpi->oxcf.allow_spatial_resampling &&
2501 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) {
2502 int hr, hs, vr, vs;
2503 int new_width, new_height;
2504
2505 /* If we are below the resample DOWN watermark then scale down a
2506 * notch.
2507 */
2508 if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark *
2509 cpi->oxcf.optimal_buffer_level / 100)) {
2510 cm->horiz_scale =
2511 (cm->horiz_scale < VP8E_ONETWO) ? cm->horiz_scale + 1 : VP8E_ONETWO;
2512 cm->vert_scale =
2513 (cm->vert_scale < VP8E_ONETWO) ? cm->vert_scale + 1 : VP8E_ONETWO;
2514 }
2515 /* Should we now start scaling back up */
2516 else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark *
2517 cpi->oxcf.optimal_buffer_level / 100)) {
2518 cm->horiz_scale =
2519 (cm->horiz_scale > VP8E_NORMAL) ? cm->horiz_scale - 1 : VP8E_NORMAL;
2520 cm->vert_scale =
2521 (cm->vert_scale > VP8E_NORMAL) ? cm->vert_scale - 1 : VP8E_NORMAL;
2522 }
2523
2524 /* Get the new height and width */
2525 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2526 Scale2Ratio(cm->vert_scale, &vr, &vs);
2527 new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
2528 new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
2529
2530 /* If the image size has changed we need to reallocate the buffers
2531 * and resample the source image
2532 */
2533 if ((cm->Width != new_width) || (cm->Height != new_height)) {
2534 cm->Width = new_width;
2535 cm->Height = new_height;
2536 vp8_alloc_compressor_data(cpi);
2537 scale_and_extend_source(cpi->un_scaled_source, cpi);
2538 return 1;
2539 }
2540 }
2541
2542 #endif
2543 return 0;
2544 }
2545
2546 static void update_alt_ref_frame_stats(VP8_COMP *cpi) {
2547 VP8_COMMON *cm = &cpi->common;
2548
2549 /* Select an interval before next GF or altref */
2550 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2551
2552 if ((cpi->pass != 2) && cpi->frames_till_gf_update_due) {
2553 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2554
2555 /* Set the bits per frame that we should try and recover in
2556 * subsequent inter frames to account for the extra GF spend...
2557      * note that this does not apply for GF updates that occur
2558 * coincident with a key frame as the extra cost of key frames is
2559 * dealt with elsewhere.
2560 */
2561 cpi->gf_overspend_bits += cpi->projected_frame_size;
2562 cpi->non_gf_bitrate_adjustment =
2563 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
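    /* Illustrative numbers: if the ARF frame costs 24000 bits and the next
     * GF update is 8 frames away, each intervening inter frame's target is
     * reduced by roughly 3000 bits (assuming no earlier overspend).
     */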
2564 }
2565
2566 /* Update data structure that monitors level of reference to last GF */
2567 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2568 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2569
2570   /* this frame's refresh means later frames don't refresh unless specified by the user */
2571 cpi->frames_since_golden = 0;
2572
2573 /* Clear the alternate reference update pending flag. */
2574 cpi->source_alt_ref_pending = 0;
2575
2576 /* Set the alternate reference frame active flag */
2577 cpi->source_alt_ref_active = 1;
2578 }
2579 static void update_golden_frame_stats(VP8_COMP *cpi) {
2580 VP8_COMMON *cm = &cpi->common;
2581
2582 /* Update the Golden frame usage counts. */
2583 if (cm->refresh_golden_frame) {
2584 /* Select an interval before next GF */
2585 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2586
2587 if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0)) {
2588 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2589
2590 /* Set the bits per frame that we should try and recover in
2591 * subsequent inter frames to account for the extra GF spend...
2592       * note that this does not apply for GF updates that occur
2593 * coincident with a key frame as the extra cost of key frames
2594 * is dealt with elsewhere.
2595 */
2596 if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active) {
2597         /* Calculate GF bits to be recovered
2598 * Projected size - av frame bits available for inter
2599 * frames for clip as a whole
2600 */
2601 cpi->gf_overspend_bits +=
2602 (cpi->projected_frame_size - cpi->inter_frame_target);
2603 }
2604
2605 cpi->non_gf_bitrate_adjustment =
2606 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2607 }
2608
2609 /* Update data structure that monitors level of reference to last GF */
2610 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2611 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2612
2613     /* this frame's refresh means later frames don't refresh unless
2614      * specified by the user
2615      */
2616 cm->refresh_golden_frame = 0;
2617 cpi->frames_since_golden = 0;
2618
2619 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
2620 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
2621 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
2622 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
2623
2624 /* ******** Fixed Q test code only ************ */
2625 /* If we are going to use the ALT reference for the next group of
2626 * frames set a flag to say so.
2627 */
2628 if (cpi->oxcf.fixed_q >= 0 && cpi->oxcf.play_alternate &&
2629 !cpi->common.refresh_alt_ref_frame) {
2630 cpi->source_alt_ref_pending = 1;
2631 cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
2632 }
2633
2634 if (!cpi->source_alt_ref_pending) cpi->source_alt_ref_active = 0;
2635
2636 /* Decrement count down till next gf */
2637 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2638
2639 } else if (!cpi->common.refresh_alt_ref_frame) {
2640 /* Decrement count down till next gf */
2641 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2642
2643 if (cpi->frames_till_alt_ref_frame) cpi->frames_till_alt_ref_frame--;
2644
2645 cpi->frames_since_golden++;
2646
2647 if (cpi->frames_since_golden > 1) {
2648 cpi->recent_ref_frame_usage[INTRA_FRAME] +=
2649 cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
2650 cpi->recent_ref_frame_usage[LAST_FRAME] +=
2651 cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
2652 cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
2653 cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
2654 cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
2655 cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
2656 }
2657 }
2658 }
2659
2660 /* This function updates the reference frame probability estimates that
2661 * will be used during mode selection
2662 */
2663 static void update_rd_ref_frame_probs(VP8_COMP *cpi) {
2664 VP8_COMMON *cm = &cpi->common;
2665
2666 const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
2667 const int rf_intra = rfct[INTRA_FRAME];
2668 const int rf_inter =
2669 rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
2670
2671 if (cm->frame_type == KEY_FRAME) {
2672 cpi->prob_intra_coded = 255;
2673 cpi->prob_last_coded = 128;
2674 cpi->prob_gf_coded = 128;
2675 } else if (!(rf_intra + rf_inter)) {
2676 cpi->prob_intra_coded = 63;
2677 cpi->prob_last_coded = 128;
2678 cpi->prob_gf_coded = 128;
2679 }
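  /* These are 8-bit probabilities (out of 255) later used to cost the
   * reference-frame signalling bits: key frames are all intra, and with no
   * usage counts available the code falls back to the neutral defaults above.
   */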
2680
2681 /* update reference frame costs since we can do better than what we got
2682 * last frame.
2683 */
2684 if (cpi->oxcf.number_of_layers == 1) {
2685 if (cpi->common.refresh_alt_ref_frame) {
2686 cpi->prob_intra_coded += 40;
2687 if (cpi->prob_intra_coded > 255) cpi->prob_intra_coded = 255;
2688 cpi->prob_last_coded = 200;
2689 cpi->prob_gf_coded = 1;
2690 } else if (cpi->frames_since_golden == 0) {
2691 cpi->prob_last_coded = 214;
2692 } else if (cpi->frames_since_golden == 1) {
2693 cpi->prob_last_coded = 192;
2694 cpi->prob_gf_coded = 220;
2695 } else if (cpi->source_alt_ref_active) {
2696 cpi->prob_gf_coded -= 20;
2697
2698 if (cpi->prob_gf_coded < 10) cpi->prob_gf_coded = 10;
2699 }
2700 if (!cpi->source_alt_ref_active) cpi->prob_gf_coded = 255;
2701 }
2702 }
2703
2704 #if !CONFIG_REALTIME_ONLY
2705 /* 1 = key, 0 = inter */
2706 static int decide_key_frame(VP8_COMP *cpi) {
2707 VP8_COMMON *cm = &cpi->common;
2708
2709 int code_key_frame = 0;
2710
2711 cpi->kf_boost = 0;
2712
2713 if (cpi->Speed > 11) return 0;
2714
2715 /* Clear down mmx registers */
2716 vpx_clear_system_state();
2717
2718 if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0)) {
2719 double change = 1.0 *
2720 abs((int)(cpi->mb.intra_error - cpi->last_intra_error)) /
2721 (1 + cpi->last_intra_error);
2722 double change2 =
2723 1.0 *
2724 abs((int)(cpi->mb.prediction_error - cpi->last_prediction_error)) /
2725 (1 + cpi->last_prediction_error);
2726 double minerror = cm->MBs * 256;
2727
2728 cpi->last_intra_error = cpi->mb.intra_error;
2729 cpi->last_prediction_error = cpi->mb.prediction_error;
2730
2731 if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15 &&
2732 cpi->mb.prediction_error > minerror &&
2733 (change > .25 || change2 > .25)) {
2734 /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra >
2735 * cpi->last_frame_percent_intra + 3*/
2736 return 1;
2737 }
2738
2739 return 0;
2740 }
2741
2742 /* If the following are true we might as well code a key frame */
2743 if (((cpi->this_frame_percent_intra == 100) &&
2744 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
2745 ((cpi->this_frame_percent_intra > 95) &&
2746 (cpi->this_frame_percent_intra >=
2747 (cpi->last_frame_percent_intra + 5)))) {
2748 code_key_frame = 1;
2749 }
2750 /* in addition if the following are true and this is not a golden frame
2751    * then code a key frame. Note that on golden frames there often seems
2752    * to be a pop in intra usage anyway, hence this restriction is
2753 * designed to prevent spurious key frames. The Intra pop needs to be
2754 * investigated.
2755 */
2756 else if (((cpi->this_frame_percent_intra > 60) &&
2757 (cpi->this_frame_percent_intra >
2758 (cpi->last_frame_percent_intra * 2))) ||
2759 ((cpi->this_frame_percent_intra > 75) &&
2760 (cpi->this_frame_percent_intra >
2761 (cpi->last_frame_percent_intra * 3 / 2))) ||
2762 ((cpi->this_frame_percent_intra > 90) &&
2763 (cpi->this_frame_percent_intra >
2764 (cpi->last_frame_percent_intra + 10)))) {
2765 if (!cm->refresh_golden_frame) code_key_frame = 1;
2766 }
2767
2768 return code_key_frame;
2769 }
2770
2771 static void Pass1Encode(VP8_COMP *cpi) {
2772 vp8_set_quantizer(cpi, 26);
2773 vp8_first_pass(cpi);
2774 }
2775 #endif
2776
2777 #if 0
2778 void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
2779 {
2780
2781 /* write the frame */
2782 FILE *yframe;
2783 int i;
2784 char filename[255];
2785
2786 sprintf(filename, "cx\\y%04d.raw", this_frame);
2787 yframe = fopen(filename, "wb");
2788
2789 for (i = 0; i < frame->y_height; ++i)
2790 fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
2791
2792 fclose(yframe);
2793 sprintf(filename, "cx\\u%04d.raw", this_frame);
2794 yframe = fopen(filename, "wb");
2795
2796 for (i = 0; i < frame->uv_height; ++i)
2797 fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2798
2799 fclose(yframe);
2800 sprintf(filename, "cx\\v%04d.raw", this_frame);
2801 yframe = fopen(filename, "wb");
2802
2803 for (i = 0; i < frame->uv_height; ++i)
2804 fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2805
2806 fclose(yframe);
2807 }
2808 #endif
2809
2810 #if !CONFIG_REALTIME_ONLY
2811 /* Function to test for conditions that indicate we should loop
2812 * back and recode a frame.
2813 */
2814 static int recode_loop_test(VP8_COMP *cpi, int high_limit, int low_limit, int q,
2815 int maxq, int minq) {
2816 int force_recode = 0;
2817 VP8_COMMON *cm = &cpi->common;
2818
2819 /* Is frame recode allowed at all
2820    * Yes if either recode mode 1 is selected, or mode 2 is selected
2821    * and the frame is a key frame, golden frame or alt_ref_frame
2822 */
2823 if ((cpi->sf.recode_loop == 1) ||
2824 ((cpi->sf.recode_loop == 2) &&
2825 ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
2826 cm->refresh_alt_ref_frame))) {
2827 /* General over and under shoot tests */
2828 if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
2829 ((cpi->projected_frame_size < low_limit) && (q > minq))) {
2830 force_recode = 1;
2831 }
2832 /* Special Constrained quality tests */
2833 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
2834 /* Undershoot and below auto cq level */
2835 if ((q > cpi->cq_target_quality) &&
2836 (cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3))) {
2837 force_recode = 1;
2838 }
2839 /* Severe undershoot and between auto and user cq level */
2840 else if ((q > cpi->oxcf.cq_level) &&
2841 (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
2842 (cpi->active_best_quality > cpi->oxcf.cq_level)) {
2843 force_recode = 1;
2844 cpi->active_best_quality = cpi->oxcf.cq_level;
2845 }
2846 }
2847 }
2848
2849 return force_recode;
2850 }
2851 #endif // !CONFIG_REALTIME_ONLY
2852
2853 static void update_reference_frames(VP8_COMP *cpi) {
2854 VP8_COMMON *cm = &cpi->common;
2855 YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
2856
2857 /* At this point the new frame has been encoded.
2858 * If any buffer copy / swapping is signaled it should be done here.
2859 */
2860
2861 if (cm->frame_type == KEY_FRAME) {
2862 yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME;
2863
2864 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2865 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2866
2867 cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
2868
2869 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2870 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2871 } else {
2872 if (cm->refresh_alt_ref_frame) {
2873 assert(!cm->copy_buffer_to_arf);
2874
2875 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
2876 cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2877 cm->alt_fb_idx = cm->new_fb_idx;
2878
2879 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2880 } else if (cm->copy_buffer_to_arf) {
2881 assert(!(cm->copy_buffer_to_arf & ~0x3));
2882
2883 if (cm->copy_buffer_to_arf == 1) {
2884 if (cm->alt_fb_idx != cm->lst_fb_idx) {
2885 yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
2886 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2887 cm->alt_fb_idx = cm->lst_fb_idx;
2888
2889 cpi->current_ref_frames[ALTREF_FRAME] =
2890 cpi->current_ref_frames[LAST_FRAME];
2891 }
2892 } else {
2893 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2894 yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
2895 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2896 cm->alt_fb_idx = cm->gld_fb_idx;
2897
2898 cpi->current_ref_frames[ALTREF_FRAME] =
2899 cpi->current_ref_frames[GOLDEN_FRAME];
2900 }
2901 }
2902 }
2903
2904 if (cm->refresh_golden_frame) {
2905 assert(!cm->copy_buffer_to_gf);
2906
2907 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
2908 cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2909 cm->gld_fb_idx = cm->new_fb_idx;
2910
2911 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2912 } else if (cm->copy_buffer_to_gf) {
2913 assert(!(cm->copy_buffer_to_arf & ~0x3));
2914
2915 if (cm->copy_buffer_to_gf == 1) {
2916 if (cm->gld_fb_idx != cm->lst_fb_idx) {
2917 yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
2918 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2919 cm->gld_fb_idx = cm->lst_fb_idx;
2920
2921 cpi->current_ref_frames[GOLDEN_FRAME] =
2922 cpi->current_ref_frames[LAST_FRAME];
2923 }
2924 } else {
2925 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2926 yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
2927 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2928 cm->gld_fb_idx = cm->alt_fb_idx;
2929
2930 cpi->current_ref_frames[GOLDEN_FRAME] =
2931 cpi->current_ref_frames[ALTREF_FRAME];
2932 }
2933 }
2934 }
2935 }
2936
2937 if (cm->refresh_last_frame) {
2938 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
2939 cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
2940 cm->lst_fb_idx = cm->new_fb_idx;
2941
2942 cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
2943 }
2944
2945 #if CONFIG_TEMPORAL_DENOISING
2946 if (cpi->oxcf.noise_sensitivity) {
2947 /* we shouldn't have to keep multiple copies as we know in advance which
2948      * buffer we should start from - for now, to get something up and
2949      * running, I've chosen to copy the buffers
2950 */
2951 if (cm->frame_type == KEY_FRAME) {
2952 int i;
2953 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
2954 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_running_avg[i]);
2955 } else {
2956 vp8_yv12_extend_frame_borders(
2957 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
2958
2959 if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf) {
2960 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
2961 &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
2962 }
2963 if (cm->refresh_golden_frame || cm->copy_buffer_to_gf) {
2964 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
2965 &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
2966 }
2967 if (cm->refresh_last_frame) {
2968 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
2969 &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
2970 }
2971 }
2972 if (cpi->oxcf.noise_sensitivity == 4)
2973 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source);
2974 }
2975 #endif
2976 }
2977
2978 static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source,
2979 YV12_BUFFER_CONFIG *dest,
2980 VP8_COMP *cpi) {
2981 int i, j;
2982 int Total = 0;
2983 int num_blocks = 0;
2984 int skip = 2;
2985 int min_consec_zero_last = 10;
2986 int tot_num_blocks = (source->y_height * source->y_width) >> 8;
2987 unsigned char *src = source->y_buffer;
2988 unsigned char *dst = dest->y_buffer;
2989
2990   /* Loop through the Y plane, every |skip| blocks along rows and columns,
2991 * summing the square differences, and only for blocks that have been
2992    * zero_last mode at least |min_consec_zero_last| frames in a row.
2993 */
2994 for (i = 0; i < source->y_height; i += 16 * skip) {
2995 int block_index_row = (i >> 4) * cpi->common.mb_cols;
2996 for (j = 0; j < source->y_width; j += 16 * skip) {
2997 int index = block_index_row + (j >> 4);
2998 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
2999 unsigned int sse;
3000 Total += vpx_mse16x16(src + j, source->y_stride, dst + j,
3001 dest->y_stride, &sse);
3002 num_blocks++;
3003 }
3004 }
3005 src += 16 * skip * source->y_stride;
3006 dst += 16 * skip * dest->y_stride;
3007 }
3008 // Only return non-zero if we have at least ~1/16 samples for estimate.
3009 if (num_blocks > (tot_num_blocks >> 4)) {
3010 assert(num_blocks != 0);
3011 return (Total / num_blocks);
3012 } else {
3013 return 0;
3014 }
3015 }
3016
3017 #if CONFIG_TEMPORAL_DENOISING
3018 static void process_denoiser_mode_change(VP8_COMP *cpi) {
3019 const VP8_COMMON *const cm = &cpi->common;
3020 int i, j;
3021 int total = 0;
3022 int num_blocks = 0;
3023 // Number of blocks skipped along row/column in computing the
3024 // nmse (normalized mean square error) of source.
3025 int skip = 2;
3026 // Only select blocks for computing nmse that have been encoded
3027 // as ZERO LAST min_consec_zero_last frames in a row.
3028 // Scale with number of temporal layers.
3029 int min_consec_zero_last = 12 / cpi->oxcf.number_of_layers;
3030 // Decision is tested for changing the denoising mode every
3031 // num_mode_change times this function is called. Note that this
3032   // function is called every 8 frames, so (8 * num_mode_change) is the
3033   // number of frames between tests for a denoising mode switch.
3034 int num_mode_change = 20;
3035 // Framerate factor, to compensate for larger mse at lower framerates.
3036 // Use ref_framerate, which is full source framerate for temporal layers.
3037 // TODO(marpan): Adjust this factor.
3038 int fac_framerate = cpi->ref_framerate < 25.0f ? 80 : 100;
3039 int tot_num_blocks = cm->mb_rows * cm->mb_cols;
3040 int ystride = cpi->Source->y_stride;
3041 unsigned char *src = cpi->Source->y_buffer;
3042 unsigned char *dst = cpi->denoiser.yv12_last_source.y_buffer;
3043 static const unsigned char const_source[16] = { 128, 128, 128, 128, 128, 128,
3044 128, 128, 128, 128, 128, 128,
3045 128, 128, 128, 128 };
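  /* const_source is a flat mid-grey 16x16 block; measuring the source block
   * against it (the |act| value below) gives a cheap estimate of the block's
   * local contrast, which is used to normalise the source difference.
   */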
3046 int bandwidth = (int)(cpi->target_bandwidth);
3047 // For temporal layers, use full bandwidth (top layer).
3048 if (cpi->oxcf.number_of_layers > 1) {
3049 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->oxcf.number_of_layers - 1];
3050 bandwidth = (int)(lc->target_bandwidth);
3051 }
3052 // Loop through the Y plane, every skip blocks along rows and columns,
3053 // summing the normalized mean square error, only for blocks that have
3054   // been encoded as ZEROMV LAST at least min_consec_zero_last frames in
3055 // a row and have small sum difference between current and previous frame.
3056 // Normalization here is by the contrast of the current frame block.
3057 for (i = 0; i < cm->Height; i += 16 * skip) {
3058 int block_index_row = (i >> 4) * cm->mb_cols;
3059 for (j = 0; j < cm->Width; j += 16 * skip) {
3060 int index = block_index_row + (j >> 4);
3061 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3062 unsigned int sse;
3063 const unsigned int var =
3064 vpx_variance16x16(src + j, ystride, dst + j, ystride, &sse);
3065 // Only consider this block as valid for noise measurement
3066 // if the sum_diff average of the current and previous frame
3067 // is small (to avoid effects from lighting change).
3068 if ((sse - var) < 128) {
3069 unsigned int sse2;
3070 const unsigned int act =
3071 vpx_variance16x16(src + j, ystride, const_source, 0, &sse2);
3072 if (act > 0) total += sse / act;
3073 num_blocks++;
3074 }
3075 }
3076 }
3077 src += 16 * skip * ystride;
3078 dst += 16 * skip * ystride;
3079 }
3080 total = total * fac_framerate / 100;
3081
3082   // Only consider this frame as a valid sample if we have computed nmse over
3083   // at least ~1/16 blocks, and total > 0 (total == 0 can happen if the
3084 // application inputs duplicate frames, or contrast is all zero).
3085 if (total > 0 && (num_blocks > (tot_num_blocks >> 4))) {
3086 // Update the recursive mean square source_diff.
3087 total = (total << 8) / num_blocks;
3088 if (cpi->denoiser.nmse_source_diff_count == 0) {
3089 // First sample in new interval.
3090 cpi->denoiser.nmse_source_diff = total;
3091 cpi->denoiser.qp_avg = cm->base_qindex;
3092 } else {
3093 // For subsequent samples, use average with weight ~1/4 for new sample.
3094 cpi->denoiser.nmse_source_diff =
3095 (int)((total + 3 * cpi->denoiser.nmse_source_diff) >> 2);
3096 cpi->denoiser.qp_avg =
3097 (int)((cm->base_qindex + 3 * cpi->denoiser.qp_avg) >> 2);
3098 }
3099 cpi->denoiser.nmse_source_diff_count++;
3100 }
3101 // Check for changing the denoiser mode, when we have obtained #samples =
3102 // num_mode_change. Condition the change also on the bitrate and QP.
3103 if (cpi->denoiser.nmse_source_diff_count == num_mode_change) {
3104 // Check for going up: from normal to aggressive mode.
3105 if ((cpi->denoiser.denoiser_mode == kDenoiserOnYUV) &&
3106 (cpi->denoiser.nmse_source_diff >
3107 cpi->denoiser.threshold_aggressive_mode) &&
3108 (cpi->denoiser.qp_avg < cpi->denoiser.qp_threshold_up &&
3109 bandwidth > cpi->denoiser.bitrate_threshold)) {
3110 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUVAggressive);
3111 } else {
3112 // Check for going down: from aggressive to normal mode.
3113 if (((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3114 (cpi->denoiser.nmse_source_diff <
3115 cpi->denoiser.threshold_aggressive_mode)) ||
3116 ((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3117 (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down ||
3118 bandwidth < cpi->denoiser.bitrate_threshold))) {
3119 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3120 }
3121 }
3122 // Reset metric and counter for next interval.
3123 cpi->denoiser.nmse_source_diff = 0;
3124 cpi->denoiser.qp_avg = 0;
3125 cpi->denoiser.nmse_source_diff_count = 0;
3126 }
3127 }
3128 #endif
3129
3130 void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) {
3131 const FRAME_TYPE frame_type = cm->frame_type;
3132
3133 int update_any_ref_buffers = 1;
3134 if (cpi->common.refresh_last_frame == 0 &&
3135 cpi->common.refresh_golden_frame == 0 &&
3136 cpi->common.refresh_alt_ref_frame == 0) {
3137 update_any_ref_buffers = 0;
3138 }
3139
3140 if (cm->no_lpf) {
3141 cm->filter_level = 0;
3142 } else {
3143 struct vpx_usec_timer timer;
3144
3145 vpx_clear_system_state();
3146
3147 vpx_usec_timer_start(&timer);
3148 if (cpi->sf.auto_filter == 0) {
3149 #if CONFIG_TEMPORAL_DENOISING
3150 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3151 // Use the denoised buffer for selecting base loop filter level.
3152 // Denoised signal for current frame is stored in INTRA_FRAME.
3153 // No denoising on key frames.
3154 vp8cx_pick_filter_level_fast(
3155 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
3156 } else {
3157 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3158 }
3159 #else
3160 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3161 #endif
3162 } else {
3163 #if CONFIG_TEMPORAL_DENOISING
3164 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3165 // Use the denoised buffer for selecting base loop filter level.
3166 // Denoised signal for current frame is stored in INTRA_FRAME.
3167 // No denoising on key frames.
3168 vp8cx_pick_filter_level(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3169 cpi);
3170 } else {
3171 vp8cx_pick_filter_level(cpi->Source, cpi);
3172 }
3173 #else
3174 vp8cx_pick_filter_level(cpi->Source, cpi);
3175 #endif
3176 }
3177
3178 if (cm->filter_level > 0) {
3179 vp8cx_set_alt_lf_level(cpi, cm->filter_level);
3180 }
3181
3182 vpx_usec_timer_mark(&timer);
3183 cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
3184 }
3185
3186 #if CONFIG_MULTITHREAD
3187 if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
3188 sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
3189 }
3190 #endif
3191
3192 // No need to apply loop-filter if the encoded frame does not update
3193 // any reference buffers.
3194 if (cm->filter_level > 0 && update_any_ref_buffers) {
3195 vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
3196 }
3197
3198 vp8_yv12_extend_frame_borders(cm->frame_to_show);
3199 }
3200 // Return 1 if frame is to be dropped. Update frame drop decimation
3201 // counters.
3202 int vp8_check_drop_buffer(VP8_COMP *cpi) {
3203 VP8_COMMON *cm = &cpi->common;
3204 int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
3205 cpi->oxcf.optimal_buffer_level / 100);
3206 int drop_mark75 = drop_mark * 2 / 3;
3207 int drop_mark50 = drop_mark / 4;
3208 int drop_mark25 = drop_mark / 8;
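  /* Note the naming versus the actual fractions: drop_mark75 is 2/3 of
   * drop_mark, drop_mark50 is 1/4 and drop_mark25 is 1/8, all derived from
   * drop_frames_water_mark percent of the optimal buffer level.
   */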
3209 if (cpi->drop_frames_allowed) {
3210 /* The reset to decimation 0 is only done here for one pass.
3211 * Once it is set two pass leaves decimation on till the next kf.
3212 */
3213 if (cpi->buffer_level > drop_mark && cpi->decimation_factor > 0) {
3214 cpi->decimation_factor--;
3215 }
3216
3217 if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0) {
3218 cpi->decimation_factor = 1;
3219
3220 } else if (cpi->buffer_level < drop_mark25 &&
3221 (cpi->decimation_factor == 2 || cpi->decimation_factor == 3)) {
3222 cpi->decimation_factor = 3;
3223 } else if (cpi->buffer_level < drop_mark50 &&
3224 (cpi->decimation_factor == 1 || cpi->decimation_factor == 2)) {
3225 cpi->decimation_factor = 2;
3226 } else if (cpi->buffer_level < drop_mark75 &&
3227 (cpi->decimation_factor == 0 || cpi->decimation_factor == 1)) {
3228 cpi->decimation_factor = 1;
3229 }
3230 }
3231
3232 /* The following decimates the frame rate according to a regular
3233 * pattern (i.e. to 1/2 or 2/3 frame rate). This can be used to help
3234 * prevent buffer under-run in CBR mode. Alternatively it might be
3235 * desirable in some situations to drop frame rate but throw more bits
3236 * at each frame.
3237 *
3238 * Note that dropping a key frame can be problematic if spatial
3239 * resampling is also active
3240 */
3241 if (cpi->decimation_factor > 0 && cpi->drop_frames_allowed) {
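/* Frames will be dropped at this decimation level, so give each coded frame
 * a larger share of the bit budget.
 */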
3242 switch (cpi->decimation_factor) {
3243 case 1:
3244 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
3245 break;
3246 case 2:
3247 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3248 break;
3249 case 3:
3250 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3251 break;
3252 }
3253
3254 /* Note that we should not throw out a key frame (especially when
3255 * spatial resampling is enabled).
3256 */
3257 if (cm->frame_type == KEY_FRAME) {
3258 cpi->decimation_count = cpi->decimation_factor;
3259 } else if (cpi->decimation_count > 0) {
3260 cpi->decimation_count--;
3261
3262 cpi->bits_off_target += cpi->av_per_frame_bandwidth;
3263 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
3264 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
3265 }
3266
3267 #if CONFIG_MULTI_RES_ENCODING
3268 vp8_store_drop_frame_info(cpi);
3269 #endif
3270
3271 cm->current_video_frame++;
3272 cpi->frames_since_key++;
3273 cpi->ext_refresh_frame_flags_pending = 0;
3274 // We advance the temporal pattern for dropped frames.
3275 cpi->temporal_pattern_counter++;
3276
3277 #if CONFIG_INTERNAL_STATS
3278 cpi->count++;
3279 #endif
3280
3281 cpi->buffer_level = cpi->bits_off_target;
3282
3283 if (cpi->oxcf.number_of_layers > 1) {
3284 unsigned int i;
3285
3286 /* Propagate bits saved by dropping the frame to higher
3287 * layers
3288 */
3289 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
3290 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3291 lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
3292 if (lc->bits_off_target > lc->maximum_buffer_size) {
3293 lc->bits_off_target = lc->maximum_buffer_size;
3294 }
3295 lc->buffer_level = lc->bits_off_target;
3296 }
3297 }
3298 return 1;
3299 } else {
3300 cpi->decimation_count = cpi->decimation_factor;
3301 }
3302 } else {
3303 cpi->decimation_count = 0;
3304 }
3305 return 0;
3306 }
3307
3308 static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
3309 unsigned char *dest,
3310 unsigned char *dest_end,
3311 unsigned int *frame_flags) {
3312 int Q;
3313 int frame_over_shoot_limit;
3314 int frame_under_shoot_limit;
3315
3316 int Loop = 0;
3317
3318 VP8_COMMON *cm = &cpi->common;
3319 int active_worst_qchanged = 0;
3320
3321 #if !CONFIG_REALTIME_ONLY
3322 int q_low;
3323 int q_high;
3324 int zbin_oq_high;
3325 int zbin_oq_low = 0;
3326 int top_index;
3327 int bottom_index;
3328 int overshoot_seen = 0;
3329 int undershoot_seen = 0;
3330 #endif
3331
3332 /* Clear down mmx registers to allow floating point in what follows */
3333 vpx_clear_system_state();
3334
3335 if (cpi->force_next_frame_intra) {
3336 cm->frame_type = KEY_FRAME; /* delayed intra frame */
3337 cpi->force_next_frame_intra = 0;
3338 }
3339
3340 /* For an alt ref frame in 2 pass we skip the call to the second pass
3341 * function that sets the target bandwidth
3342 */
3343 switch (cpi->pass) {
3344 #if !CONFIG_REALTIME_ONLY
3345 case 2:
3346 if (cpi->common.refresh_alt_ref_frame) {
3347 /* Per frame bit target for the alt ref frame */
3348 cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
3349 /* per second target bitrate */
3350 cpi->target_bandwidth =
3351 (int)(cpi->twopass.gf_bits * cpi->output_framerate);
3352 }
3353 break;
3354 #endif // !CONFIG_REALTIME_ONLY
3355 default: {
3356 const double per_frame_bandwidth =
3357 round(cpi->target_bandwidth / cpi->output_framerate);
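/* Clamp to INT_MAX before the narrowing conversion to int to avoid overflow. */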
3358 cpi->per_frame_bandwidth = (int)VPXMIN(per_frame_bandwidth, INT_MAX);
3359 break;
3360 }
3361 }
3362
3363 /* Default turn off buffer to buffer copying */
3364 cm->copy_buffer_to_gf = 0;
3365 cm->copy_buffer_to_arf = 0;
3366
3367 /* Clear zbin over-quant value and mode boost values. */
3368 cpi->mb.zbin_over_quant = 0;
3369 cpi->mb.zbin_mode_boost = 0;
3370
3371 /* Enable or disable mode based tweaking of the zbin
3372 * For 2 Pass Only used where GF/ARF prediction quality
3373 * is above a threshold
3374 */
3375 cpi->mb.zbin_mode_boost_enabled = 1;
3376 if (cpi->pass == 2) {
3377 if (cpi->gfu_boost <= 400) {
3378 cpi->mb.zbin_mode_boost_enabled = 0;
3379 }
3380 }
3381
3382 /* Current default encoder behaviour for the altref sign bias */
3383 if (cpi->source_alt_ref_active) {
3384 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
3385 } else {
3386 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
3387 }
3388
3389 /* Check to see if a key frame is signaled
3390 * For two pass with auto key frame enabled cm->frame_type may already
3391 * be set, but not for one pass.
3392 */
3393 if ((cm->current_video_frame == 0) || (cm->frame_flags & FRAMEFLAGS_KEY) ||
3394 (cpi->oxcf.auto_key &&
3395 (cpi->frames_since_key % cpi->key_frame_frequency == 0))) {
3396 /* Key frame from VFW/auto-keyframe/first frame */
3397 cm->frame_type = KEY_FRAME;
3398 #if CONFIG_TEMPORAL_DENOISING
3399 if (cpi->oxcf.noise_sensitivity == 4) {
3400 // For adaptive mode, reset denoiser to normal mode on key frame.
3401 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3402 }
3403 #endif
3404 }
3405
3406 #if CONFIG_MULTI_RES_ENCODING
3407 if (cpi->oxcf.mr_total_resolutions > 1) {
3408 LOWER_RES_FRAME_INFO *low_res_frame_info =
3409 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
3410
3411 if (cpi->oxcf.mr_encoder_id) {
3412 // Check if lower resolution is available for motion vector reuse.
3413 if (cm->frame_type != KEY_FRAME) {
3414 cpi->mr_low_res_mv_avail = 1;
3415 cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
3416
3417 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
3418 cpi->mr_low_res_mv_avail &=
3419 (cpi->current_ref_frames[LAST_FRAME] ==
3420 low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
3421
3422 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
3423 cpi->mr_low_res_mv_avail &=
3424 (cpi->current_ref_frames[GOLDEN_FRAME] ==
3425 low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
3426
3427 // Don't use altref to determine whether low res is available.
3428 // TODO (marpan): Should we make this type of condition on a
3429 // per-reference frame basis?
3430 /*
3431 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
3432 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
3433 == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
3434 */
3435 }
3436 // Disable motion vector reuse (i.e., disable any usage of the low_res)
3437 // if the previous lower stream is skipped/disabled.
3438 if (low_res_frame_info->skip_encoding_prev_stream) {
3439 cpi->mr_low_res_mv_avail = 0;
3440 }
3441 }
3442 // This stream is not skipped (i.e., it's being encoded), so set this skip
3443 // flag to 0. This is needed for the next stream (i.e., the next
3444 // frame to be encoded).
3445 low_res_frame_info->skip_encoding_prev_stream = 0;
3446
3447 // On a key frame: For the lowest resolution, keep track of the key frame
3448 // counter value. For the higher resolutions, reset the current video
3449 // frame counter to that of the lowest resolution.
3450 // This is done to handle the case where we may stop/start encoding
3451 // higher layer(s). Restarting encoding of a higher layer is only signaled
3452 // by a key frame for now.
3453 // TODO (marpan): Add flag to indicate restart-encoding of higher layer.
3454 if (cm->frame_type == KEY_FRAME) {
3455 if (cpi->oxcf.mr_encoder_id) {
3456 // If the initial starting value of the buffer level is zero (this can
3457 // happen because we may not have started encoding this higher stream),
3458 // then reset it to a non-zero value based on |starting_buffer_level|.
3459 if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) {
3460 unsigned int i;
3461 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
3462 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
3463 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
3464 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3465 lc->bits_off_target = lc->starting_buffer_level;
3466 lc->buffer_level = lc->starting_buffer_level;
3467 }
3468 }
3469 cpi->common.current_video_frame =
3470 low_res_frame_info->key_frame_counter_value;
3471 } else {
3472 low_res_frame_info->key_frame_counter_value =
3473 cpi->common.current_video_frame;
3474 }
3475 }
3476 }
3477 #endif
3478
3479 // Find the reference frame closest to the current frame.
3480 cpi->closest_reference_frame = LAST_FRAME;
3481 if (cm->frame_type != KEY_FRAME) {
3482 int i;
3483 MV_REFERENCE_FRAME closest_ref = INTRA_FRAME;
3484 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
3485 closest_ref = LAST_FRAME;
3486 } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
3487 closest_ref = GOLDEN_FRAME;
3488 } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
3489 closest_ref = ALTREF_FRAME;
3490 }
3491 for (i = 1; i <= 3; ++i) {
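/* The reference flags are a bitmask (LAST = 1, GOLDEN = 2, ALTREF = 4) while
 * the frame indices run 1..3, hence the (i == 3) -> 4 mapping below.
 */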
3492 vpx_ref_frame_type_t ref_frame_type =
3493 (vpx_ref_frame_type_t)((i == 3) ? 4 : i);
3494 if (cpi->ref_frame_flags & ref_frame_type) {
3495 if ((cm->current_video_frame - cpi->current_ref_frames[i]) <
3496 (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
3497 closest_ref = i;
3498 }
3499 }
3500 }
3501 cpi->closest_reference_frame = closest_ref;
3502 }
3503
3504 /* Set various flags etc to special state if it is a key frame */
3505 if (cm->frame_type == KEY_FRAME) {
3506 int i;
3507
3508 // Set the loop filter deltas and segmentation map update
3509 setup_features(cpi);
3510
3511 /* The alternate reference frame cannot be active for a key frame */
3512 cpi->source_alt_ref_active = 0;
3513
3514 /* Reset the RD threshold multipliers to the default of 1x (128) */
3515 for (i = 0; i < MAX_MODES; ++i) {
3516 cpi->mb.rd_thresh_mult[i] = 128;
3517 }
3518
3519 // Reset the zero_last counter to 0 on key frame.
3520 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3521 memset(cpi->consec_zero_last_mvbias, 0,
3522 (cpi->common.mb_rows * cpi->common.mb_cols));
3523 }
3524
3525 #if 0
3526 /* Experimental code for lagged compress and one pass
3527 * Initialise one_pass GF frames stats
3528 * Update stats used for GF selection
3529 */
3530 {
3531 cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
3532
3533 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
3534 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
3535 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
3536 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
3537 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
3538 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
3539 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
3540 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
3541 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
3542 }
3543 #endif
3544
3545 update_rd_ref_frame_probs(cpi);
3546
3547 if (vp8_check_drop_buffer(cpi)) {
3548 return;
3549 }
3550
3551 /* Decide how big to make the frame */
3552 if (!vp8_pick_frame_size(cpi)) {
3553 /*TODO: 2 drop_frame and return code could be put together. */
3554 #if CONFIG_MULTI_RES_ENCODING
3555 vp8_store_drop_frame_info(cpi);
3556 #endif
3557 cm->current_video_frame++;
3558 cpi->frames_since_key++;
3559 cpi->ext_refresh_frame_flags_pending = 0;
3560 // We advance the temporal pattern for dropped frames.
3561 cpi->temporal_pattern_counter++;
3562 return;
3563 }
3564
3565 /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
3566 * This has a knock-on effect on active best quality as well.
3567 * For CBR if the buffer reaches its maximum level then we can no longer
3568 * save up bits for later frames so we might as well use them up
3569 * on the current frame.
3570 */
3571 if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
3572 (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) &&
3573 cpi->buffered_mode) {
3574 /* Max adjustment is 1/4 */
3575 int Adjustment = cpi->active_worst_quality / 4;
3576
3577 if (Adjustment) {
3578 int buff_lvl_step;
3579
3580 if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size) {
3581 buff_lvl_step = (int)((cpi->oxcf.maximum_buffer_size -
3582 cpi->oxcf.optimal_buffer_level) /
3583 Adjustment);
3584
3585 if (buff_lvl_step) {
3586 Adjustment =
3587 (int)((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) /
3588 buff_lvl_step);
3589 } else {
3590 Adjustment = 0;
3591 }
3592 }
3593
3594 cpi->active_worst_quality -= Adjustment;
3595
3596 if (cpi->active_worst_quality < cpi->active_best_quality) {
3597 cpi->active_worst_quality = cpi->active_best_quality;
3598 }
3599 }
3600 }
3601
3602 /* Set an active best quality and if necessary active worst quality
3603 * There is some odd behavior for one pass here that needs attention.
3604 */
3605 if ((cpi->pass == 2) || (cpi->ni_frames > 150)) {
3606 vpx_clear_system_state();
3607
3608 Q = cpi->active_worst_quality;
3609
3610 if (cm->frame_type == KEY_FRAME) {
3611 if (cpi->pass == 2) {
3612 if (cpi->gfu_boost > 600) {
3613 cpi->active_best_quality = kf_low_motion_minq[Q];
3614 } else {
3615 cpi->active_best_quality = kf_high_motion_minq[Q];
3616 }
3617
3618 /* Special case for key frames forced because we have reached
3619 * the maximum key frame interval. Here force the Q to a range
3620 * based on the ambient Q to reduce the risk of popping
3621 */
3622 if (cpi->this_key_frame_forced) {
3623 if (cpi->active_best_quality > cpi->avg_frame_qindex * 7 / 8) {
3624 cpi->active_best_quality = cpi->avg_frame_qindex * 7 / 8;
3625 } else if (cpi->active_best_quality < (cpi->avg_frame_qindex >> 2)) {
3626 cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
3627 }
3628 }
3629 }
3630 /* One pass more conservative */
3631 else {
3632 cpi->active_best_quality = kf_high_motion_minq[Q];
3633 }
3634 }
3635
3636 else if (cpi->oxcf.number_of_layers == 1 &&
3637 (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)) {
3638 /* Use the lower of cpi->active_worst_quality and recent
3639 * average Q as basis for GF/ARF Q limit unless last frame was
3640 * a key frame.
3641 */
3642 if ((cpi->frames_since_key > 1) &&
3643 (cpi->avg_frame_qindex < cpi->active_worst_quality)) {
3644 Q = cpi->avg_frame_qindex;
3645 }
3646
3647 /* For constrained quality don't allow Q less than the cq level */
3648 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3649 (Q < cpi->cq_target_quality)) {
3650 Q = cpi->cq_target_quality;
3651 }
3652
3653 if (cpi->pass == 2) {
3654 if (cpi->gfu_boost > 1000) {
3655 cpi->active_best_quality = gf_low_motion_minq[Q];
3656 } else if (cpi->gfu_boost < 400) {
3657 cpi->active_best_quality = gf_high_motion_minq[Q];
3658 } else {
3659 cpi->active_best_quality = gf_mid_motion_minq[Q];
3660 }
3661
3662 /* Constrained quality uses a slightly lower active best quality. */
3663 if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3664 cpi->active_best_quality = cpi->active_best_quality * 15 / 16;
3665 }
3666 }
3667 /* One pass more conservative */
3668 else {
3669 cpi->active_best_quality = gf_high_motion_minq[Q];
3670 }
3671 } else {
3672 cpi->active_best_quality = inter_minq[Q];
3673
3674 /* For the constant/constrained quality mode we don't want
3675 * q to fall below the cq level.
3676 */
3677 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3678 (cpi->active_best_quality < cpi->cq_target_quality)) {
3679 /* If we are strongly undershooting the target rate in the last
3680 * frames then use the user passed in cq value not the auto
3681 * cq value.
3682 */
3683 if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth) {
3684 cpi->active_best_quality = cpi->oxcf.cq_level;
3685 } else {
3686 cpi->active_best_quality = cpi->cq_target_quality;
3687 }
3688 }
3689 }
3690
3691 /* If CBR and the buffer is nearly full then it is reasonable to allow
3692 * higher quality on the frames to prevent bits just going to waste.
3693 */
3694 if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
3695 /* Note that the use of >= here eliminates the risk of a divide
3696 * by 0 error in the else if clause
3697 */
3698 if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) {
3699 cpi->active_best_quality = cpi->best_quality;
3700
3701 } else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level) {
3702 int Fraction =
3703 (int)(((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) /
3704 (cpi->oxcf.maximum_buffer_size -
3705 cpi->oxcf.optimal_buffer_level));
3706 int min_qadjustment =
3707 ((cpi->active_best_quality - cpi->best_quality) * Fraction) / 128;
3708
3709 cpi->active_best_quality -= min_qadjustment;
3710 }
3711 }
3712 }
3713 /* Make sure constrained quality mode limits are adhered to for the first
3714 * few frames of one pass encodes
3715 */
3716 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3717 if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
3718 cpi->common.refresh_alt_ref_frame) {
3719 cpi->active_best_quality = cpi->best_quality;
3720 } else if (cpi->active_best_quality < cpi->cq_target_quality) {
3721 cpi->active_best_quality = cpi->cq_target_quality;
3722 }
3723 }
3724
3725 /* Clip the active best and worst quality values to limits */
3726 if (cpi->active_worst_quality > cpi->worst_quality) {
3727 cpi->active_worst_quality = cpi->worst_quality;
3728 }
3729
3730 if (cpi->active_best_quality < cpi->best_quality) {
3731 cpi->active_best_quality = cpi->best_quality;
3732 }
3733
3734 if (cpi->active_worst_quality < cpi->active_best_quality) {
3735 cpi->active_worst_quality = cpi->active_best_quality;
3736 }
3737
3738 /* Determine initial Q to try */
3739 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3740
3741 #if !CONFIG_REALTIME_ONLY
3742
3743 /* Set highest allowed value for Zbin over quant */
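/* zbin_over_quant pushes the effective quantization beyond the maximum Q by
 * (in effect) widening the quantizer zero bin; the recode loop below only
 * makes use of it once Q has already reached MAXQ.
 */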
3744 if (cm->frame_type == KEY_FRAME) {
3745 zbin_oq_high = 0;
3746 } else if ((cpi->oxcf.number_of_layers == 1) &&
3747 ((cm->refresh_alt_ref_frame ||
3748 (cm->refresh_golden_frame && !cpi->source_alt_ref_active)))) {
3749 zbin_oq_high = 16;
3750 } else {
3751 zbin_oq_high = ZBIN_OQ_MAX;
3752 }
3753 #endif
3754
3755 compute_skin_map(cpi);
3756
3757 /* Setup background Q adjustment for error resilient mode.
3758 * For multi-layer encodes only enable this for the base layer.
3759 */
3760 if (cpi->cyclic_refresh_mode_enabled) {
3761 // Special case for screen_content_mode with golden frame updates.
3762 int disable_cr_gf =
3763 (cpi->oxcf.screen_content_mode == 2 && cm->refresh_golden_frame);
3764 if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf) {
3765 cyclic_background_refresh(cpi, Q, 0);
3766 } else {
3767 disable_segmentation(cpi);
3768 }
3769 }
3770
3771 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3772 &frame_over_shoot_limit);
3773
3774 #if !CONFIG_REALTIME_ONLY
3775 /* Limit Q range for the adaptive loop. */
3776 bottom_index = cpi->active_best_quality;
3777 top_index = cpi->active_worst_quality;
3778 q_low = cpi->active_best_quality;
3779 q_high = cpi->active_worst_quality;
3780 #endif
3781
3782 vp8_save_coding_context(cpi);
3783
3784 scale_and_extend_source(cpi->un_scaled_source, cpi);
3785
3786 #if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC
3787 // Option to apply spatial blur under the aggressive or adaptive
3788 // (temporal denoising) mode.
3789 if (cpi->oxcf.noise_sensitivity >= 3) {
3790 if (cpi->denoiser.denoise_pars.spatial_blur != 0) {
3791 vp8_de_noise(cm, cpi->Source, cpi->denoiser.denoise_pars.spatial_blur, 1);
3792 }
3793 }
3794 #endif
3795
3796 #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)
3797
3798 if (cpi->oxcf.noise_sensitivity > 0) {
3799 unsigned char *src;
3800 int l = 0;
3801
3802 switch (cpi->oxcf.noise_sensitivity) {
3803 case 1: l = 20; break;
3804 case 2: l = 40; break;
3805 case 3: l = 60; break;
3806 case 4: l = 80; break;
3807 case 5: l = 100; break;
3808 case 6: l = 150; break;
3809 }
3810
3811 if (cm->frame_type == KEY_FRAME) {
3812 vp8_de_noise(cm, cpi->Source, l, 1);
3813 } else {
3814 vp8_de_noise(cm, cpi->Source, l, 1);
3815
3816 src = cpi->Source->y_buffer;
3817
3818 if (cpi->Source->y_stride < 0) {
3819 src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
3820 }
3821 }
3822 }
3823
3824 #endif
3825
3826 #ifdef OUTPUT_YUV_SRC
3827 vpx_write_yuv_frame(yuv_file, cpi->Source);
3828 #endif
3829
3830 do {
3831 vpx_clear_system_state();
3832
3833 vp8_set_quantizer(cpi, Q);
3834
3835 /* setup skip prob for costing in mode/mv decision */
3836 if (cpi->common.mb_no_coeff_skip) {
3837 cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
3838
3839 if (cm->frame_type != KEY_FRAME) {
3840 if (cpi->common.refresh_alt_ref_frame) {
3841 if (cpi->last_skip_false_probs[2] != 0) {
3842 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3843 }
3844
3845 /*
3846 if(cpi->last_skip_false_probs[2]!=0 && abs(Q-
3847 cpi->last_skip_probs_q[2])<=16 )
3848 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3849 else if (cpi->last_skip_false_probs[2]!=0)
3850 cpi->prob_skip_false = (cpi->last_skip_false_probs[2] +
3851 cpi->prob_skip_false ) / 2;
3852 */
3853 } else if (cpi->common.refresh_golden_frame) {
3854 if (cpi->last_skip_false_probs[1] != 0) {
3855 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3856 }
3857
3858 /*
3859 if(cpi->last_skip_false_probs[1]!=0 && abs(Q-
3860 cpi->last_skip_probs_q[1])<=16 )
3861 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3862 else if (cpi->last_skip_false_probs[1]!=0)
3863 cpi->prob_skip_false = (cpi->last_skip_false_probs[1] +
3864 cpi->prob_skip_false ) / 2;
3865 */
3866 } else {
3867 if (cpi->last_skip_false_probs[0] != 0) {
3868 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3869 }
3870
3871 /*
3872 if(cpi->last_skip_false_probs[0]!=0 && abs(Q-
3873 cpi->last_skip_probs_q[0])<=16 )
3874 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3875 else if(cpi->last_skip_false_probs[0]!=0)
3876 cpi->prob_skip_false = (cpi->last_skip_false_probs[0] +
3877 cpi->prob_skip_false ) / 2;
3878 */
3879 }
3880
3881 /* as this is only used for cost estimation, make sure it does not
3882 * go to an extreme either way
3883 */
3884 if (cpi->prob_skip_false < 5) cpi->prob_skip_false = 5;
3885
3886 if (cpi->prob_skip_false > 250) cpi->prob_skip_false = 250;
3887
3888 if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref) {
3889 cpi->prob_skip_false = 1;
3890 }
3891 }
3892
3893 #if 0
3894
3895 if (cpi->pass != 1)
3896 {
3897 FILE *f = fopen("skip.stt", "a");
3898 fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
3899 fclose(f);
3900 }
3901
3902 #endif
3903 }
3904
3905 if (cm->frame_type == KEY_FRAME) {
3906 if (resize_key_frame(cpi)) {
3907 /* If the frame size has changed, need to reset Q, quantizer,
3908 * and background refresh.
3909 */
3910 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3911 if (cpi->cyclic_refresh_mode_enabled) {
3912 if (cpi->current_layer == 0) {
3913 cyclic_background_refresh(cpi, Q, 0);
3914 } else {
3915 disable_segmentation(cpi);
3916 }
3917 }
3918 // Reset the zero_last counter to 0 on key frame.
3919 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3920 memset(cpi->consec_zero_last_mvbias, 0,
3921 (cpi->common.mb_rows * cpi->common.mb_cols));
3922 vp8_set_quantizer(cpi, Q);
3923 }
3924
3925 vp8_setup_key_frame(cpi);
3926 }
3927
3928 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
3929 {
3930 if (cpi->oxcf.error_resilient_mode) cm->refresh_entropy_probs = 0;
3931
3932 if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
3933 if (cm->frame_type == KEY_FRAME) cm->refresh_entropy_probs = 1;
3934 }
3935
3936 if (cm->refresh_entropy_probs == 0) {
3937 /* save a copy for later refresh */
3938 memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
3939 }
3940
3941 vp8_update_coef_context(cpi);
3942
3943 vp8_update_coef_probs(cpi);
3944
3945 /* transform / motion compensation build reconstruction frame
3946 * +pack coef partitions
3947 */
3948 vp8_encode_frame(cpi);
3949
3950 /* cpi->projected_frame_size is not needed for RT mode */
3951 }
3952 #else
3953 /* transform / motion compensation build reconstruction frame */
3954 vp8_encode_frame(cpi);
3955
3956 if (cpi->pass == 0 && cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
3957 cpi->rt_drop_recode_on_overshoot == 1) {
3958 if (vp8_drop_encodedframe_overshoot(cpi, Q)) {
3959 vpx_clear_system_state();
3960 return;
3961 }
3962 if (cm->frame_type != KEY_FRAME)
3963 cpi->last_pred_err_mb =
3964 (int)(cpi->mb.prediction_error / cpi->common.MBs);
3965 }
3966
3967 cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
3968 cpi->projected_frame_size =
3969 (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
3970 #endif
3971 vpx_clear_system_state();
3972
3973 /* Test to see if the stats generated for this frame indicate that
3974 * we should have coded a key frame (assuming that we didn't)!
3975 */
3976
3977 if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME &&
3978 cpi->compressor_speed != 2) {
3979 #if !CONFIG_REALTIME_ONLY
3980 if (decide_key_frame(cpi)) {
3981 /* Reset all our sizing numbers and recode */
3982 cm->frame_type = KEY_FRAME;
3983
3984 vp8_pick_frame_size(cpi);
3985
3986 /* Clear the Alt reference frame active flag when we have
3987 * a key frame
3988 */
3989 cpi->source_alt_ref_active = 0;
3990
3991 // Set the loop filter deltas and segmentation map update
3992 setup_features(cpi);
3993
3994 vp8_restore_coding_context(cpi);
3995
3996 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3997
3998 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3999 &frame_over_shoot_limit);
4000
4001 /* Limit Q range for the adaptive loop. */
4002 bottom_index = cpi->active_best_quality;
4003 top_index = cpi->active_worst_quality;
4004 q_low = cpi->active_best_quality;
4005 q_high = cpi->active_worst_quality;
4006
4007 Loop = 1;
4008
4009 continue;
4010 }
4011 #endif
4012 }
4013
4014 vpx_clear_system_state();
4015
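/* Guard against a divide by zero in the overshoot calculation below. */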
4016 if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
4017
4018 /* Are we overshooting and up against the limit of active max Q? */
4019 if (!cpi->rt_always_update_correction_factor &&
4020 ((cpi->pass != 2) ||
4021 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
4022 (Q == cpi->active_worst_quality) &&
4023 (cpi->active_worst_quality < cpi->worst_quality) &&
4024 (cpi->projected_frame_size > frame_over_shoot_limit)) {
4025 int over_size_percent =
4026 ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) /
4027 frame_over_shoot_limit;
4028
4029 /* If so, is there any scope for relaxing it? */
4030 while ((cpi->active_worst_quality < cpi->worst_quality) &&
4031 (over_size_percent > 0)) {
4032 cpi->active_worst_quality++;
4033 /* Assume 1 qstep = about 4% on frame size. */
4034 over_size_percent = (int)(over_size_percent * 0.96);
4035 }
4036 #if !CONFIG_REALTIME_ONLY
4037 top_index = cpi->active_worst_quality;
4038 #endif // !CONFIG_REALTIME_ONLY
4039 /* If we have updated the active max Q do not call
4040 * vp8_update_rate_correction_factors() this loop.
4041 */
4042 active_worst_qchanged = 1;
4043 } else {
4044 active_worst_qchanged = 0;
4045 }
4046
4047 #if CONFIG_REALTIME_ONLY
4048 Loop = 0;
4049 #else
4050 /* Special case handling for forced key frames */
4051 if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
4052 int last_q = Q;
4053 int kf_err = vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4054
4055 /* The key frame is not good enough */
4056 if (kf_err > ((cpi->ambient_err * 7) >> 3)) {
4057 /* Lower q_high */
4058 q_high = (Q > q_low) ? (Q - 1) : q_low;
4059
4060 /* Adjust Q */
4061 Q = (q_high + q_low) >> 1;
4062 }
4063 /* The key frame is much better than the previous frame */
4064 else if (kf_err < (cpi->ambient_err >> 1)) {
4065 /* Raise q_low */
4066 q_low = (Q < q_high) ? (Q + 1) : q_high;
4067
4068 /* Adjust Q */
4069 Q = (q_high + q_low + 1) >> 1;
4070 }
4071
4072 /* Clamp Q to upper and lower limits: */
4073 if (Q > q_high) {
4074 Q = q_high;
4075 } else if (Q < q_low) {
4076 Q = q_low;
4077 }
4078
4079 Loop = Q != last_q;
4080 }
4081
4082 /* Is the projected frame size out of range and are we allowed
4083 * to attempt to recode?
4084 */
4085 else if (recode_loop_test(cpi, frame_over_shoot_limit,
4086 frame_under_shoot_limit, Q, top_index,
4087 bottom_index)) {
4088 int last_q = Q;
4089 int Retries = 0;
4090
4091 /* Frame size out of permitted range. Update correction factor
4092 * & compute new Q to try...
4093 */
4094
4095 /* Frame is too large */
4096 if (cpi->projected_frame_size > cpi->this_frame_target) {
4097 /* Raise q_low to at least the current value */
4098 q_low = (Q < q_high) ? (Q + 1) : q_high;
4099
4100 /* If we are using over quant do the same for zbin_oq_low */
4101 if (cpi->mb.zbin_over_quant > 0) {
4102 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4103 ? (cpi->mb.zbin_over_quant + 1)
4104 : zbin_oq_high;
4105 }
4106
4107 if (undershoot_seen) {
4108 /* Update rate_correction_factor unless
4109 * cpi->active_worst_quality has changed.
4110 */
4111 if (!active_worst_qchanged) {
4112 vp8_update_rate_correction_factors(cpi, 1);
4113 }
4114
4115 Q = (q_high + q_low + 1) / 2;
4116
4117 /* Adjust cpi->zbin_over_quant (only allowed when Q
4118 * is max)
4119 */
4120 if (Q < MAXQ) {
4121 cpi->mb.zbin_over_quant = 0;
4122 } else {
4123 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4124 ? (cpi->mb.zbin_over_quant + 1)
4125 : zbin_oq_high;
4126 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4127 }
4128 } else {
4129 /* Update rate_correction_factor unless
4130 * cpi->active_worst_quality has changed.
4131 */
4132 if (!active_worst_qchanged) {
4133 vp8_update_rate_correction_factors(cpi, 0);
4134 }
4135
4136 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4137
4138 while (((Q < q_low) || (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
4139 (Retries < 10)) {
4140 vp8_update_rate_correction_factors(cpi, 0);
4141 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4142 Retries++;
4143 }
4144 }
4145
4146 overshoot_seen = 1;
4147 }
4148 /* Frame is too small */
4149 else {
4150 if (cpi->mb.zbin_over_quant == 0) {
4151 /* Lower q_high if not using over quant */
4152 q_high = (Q > q_low) ? (Q - 1) : q_low;
4153 } else {
4154 /* else lower zbin_oq_high */
4155 zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low)
4156 ? (cpi->mb.zbin_over_quant - 1)
4157 : zbin_oq_low;
4158 }
4159
4160 if (overshoot_seen) {
4161 /* Update rate_correction_factor unless
4162 * cpi->active_worst_quality has changed.
4163 */
4164 if (!active_worst_qchanged) {
4165 vp8_update_rate_correction_factors(cpi, 1);
4166 }
4167
4168 Q = (q_high + q_low) / 2;
4169
4170 /* Adjust cpi->zbin_over_quant (only allowed when Q
4171 * is max)
4172 */
4173 if (Q < MAXQ) {
4174 cpi->mb.zbin_over_quant = 0;
4175 } else {
4176 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4177 }
4178 } else {
4179 /* Update rate_correction_factor unless
4180 * cpi->active_worst_quality has changed.
4181 */
4182 if (!active_worst_qchanged) {
4183 vp8_update_rate_correction_factors(cpi, 0);
4184 }
4185
4186 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4187
4188 /* Special case reset for qlow for constrained quality.
4189 * This should only trigger where there is very substantial
4190 * undershoot on a frame and the auto cq level is above
4191 * the user passed in value.
4192 */
4193 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4194 (Q < q_low)) {
4195 q_low = Q;
4196 }
4197
4198 while (((Q > q_high) || (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
4199 (Retries < 10)) {
4200 vp8_update_rate_correction_factors(cpi, 0);
4201 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4202 Retries++;
4203 }
4204 }
4205
4206 undershoot_seen = 1;
4207 }
4208
4209 /* Clamp Q to upper and lower limits: */
4210 if (Q > q_high) {
4211 Q = q_high;
4212 } else if (Q < q_low) {
4213 Q = q_low;
4214 }
4215
4216 /* Clamp cpi->zbin_over_quant */
4217 cpi->mb.zbin_over_quant =
4218 (cpi->mb.zbin_over_quant < zbin_oq_low) ? zbin_oq_low
4219 : (cpi->mb.zbin_over_quant > zbin_oq_high) ? zbin_oq_high
4220 : cpi->mb.zbin_over_quant;
4221
4222 Loop = Q != last_q;
4223 } else {
4224 Loop = 0;
4225 }
4226 #endif // CONFIG_REALTIME_ONLY
4227
4228 if (cpi->is_src_frame_alt_ref) Loop = 0;
4229
4230 if (Loop == 1) {
4231 vp8_restore_coding_context(cpi);
4232 #if CONFIG_INTERNAL_STATS
4233 cpi->tot_recode_hits++;
4234 #endif
4235 }
4236 } while (Loop == 1);
4237
4238 #if defined(DROP_UNCODED_FRAMES)
4239 /* if there are no coded macroblocks at all drop this frame */
4240 if (cpi->common.MBs == cpi->mb.skip_true_count &&
4241 (cpi->drop_frame_count & 7) != 7 && cm->frame_type != KEY_FRAME) {
4242 cpi->common.current_video_frame++;
4243 cpi->frames_since_key++;
4244 cpi->drop_frame_count++;
4245 cpi->ext_refresh_frame_flags_pending = 0;
4246 // We advance the temporal pattern for dropped frames.
4247 cpi->temporal_pattern_counter++;
4248 return;
4249 }
4250 cpi->drop_frame_count = 0;
4251 #endif
4252
4253 #if 0
4254 /* Experimental code for lagged and one pass
4255 * Update stats used for one pass GF selection
4256 */
4257 {
4258 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
4259 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
4260 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
4261 }
4262 #endif
4263
4264 /* Special case code to reduce pulsing when key frames are forced at a
4265 * fixed interval. Note the reconstruction error if it is the frame before
4266 * the forced key frame.
4267 */
4268 if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
4269 cpi->ambient_err =
4270 vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4271 }
4272
4273 /* This frame's MVs are saved and will be used in next frame's MV predictor.
4274 * Last frame has one more line (added to the bottom) and one more column
4275 * (added to the right) than cm->mip. The edge elements are initialized to 0.
4276 */
4277 #if CONFIG_MULTI_RES_ENCODING
4278 if (!cpi->oxcf.mr_encoder_id && cm->show_frame)
4279 #else
4280 if (cm->show_frame) /* do not save for altref frame */
4281 #endif
4282 {
4283 int mb_row;
4284 int mb_col;
4285 /* Point to beginning of allocated MODE_INFO arrays. */
4286 MODE_INFO *tmp = cm->mip;
4287
4288 if (cm->frame_type != KEY_FRAME) {
4289 for (mb_row = 0; mb_row < cm->mb_rows + 1; ++mb_row) {
4290 for (mb_col = 0; mb_col < cm->mb_cols + 1; ++mb_col) {
4291 if (tmp->mbmi.ref_frame != INTRA_FRAME) {
4292 cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int =
4293 tmp->mbmi.mv.as_int;
4294 }
4295
4296 cpi->lf_ref_frame_sign_bias[mb_col +
4297 mb_row * (cm->mode_info_stride + 1)] =
4298 cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
4299 cpi->lf_ref_frame[mb_col + mb_row * (cm->mode_info_stride + 1)] =
4300 tmp->mbmi.ref_frame;
4301 tmp++;
4302 }
4303 }
4304 }
4305 }
4306
4307 /* Count last ref frame 0,0 usage on current encoded frame. */
4308 {
4309 int mb_row;
4310 int mb_col;
4311 /* Point to beginning of MODE_INFO arrays. */
4312 MODE_INFO *tmp = cm->mi;
4313
4314 cpi->zeromv_count = 0;
4315
4316 if (cm->frame_type != KEY_FRAME) {
4317 for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
4318 for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
4319 if (tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME) {
4320 cpi->zeromv_count++;
4321 }
4322 tmp++;
4323 }
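/* Skip the extra border element at the end of each mode-info row
 * (mode_info_stride = mb_cols + 1).
 */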
4324 tmp++;
4325 }
4326 }
4327 }
4328
4329 #if CONFIG_MULTI_RES_ENCODING
4330 vp8_cal_dissimilarity(cpi);
4331 #endif
4332
4333 /* Update the GF usage maps.
4334 * This is done after completing the compression of a frame when all
4335 * modes etc. are finalized but before loop filter
4336 */
4337 if (cpi->oxcf.number_of_layers == 1) {
4338 vp8_update_gf_usage_maps(cpi, cm, &cpi->mb);
4339 }
4340
4341 if (cm->frame_type == KEY_FRAME) cm->refresh_last_frame = 1;
4342
4343 #if 0
4344 {
4345 FILE *f = fopen("gfactive.stt", "a");
4346 fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
4347 fclose(f);
4348 }
4349 #endif
4350
4351 /* For inter frames the current default behavior is that when
4352 * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
4353 * This is purely an encoder decision at present.
4354 * Avoid this behavior when refresh flags are set by the user.
4355 */
4356 if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame &&
4357 !cpi->ext_refresh_frame_flags_pending) {
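/* copy_buffer_to_arf == 2 means copy from the golden frame buffer
 * (1 would mean copy from the last frame buffer).
 */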
4358 cm->copy_buffer_to_arf = 2;
4359 } else {
4360 cm->copy_buffer_to_arf = 0;
4361 }
4362
4363 cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
4364
4365 #if CONFIG_TEMPORAL_DENOISING
4366 // Get some measure of the amount of noise, by measuring the (partial) mse
4367 // between source and denoised buffer, for y channel. Partial refers to
4368 // computing the sse for a sub-sample of the frame (i.e., skip x blocks along
4369 // row/column),
4370 // and only for blocks in that set that are consecutive ZEROMV_LAST mode.
4371 // Do this every ~8 frames, to further reduce complexity.
4372 // TODO(marpan): Keep this for now for the case cpi->oxcf.noise_sensitivity <
4373 // 4,
4374 // should be removed in favor of the process_denoiser_mode_change() function
4375 // below.
4376 if (cpi->oxcf.noise_sensitivity > 0 && cpi->oxcf.noise_sensitivity < 4 &&
4377 !cpi->oxcf.screen_content_mode && cpi->frames_since_key % 8 == 0 &&
4378 cm->frame_type != KEY_FRAME) {
4379 cpi->mse_source_denoised = measure_square_diff_partial(
4380 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi);
4381 }
4382
4383 // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse
4384 // of source diff (between current and previous frame), and determine if we
4385 // should switch the denoiser mode. Sampling refers to computing the mse for
4386 // a sub-sample of the frame (i.e., skip x blocks along row/column), and
4387 // only for blocks in that set that have used ZEROMV LAST, along with some
4388 // constraint on the sum diff between blocks. This process is called every
4389 // ~8 frames, to further reduce complexity.
4390 if (cpi->oxcf.noise_sensitivity == 4 && !cpi->oxcf.screen_content_mode &&
4391 cpi->frames_since_key % 8 == 0 && cm->frame_type != KEY_FRAME) {
4392 process_denoiser_mode_change(cpi);
4393 }
4394 #endif
4395
4396 #ifdef OUTPUT_YUV_SKINMAP
4397 if (cpi->common.current_video_frame > 1) {
4398 vp8_compute_skin_map(cpi, yuv_skinmap_file);
4399 }
4400 #endif
4401
4402 #if CONFIG_MULTITHREAD
4403 if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
4404 /* start loopfilter in separate thread */
4405 sem_post(&cpi->h_event_start_lpf);
4406 cpi->b_lpf_running = 1;
4407 /* wait for the filter_level to be picked so that we can continue with
4408 * stream packing */
4409 errno = 0;
4410 while (sem_wait(&cpi->h_event_end_lpf) != 0 && errno == EINTR) {
4411 }
4412 } else
4413 #endif
4414 {
4415 vp8_loopfilter_frame(cpi, cm);
4416 }
4417
4418 update_reference_frames(cpi);
4419
4420 #ifdef OUTPUT_YUV_DENOISED
4421 vpx_write_yuv_frame(yuv_denoised_file,
4422 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
4423 #endif
4424
4425 #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
4426 if (cpi->oxcf.error_resilient_mode) {
4427 cm->refresh_entropy_probs = 0;
4428 }
4429 #endif
4430
4431 /* build the bitstream */
4432 vp8_pack_bitstream(cpi, dest, dest_end, size);
4433
4434 /* Store frame_type outside the above loop since it is also
4435 * needed by motion search, not just the loop filter */
4436 cm->last_frame_type = cm->frame_type;
4437
4438 /* Update rate control heuristics */
4439 cpi->total_byte_count += (*size);
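/* *size is in bytes; rate control works in bits, hence the << 3 below. */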
4440 cpi->projected_frame_size = (int)(*size) << 3;
4441
4442 if (cpi->oxcf.number_of_layers > 1) {
4443 unsigned int i;
4444 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4445 cpi->layer_context[i].total_byte_count += (*size);
4446 }
4447 }
4448
4449 if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 2);
4450
4451 cpi->last_q[cm->frame_type] = cm->base_qindex;
4452
4453 if (cm->frame_type == KEY_FRAME) {
4454 vp8_adjust_key_frame_context(cpi);
4455 }
4456
4457 /* Keep a record of ambient average Q. */
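/* Rounded moving average: 3/4 previous average, 1/4 current base Q index. */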
4458 if (cm->frame_type != KEY_FRAME) {
4459 cpi->avg_frame_qindex =
4460 (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
4461 }
4462
4463 /* Keep a record from which we can calculate the average Q excluding
4464 * GF updates and key frames
4465 */
4466 if ((cm->frame_type != KEY_FRAME) &&
4467 ((cpi->oxcf.number_of_layers > 1) ||
4468 (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame))) {
4469 cpi->ni_frames++;
4470
4471 /* Calculate the average Q for normal inter frames (not key or GFU
4472 * frames).
4473 */
4474 if (cpi->pass == 2) {
4475 cpi->ni_tot_qi += Q;
4476 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4477 } else {
4478 /* Damp value for first few frames */
4479 if (cpi->ni_frames > 150) {
4480 cpi->ni_tot_qi += Q;
4481 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4482 }
4483 /* For one pass, early in the clip ... average the current frame Q
4484 * value with the worstq entered by the user as a dampening measure
4485 */
4486 else {
4487 cpi->ni_tot_qi += Q;
4488 cpi->ni_av_qi =
4489 ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
4490 }
4491
4492 /* If the average Q is higher than what was used in the last
4493 * frame (after going through the recode loop to keep the frame
4494 * size within range) then use the last frame value - 1. The -1
4495 * is designed to stop Q and hence the data rate, from
4496 * progressively falling away during difficult sections, but at
4497 * the same time reduce the number of iterations around the
4498 * recode loop.
4499 */
4500 if (Q > cpi->ni_av_qi) cpi->ni_av_qi = Q - 1;
4501 }
4502 }
4503
4504 /* Update the buffer level variable. */
4505 /* Non-viewable frames are a special case and are treated as pure overhead. */
4506 if (!cm->show_frame) {
4507 cpi->bits_off_target -= cpi->projected_frame_size;
4508 } else {
4509 cpi->bits_off_target +=
4510 cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
4511 }
4512
4513 /* Clip the buffer level to the maximum specified buffer size */
4514 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
4515 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
4516 }
4517
4518 // Don't let the buffer level go below some threshold, given here
4519 // by -|maximum_buffer_size|. For now we only do this for
4520 // screen content input.
4521 if (cpi->oxcf.screen_content_mode &&
4522 cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size) {
4523 cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size;
4524 }
4525
4526 /* Rolling monitors of whether we are over- or under-spending, used to
4527 * help regulate min and max Q in two pass.
4528 */
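/* These are rounded moving averages: the short monitors weight history 3:1
 * against the current frame, the long monitors 31:1.
 */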
4529 cpi->rolling_target_bits = (int)ROUND64_POWER_OF_TWO(
4530 (int64_t)cpi->rolling_target_bits * 3 + cpi->this_frame_target, 2);
4531 cpi->rolling_actual_bits = (int)ROUND64_POWER_OF_TWO(
4532 (int64_t)cpi->rolling_actual_bits * 3 + cpi->projected_frame_size, 2);
4533 cpi->long_rolling_target_bits = (int)ROUND64_POWER_OF_TWO(
4534 (int64_t)cpi->long_rolling_target_bits * 31 + cpi->this_frame_target, 5);
4535 cpi->long_rolling_actual_bits = (int)ROUND64_POWER_OF_TWO(
4536 (int64_t)cpi->long_rolling_actual_bits * 31 + cpi->projected_frame_size,
4537 5);
4538
4539 /* Actual bits spent */
4540 cpi->total_actual_bits += cpi->projected_frame_size;
4541
4542 #if 0 && CONFIG_INTERNAL_STATS
4543 /* Debug stats */
4544 cpi->total_target_vs_actual +=
4545 (cpi->this_frame_target - cpi->projected_frame_size);
4546 #endif
4547
4548 cpi->buffer_level = cpi->bits_off_target;
4549
4550 /* Propagate values to higher temporal layers */
4551 if (cpi->oxcf.number_of_layers > 1) {
4552 unsigned int i;
4553
4554 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4555 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4556 int bits_off_for_this_layer = (int)round(
4557 lc->target_bandwidth / lc->framerate - cpi->projected_frame_size);
4558
4559 lc->bits_off_target += bits_off_for_this_layer;
4560
4561 /* Clip buffer level to maximum buffer size for the layer */
4562 if (lc->bits_off_target > lc->maximum_buffer_size) {
4563 lc->bits_off_target = lc->maximum_buffer_size;
4564 }
4565
4566 lc->total_actual_bits += cpi->projected_frame_size;
4567 lc->total_target_vs_actual += bits_off_for_this_layer;
4568 lc->buffer_level = lc->bits_off_target;
4569 }
4570 }
4571
4572 /* Update bits left to the kf and gf groups to account for overshoot
4573 * or undershoot on these frames
4574 */
4575 if (cm->frame_type == KEY_FRAME) {
4576 cpi->twopass.kf_group_bits +=
4577 cpi->this_frame_target - cpi->projected_frame_size;
4578
4579 if (cpi->twopass.kf_group_bits < 0) cpi->twopass.kf_group_bits = 0;
4580 } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) {
4581 cpi->twopass.gf_group_bits +=
4582 cpi->this_frame_target - cpi->projected_frame_size;
4583
4584 if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0;
4585 }
4586
4587 if (cm->frame_type != KEY_FRAME) {
4588 if (cpi->common.refresh_alt_ref_frame) {
4589 cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
4590 cpi->last_skip_probs_q[2] = cm->base_qindex;
4591 } else if (cpi->common.refresh_golden_frame) {
4592 cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
4593 cpi->last_skip_probs_q[1] = cm->base_qindex;
4594 } else {
4595 cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
4596 cpi->last_skip_probs_q[0] = cm->base_qindex;
4597
4598 /* update the baseline */
4599 cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
4600 }
4601 }
4602
4603 #if 0 && CONFIG_INTERNAL_STATS
4604 {
4605 FILE *f = fopen("tmp.stt", "a");
4606
4607 vpx_clear_system_state();
4608
4609 if (cpi->twopass.total_left_stats.coded_error != 0.0)
4610 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4611 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4612 "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
4613 cpi->common.current_video_frame, cpi->this_frame_target,
4614 cpi->projected_frame_size,
4615 (cpi->projected_frame_size - cpi->this_frame_target),
4616 cpi->total_target_vs_actual,
4617 cpi->buffer_level,
4618 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4619 cpi->total_actual_bits, cm->base_qindex,
4620 cpi->active_best_quality, cpi->active_worst_quality,
4621 cpi->ni_av_qi, cpi->cq_target_quality,
4622 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4623 cm->frame_type, cpi->gfu_boost,
4624 cpi->twopass.est_max_qcorrection_factor,
4625 cpi->twopass.bits_left,
4626 cpi->twopass.total_left_stats.coded_error,
4627 (double)cpi->twopass.bits_left /
4628 cpi->twopass.total_left_stats.coded_error,
4629 cpi->tot_recode_hits);
4630 else
4631 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4632 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4633 "%8.2lf %"PRId64" %10.3lf %8d\n",
4634 cpi->common.current_video_frame, cpi->this_frame_target,
4635 cpi->projected_frame_size,
4636 (cpi->projected_frame_size - cpi->this_frame_target),
4637 cpi->total_target_vs_actual,
4638 cpi->buffer_level,
4639 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4640 cpi->total_actual_bits, cm->base_qindex,
4641 cpi->active_best_quality, cpi->active_worst_quality,
4642 cpi->ni_av_qi, cpi->cq_target_quality,
4643 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4644 cm->frame_type, cpi->gfu_boost,
4645 cpi->twopass.est_max_qcorrection_factor,
4646 cpi->twopass.bits_left,
4647 cpi->twopass.total_left_stats.coded_error,
4648 cpi->tot_recode_hits);
4649
4650 fclose(f);
4651
4652 {
4653 FILE *fmodes = fopen("Modes.stt", "a");
4654
4655 fprintf(fmodes, "%6d:%1d:%1d:%1d ",
4656 cpi->common.current_video_frame,
4657 cm->frame_type, cm->refresh_golden_frame,
4658 cm->refresh_alt_ref_frame);
4659
4660 fprintf(fmodes, "\n");
4661
4662 fclose(fmodes);
4663 }
4664 }
4665
4666 #endif
4667
4668 cpi->ext_refresh_frame_flags_pending = 0;
4669
4670 if (cm->refresh_golden_frame == 1) {
4671 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
4672 } else {
4673 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_GOLDEN;
4674 }
4675
4676 if (cm->refresh_alt_ref_frame == 1) {
4677 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
4678 } else {
4679 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_ALTREF;
4680 }
4681
4682 if (cm->refresh_last_frame & cm->refresh_golden_frame) { /* both refreshed */
4683 cpi->gold_is_last = 1;
4684 } else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) {
4685 /* 1 refreshed but not the other */
4686 cpi->gold_is_last = 0;
4687 }
4688
4689 if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) { /* both refreshed */
4690 cpi->alt_is_last = 1;
4691 } else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) {
4692 /* 1 refreshed but not the other */
4693 cpi->alt_is_last = 0;
4694 }
4695
4696 if (cm->refresh_alt_ref_frame &
4697 cm->refresh_golden_frame) { /* both refreshed */
4698 cpi->gold_is_alt = 1;
4699 } else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) {
4700 /* 1 refreshed but not the other */
4701 cpi->gold_is_alt = 0;
4702 }
4703
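/* Start with all references available, then mask out buffers that duplicate
 * another reference so they are not searched twice.
 */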
4704 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
4705
4706 if (cpi->gold_is_last) cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
4707
4708 if (cpi->alt_is_last) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4709
4710 if (cpi->gold_is_alt) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4711
4712 if (!cpi->oxcf.error_resilient_mode) {
4713 if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame &&
4714 (cm->frame_type != KEY_FRAME)) {
4715 /* Update the alternate reference frame stats as appropriate. */
4716 update_alt_ref_frame_stats(cpi);
4717 } else {
4718 /* Update the Golden frame stats as appropriate. */
4719 update_golden_frame_stats(cpi);
4720 }
4721 }
4722
4723 if (cm->frame_type == KEY_FRAME) {
4724 /* Tell the caller that the frame was coded as a key frame */
4725 *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
4726
4727 /* As this frame is a key frame the next defaults to an inter frame. */
4728 cm->frame_type = INTER_FRAME;
4729
4730 cpi->last_frame_percent_intra = 100;
4731 } else {
4732 *frame_flags = cm->frame_flags & ~FRAMEFLAGS_KEY;
4733
4734 cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
4735 }
4736
4737 /* Clear the one shot update flags for segmentation map and mode/ref
4738 * loop filter deltas.
4739 */
4740 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
4741 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
4742 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
4743
4744 /* Don't increment frame counters if this was an altref buffer update,
4745 * not a real frame
4746 */
4747 if (cm->show_frame) {
4748 cm->current_video_frame++;
4749 cpi->frames_since_key++;
4750 cpi->temporal_pattern_counter++;
4751 }
4752
4753 #if 0
4754 {
4755 char filename[512];
4756 FILE *recon_file;
4757 sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
4758 recon_file = fopen(filename, "wb");
4759 fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
4760 cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
4761 fclose(recon_file);
4762 }
4763 #endif
4764
4765 /* DEBUG */
4766 /* vpx_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
4767 }
4768 #if !CONFIG_REALTIME_ONLY
4769 static void Pass2Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
4770 unsigned char *dest_end, unsigned int *frame_flags) {
4771 if (!cpi->common.refresh_alt_ref_frame) vp8_second_pass(cpi);
4772
4773 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
4774 cpi->twopass.bits_left -= 8 * (int)(*size);
4775
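/* For non-ARF frames, credit the per-frame share of the two-pass minimum
 * section rate back to the remaining bit budget.
 */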
4776 if (!cpi->common.refresh_alt_ref_frame) {
4777 double two_pass_min_rate =
4778 (double)(cpi->oxcf.target_bandwidth *
4779 cpi->oxcf.two_pass_vbrmin_section / 100);
4780 cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
4781 }
4782 }
4783 #endif
4784
4785 int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags,
4786 YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
4787 int64_t end_time) {
4788 struct vpx_usec_timer timer;
4789 int res = 0;
4790
4791 vpx_usec_timer_start(&timer);
4792
4793 /* Reinit the lookahead buffer if the frame size changes */
4794 if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height) {
4795 assert(cpi->oxcf.lag_in_frames < 2);
4796 dealloc_raw_frame_buffers(cpi);
4797 alloc_raw_frame_buffers(cpi);
4798 }
4799
4800 if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags,
4801 cpi->active_map_enabled ? cpi->active_map : NULL)) {
4802 res = -1;
4803 }
4804 vpx_usec_timer_mark(&timer);
4805 cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
4806
4807 return res;
4808 }
4809
4810 static int frame_is_reference(const VP8_COMP *cpi) {
4811 const VP8_COMMON *cm = &cpi->common;
4812 const MACROBLOCKD *xd = &cpi->mb.e_mbd;
4813
4814 return cm->frame_type == KEY_FRAME || cm->refresh_last_frame ||
4815 cm->refresh_golden_frame || cm->refresh_alt_ref_frame ||
4816 cm->copy_buffer_to_gf || cm->copy_buffer_to_arf ||
4817 cm->refresh_entropy_probs || xd->mode_ref_lf_delta_update ||
4818 xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
4819 }
4820
4821 int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
4822 size_t *size, unsigned char *dest,
4823 unsigned char *dest_end, int64_t *time_stamp,
4824 int64_t *time_end, int flush) {
4825 VP8_COMMON *cm;
4826 struct vpx_usec_timer tsctimer;
4827 struct vpx_usec_timer ticktimer;
4828 struct vpx_usec_timer cmptimer;
4829 YV12_BUFFER_CONFIG *force_src_buffer = NULL;
4830
4831 if (!cpi) return -1;
4832
4833 cm = &cpi->common;
4834
4835 vpx_usec_timer_start(&cmptimer);
4836
4837 cpi->source = NULL;
4838
4839 #if !CONFIG_REALTIME_ONLY
4840 /* Should we code an alternate reference frame */
4841 if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.play_alternate &&
4842 cpi->source_alt_ref_pending) {
4843 if ((cpi->source = vp8_lookahead_peek(
4844 cpi->lookahead, cpi->frames_till_gf_update_due, PEEK_FORWARD))) {
4845 cpi->alt_ref_source = cpi->source;
4846 if (cpi->oxcf.arnr_max_frames > 0) {
4847 vp8_temporal_filter_prepare_c(cpi, cpi->frames_till_gf_update_due);
4848 force_src_buffer = &cpi->alt_ref_buffer;
4849 }
4850 cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
4851 cm->refresh_alt_ref_frame = 1;
4852 cm->refresh_golden_frame = 0;
4853 cm->refresh_last_frame = 0;
4854 cm->show_frame = 0;
4855 /* Clear Pending alt Ref flag. */
4856 cpi->source_alt_ref_pending = 0;
4857 cpi->is_src_frame_alt_ref = 0;
4858 }
4859 }
4860 #endif
4861
4862 if (!cpi->source) {
4863 /* Read last frame source if we are encoding first pass. */
4864 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4865 if ((cpi->last_source =
4866 vp8_lookahead_peek(cpi->lookahead, 1, PEEK_BACKWARD)) == NULL) {
4867 return -1;
4868 }
4869 }
4870
4871 if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) {
4872 cm->show_frame = 1;
4873
4874 cpi->is_src_frame_alt_ref =
4875 cpi->alt_ref_source && (cpi->source == cpi->alt_ref_source);
4876
4877 if (cpi->is_src_frame_alt_ref) cpi->alt_ref_source = NULL;
4878 }
4879 }
4880
4881 if (cpi->source) {
4882 cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
4883 cpi->un_scaled_source = cpi->Source;
4884 *time_stamp = cpi->source->ts_start;
4885 *time_end = cpi->source->ts_end;
4886 *frame_flags = cpi->source->flags;
4887
4888 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4889 cpi->last_frame_unscaled_source = &cpi->last_source->img;
4890 }
4891 } else {
4892 *size = 0;
4893 #if !CONFIG_REALTIME_ONLY
4894
4895 if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
4896 vp8_end_first_pass(cpi); /* get last stats packet */
4897 cpi->twopass.first_pass_done = 1;
4898 }
4899
4900 #endif
4901
4902 return -1;
4903 }
4904
4905 if (cpi->source->ts_start < cpi->first_time_stamp_ever) {
4906 cpi->first_time_stamp_ever = cpi->source->ts_start;
4907 cpi->last_end_time_stamp_seen = cpi->source->ts_start;
4908 }
4909
4910 /* adjust frame rates based on timestamps given */
4911 if (cm->show_frame) {
4912 int64_t this_duration;
4913 int step = 0;
4914
4915 if (cpi->source->ts_start == cpi->first_time_stamp_ever) {
4916 this_duration = cpi->source->ts_end - cpi->source->ts_start;
4917 step = 1;
4918 } else {
4919 int64_t last_duration;
4920
4921 this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
4922 last_duration = cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
4923 // Cap this to avoid overflow of (this_duration - last_duration) * 10
4924 this_duration = VPXMIN(this_duration, INT64_MAX / 10);
4925 /* do a step update if the duration changes by 10% */
4926 if (last_duration) {
4927 step = (int)(((this_duration - last_duration) * 10 / last_duration));
4928 }
4929 }
4930
4931 if (this_duration) {
4932 if (step) {
4933 cpi->ref_framerate = 10000000.0 / this_duration;
4934 } else {
4935 double avg_duration, interval;
4936
4937 /* Average this frame's rate into the last second's average
4938 * frame rate. If we haven't seen 1 second yet, then average
4939 * over the whole interval seen.
4940 */
4941 interval = (double)(cpi->source->ts_end - cpi->first_time_stamp_ever);
4942 if (interval > 10000000.0) interval = 10000000;
4943
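        /* Running-average update, with the new sample weighted by roughly one
         * frame's share of the (at most one second) interval:
         *   avg' = avg * (interval - avg + this_duration) / interval
         *        = avg + avg * (this_duration - avg) / interval
         */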
        avg_duration = 10000000.0 / cpi->ref_framerate;
        avg_duration *= (interval - avg_duration + this_duration);
        avg_duration /= interval;

        cpi->ref_framerate = 10000000.0 / avg_duration;
      }
#if CONFIG_MULTI_RES_ENCODING
      if (cpi->oxcf.mr_total_resolutions > 1) {
        LOWER_RES_FRAME_INFO *low_res_frame_info =
            (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
        // The frame rate should be the same for all spatial layers in
        // multi-res encoding (simulcast), so constrain the frame rate of the
        // higher layers to that of the lowest resolution. This is needed
        // because the application may decide to skip encoding a high layer
        // and then start again, in which case a big jump in time-stamps is
        // received for that high layer, which would yield an incorrect frame
        // rate (from the time-stamp adjustment in the calculation above).
        if (cpi->oxcf.mr_encoder_id) {
          if (!low_res_frame_info->skip_encoding_base_stream)
            cpi->ref_framerate = low_res_frame_info->low_res_framerate;
        } else {
          // Keep track of the frame rate for the lowest resolution.
          low_res_frame_info->low_res_framerate = cpi->ref_framerate;
          // The base stream is being encoded, so clear the skip flag.
          low_res_frame_info->skip_encoding_base_stream = 0;
        }
      }
#endif
      if (cpi->oxcf.number_of_layers > 1) {
        unsigned int i;

        /* Update frame rates for each layer */
        assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
        for (i = 0; i < cpi->oxcf.number_of_layers && i < VPX_TS_MAX_LAYERS;
             ++i) {
          LAYER_CONTEXT *lc = &cpi->layer_context[i];
          lc->framerate = cpi->ref_framerate / cpi->oxcf.rate_decimator[i];
        }
      } else {
        vp8_new_framerate(cpi, cpi->ref_framerate);
      }
    }

    cpi->last_time_stamp_seen = cpi->source->ts_start;
    cpi->last_end_time_stamp_seen = cpi->source->ts_end;
  }

  if (cpi->oxcf.number_of_layers > 1) {
    int layer;

    vp8_update_layer_contexts(cpi);

    /* Restore layer specific context & set frame rate */
    if (cpi->temporal_layer_id >= 0) {
      layer = cpi->temporal_layer_id;
    } else {
      layer =
          cpi->oxcf
              .layer_id[cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
    }
    vp8_restore_layer_context(cpi, layer);
    vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
  }

  if (cpi->compressor_speed == 2) {
    vpx_usec_timer_start(&tsctimer);
    vpx_usec_timer_start(&ticktimer);
  }

  cpi->lf_zeromv_pct = (cpi->zeromv_count * 100) / cm->MBs;

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  {
    int i;
    const int num_part = (1 << cm->multi_token_partition);
    /* the available bytes in dest */
    const unsigned long dest_size = dest_end - dest;
    const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);

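    /* Layout of dest: the first 1/10 of the buffer is reserved for the first
     * (mode/motion-vector) partition, and the remaining 9/10 is split evenly
     * among the num_part token partitions.
     */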
    unsigned char *dp = dest;

    cpi->partition_d[0] = dp;
    dp += dest_size / 10; /* reserve 1/10 for control partition */
    cpi->partition_d_end[0] = dp;

    for (i = 0; i < num_part; ++i) {
      cpi->partition_d[i + 1] = dp;
      dp += tok_part_buff_size;
      cpi->partition_d_end[i + 1] = dp;
    }
  }
#endif

  /* start with a 0 size frame */
  *size = 0;

  /* Clear down mmx registers */
  vpx_clear_system_state();

  cm->frame_type = INTER_FRAME;
  cm->frame_flags = *frame_flags;

#if 0

    if (cm->refresh_alt_ref_frame)
    {
        cm->refresh_golden_frame = 0;
        cm->refresh_last_frame = 0;
    }
    else
    {
        cm->refresh_golden_frame = 0;
        cm->refresh_last_frame = 1;
    }

#endif
  /* find a free buffer for the new frame */
  {
    int i = 0;
    for (; i < NUM_YV12_BUFFERS; ++i) {
      if (!cm->yv12_fb[i].flags) {
        cm->new_fb_idx = i;
        break;
      }
    }

    assert(i < NUM_YV12_BUFFERS);
  }
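  /* Encode the frame: pass 1 only gathers first-pass statistics, pass 2
   * encodes using those statistics, and the default path is the one-pass
   * (including real-time) encode straight to the target data rate.
   */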
  switch (cpi->pass) {
#if !CONFIG_REALTIME_ONLY
    case 1: Pass1Encode(cpi); break;
    case 2: Pass2Encode(cpi, size, dest, dest_end, frame_flags); break;
#endif  // !CONFIG_REALTIME_ONLY
    default:
      encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
      break;
  }

  if (cpi->compressor_speed == 2) {
    unsigned int duration, duration2;
    vpx_usec_timer_mark(&tsctimer);
    vpx_usec_timer_mark(&ticktimer);

    duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
    duration2 = (unsigned int)((double)duration / 2);

    if (cm->frame_type != KEY_FRAME) {
      if (cpi->avg_encode_time == 0) {
        cpi->avg_encode_time = duration;
      } else {
        cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
      }
    }

    if (duration2) {
      {
        if (cpi->avg_pick_mode_time == 0) {
          cpi->avg_pick_mode_time = duration2;
        } else {
          cpi->avg_pick_mode_time =
              (7 * cpi->avg_pick_mode_time + duration2) >> 3;
        }
      }
    }
  }

  if (cm->refresh_entropy_probs == 0) {
    memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
  }

  /* Save the contexts separately for alt ref, gold and last. */
  /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
  if (cm->refresh_alt_ref_frame) memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));

  if (cm->refresh_golden_frame) memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));

  if (cm->refresh_last_frame) memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));

  /* If the frame was dropped (zero size), keep the buffer-refresh and state
   * update requests pending for subsequent frames; otherwise return the flags
   * to their normal state.
   */
  if (*size > 0) {
    cpi->droppable = !frame_is_reference(cpi);

    /* return to normal state */
    cm->refresh_entropy_probs = 1;
    cm->refresh_alt_ref_frame = 0;
    cm->refresh_golden_frame = 0;
    cm->refresh_last_frame = 1;
    cm->frame_type = INTER_FRAME;
  }

  /* Save layer specific state */
  if (cpi->oxcf.number_of_layers > 1) vp8_save_layer_context(cpi);

  vpx_usec_timer_mark(&cmptimer);
  cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);

#if CONFIG_MULTITHREAD
  /* wait for the loop filter thread to finish */
  if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) && cpi->b_lpf_running) {
    errno = 0;
    while (sem_wait(&cpi->h_event_end_lpf) != 0 && errno == EINTR) {
    }
    cpi->b_lpf_running = 0;
  }
#endif

  if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) {
    generate_psnr_packet(cpi);
  }

#if CONFIG_INTERNAL_STATS

  if (cpi->pass != 1) {
    cpi->bytes += *size;

    if (cm->show_frame) {
      cpi->common.show_frame_mi = cpi->common.mi;
      cpi->count++;

      if (cpi->b_calculate_psnr) {
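        /* Global PSNR over all three planes of the 4:2:0 frame:
         *   PSNR = 10 * log10(255^2 * samples / SSE)
         * with the chroma planes at half the luma resolution in each
         * dimension (rounded up).
         */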
        uint64_t ye, ue, ve;
        double frame_psnr;
        YV12_BUFFER_CONFIG *orig = cpi->Source;
        YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
        unsigned int y_width = cpi->common.Width;
        unsigned int y_height = cpi->common.Height;
        unsigned int uv_width = (y_width + 1) / 2;
        unsigned int uv_height = (y_height + 1) / 2;
        int y_samples = y_height * y_width;
        int uv_samples = uv_height * uv_width;
        int t_samples = y_samples + 2 * uv_samples;
        double sq_error;

        ye = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
                              recon->y_stride, y_width, y_height);

        ue = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
                              recon->uv_stride, uv_width, uv_height);

        ve = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
                              recon->uv_stride, uv_width, uv_height);

        sq_error = (double)(ye + ue + ve);

        frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);

        cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
        cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
        cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
        cpi->total_sq_error += sq_error;
        cpi->total += frame_psnr;
#if CONFIG_POSTPROC
        {
          YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
          double sq_error2;
          double frame_psnr2, frame_ssim2 = 0;
          double weight = 0;

          vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer,
                      cm->filter_level * 10 / 6);
          vpx_clear_system_state();

          ye = calc_plane_error(orig->y_buffer, orig->y_stride, pp->y_buffer,
                                pp->y_stride, y_width, y_height);

          ue = calc_plane_error(orig->u_buffer, orig->uv_stride, pp->u_buffer,
                                pp->uv_stride, uv_width, uv_height);

          ve = calc_plane_error(orig->v_buffer, orig->uv_stride, pp->v_buffer,
                                pp->uv_stride, uv_width, uv_height);

          sq_error2 = (double)(ye + ue + ve);

          frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);

          cpi->totalp_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
          cpi->totalp_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
          cpi->totalp_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
          cpi->total_sq_error2 += sq_error2;
          cpi->totalp += frame_psnr2;

          frame_ssim2 =
              vpx_calc_ssim(cpi->Source, &cm->post_proc_buffer, &weight);

          cpi->summed_quality += frame_ssim2 * weight;
          cpi->summed_weights += weight;

          if (cpi->oxcf.number_of_layers > 1) {
            unsigned int i;

            for (i = cpi->current_layer; i < cpi->oxcf.number_of_layers; ++i) {
              cpi->frames_in_layer[i]++;

              cpi->bytes_in_layer[i] += *size;
              cpi->sum_psnr[i] += frame_psnr;
              cpi->sum_psnr_p[i] += frame_psnr2;
              cpi->total_error2[i] += sq_error;
              cpi->total_error2_p[i] += sq_error2;
              cpi->sum_ssim[i] += frame_ssim2 * weight;
              cpi->sum_weights[i] += weight;
            }
          }
        }
#endif
      }
    }
  }

#if 0

    if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
    {
        skiptruecount += cpi->skip_true_count;
        skipfalsecount += cpi->skip_false_count;
    }

#endif
#if 0

    if (cpi->pass != 1)
    {
        FILE *f = fopen("skip.stt", "a");
        fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);

        if (cpi->is_src_frame_alt_ref == 1)
            fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size);

        fclose(f);
    }

#endif
#endif

  cpi->common.error.setjmp = 0;

  return 0;
}

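/* Return the frame that would be displayed for preview. If the frame just
 * coded was an invisible alt-ref frame there is nothing new to show, so the
 * call fails with -1.
 */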
int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest,
                              vp8_ppflags_t *flags) {
  if (cpi->common.refresh_alt_ref_frame) {
    return -1;
  } else {
    int ret;

#if CONFIG_POSTPROC
    cpi->common.show_frame_mi = cpi->common.mi;
    ret = vp8_post_proc_frame(&cpi->common, dest, flags);
#else
    (void)flags;

    if (cpi->common.frame_to_show) {
      *dest = *cpi->common.frame_to_show;
      dest->y_width = cpi->common.Width;
      dest->y_height = cpi->common.Height;
      dest->uv_height = cpi->common.Height / 2;
      ret = 0;
    } else {
      ret = -1;
    }

#endif
    vpx_clear_system_state();
    return ret;
  }
}

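/* Region-of-interest control via the segment map: `map` holds one segment id
 * (0..MAX_MB_SEGMENTS - 1) per macroblock in raster order, and each segment
 * carries a quantizer delta, a loop-filter delta and an encode-breakout
 * threshold.
 *
 * Illustrative sketch only (the variable names and values are hypothetical):
 * boost the quality of the top-left quarter of the frame via segment 1.
 *
 *   unsigned char roi[mb_rows * mb_cols];
 *   int delta_q[4] = { 0, -10, 0, 0 };          // segment 1 gets a lower Q
 *   int delta_lf[4] = { 0, 0, 0, 0 };
 *   unsigned int threshold[4] = { 0, 0, 0, 0 };
 *   int r, c;
 *   memset(roi, 0, sizeof(roi));
 *   for (r = 0; r < mb_rows / 2; ++r)
 *     for (c = 0; c < mb_cols / 2; ++c) roi[r * mb_cols + c] = 1;
 *   vp8_set_roimap(cpi, roi, mb_rows, mb_cols, delta_q, delta_lf, threshold);
 */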
int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
                   unsigned int cols, int delta_q[4], int delta_lf[4],
                   unsigned int threshold[4]) {
  signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
  int internal_delta_q[MAX_MB_SEGMENTS];
  const int range = 63;
  int i;

  // Check that the number of rows and columns matches
  if (cpi->common.mb_rows != (int)rows || cpi->common.mb_cols != (int)cols) {
    return -1;
  }

  for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
    // Note abs() alone can't be used as the behavior of abs(INT_MIN) is
    // undefined.
    if (delta_q[i] > range || delta_q[i] < -range || delta_lf[i] > range ||
        delta_lf[i] < -range) {
      return -1;
    }
  }

  // Also disable segmentation if no deltas are specified.
  if (!map || (delta_q[0] == 0 && delta_q[1] == 0 && delta_q[2] == 0 &&
               delta_q[3] == 0 && delta_lf[0] == 0 && delta_lf[1] == 0 &&
               delta_lf[2] == 0 && delta_lf[3] == 0 && threshold[0] == 0 &&
               threshold[1] == 0 && threshold[2] == 0 && threshold[3] == 0)) {
    disable_segmentation(cpi);
    return 0;
  }

  // Translate the external delta q values to internal values.
  for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
    internal_delta_q[i] =
        (delta_q[i] >= 0) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
  }

  /* Set the segmentation map */
  set_segmentation_map(cpi, map);

  /* Activate segmentation. */
  enable_segmentation(cpi);

  /* Set up the quant segment data */
  feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
  feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
  feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
  feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];

  /* Set up the loop filter segment data */
  feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
  feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
  feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
  feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];

  cpi->segment_encode_breakout[0] = threshold[0];
  cpi->segment_encode_breakout[1] = threshold[1];
  cpi->segment_encode_breakout[2] = threshold[2];
  cpi->segment_encode_breakout[3] = threshold[3];

  /* Initialise the feature data structure */
  set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);

  if (threshold[0] != 0 || threshold[1] != 0 || threshold[2] != 0 ||
      threshold[3] != 0)
    cpi->use_roi_static_threshold = 1;
  cpi->cyclic_refresh_mode_enabled = 0;

  return 0;
}

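/* The active map carries one byte per macroblock (rows * cols entries in
 * raster order); passing a NULL map disables it. The map is consumed when raw
 * frames are pushed into the lookahead (see vp8_lookahead_push above).
 */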
int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
                       unsigned int cols) {
  if ((int)rows == cpi->common.mb_rows && (int)cols == cpi->common.mb_cols) {
    if (map) {
      memcpy(cpi->active_map, map, rows * cols);
      cpi->active_map_enabled = 1;
    } else {
      cpi->active_map_enabled = 0;
    }

    return 0;
  } else {
    return -1;
  }
}

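/* Select the internal down-scaling applied before encoding. The accepted
 * VPX_SCALING_MODE values run from VP8E_NORMAL (no scaling) up to
 * VP8E_ONETWO (scale to one half), set independently for each axis.
 */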
int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING_MODE horiz_mode,
                          VPX_SCALING_MODE vert_mode) {
  if (horiz_mode <= VP8E_ONETWO) {
    cpi->common.horiz_scale = horiz_mode;
  } else {
    return -1;
  }

  if (vert_mode <= VP8E_ONETWO) {
    cpi->common.vert_scale = vert_mode;
  } else {
    return -1;
  }

  return 0;
}

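/* Y-plane sum of squared error between two frames, walked in 16x16 blocks so
 * the optimized vpx_mse16x16() kernels can be reused.
 */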
int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
  int i, j;
  int Total = 0;

  unsigned char *src = source->y_buffer;
  unsigned char *dst = dest->y_buffer;

  /* Loop through the Y plane of the raw and reconstructed data, summing the
   * squared differences.
   */
  for (i = 0; i < source->y_height; i += 16) {
    for (j = 0; j < source->y_width; j += 16) {
      unsigned int sse;
      Total += vpx_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
                            &sse);
    }

    src += 16 * source->y_stride;
    dst += 16 * dest->y_stride;
  }

  return Total;
}

int vp8_get_quantizer(VP8_COMP *cpi) { return cpi->common.base_qindex; }