1 /*
2 * Copyright (c) 2019, Alliance for Open Media. All rights reserved
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 */
11
12 /*!\defgroup gf_group_algo Golden Frame Group
13 * \ingroup high_level_algo
14 * Algorithms regarding determining the length of GF groups and defining GF
15 * group structures.
16 * @{
17 */
18 /*! @} - end defgroup gf_group_algo */
19
20 #include <assert.h>
21 #include <stdint.h>
22
23 #include "aom_mem/aom_mem.h"
24 #include "config/aom_config.h"
25 #include "config/aom_scale_rtcd.h"
26
27 #include "aom/aom_codec.h"
28 #include "aom/aom_encoder.h"
29
30 #include "av1/common/av1_common_int.h"
31
32 #include "av1/encoder/encoder.h"
33 #include "av1/encoder/firstpass.h"
34 #include "av1/encoder/gop_structure.h"
35 #include "av1/encoder/pass2_strategy.h"
36 #include "av1/encoder/ratectrl.h"
37 #include "av1/encoder/rc_utils.h"
38 #include "av1/encoder/temporal_filter.h"
39 #include "av1/encoder/thirdpass.h"
40 #include "av1/encoder/tpl_model.h"
41 #include "av1/encoder/encode_strategy.h"
42
43 #define DEFAULT_KF_BOOST 2300
44 #define DEFAULT_GF_BOOST 2000
45 #define GROUP_ADAPTIVE_MAXQ 1
46
47 static void init_gf_stats(GF_GROUP_STATS *gf_stats);
48 static int define_gf_group_pass3(AV1_COMP *cpi, EncodeFrameParams *frame_params,
49 int is_final_pass);
50
51 // Calculate an active area of the image that discounts formatting
52 // bars and partially discounts other 0 energy areas.
53 #define MIN_ACTIVE_AREA 0.5
54 #define MAX_ACTIVE_AREA 1.0
calculate_active_area(const FRAME_INFO * frame_info,const FIRSTPASS_STATS * this_frame)55 static double calculate_active_area(const FRAME_INFO *frame_info,
56 const FIRSTPASS_STATS *this_frame) {
57 const double active_pct =
58 1.0 -
59 ((this_frame->intra_skip_pct / 2) +
60 ((this_frame->inactive_zone_rows * 2) / (double)frame_info->mb_rows));
61 return fclamp(active_pct, MIN_ACTIVE_AREA, MAX_ACTIVE_AREA);
62 }
63
// Calculate a modified Error used in distributing bits between easier and
// harder frames.
#define ACT_AREA_CORRECTION 0.5
// Returns a modified per-frame error used for bit distribution: frames whose
// coded error is above the clip average receive proportionally more weight
// (and easier frames less), with the strength of the redistribution set by
// vbrbias (100 => linear). Returns 0 when no clip-wide totals are available.
static double calculate_modified_err_new(const FRAME_INFO *frame_info,
                                         const FIRSTPASS_STATS *total_stats,
                                         const FIRSTPASS_STATS *this_stats,
                                         int vbrbias, double modified_error_min,
                                         double modified_error_max) {
  if (total_stats == NULL) {
    return 0;
  }
  // Clip-wide average weight and weighted average coded error per frame.
  const double av_weight = total_stats->weight / total_stats->count;
  const double av_err =
      (total_stats->coded_error * av_weight) / total_stats->count;
  // Power-law redistribution around the clip average error.
  double modified_error =
      av_err * pow(this_stats->coded_error * this_stats->weight /
                       DOUBLE_DIVIDE_CHECK(av_err),
                   vbrbias / 100.0);

  // Correction for active area. Frames with a reduced active area
  // (eg due to formatting bars) have a higher error per mb for the
  // remaining active MBs. The correction here assumes that coding
  // 0.5N blocks of complexity 2X is a little easier than coding N
  // blocks of complexity X.
  modified_error *=
      pow(calculate_active_area(frame_info, this_stats), ACT_AREA_CORRECTION);

  return fclamp(modified_error, modified_error_min, modified_error_max);
}
93
calculate_modified_err(const FRAME_INFO * frame_info,const TWO_PASS * twopass,const AV1EncoderConfig * oxcf,const FIRSTPASS_STATS * this_frame)94 static double calculate_modified_err(const FRAME_INFO *frame_info,
95 const TWO_PASS *twopass,
96 const AV1EncoderConfig *oxcf,
97 const FIRSTPASS_STATS *this_frame) {
98 const FIRSTPASS_STATS *total_stats = twopass->stats_buf_ctx->total_stats;
99 return calculate_modified_err_new(
100 frame_info, total_stats, this_frame, oxcf->rc_cfg.vbrbias,
101 twopass->modified_error_min, twopass->modified_error_max);
102 }
103
// Resets the first pass file to the given position using a relative seek from
// the current position.
static void reset_fpf_position(TWO_PASS_FRAME *p_frame,
                               const FIRSTPASS_STATS *position) {
  // 'position' must point into the same stats buffer; no bounds check here.
  p_frame->stats_in = position;
}
110
// Reads the next frame's first-pass stats into *fps and advances the read
// position. Returns EOF when the stats buffer is exhausted, 1 otherwise.
static int input_stats(TWO_PASS *p, TWO_PASS_FRAME *p_frame,
                       FIRSTPASS_STATS *fps) {
  if (p_frame->stats_in >= p->stats_buf_ctx->stats_in_end) return EOF;

  // Copy out the current entry and step past it in one go.
  *fps = *p_frame->stats_in++;
  return 1;
}
119
// Reads the next frame's first-pass stats in look-ahead (LAP) mode, then
// shifts the remaining stats down by one slot so a new lookahead frame can
// be accommodated. Returns EOF when the buffer is exhausted, 1 otherwise.
static int input_stats_lap(TWO_PASS *p, TWO_PASS_FRAME *p_frame,
                           FIRSTPASS_STATS *fps) {
  if (p_frame->stats_in >= p->stats_buf_ctx->stats_in_end) return EOF;

  *fps = *p_frame->stats_in;
  /* Move old stats[0] out to accommodate for next frame stats */
  // NOTE(review): assumes frame_stats_arr[0..] alias one contiguous region so
  // a single memmove shifts all remaining entries — confirm against the
  // stats buffer allocation code.
  memmove(p->frame_stats_arr[0], p->frame_stats_arr[1],
          (p->stats_buf_ctx->stats_in_end - p_frame->stats_in - 1) *
              sizeof(FIRSTPASS_STATS));
  p->stats_buf_ctx->stats_in_end--;
  return 1;
}
132
133 // Read frame stats at an offset from the current position.
read_frame_stats(const TWO_PASS * p,const TWO_PASS_FRAME * p_frame,int offset)134 static const FIRSTPASS_STATS *read_frame_stats(const TWO_PASS *p,
135 const TWO_PASS_FRAME *p_frame,
136 int offset) {
137 if ((offset >= 0 &&
138 p_frame->stats_in + offset >= p->stats_buf_ctx->stats_in_end) ||
139 (offset < 0 &&
140 p_frame->stats_in + offset < p->stats_buf_ctx->stats_in_start)) {
141 return NULL;
142 }
143
144 return &p_frame->stats_in[offset];
145 }
146
147 // This function returns the maximum target rate per frame.
frame_max_bits(const RATE_CONTROL * rc,const AV1EncoderConfig * oxcf)148 static int frame_max_bits(const RATE_CONTROL *rc,
149 const AV1EncoderConfig *oxcf) {
150 int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth *
151 (int64_t)oxcf->rc_cfg.vbrmax_section) /
152 100;
153 if (max_bits < 0)
154 max_bits = 0;
155 else if (max_bits > rc->max_frame_bandwidth)
156 max_bits = rc->max_frame_bandwidth;
157
158 return (int)max_bits;
159 }
160
// Based on history adjust expectations of bits per macroblock.
static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;

  // Based on recent history adjust expectations of bits per macroblock.
  double rate_err_factor = 1.0;
  // A tighter rate tolerance permits a wider correction range (min +/-0.2).
  const double adj_limit = AOMMAX(0.2, (double)(100 - rate_err_tol) / 200.0);
  const double min_fac = 1.0 - adj_limit;
  const double max_fac = 1.0 + adj_limit;

  // Third pass: seed bpm_factor from the previous pass's measured actual vs
  // allocated bits, but only move it further away from 1.0, never back.
  if (cpi->third_pass_ctx && cpi->third_pass_ctx->frame_info_count > 0) {
    int64_t actual_bits = 0;
    int64_t target_bits = 0;
    double factor = 0.0;
    int count = 0;
    for (int i = 0; i < cpi->third_pass_ctx->frame_info_count; i++) {
      actual_bits += cpi->third_pass_ctx->frame_info[i].actual_bits;
      target_bits += cpi->third_pass_ctx->frame_info[i].bits_allocated;
      factor += cpi->third_pass_ctx->frame_info[i].bpm_factor;
      count++;
    }

    if (count == 0) {
      factor = 1.0;
    } else {
      factor /= (double)count;
    }

    // Scale the averaged factor by the overall actual/target bit ratio.
    factor *= (double)actual_bits / DOUBLE_DIVIDE_CHECK((double)target_bits);

    if ((twopass->bpm_factor <= 1 && factor < twopass->bpm_factor) ||
        (twopass->bpm_factor >= 1 && factor > twopass->bpm_factor)) {
      twopass->bpm_factor = factor;
      twopass->bpm_factor =
          AOMMAX(min_fac, AOMMIN(max_fac, twopass->bpm_factor));
    }
  }

  int err_estimate = p_rc->rate_error_estimate;
  int64_t total_actual_bits = p_rc->total_actual_bits;
  double rolling_arf_group_actual_bits =
      (double)twopass->rolling_arf_group_actual_bits;
  double rolling_arf_group_target_bits =
      (double)twopass->rolling_arf_group_target_bits;

#if CONFIG_FPMT_TEST
  // When simulating frame-parallel encode, read the rate accounting from the
  // temp copies that exclude in-flight parallel frames.
  const int is_parallel_frame =
      cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0 ? 1 : 0;
  const int simulate_parallel_frame =
      cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE
          ? is_parallel_frame
          : 0;
  total_actual_bits = simulate_parallel_frame ? p_rc->temp_total_actual_bits
                                              : p_rc->total_actual_bits;
  rolling_arf_group_target_bits =
      (double)(simulate_parallel_frame
                   ? p_rc->temp_rolling_arf_group_target_bits
                   : twopass->rolling_arf_group_target_bits);
  rolling_arf_group_actual_bits =
      (double)(simulate_parallel_frame
                   ? p_rc->temp_rolling_arf_group_actual_bits
                   : twopass->rolling_arf_group_actual_bits);
  err_estimate = simulate_parallel_frame ? p_rc->temp_rate_error_estimate
                                         : p_rc->rate_error_estimate;
#endif

  // Derive a rate error factor (>1 on overshoot, <1 on undershoot) from the
  // most recent ARF group's actual vs target bits.
  if ((p_rc->bits_off_target && total_actual_bits > 0) &&
      (rolling_arf_group_target_bits >= 1.0)) {
    if (rolling_arf_group_actual_bits > rolling_arf_group_target_bits) {
      double error_fraction =
          (rolling_arf_group_actual_bits - rolling_arf_group_target_bits) /
          rolling_arf_group_target_bits;
      // Cap the overshoot correction at 2x.
      error_fraction = (error_fraction > 1.0) ? 1.0 : error_fraction;
      rate_err_factor = 1.0 + error_fraction;
    } else {
      double error_fraction =
          (rolling_arf_group_target_bits - rolling_arf_group_actual_bits) /
          rolling_arf_group_target_bits;
      rate_err_factor = 1.0 - error_fraction;
    }

    rate_err_factor = AOMMAX(min_fac, AOMMIN(max_fac, rate_err_factor));
  }

  // Is the rate control trending in the right direction. Only make
  // an adjustment if things are getting worse.
  if ((rate_err_factor < 1.0 && err_estimate >= 0) ||
      (rate_err_factor > 1.0 && err_estimate <= 0)) {
    twopass->bpm_factor *= rate_err_factor;
    twopass->bpm_factor = AOMMAX(min_fac, AOMMIN(max_fac, twopass->bpm_factor));
  }
}
254
255 static const double q_div_term[(QINDEX_RANGE >> 5) + 1] = { 32.0, 40.0, 46.0,
256 52.0, 56.0, 60.0,
257 64.0, 68.0, 72.0 };
258 #define EPMB_SCALER 1250000
calc_correction_factor(double err_per_mb,int q)259 static double calc_correction_factor(double err_per_mb, int q) {
260 double power_term = 0.90;
261 const int index = q >> 5;
262 const double divisor =
263 q_div_term[index] +
264 (((q_div_term[index + 1] - q_div_term[index]) * (q % 32)) / 32.0);
265 double error_term = EPMB_SCALER * pow(err_per_mb, power_term);
266 return error_term / divisor;
267 }
268
269 // Similar to find_qindex_by_rate() function in ratectrl.c, but includes
270 // calculation of a correction_factor.
find_qindex_by_rate_with_correction(int desired_bits_per_mb,aom_bit_depth_t bit_depth,double error_per_mb,double group_weight_factor,int best_qindex,int worst_qindex)271 static int find_qindex_by_rate_with_correction(
272 int desired_bits_per_mb, aom_bit_depth_t bit_depth, double error_per_mb,
273 double group_weight_factor, int best_qindex, int worst_qindex) {
274 assert(best_qindex <= worst_qindex);
275 int low = best_qindex;
276 int high = worst_qindex;
277
278 while (low < high) {
279 const int mid = (low + high) >> 1;
280 const double q_factor = calc_correction_factor(error_per_mb, mid);
281 const double q = av1_convert_qindex_to_q(mid, bit_depth);
282 const int mid_bits_per_mb = (int)((q_factor * group_weight_factor) / q);
283
284 if (mid_bits_per_mb > desired_bits_per_mb) {
285 low = mid + 1;
286 } else {
287 high = mid;
288 }
289 }
290 return low;
291 }
292
293 /*!\brief Choose a target maximum Q for a group of frames
294 *
295 * \ingroup rate_control
296 *
297 * This function is used to estimate a suitable maximum Q for a
 * group of frames. Initially it is called to get a crude estimate
299 * for the whole clip. It is then called for each ARF/GF group to get
300 * a revised estimate for that group.
301 *
302 * \param[in] cpi Top-level encoder structure
303 * \param[in] av_frame_err The average per frame coded error score
304 * for frames making up this section/group.
305 * \param[in] inactive_zone Used to mask off /ignore part of the
306 * frame. The most common use case is where
307 * a wide format video (e.g. 16:9) is
308 * letter-boxed into a more square format.
309 * Here we want to ignore the bands at the
310 * top and bottom.
311 * \param[in] av_target_bandwidth The target bits per frame
312 *
313 * \return The maximum Q for frames in the group.
314 */
static int get_twopass_worst_quality(AV1_COMP *cpi, const double av_frame_err,
                                     double inactive_zone,
                                     int av_target_bandwidth) {
  const RATE_CONTROL *const rc = &cpi->rc;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const RateControlCfg *const rc_cfg = &oxcf->rc_cfg;
  inactive_zone = fclamp(inactive_zone, 0.0, 0.9999);

  // With no positive bit budget, only the highest allowed Q makes sense.
  if (av_target_bandwidth <= 0) return rc->worst_quality;

  const int num_mbs = (oxcf->resize_cfg.resize_mode != RESIZE_NONE)
                          ? cpi->initial_mbs
                          : cpi->common.mi_params.MBs;
  // Macroblocks actually carrying content (letterbox bars etc. removed).
  const int active_mbs = AOMMAX(1, num_mbs - (int)(num_mbs * inactive_zone));
  const double av_err_per_mb = av_frame_err / (1.0 - inactive_zone);
  const int target_norm_bits_per_mb =
      (int)((uint64_t)av_target_bandwidth << BPER_MB_NORMBITS) / active_mbs;
  const int rate_err_tol =
      AOMMIN(rc_cfg->under_shoot_pct, rc_cfg->over_shoot_pct);

  // Update bpm correction factor based on previous GOP rate error.
  twopass_update_bpm_factor(cpi, rate_err_tol);

  // Try and pick a max Q that will be high enough to encode the
  // content at the given rate.
  int q = find_qindex_by_rate_with_correction(
      target_norm_bits_per_mb, cpi->common.seq_params->bit_depth,
      av_err_per_mb, cpi->ppi->twopass.bpm_factor, rc->best_quality,
      rc->worst_quality);

  // Restriction on active max q for constrained quality mode.
  if (rc_cfg->mode == AOM_CQ) q = AOMMAX(q, rc_cfg->cq_level);
  return q;
}
350
351 #define INTRA_PART 0.005
352 #define DEFAULT_DECAY_LIMIT 0.75
353 #define LOW_SR_DIFF_TRHESH 0.01
354 #define NCOUNT_FRAME_II_THRESH 5.0
355 #define LOW_CODED_ERR_PER_MB 0.01
356
357 /* This function considers how the quality of prediction may be deteriorating
 * with distance. It compares the coded error for the last frame and the
359 * second reference frame (usually two frames old) and also applies a factor
360 * based on the extent of INTRA coding.
361 *
362 * The decay factor is then used to reduce the contribution of frames further
363 * from the alt-ref or golden frame, to the bitframe boost calculation for that
364 * alt-ref or golden frame.
365 */
static double get_sr_decay_rate(const FIRSTPASS_STATS *frame) {
  // Gap between the second-reference coded error and the last-frame coded
  // error; a large gap means prediction quality falls off quickly with
  // reference distance.
  double sr_diff = (frame->sr_coded_error - frame->coded_error);
  double sr_decay = 1.0;
  double modified_pct_inter;
  double modified_pcnt_intra;

  modified_pct_inter = frame->pcnt_inter;
  // When the frame has meaningful coded error but a low intra/coded error
  // ratio, discount "neutral" blocks from the inter percentage.
  if ((frame->coded_error > LOW_CODED_ERR_PER_MB) &&
      ((frame->intra_error / DOUBLE_DIVIDE_CHECK(frame->coded_error)) <
       (double)NCOUNT_FRAME_II_THRESH)) {
    modified_pct_inter = frame->pcnt_inter - frame->pcnt_neutral;
  }
  modified_pcnt_intra = 100 * (1.0 - modified_pct_inter);

  if ((sr_diff > LOW_SR_DIFF_TRHESH)) {
    // NOTE(review): this division by intra_error is unguarded (no
    // DOUBLE_DIVIDE_CHECK) — presumably first-pass stats keep intra_error
    // strictly positive; confirm in firstpass.c.
    double sr_diff_part = ((sr_diff * 0.25) / frame->intra_error);
    sr_decay = 1.0 - sr_diff_part - (INTRA_PART * modified_pcnt_intra);
  }
  // Never decay below the default floor.
  return AOMMAX(sr_decay, DEFAULT_DECAY_LIMIT);
}
386
387 // This function gives an estimate of how badly we believe the prediction
388 // quality is decaying from frame to frame.
get_zero_motion_factor(const FIRSTPASS_STATS * frame)389 static double get_zero_motion_factor(const FIRSTPASS_STATS *frame) {
390 const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion;
391 double sr_decay = get_sr_decay_rate(frame);
392 return AOMMIN(sr_decay, zero_motion_pct);
393 }
394
395 #define DEFAULT_ZM_FACTOR 0.5
get_prediction_decay_rate(const FIRSTPASS_STATS * frame_stats)396 static double get_prediction_decay_rate(const FIRSTPASS_STATS *frame_stats) {
397 const double sr_decay_rate = get_sr_decay_rate(frame_stats);
398 double zero_motion_factor =
399 DEFAULT_ZM_FACTOR * (frame_stats->pcnt_inter - frame_stats->pcnt_motion);
400
401 // Clamp value to range 0.0 to 1.0
402 // This should happen anyway if input values are sensibly clamped but checked
403 // here just in case.
404 if (zero_motion_factor > 1.0)
405 zero_motion_factor = 1.0;
406 else if (zero_motion_factor < 0.0)
407 zero_motion_factor = 0.0;
408
409 return AOMMAX(zero_motion_factor,
410 (sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor)));
411 }
412
413 // Function to test for a condition where a complex transition is followed
414 // by a static section. For example in slide shows where there is a fade
415 // between slides. This is to help with more optimal kf and gf positioning.
detect_transition_to_still(const FIRSTPASS_INFO * firstpass_info,int next_stats_index,const int min_gf_interval,const int frame_interval,const int still_interval,const double loop_decay_rate,const double last_decay_rate)416 static int detect_transition_to_still(const FIRSTPASS_INFO *firstpass_info,
417 int next_stats_index,
418 const int min_gf_interval,
419 const int frame_interval,
420 const int still_interval,
421 const double loop_decay_rate,
422 const double last_decay_rate) {
423 // Break clause to detect very still sections after motion
424 // For example a static image after a fade or other transition
425 // instead of a clean scene cut.
426 if (frame_interval > min_gf_interval && loop_decay_rate >= 0.999 &&
427 last_decay_rate < 0.9) {
428 int stats_left =
429 av1_firstpass_info_future_count(firstpass_info, next_stats_index);
430 if (stats_left >= still_interval) {
431 int j;
432 // Look ahead a few frames to see if static condition persists...
433 for (j = 0; j < still_interval; ++j) {
434 const FIRSTPASS_STATS *stats =
435 av1_firstpass_info_peek(firstpass_info, next_stats_index + j);
436 if (stats->pcnt_inter - stats->pcnt_motion < 0.999) break;
437 }
438 // Only if it does do we signal a transition to still.
439 return j == still_interval;
440 }
441 }
442 return 0;
443 }
444
445 // This function detects a flash through the high relative pcnt_second_ref
446 // score in the frame following a flash frame. The offset passed in should
447 // reflect this.
detect_flash(const TWO_PASS * twopass,const TWO_PASS_FRAME * twopass_frame,const int offset)448 static int detect_flash(const TWO_PASS *twopass,
449 const TWO_PASS_FRAME *twopass_frame, const int offset) {
450 const FIRSTPASS_STATS *const next_frame =
451 read_frame_stats(twopass, twopass_frame, offset);
452
453 // What we are looking for here is a situation where there is a
454 // brief break in prediction (such as a flash) but subsequent frames
455 // are reasonably well predicted by an earlier (pre flash) frame.
456 // The recovery after a flash is indicated by a high pcnt_second_ref
457 // compared to pcnt_inter.
458 return next_frame != NULL &&
459 next_frame->pcnt_second_ref > next_frame->pcnt_inter &&
460 next_frame->pcnt_second_ref >= 0.5;
461 }
462
463 // Update the motion related elements to the GF arf boost calculation.
accumulate_frame_motion_stats(const FIRSTPASS_STATS * stats,GF_GROUP_STATS * gf_stats,double f_w,double f_h)464 static void accumulate_frame_motion_stats(const FIRSTPASS_STATS *stats,
465 GF_GROUP_STATS *gf_stats, double f_w,
466 double f_h) {
467 const double pct = stats->pcnt_motion;
468
469 // Accumulate Motion In/Out of frame stats.
470 gf_stats->this_frame_mv_in_out = stats->mv_in_out_count * pct;
471 gf_stats->mv_in_out_accumulator += gf_stats->this_frame_mv_in_out;
472 gf_stats->abs_mv_in_out_accumulator += fabs(gf_stats->this_frame_mv_in_out);
473
474 // Accumulate a measure of how uniform (or conversely how random) the motion
475 // field is (a ratio of abs(mv) / mv).
476 if (pct > 0.05) {
477 const double mvr_ratio =
478 fabs(stats->mvr_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVr));
479 const double mvc_ratio =
480 fabs(stats->mvc_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVc));
481
482 gf_stats->mv_ratio_accumulator +=
483 pct *
484 (mvr_ratio < stats->mvr_abs * f_h ? mvr_ratio : stats->mvr_abs * f_h);
485 gf_stats->mv_ratio_accumulator +=
486 pct *
487 (mvc_ratio < stats->mvc_abs * f_w ? mvc_ratio : stats->mvc_abs * f_w);
488 }
489 }
490
// Accumulate this frame's contribution to the GF group totals: modified
// error, raw coded error, intra-skip percentage and inactive zone rows.
static void accumulate_this_frame_stats(const FIRSTPASS_STATS *stats,
                                        const double mod_frame_err,
                                        GF_GROUP_STATS *gf_stats) {
  gf_stats->gf_group_err += mod_frame_err;
#if GROUP_ADAPTIVE_MAXQ
  // Raw error is only needed for the group-adaptive max Q path.
  gf_stats->gf_group_raw_error += stats->coded_error;
#endif
  gf_stats->gf_group_skip_pct += stats->intra_skip_pct;
  gf_stats->gf_group_inactive_zone_rows += stats->inactive_zone_rows;
}
501
// Accumulate stats from the next frame in the GF group: motion stats,
// running sums for later averaging, and the prediction-quality decay state.
static void accumulate_next_frame_stats(const FIRSTPASS_STATS *stats,
                                        const int flash_detected,
                                        const int frames_since_key,
                                        const int cur_idx,
                                        GF_GROUP_STATS *gf_stats, int f_w,
                                        int f_h) {
  accumulate_frame_motion_stats(stats, gf_stats, f_w, f_h);
  // sum up the metric values of current gf group
  gf_stats->avg_sr_coded_error += stats->sr_coded_error;
  gf_stats->avg_pcnt_second_ref += stats->pcnt_second_ref;
  gf_stats->avg_new_mv_count += stats->new_mv_count;
  gf_stats->avg_wavelet_energy += stats->frame_avg_wavelet_energy;
  // Only frames with a non-negligible raw error stdev contribute to the
  // stdev average.
  if (fabs(stats->raw_error_stdev) > 0.000001) {
    gf_stats->non_zero_stdev_count++;
    gf_stats->avg_raw_err_stdev += stats->raw_error_stdev;
  }

  // Accumulate the effect of prediction quality decay; flash frames are
  // skipped as their stats are unrepresentative.
  if (!flash_detected) {
    gf_stats->last_loop_decay_rate = gf_stats->loop_decay_rate;
    gf_stats->loop_decay_rate = get_prediction_decay_rate(stats);

    gf_stats->decay_accumulator =
        gf_stats->decay_accumulator * gf_stats->loop_decay_rate;

    // Monitor for static sections.
    if ((frames_since_key + cur_idx - 1) > 1) {
      gf_stats->zero_motion_accumulator = AOMMIN(
          gf_stats->zero_motion_accumulator, get_zero_motion_factor(stats));
    }
  }
}
534
// Convert the per-group running sums in gf_stats into per-frame averages.
static void average_gf_stats(const int total_frame, GF_GROUP_STATS *gf_stats) {
  if (total_frame != 0) {
    gf_stats->avg_sr_coded_error /= total_frame;
    gf_stats->avg_pcnt_second_ref /= total_frame;
    gf_stats->avg_new_mv_count /= total_frame;
    gf_stats->avg_wavelet_energy /= total_frame;
  }

  // Only the frames that had a non-zero stdev contributed to this sum.
  if (gf_stats->non_zero_stdev_count != 0) {
    gf_stats->avg_raw_err_stdev /= gf_stats->non_zero_stdev_count;
  }
}
546
547 #define BOOST_FACTOR 12.5
baseline_err_per_mb(const FRAME_INFO * frame_info)548 static double baseline_err_per_mb(const FRAME_INFO *frame_info) {
549 unsigned int screen_area = frame_info->frame_height * frame_info->frame_width;
550
551 // Use a different error per mb factor for calculating boost for
552 // different formats.
553 if (screen_area <= 640 * 360) {
554 return 500.0;
555 } else {
556 return 1000.0;
557 }
558 }
559
calc_frame_boost(const PRIMARY_RATE_CONTROL * p_rc,const FRAME_INFO * frame_info,const FIRSTPASS_STATS * this_frame,double this_frame_mv_in_out,double max_boost)560 static double calc_frame_boost(const PRIMARY_RATE_CONTROL *p_rc,
561 const FRAME_INFO *frame_info,
562 const FIRSTPASS_STATS *this_frame,
563 double this_frame_mv_in_out, double max_boost) {
564 double frame_boost;
565 const double lq = av1_convert_qindex_to_q(p_rc->avg_frame_qindex[INTER_FRAME],
566 frame_info->bit_depth);
567 const double boost_q_correction = AOMMIN((0.5 + (lq * 0.015)), 1.5);
568 const double active_area = calculate_active_area(frame_info, this_frame);
569
570 // Underlying boost factor is based on inter error ratio.
571 frame_boost = AOMMAX(baseline_err_per_mb(frame_info) * active_area,
572 this_frame->intra_error * active_area) /
573 DOUBLE_DIVIDE_CHECK(this_frame->coded_error);
574 frame_boost = frame_boost * BOOST_FACTOR * boost_q_correction;
575
576 // Increase boost for frames where new data coming into frame (e.g. zoom out).
577 // Slightly reduce boost if there is a net balance of motion out of the frame
578 // (zoom in). The range for this_frame_mv_in_out is -1.0 to +1.0.
579 if (this_frame_mv_in_out > 0.0)
580 frame_boost += frame_boost * (this_frame_mv_in_out * 2.0);
581 // In the extreme case the boost is halved.
582 else
583 frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);
584
585 return AOMMIN(frame_boost, max_boost * boost_q_correction);
586 }
587
// Computes one frame's contribution to the key frame boost. Similar to
// calc_frame_boost(), but additionally tracks via *sr_accumulator how the
// coded error grows with distance from the key frame, which dampens the
// boost of far-away frames.
static double calc_kf_frame_boost(const PRIMARY_RATE_CONTROL *p_rc,
                                  const FRAME_INFO *frame_info,
                                  const FIRSTPASS_STATS *this_frame,
                                  double *sr_accumulator, double max_boost) {
  double frame_boost;
  // Recent average inter-frame Q, used to scale the boost.
  const double lq = av1_convert_qindex_to_q(p_rc->avg_frame_qindex[INTER_FRAME],
                                            frame_info->bit_depth);
  const double boost_q_correction = AOMMIN((0.50 + (lq * 0.015)), 2.00);
  const double active_area = calculate_active_area(frame_info, this_frame);

  // Underlying boost factor is based on inter error ratio.
  frame_boost = AOMMAX(baseline_err_per_mb(frame_info) * active_area,
                       this_frame->intra_error * active_area) /
                DOUBLE_DIVIDE_CHECK(
                    (this_frame->coded_error + *sr_accumulator) * active_area);

  // Update the accumulator for second ref error difference.
  // This is intended to give an indication of how much the coded error is
  // increasing over time.
  *sr_accumulator += (this_frame->sr_coded_error - this_frame->coded_error);
  *sr_accumulator = AOMMAX(0.0, *sr_accumulator);

  // Q correction and scaling
  // The 40.0 value here is an experimentally derived baseline minimum.
  // This value is in line with the minimum per frame boost in the alt_ref
  // boost calculation.
  frame_boost = ((frame_boost + 40.0) * boost_q_correction);

  return AOMMIN(frame_boost, max_boost * boost_q_correction);
}
618
get_projected_gfu_boost(const PRIMARY_RATE_CONTROL * p_rc,int gfu_boost,int frames_to_project,int num_stats_used_for_gfu_boost)619 static int get_projected_gfu_boost(const PRIMARY_RATE_CONTROL *p_rc,
620 int gfu_boost, int frames_to_project,
621 int num_stats_used_for_gfu_boost) {
622 /*
623 * If frames_to_project is equal to num_stats_used_for_gfu_boost,
624 * it means that gfu_boost was calculated over frames_to_project to
625 * begin with(ie; all stats required were available), hence return
626 * the original boost.
627 */
628 if (num_stats_used_for_gfu_boost >= frames_to_project) return gfu_boost;
629
630 double min_boost_factor = sqrt(p_rc->baseline_gf_interval);
631 // Get the current tpl factor (number of frames = frames_to_project).
632 double tpl_factor = av1_get_gfu_boost_projection_factor(
633 min_boost_factor, MAX_GFUBOOST_FACTOR, frames_to_project);
634 // Get the tpl factor when number of frames = num_stats_used_for_prior_boost.
635 double tpl_factor_num_stats = av1_get_gfu_boost_projection_factor(
636 min_boost_factor, MAX_GFUBOOST_FACTOR, num_stats_used_for_gfu_boost);
637 int projected_gfu_boost =
638 (int)rint((tpl_factor * gfu_boost) / tpl_factor_num_stats);
639 return projected_gfu_boost;
640 }
641
642 #define GF_MAX_BOOST 90.0
643 #define GF_MIN_BOOST 50
644 #define MIN_DECAY_FACTOR 0.01
// Computes the boost for an ARF (or GF) placed at 'offset' from the current
// stats position, by summing per-frame boosts — decayed by prediction
// quality — over f_frames forward and b_frames backward. Optionally projects
// the boost when fewer stats than requested were available; reports how many
// stats were used/required via the out parameters (may be NULL when
// project_gfu_boost is 0).
int av1_calc_arf_boost(const TWO_PASS *twopass,
                       const TWO_PASS_FRAME *twopass_frame,
                       const PRIMARY_RATE_CONTROL *p_rc, FRAME_INFO *frame_info,
                       int offset, int f_frames, int b_frames,
                       int *num_fpstats_used, int *num_fpstats_required,
                       int project_gfu_boost) {
  int i;
  GF_GROUP_STATS gf_stats;
  init_gf_stats(&gf_stats);
  double boost_score = (double)NORMAL_BOOST;
  int arf_boost;
  int flash_detected = 0;
  if (num_fpstats_used) *num_fpstats_used = 0;

  // Search forward from the proposed arf/next gf position.
  for (i = 0; i < f_frames; ++i) {
    const FIRSTPASS_STATS *this_frame =
        read_frame_stats(twopass, twopass_frame, i + offset);
    if (this_frame == NULL) break;

    // Update the motion related elements to the boost calculation.
    accumulate_frame_motion_stats(this_frame, &gf_stats,
                                  frame_info->frame_width,
                                  frame_info->frame_height);

    // We want to discount the flash frame itself and the recovery
    // frame that follows as both will have poor scores.
    flash_detected = detect_flash(twopass, twopass_frame, i + offset) ||
                     detect_flash(twopass, twopass_frame, i + offset + 1);

    // Accumulate the effect of prediction quality decay.
    if (!flash_detected) {
      gf_stats.decay_accumulator *= get_prediction_decay_rate(this_frame);
      gf_stats.decay_accumulator = gf_stats.decay_accumulator < MIN_DECAY_FACTOR
                                       ? MIN_DECAY_FACTOR
                                       : gf_stats.decay_accumulator;
    }

    boost_score +=
        gf_stats.decay_accumulator *
        calc_frame_boost(p_rc, frame_info, this_frame,
                         gf_stats.this_frame_mv_in_out, GF_MAX_BOOST);
    if (num_fpstats_used) (*num_fpstats_used)++;
  }

  arf_boost = (int)boost_score;

  // Reset for backward looking loop.
  boost_score = 0.0;
  init_gf_stats(&gf_stats);
  // Search backward towards last gf position.
  for (i = -1; i >= -b_frames; --i) {
    const FIRSTPASS_STATS *this_frame =
        read_frame_stats(twopass, twopass_frame, i + offset);
    if (this_frame == NULL) break;

    // Update the motion related elements to the boost calculation.
    accumulate_frame_motion_stats(this_frame, &gf_stats,
                                  frame_info->frame_width,
                                  frame_info->frame_height);

    // We want to discount the flash frame itself and the recovery
    // frame that follows as both will have poor scores.
    flash_detected = detect_flash(twopass, twopass_frame, i + offset) ||
                     detect_flash(twopass, twopass_frame, i + offset + 1);

    // Cumulative effect of prediction quality decay.
    if (!flash_detected) {
      gf_stats.decay_accumulator *= get_prediction_decay_rate(this_frame);
      gf_stats.decay_accumulator = gf_stats.decay_accumulator < MIN_DECAY_FACTOR
                                       ? MIN_DECAY_FACTOR
                                       : gf_stats.decay_accumulator;
    }

    boost_score +=
        gf_stats.decay_accumulator *
        calc_frame_boost(p_rc, frame_info, this_frame,
                         gf_stats.this_frame_mv_in_out, GF_MAX_BOOST);
    if (num_fpstats_used) (*num_fpstats_used)++;
  }
  arf_boost += (int)boost_score;

  if (project_gfu_boost) {
    assert(num_fpstats_required != NULL);
    assert(num_fpstats_used != NULL);
    *num_fpstats_required = f_frames + b_frames;
    // Scale the boost up when fewer stats than requested were available.
    arf_boost = get_projected_gfu_boost(p_rc, arf_boost, *num_fpstats_required,
                                        *num_fpstats_used);
  }

  // Enforce a minimum boost per covered frame.
  if (arf_boost < ((b_frames + f_frames) * GF_MIN_BOOST))
    arf_boost = ((b_frames + f_frames) * GF_MIN_BOOST);

  return arf_boost;
}
740
741 // Calculate a section intra ratio used in setting max loop filter.
calculate_section_intra_ratio(const FIRSTPASS_STATS * begin,const FIRSTPASS_STATS * end,int section_length)742 static int calculate_section_intra_ratio(const FIRSTPASS_STATS *begin,
743 const FIRSTPASS_STATS *end,
744 int section_length) {
745 const FIRSTPASS_STATS *s = begin;
746 double intra_error = 0.0;
747 double coded_error = 0.0;
748 int i = 0;
749
750 while (s < end && i < section_length) {
751 intra_error += s->intra_error;
752 coded_error += s->coded_error;
753 ++s;
754 ++i;
755 }
756
757 return (int)(intra_error / DOUBLE_DIVIDE_CHECK(coded_error));
758 }
759
760 /*!\brief Calculates the bit target for this GF/ARF group
761 *
762 * \ingroup rate_control
763 *
764 * Calculates the total bits to allocate in this GF/ARF group.
765 *
766 * \param[in] cpi Top-level encoder structure
767 * \param[in] gf_group_err Cumulative coded error score for the
768 * frames making up this group.
769 *
770 * \return The target total number of bits for this GF/ARF group.
771 */
static int64_t calculate_total_gf_group_bits(AV1_COMP *cpi,
                                             double gf_group_err) {
  const RATE_CONTROL *const rc = &cpi->rc;
  const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const TWO_PASS *const twopass = &cpi->ppi->twopass;
  const int max_bits = frame_max_bits(rc, &cpi->oxcf);

  // Give this group a share of the remaining KF-group bits proportional to
  // its share of the remaining KF-group error.
  int64_t group_bits = 0;
  if (twopass->kf_group_bits > 0 && twopass->kf_group_error_left > 0) {
    group_bits = (int64_t)(twopass->kf_group_bits *
                           (gf_group_err / twopass->kf_group_error_left));
  }

  // Clamp odd edge cases: never negative, never more than the KF group total.
  if (group_bits < 0) {
    group_bits = 0;
  } else if (group_bits > twopass->kf_group_bits) {
    group_bits = twopass->kf_group_bits;
  }

  // Clip based on user supplied data rate variability limit.
  const int64_t vbr_limit = (int64_t)max_bits * p_rc->baseline_gf_interval;
  if (group_bits > vbr_limit) group_bits = vbr_limit;

  return group_bits;
}
800
// Calculate the number of bits to assign to boosted frames in a group.
static int calculate_boost_bits(int frame_count, int boost,
                                int64_t total_group_bits) {
  // Return 0 for invalid inputs (could arise e.g. through rounding errors).
  if (!boost || total_group_bits <= 0) return 0;

  // With no normal frames, the whole group budget (capped to int range) goes
  // to the boosted frame.
  if (frame_count <= 0) {
    return (int)((total_group_bits < INT_MAX) ? total_group_bits : INT_MAX);
  }

  // Each normal frame counts as 100 chunks; the boost adds extra chunks on
  // top for the boosted frame(s).
  int allocation_chunks = frame_count * 100 + boost;

  // Rescale boost and chunk count together to prevent overflow in the
  // multiply below.
  if (boost > 1023) {
    const int divisor = boost >> 10;
    boost /= divisor;
    allocation_chunks /= divisor;
  }

  // Extra bits for the boosted frame or frames; never negative.
  const int extra_bits =
      (int)(((int64_t)boost * total_group_bits) / allocation_chunks);
  return (extra_bits > 0) ? extra_bits : 0;
}
824
// Calculate the boost factor based on the number of bits assigned, i.e. the
// inverse of calculate_boost_bits().
static int calculate_boost_factor(int frame_count, int bits,
                                  int64_t total_group_bits) {
  // Guard against a zero or negative denominator: if the assigned bits
  // consume the whole group budget (or more), the inverse mapping is not
  // defined, so fall back to a zero boost factor rather than dividing by
  // zero (which would produce inf and an undefined int conversion).
  const int64_t remaining_bits = total_group_bits - bits;
  if (remaining_bits <= 0) return 0;
  return (int)(100.0 * frame_count * bits / remaining_bits);
}
831
// Reduce the number of bits assigned to keyframe or arf if necessary, to
// prevent bitrate spikes that may break level constraints.
// frame_type: 0: keyframe; 1: arf.
// Returns the (possibly reduced) bits_assigned. As a side effect it also
// updates p_rc->kf_boost (frame_type 0) or p_rc->gfu_boost (frame_type 1)
// to the value consistent with the reduced allocation.
static int adjust_boost_bits_for_target_level(const AV1_COMP *const cpi,
                                              RATE_CONTROL *const rc,
                                              int bits_assigned,
                                              int64_t group_bits,
                                              int frame_type) {
  const AV1_COMMON *const cm = &cpi->common;
  const SequenceHeader *const seq_params = cm->seq_params;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const int temporal_layer_id = cm->temporal_layer_id;
  const int spatial_layer_id = cm->spatial_layer_id;
  // Walk every operating point that contains the current temporal/spatial
  // layer; each applicable level target may further reduce bits_assigned.
  for (int index = 0; index < seq_params->operating_points_cnt_minus_1 + 1;
       ++index) {
    if (!is_in_operating_point(seq_params->operating_point_idc[index],
                               temporal_layer_id, spatial_layer_id)) {
      continue;
    }

    const AV1_LEVEL target_level =
        cpi->ppi->level_params.target_seq_level_idx[index];
    // Values >= SEQ_LEVELS indicate no level target for this operating point.
    if (target_level >= SEQ_LEVELS) continue;

    assert(is_valid_seq_level_idx(target_level));

    // Per-frame bit budget implied by the level's maximum bitrate.
    const double level_bitrate_limit = av1_get_max_bitrate_for_level(
        target_level, seq_params->tier[0], seq_params->profile);
    const int target_bits_per_frame =
        (int)(level_bitrate_limit / cpi->framerate);
    if (frame_type == 0) {
      // Maximum bits for keyframe is 8 times the target_bits_per_frame.
      const int level_enforced_max_kf_bits = target_bits_per_frame * 8;
      if (bits_assigned > level_enforced_max_kf_bits) {
        // Shrink the KF allocation to the cap, derive the boost that maps to
        // the cap, then recompute the allocation from that boost so the two
        // stay consistent.
        const int frames = rc->frames_to_key - 1;
        p_rc->kf_boost = calculate_boost_factor(
            frames, level_enforced_max_kf_bits, group_bits);
        bits_assigned =
            calculate_boost_bits(frames, p_rc->kf_boost, group_bits);
      }
    } else if (frame_type == 1) {
      // Maximum bits for arf is 4 times the target_bits_per_frame.
      const int level_enforced_max_arf_bits = target_bits_per_frame * 4;
      if (bits_assigned > level_enforced_max_arf_bits) {
        // Same scheme as the keyframe branch, applied to the ARF boost.
        p_rc->gfu_boost =
            calculate_boost_factor(p_rc->baseline_gf_interval,
                                   level_enforced_max_arf_bits, group_bits);
        bits_assigned = calculate_boost_bits(p_rc->baseline_gf_interval,
                                             p_rc->gfu_boost, group_bits);
      }
    } else {
      assert(0);
    }
  }

  return bits_assigned;
}
889
890 // Allocate bits to each frame in a GF / ARF group
// Fraction of the remaining ARF bits handed to each pyramid layer (indexed
// by layer depth). Used by allocate_gf_group_bits() below, where the deepest
// layer present takes the full remainder (its fraction is forced to 1.0).
// NOTE(review): file-scope and non-static, so it has external linkage —
// presumably unused elsewhere; consider making it static const. TODO confirm.
double layer_fraction[MAX_ARF_LAYERS + 1] = { 1.0, 0.70, 0.55, 0.60,
                                              0.60, 1.0, 1.0 };
static void allocate_gf_group_bits(GF_GROUP *gf_group,
                                   PRIMARY_RATE_CONTROL *const p_rc,
                                   RATE_CONTROL *const rc,
                                   int64_t gf_group_bits, int gf_arf_bits,
                                   int key_frame, int use_arf) {
  int64_t total_group_bits = gf_group_bits;
  int base_frame_bits;
  const int gf_group_size = gf_group->size;
  int layer_frames[MAX_ARF_LAYERS + 1] = { 0 };

  // For key frames the frame target rate is already set and it
  // is also the golden frame.
  // Skip index 0 in that case (frame_index is 1 for a key-frame group,
  // 0 otherwise).
  int frame_index = !!key_frame;

  // Subtract the extra bits set aside for ARF frames from the Group Total
  if (use_arf) total_group_bits -= gf_arf_bits;

  // Spread what is left evenly over the group's non-boosted frames.
  int num_frames =
      AOMMAX(1, p_rc->baseline_gf_interval - (rc->frames_since_key == 0));
  base_frame_bits = (int)(total_group_bits / num_frames);

  // Check the number of frames in each layer in case we have a
  // non standard group length.
  int max_arf_layer = gf_group->max_layer_depth - 1;
  for (int idx = frame_index; idx < gf_group_size; ++idx) {
    if ((gf_group->update_type[idx] == ARF_UPDATE) ||
        (gf_group->update_type[idx] == INTNL_ARF_UPDATE)) {
      layer_frames[gf_group->layer_depth[idx]]++;
    }
  }

  // Allocate extra bits to each ARF layer
  int i;
  int layer_extra_bits[MAX_ARF_LAYERS + 1] = { 0 };
  assert(max_arf_layer <= MAX_ARF_LAYERS);
  for (i = 1; i <= max_arf_layer; ++i) {
    // Shallower layers take the fraction given by layer_fraction, split
    // among that layer's frames; the deepest layer takes everything left
    // (fraction forced to 1.0).
    double fraction = (i == max_arf_layer) ? 1.0 : layer_fraction[i];
    layer_extra_bits[i] =
        (int)((gf_arf_bits * fraction) / AOMMAX(1, layer_frames[i]));
    gf_arf_bits -= (int)(gf_arf_bits * fraction);
  }

  // Now combine ARF layer and baseline bits to give total bits for each frame.
  int arf_extra_bits;
  for (int idx = frame_index; idx < gf_group_size; ++idx) {
    switch (gf_group->update_type[idx]) {
      case ARF_UPDATE:
      case INTNL_ARF_UPDATE:
        arf_extra_bits = layer_extra_bits[gf_group->layer_depth[idx]];
        gf_group->bit_allocation[idx] = base_frame_bits + arf_extra_bits;
        break;
      case INTNL_OVERLAY_UPDATE:
      case OVERLAY_UPDATE: gf_group->bit_allocation[idx] = 0; break;
      default: gf_group->bit_allocation[idx] = base_frame_bits; break;
    }
  }

  // Set the frame following the current GOP to 0 bit allocation. For ARF
  // groups, this next frame will be overlay frame, which is the first frame
  // in the next GOP. For GF group, next GOP will overwrite the rate allocation.
  // Setting this frame to use 0 bit (of out the current GOP budget) will
  // simplify logics in reference frame management.
  if (gf_group_size < MAX_STATIC_GF_GROUP_LENGTH)
    gf_group->bit_allocation[gf_group_size] = 0;
}
959
960 // Returns true if KF group and GF group both are almost completely static.
is_almost_static(double gf_zero_motion,int kf_zero_motion,int is_lap_enabled)961 static INLINE int is_almost_static(double gf_zero_motion, int kf_zero_motion,
962 int is_lap_enabled) {
963 if (is_lap_enabled) {
964 /*
965 * when LAP enabled kf_zero_motion is not reliable, so use strict
966 * constraint on gf_zero_motion.
967 */
968 return (gf_zero_motion >= 0.999);
969 } else {
970 return (gf_zero_motion >= 0.995) &&
971 (kf_zero_motion >= STATIC_KF_GROUP_THRESH);
972 }
973 }
974
#define ARF_ABS_ZOOM_THRESH 4.4
// Decide whether the current GF group should be cut at frame_index.
// Returns 1 to cut the group here, 0 to keep extending it.
static INLINE int detect_gf_cut(AV1_COMP *cpi, int frame_index, int cur_start,
                                int flash_detected, int active_max_gf_interval,
                                int active_min_gf_interval,
                                GF_GROUP_STATS *gf_stats) {
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  AV1_COMMON *const cm = &cpi->common;
  // Motion breakout threshold for loop below depends on image size.
  const double mv_ratio_accumulator_thresh = (cm->height + cm->width) / 4.0;

  if (!flash_detected) {
    // Break clause to detect very still sections after motion. For example,
    // a static image after a fade or other transition.

    // TODO(angiebird): This is a temporary change, we will avoid using
    // twopass_frame.stats_in in the follow-up CL
    int index = (int)(cpi->twopass_frame.stats_in -
                      twopass->stats_buf_ctx->stats_in_start);
    if (detect_transition_to_still(&twopass->firstpass_info, index,
                                   rc->min_gf_interval, frame_index - cur_start,
                                   5, gf_stats->loop_decay_rate,
                                   gf_stats->last_loop_decay_rate)) {
      return 1;
    }
  }

  // Some conditions to breakout after min interval.
  if (frame_index - cur_start >= active_min_gf_interval &&
      // If possible don't break very close to a kf
      (rc->frames_to_key - frame_index >= rc->min_gf_interval) &&
      // Only break at an odd offset from the group start.
      // NOTE(review): presumably to keep the resulting interval length even
      // for the ARF pyramid — confirm intent.
      ((frame_index - cur_start) & 0x01) && !flash_detected &&
      (gf_stats->mv_ratio_accumulator > mv_ratio_accumulator_thresh ||
       gf_stats->abs_mv_in_out_accumulator > ARF_ABS_ZOOM_THRESH)) {
    return 1;
  }

  // If almost totally static, we will not use the max GF length later,
  // so we can continue for more frames.
  if (((frame_index - cur_start) >= active_max_gf_interval + 1) &&
      !is_almost_static(gf_stats->zero_motion_accumulator,
                        twopass->kf_zeromotion_pct, cpi->ppi->lap_enabled)) {
    return 1;
  }
  return 0;
}
1021
// Returns 1 if tpl analysis suggests the GF interval should be shortened,
// 0 otherwise. Runs tpl stats setup (partial or complete, depending on the
// speed feature gop_length_decision_method) as a side effect, and may set
// cpi->skip_tpl_setup_stats so the computed stats are reused later.
static int is_shorter_gf_interval_better(
    AV1_COMP *cpi, const EncodeFrameParams *frame_params) {
  const RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  int gop_length_decision_method = cpi->sf.tpl_sf.gop_length_decision_method;
  int shorten_gf_interval;

  av1_tpl_preload_rc_estimate(cpi, frame_params);

  if (gop_length_decision_method == 2) {
    // GF group length is decided based on GF boost and tpl stats of ARFs from
    // base layer, (base+1) layer.
    shorten_gf_interval =
        (p_rc->gfu_boost <
         p_rc->num_stats_used_for_gfu_boost * GF_MIN_BOOST * 1.4) &&
        !av1_tpl_setup_stats(cpi, 3, frame_params);
  } else {
    int do_complete_tpl = 1;
    GF_GROUP *const gf_group = &cpi->ppi->gf_group;
    int is_temporal_filter_enabled =
        (rc->frames_since_key > 0 && gf_group->arf_index > -1);

    if (gop_length_decision_method == 1) {
      // Check if tpl stats of ARFs from base layer, (base+1) layer,
      // (base+2) layer can decide the GF group length.
      int gop_length_eval = av1_tpl_setup_stats(cpi, 2, frame_params);

      // A result of 2 means the partial stats could not decide; fall through
      // to the complete tpl evaluation below.
      if (gop_length_eval != 2) {
        do_complete_tpl = 0;
        shorten_gf_interval = !gop_length_eval;
      }
    }

    if (do_complete_tpl) {
      // Decide GF group length based on complete tpl stats.
      shorten_gf_interval = !av1_tpl_setup_stats(cpi, 1, frame_params);
      // Tpl stats is reused when the ARF is temporally filtered and GF
      // interval is not shortened.
      if (is_temporal_filter_enabled && !shorten_gf_interval) {
        cpi->skip_tpl_setup_stats = 1;
#if CONFIG_BITRATE_ACCURACY && !CONFIG_THREE_PASS
        assert(cpi->gf_frame_index == 0);
        av1_vbr_rc_update_q_index_list(&cpi->vbr_rc_info, &cpi->ppi->tpl_data,
                                       gf_group,
                                       cpi->common.seq_params->bit_depth);
#endif  // CONFIG_BITRATE_ACCURACY && !CONFIG_THREE_PASS
      }
    }
  }
  return shorten_gf_interval;
}
1073
#define MIN_SHRINK_LEN 6  // the minimum length of gf if we are shrinking
#define SMOOTH_FILT_LEN 7
#define HALF_FILT_LEN (SMOOTH_FILT_LEN / 2)
// Sliding-window size used by the region analysis below.
#define WINDOW_SIZE 7
#define HALF_WIN (WINDOW_SIZE / 2)
// A 7-tap gaussian smooth filter
// Symmetric taps; weights are renormalized at use sites when entries are
// skipped (see smooth_filter_stats).
const double smooth_filt[SMOOTH_FILT_LEN] = { 0.006, 0.061, 0.242, 0.383,
                                              0.242, 0.061, 0.006 };
1082
1083 // Smooth filter intra_error and coded_error in firstpass stats.
1084 // If stats[i].is_flash==1, the ith element should not be used in the filtering.
smooth_filter_stats(const FIRSTPASS_STATS * stats,int start_idx,int last_idx,double * filt_intra_err,double * filt_coded_err)1085 static void smooth_filter_stats(const FIRSTPASS_STATS *stats, int start_idx,
1086 int last_idx, double *filt_intra_err,
1087 double *filt_coded_err) {
1088 int i, j;
1089 for (i = start_idx; i <= last_idx; i++) {
1090 double total_wt = 0;
1091 for (j = -HALF_FILT_LEN; j <= HALF_FILT_LEN; j++) {
1092 int idx = AOMMIN(AOMMAX(i + j, start_idx), last_idx);
1093 if (stats[idx].is_flash) continue;
1094
1095 filt_intra_err[i] +=
1096 smooth_filt[j + HALF_FILT_LEN] * stats[idx].intra_error;
1097 total_wt += smooth_filt[j + HALF_FILT_LEN];
1098 }
1099 if (total_wt > 0.01) {
1100 filt_intra_err[i] /= total_wt;
1101 } else {
1102 filt_intra_err[i] = stats[i].intra_error;
1103 }
1104 }
1105 for (i = start_idx; i <= last_idx; i++) {
1106 double total_wt = 0;
1107 for (j = -HALF_FILT_LEN; j <= HALF_FILT_LEN; j++) {
1108 int idx = AOMMIN(AOMMAX(i + j, start_idx), last_idx);
1109 // Coded error involves idx and idx - 1.
1110 if (stats[idx].is_flash || (idx > 0 && stats[idx - 1].is_flash)) continue;
1111
1112 filt_coded_err[i] +=
1113 smooth_filt[j + HALF_FILT_LEN] * stats[idx].coded_error;
1114 total_wt += smooth_filt[j + HALF_FILT_LEN];
1115 }
1116 if (total_wt > 0.01) {
1117 filt_coded_err[i] /= total_wt;
1118 } else {
1119 filt_coded_err[i] = stats[i].coded_error;
1120 }
1121 }
1122 }
1123
// Calculate gradient of a value series: central difference in the interior,
// one-sided difference at the boundaries.
static void get_gradient(const double *values, int start, int last,
                         double *grad) {
  // A single sample has no slope.
  if (start == last) {
    grad[start] = 0;
    return;
  }
  for (int i = start; i <= last; i++) {
    const int prev = (i - 1 > start) ? i - 1 : start;
    const int next = (i + 1 < last) ? i + 1 : last;
    grad[i] = (values[next] - values[prev]) / (next - prev);
  }
}
1137
find_next_scenecut(const FIRSTPASS_STATS * const stats_start,int first,int last)1138 static int find_next_scenecut(const FIRSTPASS_STATS *const stats_start,
1139 int first, int last) {
1140 // Identify unstable areas caused by scenecuts.
1141 // Find the max and 2nd max coded error, and the average of the rest frames.
1142 // If there is only one frame that yields a huge coded error, it is likely a
1143 // scenecut.
1144 double this_ratio, max_prev_ratio, max_next_ratio, max_prev_coded,
1145 max_next_coded;
1146
1147 if (last - first == 0) return -1;
1148
1149 for (int i = first; i <= last; i++) {
1150 if (stats_start[i].is_flash || (i > 0 && stats_start[i - 1].is_flash))
1151 continue;
1152 double temp_intra = AOMMAX(stats_start[i].intra_error, 0.01);
1153 this_ratio = stats_start[i].coded_error / temp_intra;
1154 // find the avg ratio in the preceding neighborhood
1155 max_prev_ratio = 0;
1156 max_prev_coded = 0;
1157 for (int j = AOMMAX(first, i - HALF_WIN); j < i; j++) {
1158 if (stats_start[j].is_flash || (j > 0 && stats_start[j - 1].is_flash))
1159 continue;
1160 temp_intra = AOMMAX(stats_start[j].intra_error, 0.01);
1161 double temp_ratio = stats_start[j].coded_error / temp_intra;
1162 if (temp_ratio > max_prev_ratio) {
1163 max_prev_ratio = temp_ratio;
1164 }
1165 if (stats_start[j].coded_error > max_prev_coded) {
1166 max_prev_coded = stats_start[j].coded_error;
1167 }
1168 }
1169 // find the avg ratio in the following neighborhood
1170 max_next_ratio = 0;
1171 max_next_coded = 0;
1172 for (int j = i + 1; j <= AOMMIN(i + HALF_WIN, last); j++) {
1173 if (stats_start[i].is_flash || (i > 0 && stats_start[i - 1].is_flash))
1174 continue;
1175 temp_intra = AOMMAX(stats_start[j].intra_error, 0.01);
1176 double temp_ratio = stats_start[j].coded_error / temp_intra;
1177 if (temp_ratio > max_next_ratio) {
1178 max_next_ratio = temp_ratio;
1179 }
1180 if (stats_start[j].coded_error > max_next_coded) {
1181 max_next_coded = stats_start[j].coded_error;
1182 }
1183 }
1184
1185 if (max_prev_ratio < 0.001 && max_next_ratio < 0.001) {
1186 // the ratios are very small, only check a small fixed threshold
1187 if (this_ratio < 0.02) continue;
1188 } else {
1189 // check if this frame has a larger ratio than the neighborhood
1190 double max_sr = stats_start[i].sr_coded_error;
1191 if (i < last) max_sr = AOMMAX(max_sr, stats_start[i + 1].sr_coded_error);
1192 double max_sr_fr_ratio =
1193 max_sr / AOMMAX(stats_start[i].coded_error, 0.01);
1194
1195 if (max_sr_fr_ratio > 1.2) continue;
1196 if (this_ratio < 2 * AOMMAX(max_prev_ratio, max_next_ratio) &&
1197 stats_start[i].coded_error <
1198 2 * AOMMAX(max_prev_coded, max_next_coded)) {
1199 continue;
1200 }
1201 }
1202 return i;
1203 }
1204 return -1;
1205 }
1206
// Remove the region with index next_region.
// parameter merge: 0: merge with previous; 1: merge with next; 2:
// merge with both, take type from previous if possible
// After removing, next_region will be the index of the next region.
static void remove_region(int merge, REGIONS *regions, int *num_regions,
                          int *next_region) {
  int k = *next_region;
  assert(k < *num_regions);
  // Removing the only region empties the list.
  if (*num_regions == 1) {
    *num_regions = 0;
    return;
  }
  // At the list boundaries there is only one neighbor to merge with, so
  // override the requested merge direction.
  if (k == 0) {
    merge = 1;
  } else if (k == *num_regions - 1) {
    merge = 0;
  }
  int num_merge = (merge == 2) ? 2 : 1;
  switch (merge) {
    case 0:
      // Extend the previous region forward over the removed one.
      regions[k - 1].last = regions[k].last;
      *next_region = k;
      break;
    case 1:
      // Extend the next region backward over the removed one.
      regions[k + 1].start = regions[k].start;
      *next_region = k + 1;
      break;
    case 2:
      // Absorb both this region and the next one into the previous region.
      regions[k - 1].last = regions[k + 1].last;
      *next_region = k;
      break;
    default: assert(0);
  }
  *num_regions -= num_merge;
  // Shift the trailing regions down to close the gap. When merging forward
  // (merge == 1) the shift starts one slot earlier because the surviving
  // region itself moved.
  for (k = *next_region - (merge == 1); k < *num_regions; k++) {
    regions[k] = regions[k + num_merge];
  }
}
1245
// Insert a region in the cur_region_idx. The start and last should both be in
// the current region. After insertion, the cur_region_idx will point to the
// last region that was splitted from the original region.
static void insert_region(int start, int last, REGION_TYPES type,
                          REGIONS *regions, int *num_regions,
                          int *cur_region_idx) {
  int k = *cur_region_idx;
  REGION_TYPES this_region_type = regions[k].type;
  int this_region_last = regions[k].last;
  // One extra slot is needed for each side of [start, last] that does not
  // coincide with the original region's boundary.
  int num_add = (start != regions[k].start) + (last != regions[k].last);
  // move the following regions further to the back
  for (int r = *num_regions - 1; r > k; r--) {
    regions[r + num_add] = regions[r];
  }
  *num_regions += num_add;
  // Left remainder of the original region (keeps the original type).
  if (start > regions[k].start) {
    regions[k].last = start - 1;
    k++;
    regions[k].start = start;
  }
  // The inserted region itself takes the new type.
  regions[k].type = type;
  if (last < this_region_last) {
    // Right remainder restores the original region's type and end.
    regions[k].last = last;
    k++;
    regions[k].start = last + 1;
    regions[k].last = this_region_last;
    regions[k].type = this_region_type;
  } else {
    regions[k].last = this_region_last;
  }
  *cur_region_idx = k;
}
1278
1279 // Get the average of stats inside a region.
analyze_region(const FIRSTPASS_STATS * stats,int k,REGIONS * regions)1280 static void analyze_region(const FIRSTPASS_STATS *stats, int k,
1281 REGIONS *regions) {
1282 int i;
1283 regions[k].avg_cor_coeff = 0;
1284 regions[k].avg_sr_fr_ratio = 0;
1285 regions[k].avg_intra_err = 0;
1286 regions[k].avg_coded_err = 0;
1287
1288 int check_first_sr = (k != 0);
1289
1290 for (i = regions[k].start; i <= regions[k].last; i++) {
1291 if (i > regions[k].start || check_first_sr) {
1292 double num_frames =
1293 (double)(regions[k].last - regions[k].start + check_first_sr);
1294 double max_coded_error =
1295 AOMMAX(stats[i].coded_error, stats[i - 1].coded_error);
1296 double this_ratio =
1297 stats[i].sr_coded_error / AOMMAX(max_coded_error, 0.001);
1298 regions[k].avg_sr_fr_ratio += this_ratio / num_frames;
1299 }
1300
1301 regions[k].avg_intra_err +=
1302 stats[i].intra_error / (double)(regions[k].last - regions[k].start + 1);
1303 regions[k].avg_coded_err +=
1304 stats[i].coded_error / (double)(regions[k].last - regions[k].start + 1);
1305
1306 regions[k].avg_cor_coeff +=
1307 AOMMAX(stats[i].cor_coeff, 0.001) /
1308 (double)(regions[k].last - regions[k].start + 1);
1309 regions[k].avg_noise_var +=
1310 AOMMAX(stats[i].noise_var, 0.001) /
1311 (double)(regions[k].last - regions[k].start + 1);
1312 }
1313 }
1314
1315 // Calculate the regions stats of every region.
get_region_stats(const FIRSTPASS_STATS * stats,REGIONS * regions,int num_regions)1316 static void get_region_stats(const FIRSTPASS_STATS *stats, REGIONS *regions,
1317 int num_regions) {
1318 for (int k = 0; k < num_regions; k++) {
1319 analyze_region(stats, k, regions);
1320 }
1321 }
1322
// Find tentative stable regions
// Classifies every frame in [this_start, this_last] as STABLE_REGION or
// HIGH_VAR_REGION from windowed first-pass statistics, and writes maximal
// runs of equal type into regions[]. Returns the number of regions written.
static int find_stable_regions(const FIRSTPASS_STATS *stats,
                               const double *grad_coded, int this_start,
                               int this_last, REGIONS *regions) {
  int i, j, k = 0;
  regions[k].start = this_start;
  for (i = this_start; i <= this_last; i++) {
    // Check mean and variance of stats in a window
    double mean_intra = 0.001, var_intra = 0.001;
    double mean_coded = 0.001, var_coded = 0.001;
    int count = 0;
    for (j = -HALF_WIN; j <= HALF_WIN; j++) {
      // Clamp the window position to the valid stats range.
      int idx = AOMMIN(AOMMAX(i + j, this_start), this_last);
      // Skip flash frames and the frame right after a flash.
      if (stats[idx].is_flash || (idx > 0 && stats[idx - 1].is_flash)) continue;
      mean_intra += stats[idx].intra_error;
      var_intra += stats[idx].intra_error * stats[idx].intra_error;
      mean_coded += stats[idx].coded_error;
      var_coded += stats[idx].coded_error * stats[idx].coded_error;
      count++;
    }

    REGION_TYPES cur_type;
    if (count > 0) {
      mean_intra /= (double)count;
      var_intra /= (double)count;
      mean_coded /= (double)count;
      var_coded /= (double)count;
      // Stability tests (thresholds are empirical): second moment close to
      // the squared mean, small coded-error gradient, and coded error small
      // relative to intra error.
      int is_intra_stable = (var_intra / (mean_intra * mean_intra) < 1.03);
      int is_coded_stable = (var_coded / (mean_coded * mean_coded) < 1.04 &&
                             fabs(grad_coded[i]) / mean_coded < 0.05) ||
                            mean_coded / mean_intra < 0.05;
      int is_coded_small = mean_coded < 0.5 * mean_intra;
      cur_type = (is_intra_stable && is_coded_stable && is_coded_small)
                     ? STABLE_REGION
                     : HIGH_VAR_REGION;
    } else {
      // Whole window skipped (all flashes): treat as high variance.
      cur_type = HIGH_VAR_REGION;
    }

    // mark a new region if type changes
    if (i == regions[k].start) {
      // first frame in the region
      regions[k].type = cur_type;
    } else if (cur_type != regions[k].type) {
      // Append a new region
      regions[k].last = i - 1;
      regions[k + 1].start = i;
      regions[k + 1].type = cur_type;
      k++;
    }
  }
  regions[k].last = this_last;
  return k + 1;
}
1377
1378 // Clean up regions that should be removed or merged.
cleanup_regions(REGIONS * regions,int * num_regions)1379 static void cleanup_regions(REGIONS *regions, int *num_regions) {
1380 int k = 0;
1381 while (k < *num_regions) {
1382 if ((k > 0 && regions[k - 1].type == regions[k].type &&
1383 regions[k].type != SCENECUT_REGION) ||
1384 regions[k].last < regions[k].start) {
1385 remove_region(0, regions, num_regions, &k);
1386 } else {
1387 k++;
1388 }
1389 }
1390 }
1391
1392 // Remove regions that are of type and shorter than length.
1393 // Merge it with its neighboring regions.
remove_short_regions(REGIONS * regions,int * num_regions,REGION_TYPES type,int length)1394 static void remove_short_regions(REGIONS *regions, int *num_regions,
1395 REGION_TYPES type, int length) {
1396 int k = 0;
1397 while (k < *num_regions && (*num_regions) > 1) {
1398 if ((regions[k].last - regions[k].start + 1 < length &&
1399 regions[k].type == type)) {
1400 // merge current region with the previous and next regions
1401 remove_region(2, regions, num_regions, &k);
1402 } else {
1403 k++;
1404 }
1405 }
1406 cleanup_regions(regions, num_regions);
1407 }
1408
// Refine the boundaries of unstable (non-stable) regions by reassigning
// boundary frames that statistically match the neighboring stable regions,
// then merge regions that appear misclassified.
static void adjust_unstable_region_bounds(const FIRSTPASS_STATS *stats,
                                          REGIONS *regions, int *num_regions) {
  int i, j, k;
  // Remove regions that are too short. Likely noise.
  remove_short_regions(regions, num_regions, STABLE_REGION, HALF_WIN);
  remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);

  get_region_stats(stats, regions, *num_regions);

  // Adjust region boundaries. The thresholds are empirically obtained, but
  // overall the performance is not very sensitive to small changes to them.
  for (k = 0; k < *num_regions; k++) {
    if (regions[k].type == STABLE_REGION) continue;
    if (k > 0) {
      // Adjust previous boundary.
      // First find the average intra/coded error in the previous
      // neighborhood.
      double avg_intra_err = 0;
      const int starti = AOMMAX(regions[k - 1].last - WINDOW_SIZE + 1,
                                regions[k - 1].start + 1);
      const int lasti = regions[k - 1].last;
      int counti = 0;
      for (i = starti; i <= lasti; i++) {
        avg_intra_err += stats[i].intra_error;
        counti++;
      }
      if (counti > 0) {
        avg_intra_err = AOMMAX(avg_intra_err / (double)counti, 0.001);
        // Grow the previous region frame by frame while the frames at the
        // head of this region still look like they belong to it.
        // NOTE(review): count_grad is initialized but never updated, so its
        // test below is currently always true — confirm whether a gradient
        // check was intended.
        int count_coded = 0, count_grad = 0;
        for (j = lasti + 1; j <= regions[k].last; j++) {
          // "Close" means intra error within 10% of the neighborhood average.
          const int intra_close =
              fabs(stats[j].intra_error - avg_intra_err) / avg_intra_err < 0.1;
          const int coded_small = stats[j].coded_error / avg_intra_err < 0.1;
          const int coeff_close = stats[j].cor_coeff > 0.995;
          if (!coeff_close || !coded_small) count_coded--;
          if (intra_close && count_coded >= 0 && count_grad >= 0) {
            // this frame probably belongs to the previous stable region
            regions[k - 1].last = j;
            regions[k].start = j + 1;
          } else {
            break;
          }
        }
      }
    }  // if k > 0
    if (k < *num_regions - 1) {
      // Adjust next boundary.
      // First find the average intra/coded error in the next neighborhood.
      double avg_intra_err = 0;
      const int starti = regions[k + 1].start;
      const int lasti = AOMMIN(regions[k + 1].last - 1,
                               regions[k + 1].start + WINDOW_SIZE - 1);
      int counti = 0;
      for (i = starti; i <= lasti; i++) {
        avg_intra_err += stats[i].intra_error;
        counti++;
      }
      if (counti > 0) {
        avg_intra_err = AOMMAX(avg_intra_err / (double)counti, 0.001);
        // At the boundary, coded error is large, but still the frame is stable
        int count_coded = 1, count_grad = 1;
        // Walk backwards from the boundary, pulling frames into the next
        // stable region while they match its statistics.
        for (j = starti - 1; j >= regions[k].start; j--) {
          const int intra_close =
              fabs(stats[j].intra_error - avg_intra_err) / avg_intra_err < 0.1;
          const int coded_small =
              stats[j + 1].coded_error / avg_intra_err < 0.1;
          const int coeff_close = stats[j].cor_coeff > 0.995;
          if (!coeff_close || !coded_small) count_coded--;
          if (intra_close && count_coded >= 0 && count_grad >= 0) {
            // this frame probably belongs to the next stable region
            regions[k + 1].start = j;
            regions[k].last = j - 1;
          } else {
            break;
          }
        }
      }
    }  // if k < *num_regions - 1
  }  // end of loop over all regions

  cleanup_regions(regions, num_regions);
  remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);
  get_region_stats(stats, regions, *num_regions);

  // If a stable regions has higher error than neighboring high var regions,
  // or if the stable region has a lower average correlation,
  // then it should be merged with them
  // NOTE(review): despite the "or" wording above, the condition requires
  // BOTH neighbors (k > 0 AND k < *num_regions - 1) to look better/worse —
  // confirm whether that is the intended logic.
  k = 0;
  while (k < *num_regions && (*num_regions) > 1) {
    if (regions[k].type == STABLE_REGION &&
        (regions[k].last - regions[k].start + 1) < 2 * WINDOW_SIZE &&
        ((k > 0 &&  // previous regions
          (regions[k].avg_coded_err > regions[k - 1].avg_coded_err * 1.01 ||
           regions[k].avg_cor_coeff < regions[k - 1].avg_cor_coeff * 0.999)) &&
         (k < *num_regions - 1 &&  // next region
          (regions[k].avg_coded_err > regions[k + 1].avg_coded_err * 1.01 ||
           regions[k].avg_cor_coeff < regions[k + 1].avg_cor_coeff * 0.999)))) {
      // merge current region with the previous and next regions
      remove_region(2, regions, num_regions, &k);
      // Recompute stats of the merged region (now at index k - 1).
      analyze_region(stats, k - 1, regions);
    } else if (regions[k].type == HIGH_VAR_REGION &&
               (regions[k].last - regions[k].start + 1) < 2 * WINDOW_SIZE &&
               ((k > 0 &&  // previous regions
                 (regions[k].avg_coded_err <
                      regions[k - 1].avg_coded_err * 0.99 ||
                  regions[k].avg_cor_coeff >
                      regions[k - 1].avg_cor_coeff * 1.001)) &&
                (k < *num_regions - 1 &&  // next region
                 (regions[k].avg_coded_err <
                      regions[k + 1].avg_coded_err * 0.99 ||
                  regions[k].avg_cor_coeff >
                      regions[k + 1].avg_cor_coeff * 1.001)))) {
      // merge current region with the previous and next regions
      remove_region(2, regions, num_regions, &k);
      analyze_region(stats, k - 1, regions);
    } else {
      k++;
    }
  }

  remove_short_regions(regions, num_regions, STABLE_REGION, WINDOW_SIZE);
  remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);
}
1532
1533 // Identify blending regions.
static void find_blending_regions(const FIRSTPASS_STATS *stats,
                                  REGIONS *regions, int *num_regions) {
  int i, k = 0;
  // Blending regions will have large content change, therefore will have a
  // large consistent change in intra error.
  int count_stable = 0;
  while (k < *num_regions) {
    if (regions[k].type == STABLE_REGION) {
      k++;
      count_stable++;
      continue;
    }
    // dir tracks the sign (+1/-1) of the current intra-error trend; 0 means
    // no large change is currently in progress.
    int dir = 0;
    int start = 0, last;
    for (i = regions[k].start; i <= regions[k].last; i++) {
      // First mark the regions that have a consistent large change of intra
      // error.
      if (k == 0 && i == regions[k].start) continue;
      // Skip flash frames (and the frame right after one); their stats do
      // not reflect the underlying trend.
      if (stats[i].is_flash || (i > 0 && stats[i - 1].is_flash)) continue;
      double grad = stats[i].intra_error - stats[i - 1].intra_error;
      // A change is "large" when the relative gradient exceeds 5%.
      int large_change = fabs(grad) / AOMMAX(stats[i].intra_error, 0.01) > 0.05;
      int this_dir = 0;
      if (large_change) {
        this_dir = (grad > 0) ? 1 : -1;
      }
      // the current trend continues
      if (dir == this_dir) continue;
      if (dir != 0) {
        // Mark the end of a new large change group and add it
        last = i - 1;
        insert_region(start, last, BLENDING_REGION, regions, num_regions, &k);
      }
      dir = this_dir;
      if (k == 0 && i == regions[k].start + 1) {
        start = i - 1;
      } else {
        start = i;
      }
    }
    // Close out a trend still in progress at the end of this region.
    if (dir != 0) {
      last = regions[k].last;
      insert_region(start, last, BLENDING_REGION, regions, num_regions, &k);
    }
    k++;
  }

  // If the blending region has very low correlation, mark it as high variance
  // since we probably cannot benefit from it anyways.
  get_region_stats(stats, regions, *num_regions);
  for (k = 0; k < *num_regions; k++) {
    if (regions[k].type != BLENDING_REGION) continue;
    // Demote single-frame regions, low-correlation regions, and any blending
    // found in a clip with no stable region at all.
    if (regions[k].last == regions[k].start || regions[k].avg_cor_coeff < 0.6 ||
        count_stable == 0)
      regions[k].type = HIGH_VAR_REGION;
  }
  get_region_stats(stats, regions, *num_regions);

  // It is possible for blending to result in a "dip" in intra error (first
  // decrease then increase). Therefore we need to find the dip and combine the
  // two regions.
  k = 1;
  while (k < *num_regions) {
    if (k < *num_regions - 1 && regions[k].type == HIGH_VAR_REGION) {
      // Check if this short high variance regions is actually in the middle of
      // a blending region.
      if (regions[k - 1].type == BLENDING_REGION &&
          regions[k + 1].type == BLENDING_REGION &&
          regions[k].last - regions[k].start < 3) {
        // Sign of the intra-error change at the end of each neighbor.
        int prev_dir = (stats[regions[k - 1].last].intra_error -
                        stats[regions[k - 1].last - 1].intra_error) > 0
                           ? 1
                           : -1;
        int next_dir = (stats[regions[k + 1].last].intra_error -
                        stats[regions[k + 1].last - 1].intra_error) > 0
                           ? 1
                           : -1;
        if (prev_dir < 0 && next_dir > 0) {
          // This is possibly a mid region of blending. Check the ratios
          double ratio_thres = AOMMIN(regions[k - 1].avg_sr_fr_ratio,
                                      regions[k + 1].avg_sr_fr_ratio) *
                               0.95;
          if (regions[k].avg_sr_fr_ratio > ratio_thres) {
            regions[k].type = BLENDING_REGION;
            // Merge with both neighbors and re-derive the merged stats.
            remove_region(2, regions, num_regions, &k);
            analyze_region(stats, k - 1, regions);
            continue;
          }
        }
      }
    }
    // Check if we have a pair of consecutive blending regions.
    if (regions[k - 1].type == BLENDING_REGION &&
        regions[k].type == BLENDING_REGION) {
      int prev_dir = (stats[regions[k - 1].last].intra_error -
                      stats[regions[k - 1].last - 1].intra_error) > 0
                         ? 1
                         : -1;
      int next_dir = (stats[regions[k].last].intra_error -
                      stats[regions[k].last - 1].intra_error) > 0
                         ? 1
                         : -1;

      // if both are too short, no need to check
      int total_length = regions[k].last - regions[k - 1].start + 1;
      if (total_length < 4) {
        regions[k - 1].type = HIGH_VAR_REGION;
        k++;
        continue;
      }

      int to_merge = 0;
      if (prev_dir < 0 && next_dir > 0) {
        // In this case we check the last frame in the previous region.
        double prev_length =
            (double)(regions[k - 1].last - regions[k - 1].start + 1);
        double last_ratio, ratio_thres;
        if (prev_length < 2.01) {
          // if the previous region is very short
          double max_coded_error =
              AOMMAX(stats[regions[k - 1].last].coded_error,
                     stats[regions[k - 1].last - 1].coded_error);
          last_ratio = stats[regions[k - 1].last].sr_coded_error /
                       AOMMAX(max_coded_error, 0.001);
          ratio_thres = regions[k].avg_sr_fr_ratio * 0.95;
        } else {
          double max_coded_error =
              AOMMAX(stats[regions[k - 1].last].coded_error,
                     stats[regions[k - 1].last - 1].coded_error);
          last_ratio = stats[regions[k - 1].last].sr_coded_error /
                       AOMMAX(max_coded_error, 0.001);
          // Average ratio of the previous region excluding its last frame.
          double prev_ratio =
              (regions[k - 1].avg_sr_fr_ratio * prev_length - last_ratio) /
              (prev_length - 1.0);
          ratio_thres = AOMMIN(prev_ratio, regions[k].avg_sr_fr_ratio) * 0.95;
        }
        if (last_ratio > ratio_thres) {
          to_merge = 1;
        }
      }

      if (to_merge) {
        // Fold the current region into the previous one.
        remove_region(0, regions, num_regions, &k);
        analyze_region(stats, k - 1, regions);
        continue;
      } else {
        // These are possibly two separate blending regions. Mark the boundary
        // frame as HIGH_VAR_REGION to separate the two.
        int prev_k = k - 1;
        insert_region(regions[prev_k].last, regions[prev_k].last,
                      HIGH_VAR_REGION, regions, num_regions, &prev_k);
        analyze_region(stats, prev_k, regions);
        k = prev_k + 1;
        analyze_region(stats, k, regions);
      }
    }
    k++;
  }
  cleanup_regions(regions, num_regions);
}
1692
1693 // Clean up decision for blendings. Remove blending regions that are too short.
1694 // Also if a very short high var region is between a blending and a stable
1695 // region, just merge it with one of them.
// Clean up decision for blendings. Remove blending regions that are too
// short. Also if a very short high var region is between a blending and a
// stable region, just merge it with one of them.
static void cleanup_blendings(REGIONS *regions, int *num_regions) {
  int idx = 0;
  while (idx < *num_regions && *num_regions > 1) {
    const int length = regions[idx].last - regions[idx].start + 1;
    const int has_prev = (idx > 0);
    const int has_next = (idx < *num_regions - 1);
    const int is_short_blending =
        regions[idx].type == BLENDING_REGION && length < 5;
    const int is_short_hv = regions[idx].type == HIGH_VAR_REGION && length < 5;
    const int has_stable_neighbor =
        (has_prev && regions[idx - 1].type == STABLE_REGION) ||
        (has_next && regions[idx + 1].type == STABLE_REGION);
    const int has_blend_neighbor =
        (has_prev && regions[idx - 1].type == BLENDING_REGION) ||
        (has_next && regions[idx + 1].type == BLENDING_REGION);
    const int total_neighbors = has_prev + has_next;

    if (!(is_short_blending ||
          (is_short_hv &&
           has_stable_neighbor + has_blend_neighbor >= total_neighbors))) {
      idx++;
      continue;
    }

    // Remove this region. Fold it into whichever neighbor has the closer
    // average correlation coefficient (0 = previous, 1 = next).
    const double prev_diff =
        has_prev
            ? fabs(regions[idx].avg_cor_coeff - regions[idx - 1].avg_cor_coeff)
            : 1;
    const double next_diff =
        has_next
            ? fabs(regions[idx].avg_cor_coeff - regions[idx + 1].avg_cor_coeff)
            : 1;
    remove_region(prev_diff > next_diff, regions, num_regions, &idx);
  }
  cleanup_regions(regions, num_regions);
}
1734
// Release the scratch buffers used while identifying regions from the first
// pass stats. Callers may pass NULL for buffers whose allocation failed (see
// the allocation-failure path in identify_regions).
static void free_firstpass_stats_buffers(REGIONS *temp_regions,
                                         double *filt_intra_err,
                                         double *filt_coded_err,
                                         double *grad_coded) {
  aom_free(grad_coded);
  aom_free(filt_coded_err);
  aom_free(filt_intra_err);
  aom_free(temp_regions);
}
1744
1745 // Identify stable and unstable regions from first pass stats.
1746 // stats_start points to the first frame to analyze.
1747 // |offset| is the offset from the current frame to the frame stats_start is
1748 // pointing to.
1749 // Returns 0 on success, -1 on memory allocation failure.
static int identify_regions(const FIRSTPASS_STATS *const stats_start,
                            int total_frames, int offset, REGIONS *regions,
                            int *total_regions) {
  int k;
  // Nothing to analyze for a single frame.
  if (total_frames <= 1) return 0;

  // store the initial decisions
  REGIONS *temp_regions =
      (REGIONS *)aom_malloc(total_frames * sizeof(temp_regions[0]));
  // buffers for filtered stats
  double *filt_intra_err =
      (double *)aom_calloc(total_frames, sizeof(*filt_intra_err));
  double *filt_coded_err =
      (double *)aom_calloc(total_frames, sizeof(*filt_coded_err));
  double *grad_coded = (double *)aom_calloc(total_frames, sizeof(*grad_coded));
  // On any allocation failure, release whatever did allocate and bail out.
  if (!(temp_regions && filt_intra_err && filt_coded_err && grad_coded)) {
    free_firstpass_stats_buffers(temp_regions, filt_intra_err, filt_coded_err,
                                 grad_coded);
    return -1;
  }
  av1_zero_array(temp_regions, total_frames);

  int cur_region = 0, this_start = 0, this_last;

  int next_scenecut = -1;
  // Process the clip one scenecut-delimited segment at a time.
  do {
    // first get the obvious scenecuts
    next_scenecut =
        find_next_scenecut(stats_start, this_start, total_frames - 1);
    this_last = (next_scenecut >= 0) ? (next_scenecut - 1) : total_frames - 1;

    // low-pass filter the needed stats
    smooth_filter_stats(stats_start, this_start, this_last, filt_intra_err,
                        filt_coded_err);
    get_gradient(filt_coded_err, this_start, this_last, grad_coded);

    // find tentative stable regions and unstable regions
    int num_regions = find_stable_regions(stats_start, grad_coded, this_start,
                                          this_last, temp_regions);

    adjust_unstable_region_bounds(stats_start, temp_regions, &num_regions);

    get_region_stats(stats_start, temp_regions, num_regions);

    // Try to identify blending regions in the unstable regions
    find_blending_regions(stats_start, temp_regions, &num_regions);
    cleanup_blendings(temp_regions, &num_regions);

    // The flash points should all be considered high variance points
    k = 0;
    while (k < num_regions) {
      if (temp_regions[k].type != STABLE_REGION) {
        k++;
        continue;
      }
      int start = temp_regions[k].start;
      int last = temp_regions[k].last;
      // Split out each flash frame inside a stable region as its own
      // single-frame high-variance region.
      for (int i = start; i <= last; i++) {
        if (stats_start[i].is_flash) {
          insert_region(i, i, HIGH_VAR_REGION, temp_regions, &num_regions, &k);
        }
      }
      k++;
    }
    cleanup_regions(temp_regions, &num_regions);

    // copy the regions in the scenecut group
    for (k = 0; k < num_regions; k++) {
      // Drop a trailing invalid region (last < start) if one was produced.
      if (temp_regions[k].last < temp_regions[k].start &&
          k == num_regions - 1) {
        num_regions--;
        break;
      }
      regions[k + cur_region] = temp_regions[k];
    }
    cur_region += num_regions;

    // add the scenecut region
    if (next_scenecut > -1) {
      // add the scenecut region, and find the next scenecut
      regions[cur_region].type = SCENECUT_REGION;
      regions[cur_region].start = next_scenecut;
      regions[cur_region].last = next_scenecut;
      cur_region++;
      this_start = next_scenecut + 1;
    }
  } while (next_scenecut >= 0);

  *total_regions = cur_region;
  get_region_stats(stats_start, regions, *total_regions);

  for (k = 0; k < *total_regions; k++) {
    // If scenecuts are very minor, mark them as high variance.
    if (regions[k].type != SCENECUT_REGION ||
        regions[k].avg_cor_coeff *
                (1 - stats_start[regions[k].start].noise_var /
                         regions[k].avg_intra_err) <
            0.8) {
      continue;
    }
    regions[k].type = HIGH_VAR_REGION;
  }
  cleanup_regions(regions, total_regions);
  get_region_stats(stats_start, regions, *total_regions);

  // Shift region frame indices from stats-local positions to positions
  // relative to the current frame (see |offset| in the header comment).
  for (k = 0; k < *total_regions; k++) {
    regions[k].start += offset;
    regions[k].last += offset;
  }

  free_firstpass_stats_buffers(temp_regions, filt_intra_err, filt_coded_err,
                               grad_coded);
  return 0;
}
1864
find_regions_index(const REGIONS * regions,int num_regions,int frame_idx)1865 static int find_regions_index(const REGIONS *regions, int num_regions,
1866 int frame_idx) {
1867 for (int k = 0; k < num_regions; k++) {
1868 if (regions[k].start <= frame_idx && regions[k].last >= frame_idx) {
1869 return k;
1870 }
1871 }
1872 return -1;
1873 }
1874
1875 /*!\brief Determine the length of future GF groups.
1876 *
1877 * \ingroup gf_group_algo
1878 * This function decides the gf group length of future frames in batch
1879 *
1880 * \param[in] cpi Top-level encoder structure
1881 * \param[in] max_gop_length Maximum length of the GF group
1882 * \param[in] max_intervals Maximum number of intervals to decide
1883 *
1884 * \remark Nothing is returned. Instead, cpi->ppi->rc.gf_intervals is
1885 * changed to store the decided GF group lengths.
1886 */
static void calculate_gf_length(AV1_COMP *cpi, int max_gop_length,
                                int max_intervals) {
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FIRSTPASS_STATS next_frame;
  const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
  // Right after a key frame, index 0 of |stats| refers to the KF itself.
  const FIRSTPASS_STATS *const stats = start_pos - (rc->frames_since_key == 0);

  const int f_w = cpi->common.width;
  const int f_h = cpi->common.height;
  int i;

  int flash_detected;

  av1_zero(next_frame);

  // Without first pass stats, fall back to fixed-length intervals.
  if (has_no_stats_stage(cpi)) {
    for (i = 0; i < MAX_NUM_GF_INTERVALS; i++) {
      p_rc->gf_intervals[i] = AOMMIN(rc->max_gf_interval, max_gop_length);
    }
    p_rc->cur_gf_index = 0;
    rc->intervals_till_gf_calculate_due = MAX_NUM_GF_INTERVALS;
    return;
  }

  // TODO(urvang): Try logic to vary min and max interval based on q.
  const int active_min_gf_interval = rc->min_gf_interval;
  const int active_max_gf_interval =
      AOMMIN(rc->max_gf_interval, max_gop_length);
  const int min_shrink_int = AOMMAX(MIN_SHRINK_LEN, active_min_gf_interval);

  i = (rc->frames_since_key == 0);
  max_intervals = cpi->ppi->lap_enabled ? 1 : max_intervals;
  int count_cuts = 1;
  // If cpi->gf_state.arf_gf_boost_lst is 0, we are starting with a KF or GF.
  int cur_start = -1 + !cpi->ppi->gf_state.arf_gf_boost_lst, cur_last;
  int cut_pos[MAX_NUM_GF_INTERVALS + 1] = { -1 };
  int cut_here;
  GF_GROUP_STATS gf_stats;
  init_gf_stats(&gf_stats);
  while (count_cuts < max_intervals + 1) {
    // reaches next key frame, break here
    if (i >= rc->frames_to_key) {
      cut_here = 2;
    } else if (i - cur_start >= rc->static_scene_max_gf_interval) {
      // reached maximum len, but nothing special yet (almost static)
      // let's look at the next interval
      cut_here = 1;
    } else if (EOF == input_stats(twopass, &cpi->twopass_frame, &next_frame)) {
      // reaches last frame, break
      cut_here = 2;
    } else {
      // Test for the case where there is a brief flash but the prediction
      // quality back to an earlier frame is then restored.
      flash_detected = detect_flash(twopass, &cpi->twopass_frame, 0);
      // TODO(bohanli): remove redundant accumulations here, or unify
      // this and the ones in define_gf_group
      accumulate_next_frame_stats(&next_frame, flash_detected,
                                  rc->frames_since_key, i, &gf_stats, f_w, f_h);

      cut_here = detect_gf_cut(cpi, i, cur_start, flash_detected,
                               active_max_gf_interval, active_min_gf_interval,
                               &gf_stats);
    }
    if (cut_here) {
      cur_last = i - 1;  // the current last frame in the gf group
      int ori_last = cur_last;
      // The region frame idx does not start from the same frame as cur_start
      // and cur_last. Need to offset them.
      int offset = rc->frames_since_key - p_rc->regions_offset;
      REGIONS *regions = p_rc->regions;
      int num_regions = p_rc->num_regions;

      int scenecut_idx = -1;
      // only try shrinking if interval smaller than active_max_gf_interval
      if (cur_last - cur_start <= active_max_gf_interval &&
          cur_last > cur_start) {
        // find the region indices of where the first and last frame belong.
        int k_start =
            find_regions_index(regions, num_regions, cur_start + offset);
        int k_last =
            find_regions_index(regions, num_regions, cur_last + offset);
        if (cur_start + offset == 0) k_start = 0;

        // See if we have a scenecut in between
        for (int r = k_start + 1; r <= k_last; r++) {
          if (regions[r].type == SCENECUT_REGION &&
              regions[r].last - offset - cur_start > active_min_gf_interval) {
            scenecut_idx = r;
            break;
          }
        }

        // if the found scenecut is very close to the end, ignore it.
        // Bug fix: guard against scenecut_idx == -1 (no scenecut found in the
        // loop above); without the guard this read regions[-1].
        if (scenecut_idx >= 0 &&
            regions[num_regions - 1].last - regions[scenecut_idx].last < 4) {
          scenecut_idx = -1;
        }

        if (scenecut_idx != -1) {
          // If we have a scenecut, then stop at it.
          // TODO(bohanli): add logic here to stop before the scenecut and for
          // the next gop start from the scenecut with GF
          int is_minor_sc =
              (regions[scenecut_idx].avg_cor_coeff *
                   (1 - stats[regions[scenecut_idx].start - offset].noise_var /
                            regions[scenecut_idx].avg_intra_err) >
               0.6);
          cur_last = regions[scenecut_idx].last - offset - !is_minor_sc;
        } else {
          int is_last_analysed = (k_last == num_regions - 1) &&
                                 (cur_last + offset == regions[k_last].last);
          int not_enough_regions =
              k_last - k_start <=
              1 + (regions[k_start].type == SCENECUT_REGION);
          // if we are very close to the end, then do not shrink since it may
          // introduce intervals that are too short
          if (!(is_last_analysed && not_enough_regions)) {
            const double arf_length_factor = 0.1;
            double best_score = 0;
            int best_j = -1;
            const int first_frame = regions[0].start - offset;
            const int last_frame = regions[num_regions - 1].last - offset;
            // score of how much the arf helps the whole GOP
            double base_score = 0.0;
            // Accumulate base_score in
            for (int j = cur_start + 1; j < cur_start + min_shrink_int; j++) {
              if (stats + j >= twopass->stats_buf_ctx->stats_in_end) break;
              base_score = (base_score + 1.0) * stats[j].cor_coeff;
            }
            int met_blending = 0;  // Whether we have met blending areas before
            int last_blending = 0;  // Whether the previous frame is blending
            for (int j = cur_start + min_shrink_int; j <= cur_last; j++) {
              if (stats + j >= twopass->stats_buf_ctx->stats_in_end) break;
              base_score = (base_score + 1.0) * stats[j].cor_coeff;
              int this_reg =
                  find_regions_index(regions, num_regions, j + offset);
              if (this_reg < 0) continue;
              // A GOP should include at most 1 blending region.
              if (regions[this_reg].type == BLENDING_REGION) {
                last_blending = 1;
                if (met_blending) {
                  break;
                } else {
                  base_score = 0;
                  continue;
                }
              } else {
                if (last_blending) met_blending = 1;
                last_blending = 0;
              }

              // Add the factor of how good the neighborhood is for this
              // candidate arf.
              double this_score = arf_length_factor * base_score;
              double temp_accu_coeff = 1.0;
              // following frames
              int count_f = 0;
              for (int n = j + 1; n <= j + 3 && n <= last_frame; n++) {
                if (stats + n >= twopass->stats_buf_ctx->stats_in_end) break;
                temp_accu_coeff *= stats[n].cor_coeff;
                this_score +=
                    temp_accu_coeff *
                    sqrt(AOMMAX(0.5,
                                1 - stats[n].noise_var /
                                        AOMMAX(stats[n].intra_error, 0.001)));
                count_f++;
              }
              // preceding frames
              temp_accu_coeff = 1.0;
              for (int n = j; n > j - 3 * 2 + count_f && n > first_frame; n--) {
                if (stats + n < twopass->stats_buf_ctx->stats_in_start) break;
                temp_accu_coeff *= stats[n].cor_coeff;
                this_score +=
                    temp_accu_coeff *
                    sqrt(AOMMAX(0.5,
                                1 - stats[n].noise_var /
                                        AOMMAX(stats[n].intra_error, 0.001)));
              }

              if (this_score > best_score) {
                best_score = this_score;
                best_j = j;
              }
            }

            // For blending areas, move one more frame in case we missed the
            // first blending frame.
            int best_reg =
                find_regions_index(regions, num_regions, best_j + offset);
            if (best_reg < num_regions - 1 && best_reg > 0) {
              if (regions[best_reg - 1].type == BLENDING_REGION &&
                  regions[best_reg + 1].type == BLENDING_REGION) {
                if (best_j + offset == regions[best_reg].start &&
                    best_j + offset < regions[best_reg].last) {
                  best_j += 1;
                } else if (best_j + offset == regions[best_reg].last &&
                           best_j + offset > regions[best_reg].start) {
                  best_j -= 1;
                }
              }
            }

            if (cur_last - best_j < 2) best_j = cur_last;
            if (best_j > 0 && best_score > 0.1) cur_last = best_j;
            // if cannot find anything, just cut at the original place.
          }
        }
      }
      cut_pos[count_cuts] = cur_last;
      count_cuts++;

      // reset pointers to the shrunken location
      cpi->twopass_frame.stats_in = start_pos + cur_last;
      cur_start = cur_last;
      int cur_region_idx =
          find_regions_index(regions, num_regions, cur_start + 1 + offset);
      if (cur_region_idx >= 0)
        if (regions[cur_region_idx].type == SCENECUT_REGION) cur_start++;

      i = cur_last;

      if (cut_here > 1 && cur_last == ori_last) break;

      // reset accumulators
      init_gf_stats(&gf_stats);
    }
    ++i;
  }

  // save intervals
  rc->intervals_till_gf_calculate_due = count_cuts - 1;
  for (int n = 1; n < count_cuts; n++) {
    p_rc->gf_intervals[n - 1] = cut_pos[n] - cut_pos[n - 1];
  }
  p_rc->cur_gf_index = 0;
  cpi->twopass_frame.stats_in = start_pos;
}
2125
// Clamp rc.frames_to_key against the frames actually remaining, e.g. when
// the lookahead queue is flushing at the end of the clip.
static void correct_frames_to_key(AV1_COMP *cpi) {
  const int frames_in_lookahead =
      (int)av1_lookahead_depth(cpi->ppi->lookahead, cpi->compressor_stage);
  if (frames_in_lookahead <
      av1_lookahead_pop_sz(cpi->ppi->lookahead, cpi->compressor_stage)) {
    // Lookahead is draining: no more than the queued frames remain.
    assert(
        IMPLIES(cpi->oxcf.pass != AOM_RC_ONE_PASS && cpi->ppi->frames_left > 0,
                frames_in_lookahead == cpi->ppi->frames_left));
    cpi->rc.frames_to_key =
        AOMMIN(cpi->rc.frames_to_key, frames_in_lookahead);
  } else if (cpi->ppi->frames_left > 0) {
    // Correct frames to key based on limit
    cpi->rc.frames_to_key =
        AOMMIN(cpi->rc.frames_to_key, cpi->ppi->frames_left);
  }
}
2141
2142 /*!\brief Define a GF group in one pass mode when no look ahead stats are
2143 * available.
2144 *
2145 * \ingroup gf_group_algo
2146 * This function defines the structure of a GF group, along with various
2147 * parameters regarding bit-allocation and quality setup in the special
 * case of one pass encoding where no lookahead stats are available.
2149 *
2150 * \param[in] cpi Top-level encoder structure
2151 *
2152 * \remark Nothing is returned. Instead, cpi->ppi->gf_group is changed.
2153 */
static void define_gf_group_pass0(AV1_COMP *cpi) {
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  GF_GROUP *const gf_group = &cpi->ppi->gf_group;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const GFConfig *const gf_cfg = &oxcf->gf_cfg;

  // Pick the length of this GF group.
  if (oxcf->q_cfg.aq_mode == CYCLIC_REFRESH_AQ) {
    av1_cyclic_refresh_set_golden_update(cpi);
  } else {
    p_rc->baseline_gf_interval = p_rc->gf_intervals[p_rc->cur_gf_index];
    rc->intervals_till_gf_calculate_due--;
    p_rc->cur_gf_index++;
  }

  // correct frames_to_key when lookahead queue is flushing
  correct_frames_to_key(cpi);

  if (p_rc->baseline_gf_interval > rc->frames_to_key)
    p_rc->baseline_gf_interval = rc->frames_to_key;

  p_rc->gfu_boost = DEFAULT_GF_BOOST;
  p_rc->constrained_gf_group =
      (p_rc->baseline_gf_interval >= rc->frames_to_key) ? 1 : 0;

  gf_group->max_layer_depth_allowed = oxcf->gf_cfg.gf_max_pyr_height;

  // Rare case when the look-ahead is less than the target GOP length, can't
  // generate ARF frame.
  if (p_rc->baseline_gf_interval > gf_cfg->lag_in_frames ||
      !is_altref_enabled(gf_cfg->lag_in_frames, gf_cfg->enable_auto_arf) ||
      p_rc->baseline_gf_interval < rc->min_gf_interval)
    gf_group->max_layer_depth_allowed = 0;

  // Set up the structure of this Group-Of-Pictures (same as GF_GROUP)
  av1_gop_setup_structure(cpi);

  // Allocate bits to each of the frames in the GF group.
  // TODO(sarahparker) Extend this to work with pyramid structure.
  const int use_cbr = (oxcf->rc_cfg.mode == AOM_CBR);
  for (int frame = 0; frame < gf_group->size; ++frame) {
    const FRAME_UPDATE_TYPE update_type = gf_group->update_type[frame];
    int target;
    if (update_type == KF_UPDATE) {
      target = use_cbr ? av1_calc_iframe_target_size_one_pass_cbr(cpi)
                       : av1_calc_iframe_target_size_one_pass_vbr(cpi);
    } else {
      target =
          use_cbr ? av1_calc_pframe_target_size_one_pass_cbr(cpi, update_type)
                  : av1_calc_pframe_target_size_one_pass_vbr(cpi, update_type);
    }
    gf_group->bit_allocation[frame] = target;
  }
}
2212
// Record |arf_position| as the baseline GF group interval.
static INLINE void set_baseline_gf_interval(PRIMARY_RATE_CONTROL *p_rc,
                                            int arf_position) {
  p_rc->baseline_gf_interval = arf_position;
}
2217
2218 // initialize GF_GROUP_STATS
// Reset all accumulated GF group statistics to their starting values.
static void init_gf_stats(GF_GROUP_STATS *gf_stats) {
  // Group error totals start from zero.
  gf_stats->gf_group_err = 0.0;
  gf_stats->gf_group_raw_error = 0.0;
  gf_stats->gf_group_skip_pct = 0.0;
  gf_stats->gf_group_inactive_zone_rows = 0.0;

  // Motion accumulators; additive ones start at 0.
  gf_stats->mv_ratio_accumulator = 0.0;
  gf_stats->this_frame_mv_in_out = 0.0;
  gf_stats->mv_in_out_accumulator = 0.0;
  gf_stats->abs_mv_in_out_accumulator = 0.0;

  // Decay/zero-motion accumulators; multiplicative ones start at 1.
  gf_stats->decay_accumulator = 1.0;
  gf_stats->zero_motion_accumulator = 1.0;
  gf_stats->loop_decay_rate = 1.0;
  gf_stats->last_loop_decay_rate = 1.0;

  // Running per-frame averages and counters.
  gf_stats->avg_sr_coded_error = 0.0;
  gf_stats->avg_pcnt_second_ref = 0.0;
  gf_stats->avg_new_mv_count = 0.0;
  gf_stats->avg_wavelet_energy = 0.0;
  gf_stats->avg_raw_err_stdev = 0.0;
  gf_stats->non_zero_stdev_count = 0;
}
2241
// Read the first pass stats for the upcoming GF group (of length
// p_rc->gf_intervals[p_rc->cur_gf_index]) and accumulate them into
// |gf_stats|. On return, |*idx| holds that group length.
static void accumulate_gop_stats(AV1_COMP *cpi, int is_intra_only, int f_w,
                                 int f_h, FIRSTPASS_STATS *next_frame,
                                 const FIRSTPASS_STATS *start_pos,
                                 GF_GROUP_STATS *gf_stats, int *idx) {
  int i, flash_detected;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  RATE_CONTROL *const rc = &cpi->rc;
  FRAME_INFO *frame_info = &cpi->frame_info;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;

  init_gf_stats(gf_stats);
  av1_zero(*next_frame);

  // If this is a key frame or the overlay from a previous arf then
  // the error score / cost of this frame has already been accounted for.
  i = is_intra_only;
  // get the determined gf group length from p_rc->gf_intervals
  while (i < p_rc->gf_intervals[p_rc->cur_gf_index]) {
    // read in the next frame
    if (EOF == input_stats(twopass, &cpi->twopass_frame, next_frame)) break;
    // Accumulate error score of frames in this gf group.
    double mod_frame_err =
        calculate_modified_err(frame_info, twopass, oxcf, next_frame);
    // accumulate stats for this frame
    accumulate_this_frame_stats(next_frame, mod_frame_err, gf_stats);
    ++i;
  }

  // Rewind to the start of the group for a second sweep.
  reset_fpf_position(&cpi->twopass_frame, start_pos);

  i = is_intra_only;
  // Prime the first frame before accumulating "next frame" stats.
  input_stats(twopass, &cpi->twopass_frame, next_frame);
  while (i < p_rc->gf_intervals[p_rc->cur_gf_index]) {
    // read in the next frame
    if (EOF == input_stats(twopass, &cpi->twopass_frame, next_frame)) break;

    // Test for the case where there is a brief flash but the prediction
    // quality back to an earlier frame is then restored.
    flash_detected = detect_flash(twopass, &cpi->twopass_frame, 0);

    // accumulate stats for next frame
    accumulate_next_frame_stats(next_frame, flash_detected,
                                rc->frames_since_key, i, gf_stats, f_w, f_h);

    ++i;
  }

  // Average the accumulated stats over the group length.
  i = p_rc->gf_intervals[p_rc->cur_gf_index];
  average_gf_stats(i, gf_stats);

  *idx = i;
}
2295
// Commit the chosen GOP length |idx|: advance interval bookkeeping (final
// pass only) and refresh the derived rate-control fields.
static void update_gop_length(RATE_CONTROL *rc, PRIMARY_RATE_CONTROL *p_rc,
                              int idx, int is_final_pass) {
  if (is_final_pass) {
    rc->intervals_till_gf_calculate_due--;
    p_rc->cur_gf_index++;
  }

  // Was the group length constrained by the requirement for a new KF?
  p_rc->constrained_gf_group = (idx >= rc->frames_to_key);

  set_baseline_gf_interval(p_rc, idx);
  rc->frames_till_gf_update_due = p_rc->baseline_gf_interval;
}
2309
2310 #define MAX_GF_BOOST 5400
2311 #define REDUCE_GF_LENGTH_THRESH 4
2312 #define REDUCE_GF_LENGTH_TO_KEY_THRESH 9
2313 #define REDUCE_GF_LENGTH_BY 1
// Compute the boost, total bit budget and per-frame bit allocation for the
// GOP that has just been defined. |i| is the group length as determined by
// the caller; |ext_len| below excludes the intra frame when |is_intra_only|.
static void set_gop_bits_boost(AV1_COMP *cpi, int i, int is_intra_only,
                               int is_final_pass, int use_alt_ref,
                               int alt_offset, const FIRSTPASS_STATS *start_pos,
                               GF_GROUP_STATS *gf_stats) {
  // Should we use the alternate reference frame.
  AV1_COMMON *const cm = &cpi->common;
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  GF_GROUP *gf_group = &cpi->ppi->gf_group;
  FRAME_INFO *frame_info = &cpi->frame_info;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const RateControlCfg *const rc_cfg = &oxcf->rc_cfg;

  // Number of frames the boost is computed over (excludes the intra frame).
  int ext_len = i - is_intra_only;
  if (use_alt_ref) {
    const int forward_frames = (rc->frames_to_key - i >= ext_len)
                                   ? ext_len
                                   : AOMMAX(0, rc->frames_to_key - i);

    // Calculate the boost for alt ref.
    p_rc->gfu_boost = av1_calc_arf_boost(
        twopass, &cpi->twopass_frame, p_rc, frame_info, alt_offset,
        forward_frames, ext_len, &p_rc->num_stats_used_for_gfu_boost,
        &p_rc->num_stats_required_for_gfu_boost, cpi->ppi->lap_enabled);
  } else {
    reset_fpf_position(&cpi->twopass_frame, start_pos);
    // No ARF: compute a (capped) GF boost instead.
    p_rc->gfu_boost = AOMMIN(
        MAX_GF_BOOST,
        av1_calc_arf_boost(
            twopass, &cpi->twopass_frame, p_rc, frame_info, alt_offset, ext_len,
            0, &p_rc->num_stats_used_for_gfu_boost,
            &p_rc->num_stats_required_for_gfu_boost, cpi->ppi->lap_enabled));
  }

#define LAST_ALR_BOOST_FACTOR 0.2f
  p_rc->arf_boost_factor = 1.0;
  if (use_alt_ref && !is_lossless_requested(rc_cfg)) {
    // Reduce the boost of altref in the last gf group
    if (rc->frames_to_key - ext_len == REDUCE_GF_LENGTH_BY ||
        rc->frames_to_key - ext_len == 0) {
      p_rc->arf_boost_factor = LAST_ALR_BOOST_FACTOR;
    }
  }

  // Reset the file position.
  reset_fpf_position(&cpi->twopass_frame, start_pos);
  if (cpi->ppi->lap_enabled) {
    // Since we don't have enough stats to know the actual error of the
    // gf group, we assume error of each frame to be equal to 1 and set
    // the error of the group as baseline_gf_interval.
    gf_stats->gf_group_err = p_rc->baseline_gf_interval;
  }
  // Calculate the bits to be allocated to the gf/arf group as a whole
  p_rc->gf_group_bits =
      calculate_total_gf_group_bits(cpi, gf_stats->gf_group_err);

#if GROUP_ADAPTIVE_MAXQ
  // Calculate an estimate of the maxq needed for the group.
  // We are more aggressive about correcting for sections
  // where there could be significant overshoot than for easier
  // sections where we do not wish to risk creating an overshoot
  // of the allocated bit budget.
  if ((rc_cfg->mode != AOM_Q) && (p_rc->baseline_gf_interval > 1) &&
      is_final_pass) {
    const int vbr_group_bits_per_frame =
        (int)(p_rc->gf_group_bits / p_rc->baseline_gf_interval);
    const double group_av_err =
        gf_stats->gf_group_raw_error / p_rc->baseline_gf_interval;
    const double group_av_skip_pct =
        gf_stats->gf_group_skip_pct / p_rc->baseline_gf_interval;
    const double group_av_inactive_zone =
        ((gf_stats->gf_group_inactive_zone_rows * 2) /
         (p_rc->baseline_gf_interval * (double)cm->mi_params.mb_rows));

    int tmp_q;
    tmp_q = get_twopass_worst_quality(
        cpi, group_av_err, (group_av_skip_pct + group_av_inactive_zone),
        vbr_group_bits_per_frame);
    // Keep active_worst_quality at no less than half its previous value.
    rc->active_worst_quality = AOMMAX(tmp_q, rc->active_worst_quality >> 1);
  }
#endif

  // Adjust KF group bits and error remaining.
  if (is_final_pass) twopass->kf_group_error_left -= gf_stats->gf_group_err;

  // Reset the file position.
  reset_fpf_position(&cpi->twopass_frame, start_pos);

  // Calculate a section intra ratio used in setting max loop filter.
  if (rc->frames_since_key != 0) {
    twopass->section_intra_rating = calculate_section_intra_ratio(
        start_pos, twopass->stats_buf_ctx->stats_in_end,
        p_rc->baseline_gf_interval);
  }

  av1_gop_bit_allocation(cpi, rc, gf_group, rc->frames_since_key == 0,
                         use_alt_ref, p_rc->gf_group_bits);

  // TODO(jingning): Generalize this condition.
  if (is_final_pass) {
    cpi->ppi->gf_state.arf_gf_boost_lst = use_alt_ref;

    // Reset rolling actual and target bits counters for ARF groups.
    twopass->rolling_arf_group_target_bits = 1;
    twopass->rolling_arf_group_actual_bits = 1;
  }
#if CONFIG_BITRATE_ACCURACY
  if (is_final_pass) {
    av1_vbr_rc_set_gop_bit_budget(&cpi->vbr_rc_info,
                                  p_rc->baseline_gf_interval);
  }
#endif
}
2428
2429 /*!\brief Define a GF group.
2430 *
2431 * \ingroup gf_group_algo
2432 * This function defines the structure of a GF group, along with various
2433 * parameters regarding bit-allocation and quality setup.
2434 *
2435 * \param[in] cpi Top-level encoder structure
2436 * \param[in] frame_params Structure with frame parameters
2437 * \param[in] is_final_pass Whether this is the final pass for the
2438 * GF group, or a trial (non-zero)
2439 *
2440 * \remark Nothing is returned. Instead, cpi->ppi->gf_group is changed.
2441 */
static void define_gf_group(AV1_COMP *cpi, EncodeFrameParams *frame_params,
                            int is_final_pass) {
  AV1_COMMON *const cm = &cpi->common;
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FIRSTPASS_STATS next_frame;
  const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
  GF_GROUP *gf_group = &cpi->ppi->gf_group;
  const GFConfig *const gf_cfg = &oxcf->gf_cfg;
  const RateControlCfg *const rc_cfg = &oxcf->rc_cfg;
  const int f_w = cm->width;
  const int f_h = cm->height;
  int i;  // Candidate GF group length in frames; set by accumulate_gop_stats.
  const int is_intra_only = rc->frames_since_key == 0;

  // Internal (non-top-level) altrefs require a pyramid height above 1.
  cpi->ppi->internal_altref_allowed = (gf_cfg->gf_max_pyr_height > 1);

  // Reset the GF group data structures unless this is a key
  // frame in which case it will already have been done.
  if (!is_intra_only) {
    av1_zero(cpi->ppi->gf_group);
    cpi->gf_frame_index = 0;
  }

  // No first-pass stats available: use the simplified pass-0 path.
  if (has_no_stats_stage(cpi)) {
    define_gf_group_pass0(cpi);
    return;
  }

  // Third pass: try to reuse the GF structure parsed from the two-pass
  // bitstream. On conflict (non-zero return) discard the third-pass context
  // and fall through to the regular second-pass logic below.
  if (cpi->third_pass_ctx && oxcf->pass == AOM_RC_THIRD_PASS) {
    int ret = define_gf_group_pass3(cpi, frame_params, is_final_pass);
    if (ret == 0) return;

    av1_free_thirdpass_ctx(cpi->third_pass_ctx);
    cpi->third_pass_ctx = NULL;
  }

  // correct frames_to_key when lookahead queue is emptying
  if (cpi->ppi->lap_enabled) {
    correct_frames_to_key(cpi);
  }

  // Accumulate motion/error statistics over the candidate group; this also
  // determines the candidate group length 'i'.
  GF_GROUP_STATS gf_stats;
  accumulate_gop_stats(cpi, is_intra_only, f_w, f_h, &next_frame, start_pos,
                       &gf_stats, &i);

  // A min pyramid height of 0 means the ARF itself may be dropped.
  const int can_disable_arf = !gf_cfg->gf_min_pyr_height;

  // If this is a key frame or the overlay from a previous arf then
  // the error score / cost of this frame has already been accounted for.
  const int active_min_gf_interval = rc->min_gf_interval;

  // Disable internal ARFs for "still" gf groups.
  //   zero_motion_accumulator: minimum percentage of (0,0) motion;
  //   avg_sr_coded_error: average of the SSE per pixel of each frame;
  //   avg_raw_err_stdev: average of the standard deviation of (0,0)
  //     motion error per block of each frame.
  const int can_disable_internal_arfs = gf_cfg->gf_min_pyr_height <= 1;
  if (can_disable_internal_arfs &&
      gf_stats.zero_motion_accumulator > MIN_ZERO_MOTION &&
      gf_stats.avg_sr_coded_error < MAX_SR_CODED_ERROR &&
      gf_stats.avg_raw_err_stdev < MAX_RAW_ERR_VAR) {
    cpi->ppi->internal_altref_allowed = 0;
  }

  int use_alt_ref;
  if (can_disable_arf) {
    // Use an ARF only if the content is not almost static, ARFs are allowed
    // in this KF group, and the group fits in the lag while meeting the
    // minimum GF interval.
    use_alt_ref =
        !is_almost_static(gf_stats.zero_motion_accumulator,
                          twopass->kf_zeromotion_pct, cpi->ppi->lap_enabled) &&
        p_rc->use_arf_in_this_kf_group && (i < gf_cfg->lag_in_frames) &&
        (i >= MIN_GF_INTERVAL);
  } else {
    use_alt_ref = p_rc->use_arf_in_this_kf_group &&
                  (i < gf_cfg->lag_in_frames) && (i > 2);
  }
  if (use_alt_ref) {
    gf_group->max_layer_depth_allowed = gf_cfg->gf_max_pyr_height;
  } else {
    gf_group->max_layer_depth_allowed = 0;
  }

  int alt_offset = 0;
  // The length reduction strategy is tweaked for certain cases, and doesn't
  // work well for certain other cases.
  const int allow_gf_length_reduction =
      ((rc_cfg->mode == AOM_Q && rc_cfg->cq_level <= 128) ||
       !cpi->ppi->internal_altref_allowed) &&
      !is_lossless_requested(rc_cfg);

  if (allow_gf_length_reduction && use_alt_ref) {
    // adjust length of this gf group if one of the following condition met
    // 1: only one overlay frame left and this gf is too long
    // 2: next gf group is too short to have arf compared to the current gf

    // maximum length of next gf group
    const int next_gf_len = rc->frames_to_key - i;
    const int single_overlay_left =
        next_gf_len == 0 && i > REDUCE_GF_LENGTH_THRESH;
    // the next gf is probably going to have a ARF but it will be shorter than
    // this gf
    const int unbalanced_gf =
        i > REDUCE_GF_LENGTH_TO_KEY_THRESH &&
        next_gf_len + 1 < REDUCE_GF_LENGTH_TO_KEY_THRESH &&
        next_gf_len + 1 >= rc->min_gf_interval;

    if (single_overlay_left || unbalanced_gf) {
      const int roll_back = REDUCE_GF_LENGTH_BY;
      // Reduce length only if active_min_gf_interval will be respected later.
      if (i - roll_back >= active_min_gf_interval + 1) {
        alt_offset = -roll_back;
        i -= roll_back;
        if (is_final_pass) rc->intervals_till_gf_calculate_due = 0;
        p_rc->gf_intervals[p_rc->cur_gf_index] -= roll_back;
        // Re-accumulate the group statistics over the shortened interval.
        reset_fpf_position(&cpi->twopass_frame, start_pos);
        accumulate_gop_stats(cpi, is_intra_only, f_w, f_h, &next_frame,
                             start_pos, &gf_stats, &i);
      }
    }
  }

  update_gop_length(rc, p_rc, i, is_final_pass);

  // Set up the structure of this Group-Of-Pictures (same as GF_GROUP)
  av1_gop_setup_structure(cpi);

  // Compute boosts and distribute the bit budget across the group.
  set_gop_bits_boost(cpi, i, is_intra_only, is_final_pass, use_alt_ref,
                     alt_offset, start_pos, &gf_stats);

  frame_params->frame_type =
      rc->frames_since_key == 0 ? KEY_FRAME : INTER_FRAME;
  // ARF / internal-ARF frames are coded ahead of display order and not shown.
  frame_params->show_frame =
      !(gf_group->update_type[cpi->gf_frame_index] == ARF_UPDATE ||
        gf_group->update_type[cpi->gf_frame_index] == INTNL_ARF_UPDATE);
}
2579
/*!\brief Define a GF group for the third pass.
2581 *
2582 * \ingroup gf_group_algo
2583 * This function defines the structure of a GF group for the third pass, along
2584 * with various parameters regarding bit-allocation and quality setup based on
2585 * the two-pass bitstream.
2586 * Much of the function still uses the strategies used for the second pass and
2587 * relies on first pass statistics. It is expected that over time these portions
2588 * would be replaced with strategies specific to the third pass.
2589 *
2590 * \param[in] cpi Top-level encoder structure
2591 * \param[in] frame_params Structure with frame parameters
2592 * \param[in] is_final_pass Whether this is the final pass for the
2593 * GF group, or a trial (non-zero)
2594 *
2595 * \return 0: Success;
2596 * -1: There are conflicts between the bitstream and current config
2597 * The values in cpi->ppi->gf_group are also changed.
2598 */
static int define_gf_group_pass3(AV1_COMP *cpi, EncodeFrameParams *frame_params,
                                 int is_final_pass) {
  // Without a parsed two-pass bitstream context there is nothing to mirror;
  // the caller falls back to the regular second-pass path.
  if (!cpi->third_pass_ctx) return -1;
  AV1_COMMON *const cm = &cpi->common;
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  FIRSTPASS_STATS next_frame;
  const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
  GF_GROUP *gf_group = &cpi->ppi->gf_group;
  const GFConfig *const gf_cfg = &oxcf->gf_cfg;
  const int f_w = cm->width;
  const int f_h = cm->height;
  int i;  // GF group length in frames; set by accumulate_gop_stats.
  const int is_intra_only = rc->frames_since_key == 0;

  cpi->ppi->internal_altref_allowed = (gf_cfg->gf_max_pyr_height > 1);

  // Reset the GF group data structures unless this is a key
  // frame in which case it will already have been done.
  if (!is_intra_only) {
    av1_zero(cpi->ppi->gf_group);
    cpi->gf_frame_index = 0;
  }

  // Accumulate first-pass statistics over the candidate group.
  GF_GROUP_STATS gf_stats;
  accumulate_gop_stats(cpi, is_intra_only, f_w, f_h, &next_frame, start_pos,
                       &gf_stats, &i);

  const int can_disable_arf = !gf_cfg->gf_min_pyr_height;

  // TODO(any): set cpi->ppi->internal_altref_allowed accordingly;

  // The two-pass bitstream dictates whether an ARF is used; if it says no
  // but the current config requires one, report a conflict (-1).
  int use_alt_ref = av1_check_use_arf(cpi->third_pass_ctx);
  if (use_alt_ref == 0 && !can_disable_arf) return -1;
  if (use_alt_ref) {
    gf_group->max_layer_depth_allowed = gf_cfg->gf_max_pyr_height;
  } else {
    gf_group->max_layer_depth_allowed = 0;
  }

  update_gop_length(rc, p_rc, i, is_final_pass);

  // Set up the structure of this Group-Of-Pictures (same as GF_GROUP)
  av1_gop_setup_structure(cpi);

  // Compute boosts and distribute the bit budget (no alt offset in pass 3).
  set_gop_bits_boost(cpi, i, is_intra_only, is_final_pass, use_alt_ref, 0,
                     start_pos, &gf_stats);

  // Frame type / visibility come directly from the two-pass bitstream.
  frame_params->frame_type = cpi->third_pass_ctx->frame_info[0].frame_type;
  frame_params->show_frame = cpi->third_pass_ctx->frame_info[0].is_show_frame;
  return 0;
}
2652
2653 // #define FIXED_ARF_BITS
2654 #ifdef FIXED_ARF_BITS
2655 #define ARF_BITS_FRACTION 0.75
2656 #endif
void av1_gop_bit_allocation(const AV1_COMP *cpi, RATE_CONTROL *const rc,
                            GF_GROUP *gf_group, int is_key_frame, int use_arf,
                            int64_t gf_group_bits) {
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;

  // Determine the extra bits reserved for the boosted frame(s).
  int boost_bits;
#ifdef FIXED_ARF_BITS
  // Fixed fraction of the group budget.
  boost_bits = (int)(ARF_BITS_FRACTION * gf_group_bits);
#else
  // Derived from the GF boost; the key frame itself is excluded from the
  // interval when this group starts a KF group.
  const int num_regular_frames =
      p_rc->baseline_gf_interval - (rc->frames_since_key == 0);
  boost_bits =
      calculate_boost_bits(num_regular_frames, p_rc->gfu_boost, gf_group_bits);
#endif

  // Adjust the boost bits for the configured target level.
  boost_bits =
      adjust_boost_bits_for_target_level(cpi, rc, boost_bits, gf_group_bits, 1);

  // Distribute the group budget over every frame in the GF group.
  allocate_gf_group_bits(gf_group, p_rc, rc, gf_group_bits, boost_bits,
                         is_key_frame, use_arf);
}
2677
2678 // Minimum % intra coding observed in first pass (1.0 = 100%)
2679 #define MIN_INTRA_LEVEL 0.25
// Minimum ratio between the % of intra coding and inter coding in the first
// pass after discounting neutral blocks (discounting neutral blocks in this
// way helps catch scene cuts in clips with very flat areas or letter box
// format clips with image padding).
2684 #define INTRA_VS_INTER_THRESH 2.0
2685 // Hard threshold where the first pass chooses intra for almost all blocks.
2686 // In such a case even if the frame is not a scene cut coding a key frame
2687 // may be a good option.
2688 #define VERY_LOW_INTER_THRESH 0.05
2689 // Maximum threshold for the relative ratio of intra error score vs best
2690 // inter error score.
2691 #define KF_II_ERR_THRESHOLD 1.9
2692 // In real scene cuts there is almost always a sharp change in the intra
2693 // or inter error score.
2694 #define ERR_CHANGE_THRESHOLD 0.4
// For real scene cuts we expect an improvement in the intra/inter error
// ratio in the next frame.
2697 #define II_IMPROVEMENT_THRESHOLD 3.5
2698 #define KF_II_MAX 128.0
2699 // Intra / Inter threshold very low
2700 #define VERY_LOW_II 1.5
2701 // Clean slide transitions we expect a sharp single frame spike in error.
2702 #define ERROR_SPIKE 5.0
2703
2704 // Slide show transition detection.
2705 // Tests for case where there is very low error either side of the current frame
2706 // but much higher just for this frame. This can help detect key frames in
2707 // slide shows even where the slides are pictures of different sizes.
2708 // Also requires that intra and inter errors are very similar to help eliminate
2709 // harmful false positives.
2710 // It will not help if the transition is a fade or other multi-frame effect.
slide_transition(const FIRSTPASS_STATS * this_frame,const FIRSTPASS_STATS * last_frame,const FIRSTPASS_STATS * next_frame)2711 static int slide_transition(const FIRSTPASS_STATS *this_frame,
2712 const FIRSTPASS_STATS *last_frame,
2713 const FIRSTPASS_STATS *next_frame) {
2714 return (this_frame->intra_error < (this_frame->coded_error * VERY_LOW_II)) &&
2715 (this_frame->coded_error > (last_frame->coded_error * ERROR_SPIKE)) &&
2716 (this_frame->coded_error > (next_frame->coded_error * ERROR_SPIKE));
2717 }
2718
2719 // Threshold for use of the lagging second reference frame. High second ref
2720 // usage may point to a transient event like a flash or occlusion rather than
2721 // a real scene cut.
2722 // We adapt the threshold based on number of frames in this key-frame group so
2723 // far.
// Second-reference usage threshold for scene-cut testing. Ramps linearly
// from the minimum up to (min + max_delta) over the first 'adapt_upto'
// frames of the KF group, then saturates.
static double get_second_ref_usage_thresh(int frame_count_so_far) {
  const int adapt_upto = 32;
  const double min_second_ref_usage_thresh = 0.085;
  const double second_ref_usage_thresh_max_delta = 0.035;
  // Fraction of the adaptation window covered so far (clamped to 1.0).
  const double fraction =
      (frame_count_so_far >= adapt_upto)
          ? 1.0
          : (double)frame_count_so_far / (adapt_upto - 1);
  return min_second_ref_usage_thresh +
         fraction * second_ref_usage_thresh_max_delta;
}
2735
// Tests whether the frame at |this_stats_index| is a viable key frame
// candidate. First applies the primary criteria (low second-reference usage,
// intra-dominance, or a slide transition), then checks that the candidate
// actually predicts the following frames well enough via a boost score.
// Returns 1 if viable, 0 otherwise.
static int test_candidate_kf(const FIRSTPASS_INFO *firstpass_info,
                             int this_stats_index, int frame_count_so_far,
                             enum aom_rc_mode rc_mode, int scenecut_mode,
                             int num_mbs) {
  const FIRSTPASS_STATS *last_stats =
      av1_firstpass_info_peek(firstpass_info, this_stats_index - 1);
  const FIRSTPASS_STATS *this_stats =
      av1_firstpass_info_peek(firstpass_info, this_stats_index);
  const FIRSTPASS_STATS *next_stats =
      av1_firstpass_info_peek(firstpass_info, this_stats_index + 1);
  // Previous, current and next frame stats are all required for the test.
  if (last_stats == NULL || this_stats == NULL || next_stats == NULL) {
    return 0;
  }

  int is_viable_kf = 0;
  double pcnt_intra = 1.0 - this_stats->pcnt_inter;
  // Inter percentage after discounting neutral (flat) blocks.
  double modified_pcnt_inter =
      this_stats->pcnt_inter - this_stats->pcnt_neutral;
  const double second_ref_usage_thresh =
      get_second_ref_usage_thresh(frame_count_so_far);
  int frames_to_test_after_candidate_key = SCENE_CUT_KEY_TEST_INTERVAL;
  int count_for_tolerable_prediction = 3;

  // We do "-1" because the candidate key is not counted.
  int stats_after_this_stats =
      av1_firstpass_info_future_count(firstpass_info, this_stats_index) - 1;

  // Scenecut mode 1 uses a shorter prediction-test window (presumably for
  // limited-lookahead configurations — TODO confirm).
  if (scenecut_mode == ENABLE_SCENECUT_MODE_1) {
    if (stats_after_this_stats < 3) {
      return 0;
    } else {
      frames_to_test_after_candidate_key = 3;
      count_for_tolerable_prediction = 1;
    }
  }
  // Make sure we have enough stats after the candidate key.
  frames_to_test_after_candidate_key =
      AOMMIN(frames_to_test_after_candidate_key, stats_after_this_stats);

  // Does the frame satisfy the primary criteria of a key frame?
  // See above for an explanation of the test criteria.
  // If so, then examine how well it predicts subsequent frames.
  if (IMPLIES(rc_mode == AOM_Q, frame_count_so_far >= 3) &&
      (this_stats->pcnt_second_ref < second_ref_usage_thresh) &&
      (next_stats->pcnt_second_ref < second_ref_usage_thresh) &&
      ((this_stats->pcnt_inter < VERY_LOW_INTER_THRESH) ||
       slide_transition(this_stats, last_stats, next_stats) ||
       ((pcnt_intra > MIN_INTRA_LEVEL) &&
        (pcnt_intra > (INTRA_VS_INTER_THRESH * modified_pcnt_inter)) &&
        ((this_stats->intra_error /
          DOUBLE_DIVIDE_CHECK(this_stats->coded_error)) <
         KF_II_ERR_THRESHOLD) &&
        ((fabs(last_stats->coded_error - this_stats->coded_error) /
              DOUBLE_DIVIDE_CHECK(this_stats->coded_error) >
          ERR_CHANGE_THRESHOLD) ||
         (fabs(last_stats->intra_error - this_stats->intra_error) /
              DOUBLE_DIVIDE_CHECK(this_stats->intra_error) >
          ERR_CHANGE_THRESHOLD) ||
         ((next_stats->intra_error /
           DOUBLE_DIVIDE_CHECK(next_stats->coded_error)) >
          II_IMPROVEMENT_THRESHOLD))))) {
    int i;
    double boost_score = 0.0;
    double old_boost_score = 0.0;
    double decay_accumulator = 1.0;

    // Examine how well the key frame predicts subsequent frames.
    for (i = 1; i <= frames_to_test_after_candidate_key; ++i) {
      // Get the next frame details
      const FIRSTPASS_STATS *local_next_frame =
          av1_firstpass_info_peek(firstpass_info, this_stats_index + i);
      // Scaled intra/inter error ratio, capped at KF_II_MAX below.
      double next_iiratio =
          (BOOST_FACTOR * local_next_frame->intra_error /
           DOUBLE_DIVIDE_CHECK(local_next_frame->coded_error));

      if (next_iiratio > KF_II_MAX) next_iiratio = KF_II_MAX;

      // Cumulative effect of decay in prediction quality.
      if (local_next_frame->pcnt_inter > 0.85)
        decay_accumulator *= local_next_frame->pcnt_inter;
      else
        decay_accumulator *= (0.85 + local_next_frame->pcnt_inter) / 2.0;

      // Keep a running total.
      boost_score += (decay_accumulator * next_iiratio);

      // Test various breakout clauses.
      // TODO(any): Test of intra error should be normalized to an MB.
      if ((local_next_frame->pcnt_inter < 0.05) || (next_iiratio < 1.5) ||
          (((local_next_frame->pcnt_inter - local_next_frame->pcnt_neutral) <
            0.20) &&
           (next_iiratio < 3.0)) ||
          ((boost_score - old_boost_score) < 3.0) ||
          (local_next_frame->intra_error < (200.0 / (double)num_mbs))) {
        break;
      }

      old_boost_score = boost_score;
    }

    // If there is tolerable prediction for at least the next 3 frames then
    // break out else discard this potential key frame and move on
    if (boost_score > 30.0 && (i > count_for_tolerable_prediction)) {
      is_viable_kf = 1;
    } else {
      is_viable_kf = 0;
    }
  }
  return is_viable_kf;
}
2846
2847 #define FRAMES_TO_CHECK_DECAY 8
2848 #define KF_MIN_FRAME_BOOST 80.0
2849 #define KF_MAX_FRAME_BOOST 128.0
2850 #define MIN_KF_BOOST 600 // Minimum boost for non-static KF interval
2851 #define MAX_KF_BOOST 3200
2852 #define MIN_STATIC_KF_BOOST 5400 // Minimum boost for static KF interval
2853
// Returns the distance (in frames) to an application-forced key frame in the
// lookahead buffer, or -1 when none is pending (see callers, which compare
// against -1).
static int detect_app_forced_key(AV1_COMP *cpi) {
  return is_forced_keyframe_pending(cpi->ppi->lookahead,
                                    cpi->ppi->lookahead->max_sz,
                                    cpi->compressor_stage);
}
2859
// Projects the previously computed KF boost onto the full KF group length
// when the boost was derived from fewer stats than frames_to_key.
static int get_projected_kf_boost(AV1_COMP *cpi) {
  const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  // If stats covering the whole group were already used, the stored boost
  // needs no projection.
  if (p_rc->num_stats_used_for_kf_boost >= cpi->rc.frames_to_key)
    return p_rc->kf_boost;

  // Scale by the ratio of the projection factor at the full group length to
  // the factor at the number of stats actually used.
  const double full_factor =
      av1_get_kf_boost_projection_factor(cpi->rc.frames_to_key);
  const double used_factor = av1_get_kf_boost_projection_factor(
      p_rc->num_stats_used_for_kf_boost);
  return (int)rint(full_factor * p_rc->kf_boost / used_factor);
}
2878
2879 /*!\brief Determine the location of the next key frame
2880 *
2881 * \ingroup gf_group_algo
2882 * This function decides the placement of the next key frame when a
2883 * scenecut is detected or the maximum key frame distance is reached.
2884 *
2885 * \param[in] cpi Top-level encoder structure
2886 * \param[in] firstpass_info struct for firstpass info
2887 * \param[in] num_frames_to_detect_scenecut Maximum lookahead frames.
2888 * \param[in] search_start_idx the start index for searching key frame.
2889 * Set it to one if we already know the
2890 * current frame is key frame. Otherwise,
2891 * set it to zero.
2892 *
2893 * \return Number of frames to the next key including the current frame.
2894 */
static int define_kf_interval(AV1_COMP *cpi,
                              const FIRSTPASS_INFO *firstpass_info,
                              int num_frames_to_detect_scenecut,
                              int search_start_idx) {
  const TWO_PASS *const twopass = &cpi->ppi->twopass;
  const RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const KeyFrameCfg *const kf_cfg = &oxcf->kf_cfg;
  // Ring buffer of the decay rates of the most recent frames checked.
  double recent_loop_decay[FRAMES_TO_CHECK_DECAY];
  double decay_accumulator = 1.0;
  int i = 0, j;
  int frames_to_key = search_start_idx;
  int frames_since_key = rc->frames_since_key + 1;
  int scenecut_detected = 0;

  // Distance to an application-forced key frame, or -1 if none is pending.
  int num_frames_to_next_key = detect_app_forced_key(cpi);

  // Scenecut search disabled: honour a forced key frame if one is pending,
  // otherwise keep the previously computed distance.
  if (num_frames_to_detect_scenecut == 0) {
    if (num_frames_to_next_key != -1)
      return num_frames_to_next_key;
    else
      return rc->frames_to_key;
  }

  // Never search beyond an application-forced key frame.
  if (num_frames_to_next_key != -1)
    num_frames_to_detect_scenecut =
        AOMMIN(num_frames_to_detect_scenecut, num_frames_to_next_key);

  // Initialize the decay rates for the recent frames to check
  for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j) recent_loop_decay[j] = 1.0;

  i = 0;
  const int num_mbs = (oxcf->resize_cfg.resize_mode != RESIZE_NONE)
                          ? cpi->initial_mbs
                          : cpi->common.mi_params.MBs;
  const int future_stats_count =
      av1_firstpass_info_future_count(firstpass_info, 0);
  while (frames_to_key < future_stats_count &&
         frames_to_key < num_frames_to_detect_scenecut) {
    // Provided that we are not at the end of the file...
    if ((cpi->ppi->p_rc.enable_scenecut_detection > 0) && kf_cfg->auto_key &&
        frames_to_key + 1 < future_stats_count) {
      double loop_decay_rate;

      // Check for a scene cut.
      if (frames_since_key >= kf_cfg->key_freq_min) {
        scenecut_detected = test_candidate_kf(
            &twopass->firstpass_info, frames_to_key, frames_since_key,
            oxcf->rc_cfg.mode, cpi->ppi->p_rc.enable_scenecut_detection,
            num_mbs);
        if (scenecut_detected) {
          break;
        }
      }

      // How fast is the prediction quality decaying?
      const FIRSTPASS_STATS *next_stats =
          av1_firstpass_info_peek(firstpass_info, frames_to_key + 1);
      loop_decay_rate = get_prediction_decay_rate(next_stats);

      // We want to know something about the recent past... rather than
      // as used elsewhere where we are concerned with decay in prediction
      // quality since the last GF or KF.
      recent_loop_decay[i % FRAMES_TO_CHECK_DECAY] = loop_decay_rate;
      decay_accumulator = 1.0;
      for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j)
        decay_accumulator *= recent_loop_decay[j];

      // Special check for transition or high motion followed by a
      // static scene.
      if (frames_since_key >= kf_cfg->key_freq_min) {
        scenecut_detected = detect_transition_to_still(
            firstpass_info, frames_to_key + 1, rc->min_gf_interval, i,
            kf_cfg->key_freq_max - i, loop_decay_rate, decay_accumulator);
        if (scenecut_detected) {
          // In the case of transition followed by a static scene, the key frame
          // could be a good predictor for the following frames, therefore we
          // do not use an arf.
          p_rc->use_arf_in_this_kf_group = 0;
          break;
        }
      }

      // Step on to the next frame.
      ++frames_to_key;
      ++frames_since_key;

      // If we don't have a real key frame within the next two
      // key_freq_max intervals then break out of the loop.
      if (frames_to_key >= 2 * kf_cfg->key_freq_max) {
        break;
      }
    } else {
      ++frames_to_key;
      ++frames_since_key;
    }
    ++i;
  }
  // Under LAP with no scenecut found, fall back to the forced-key distance
  // (this may be -1 when no forced key frame is pending).
  if (cpi->ppi->lap_enabled && !scenecut_detected)
    frames_to_key = num_frames_to_next_key;

  return frames_to_key;
}
2999
// Average coded error over (up to) the next frames_to_key frames, starting
// from |first_frame| and reading subsequent stats from |start_position|.
// NOTE(review): assumes frames_to_key >= 1 (num_frames would otherwise be
// 0 and the division below would be undefined) — TODO confirm with callers.
static double get_kf_group_avg_error(TWO_PASS *twopass,
                                     TWO_PASS_FRAME *twopass_frame,
                                     const FIRSTPASS_STATS *first_frame,
                                     const FIRSTPASS_STATS *start_position,
                                     int frames_to_key) {
  FIRSTPASS_STATS frame = *first_frame;
  double total_error = 0.0;
  int count = 0;

  reset_fpf_position(twopass_frame, start_position);

  // Accumulate coded error, stopping early if the stats run out (EOF).
  while (count < frames_to_key) {
    total_error += frame.coded_error;
    if (input_stats(twopass, twopass_frame, &frame) == EOF) break;
    ++count;
  }
  // On EOF, count+1 frames were summed; clamp so a full run divides by
  // exactly frames_to_key.
  const int num_frames = AOMMIN(count + 1, frames_to_key);
  return total_error / num_frames;
}
3021
get_kf_group_bits(AV1_COMP * cpi,double kf_group_err,double kf_group_avg_error)3022 static int64_t get_kf_group_bits(AV1_COMP *cpi, double kf_group_err,
3023 double kf_group_avg_error) {
3024 RATE_CONTROL *const rc = &cpi->rc;
3025 TWO_PASS *const twopass = &cpi->ppi->twopass;
3026 int64_t kf_group_bits;
3027 if (cpi->ppi->lap_enabled) {
3028 kf_group_bits = (int64_t)rc->frames_to_key * rc->avg_frame_bandwidth;
3029 if (cpi->oxcf.rc_cfg.vbr_corpus_complexity_lap) {
3030 double vbr_corpus_complexity_lap =
3031 cpi->oxcf.rc_cfg.vbr_corpus_complexity_lap / 10.0;
3032 /* Get the average corpus complexity of the frame */
3033 kf_group_bits = (int64_t)(
3034 kf_group_bits * (kf_group_avg_error / vbr_corpus_complexity_lap));
3035 }
3036 } else {
3037 kf_group_bits = (int64_t)(twopass->bits_left *
3038 (kf_group_err / twopass->modified_error_left));
3039 }
3040
3041 return kf_group_bits;
3042 }
3043
// Accumulates up to (frames_to_key - 1) first-pass stats into
// |avg_frame_stat| and then divides each field by the number of frames
// actually read, producing a per-frame average in place. Returns the number
// of frames accumulated; if fewer than 2 were read, the totals are left
// un-averaged and the count is returned as-is.
static int calc_avg_stats(AV1_COMP *cpi, FIRSTPASS_STATS *avg_frame_stat) {
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FIRSTPASS_STATS cur_frame;
  av1_zero(cur_frame);
  int num_frames = 0;
  // Accumulate total stat using available number of stats.
  for (num_frames = 0; num_frames < (rc->frames_to_key - 1); ++num_frames) {
    if (EOF == input_stats(twopass, &cpi->twopass_frame, &cur_frame)) break;
    av1_accumulate_stats(avg_frame_stat, &cur_frame);
  }

  // Too few frames for a meaningful average.
  if (num_frames < 2) {
    return num_frames;
  }
  // Average the total stat
  avg_frame_stat->weight = avg_frame_stat->weight / num_frames;
  avg_frame_stat->intra_error = avg_frame_stat->intra_error / num_frames;
  avg_frame_stat->frame_avg_wavelet_energy =
      avg_frame_stat->frame_avg_wavelet_energy / num_frames;
  avg_frame_stat->coded_error = avg_frame_stat->coded_error / num_frames;
  avg_frame_stat->sr_coded_error = avg_frame_stat->sr_coded_error / num_frames;
  avg_frame_stat->pcnt_inter = avg_frame_stat->pcnt_inter / num_frames;
  avg_frame_stat->pcnt_motion = avg_frame_stat->pcnt_motion / num_frames;
  avg_frame_stat->pcnt_second_ref =
      avg_frame_stat->pcnt_second_ref / num_frames;
  avg_frame_stat->pcnt_neutral = avg_frame_stat->pcnt_neutral / num_frames;
  avg_frame_stat->intra_skip_pct = avg_frame_stat->intra_skip_pct / num_frames;
  avg_frame_stat->inactive_zone_rows =
      avg_frame_stat->inactive_zone_rows / num_frames;
  avg_frame_stat->inactive_zone_cols =
      avg_frame_stat->inactive_zone_cols / num_frames;
  avg_frame_stat->MVr = avg_frame_stat->MVr / num_frames;
  avg_frame_stat->mvr_abs = avg_frame_stat->mvr_abs / num_frames;
  avg_frame_stat->MVc = avg_frame_stat->MVc / num_frames;
  avg_frame_stat->mvc_abs = avg_frame_stat->mvc_abs / num_frames;
  avg_frame_stat->MVrv = avg_frame_stat->MVrv / num_frames;
  avg_frame_stat->MVcv = avg_frame_stat->MVcv / num_frames;
  avg_frame_stat->mv_in_out_count =
      avg_frame_stat->mv_in_out_count / num_frames;
  avg_frame_stat->new_mv_count = avg_frame_stat->new_mv_count / num_frames;
  avg_frame_stat->count = avg_frame_stat->count / num_frames;
  avg_frame_stat->duration = avg_frame_stat->duration / num_frames;

  return num_frames;
}
3090
// Computes the boost score for a key frame by summing per-frame boost
// contributions over the KF group, weighted by how static the content is.
// When use_avg_stat is set, a single averaged stats record (from
// calc_avg_stats) is reused for the remaining iterations instead of reading
// a fresh record per frame.
static double get_kf_boost_score(AV1_COMP *cpi, double kf_raw_err,
                                 double *zero_motion_accumulator,
                                 double *sr_accumulator, int use_avg_stat) {
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FRAME_INFO *const frame_info = &cpi->frame_info;
  FIRSTPASS_STATS frame_stat;
  av1_zero(frame_stat);
  int i = 0, num_stat_used = 0;
  double boost_score = 0.0;
  // Under AOM_Q the per-frame boost cap scales with the group length,
  // clamped to [KF_MIN_FRAME_BOOST, KF_MAX_FRAME_BOOST].
  const double kf_max_boost =
      cpi->oxcf.rc_cfg.mode == AOM_Q
          ? AOMMIN(AOMMAX(rc->frames_to_key * 2.0, KF_MIN_FRAME_BOOST),
                   KF_MAX_FRAME_BOOST)
          : KF_MAX_FRAME_BOOST;

  // Calculate the average using available number of stats.
  if (use_avg_stat) num_stat_used = calc_avg_stats(cpi, &frame_stat);

  for (i = num_stat_used; i < (rc->frames_to_key - 1); ++i) {
    if (!use_avg_stat &&
        EOF == input_stats(twopass, &cpi->twopass_frame, &frame_stat))
      break;

    // Monitor for static sections.
    // For the first frame in kf group, the second ref indicator is invalid.
    if (i > 0) {
      *zero_motion_accumulator =
          AOMMIN(*zero_motion_accumulator, get_zero_motion_factor(&frame_stat));
    } else {
      *zero_motion_accumulator = frame_stat.pcnt_inter - frame_stat.pcnt_motion;
    }

    // Not all frames in the group are necessarily used in calculating boost.
    if ((*sr_accumulator < (kf_raw_err * 1.50)) &&
        (i <= rc->max_gf_interval * 2)) {
      double frame_boost;
      double zm_factor;

      // Factor 0.75-1.25 based on how much of frame is static.
      zm_factor = (0.75 + (*zero_motion_accumulator / 2.0));

      // The second-ref error accumulator is not meaningful for the first
      // two frames of the group.
      if (i < 2) *sr_accumulator = 0.0;
      frame_boost =
          calc_kf_frame_boost(&cpi->ppi->p_rc, frame_info, &frame_stat,
                              sr_accumulator, kf_max_boost);
      boost_score += frame_boost * zm_factor;
    }
  }
  return boost_score;
}
3142
/*!\brief Interval (in seconds) to which the key-frame distance is clipped
 * when look-ahead processing (LAP) is enabled.
 */
3145 #define MAX_KF_BITS_INTERVAL_SINGLE_PASS 5
3146
3147 /*!\brief Determine the next key frame group
3148 *
3149 * \ingroup gf_group_algo
3150 * This function decides the placement of the next key frame, and
3151 * calculates the bit allocation of the KF group and the keyframe itself.
3152 *
3153 * \param[in] cpi Top-level encoder structure
3154 * \param[in] this_frame Pointer to first pass stats
3155 */
static void find_next_key_frame(AV1_COMP *cpi, FIRSTPASS_STATS *this_frame) {
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  GF_GROUP *const gf_group = &cpi->ppi->gf_group;
  FRAME_INFO *const frame_info = &cpi->frame_info;
  AV1_COMMON *const cm = &cpi->common;
  CurrentFrame *const current_frame = &cm->current_frame;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const KeyFrameCfg *const kf_cfg = &oxcf->kf_cfg;
  const FIRSTPASS_STATS first_frame = *this_frame;
  FIRSTPASS_STATS next_frame;
  const FIRSTPASS_INFO *firstpass_info = &twopass->firstpass_info;
  av1_zero(next_frame);

  rc->frames_since_key = 0;
  // Use arfs if possible.
  p_rc->use_arf_in_this_kf_group = is_altref_enabled(
      oxcf->gf_cfg.lag_in_frames, oxcf->gf_cfg.enable_auto_arf);

  // Reset the GF group data structures.
  av1_zero(*gf_group);
  cpi->gf_frame_index = 0;

  // KF is always a GF so clear frames till next gf counter.
  rc->frames_till_gf_update_due = 0;

  // No first pass stats available: pick the key frame interval from the
  // config / app-forced key request and use the default boost, then exit.
  if (has_no_stats_stage(cpi)) {
    int num_frames_to_app_forced_key = detect_app_forced_key(cpi);
    p_rc->this_key_frame_forced =
        current_frame->frame_number != 0 && rc->frames_to_key == 0;
    if (num_frames_to_app_forced_key != -1)
      rc->frames_to_key = num_frames_to_app_forced_key;
    else
      rc->frames_to_key = AOMMAX(1, kf_cfg->key_freq_max);
    correct_frames_to_key(cpi);
    p_rc->kf_boost = DEFAULT_KF_BOOST;
    gf_group->update_type[0] = KF_UPDATE;
    return;
  }
  int i;
  const FIRSTPASS_STATS *const start_position = cpi->twopass_frame.stats_in;
  int kf_bits = 0;
  double zero_motion_accumulator = 1.0;
  double boost_score = 0.0;
  double kf_raw_err = 0.0;
  double kf_mod_err = 0.0;
  double sr_accumulator = 0.0;
  double kf_group_avg_error = 0.0;
  int frames_to_key, frames_to_key_clipped = INT_MAX;
  int64_t kf_group_bits_clipped = INT64_MAX;

  // Is this a forced key frame by interval.
  p_rc->this_key_frame_forced = p_rc->next_key_frame_forced;

  twopass->kf_group_bits = 0;        // Total bits available to kf group
  twopass->kf_group_error_left = 0;  // Group modified error score.

  kf_raw_err = this_frame->intra_error;
  kf_mod_err = calculate_modified_err(frame_info, twopass, oxcf, this_frame);

  // We assume the current frame is a key frame and we are looking for the next
  // key frame. Therefore search_start_idx = 1
  frames_to_key = define_kf_interval(cpi, firstpass_info, kf_cfg->key_freq_max,
                                     /*search_start_idx=*/1);

  // define_kf_interval() returns -1 when no natural break was found within
  // the maximum interval.
  if (frames_to_key != -1) {
    rc->frames_to_key = AOMMIN(kf_cfg->key_freq_max, frames_to_key);
  } else {
    rc->frames_to_key = kf_cfg->key_freq_max;
  }

  if (cpi->ppi->lap_enabled) correct_frames_to_key(cpi);

  // If there is a max kf interval set by the user we must obey it.
  // We already breakout of the loop above at 2x max.
  // This code centers the extra kf if the actual natural interval
  // is between 1x and 2x.
  if (kf_cfg->auto_key && rc->frames_to_key > kf_cfg->key_freq_max) {
    FIRSTPASS_STATS tmp_frame = first_frame;

    rc->frames_to_key /= 2;

    // Reset to the start of the group.
    reset_fpf_position(&cpi->twopass_frame, start_position);
    // Rescan to get the correct error data for the forced kf group.
    for (i = 0; i < rc->frames_to_key; ++i) {
      if (EOF == input_stats(twopass, &cpi->twopass_frame, &tmp_frame)) break;
    }
    p_rc->next_key_frame_forced = 1;
  } else if ((cpi->twopass_frame.stats_in ==
                  twopass->stats_buf_ctx->stats_in_end &&
              is_stat_consumption_stage_twopass(cpi)) ||
             rc->frames_to_key >= kf_cfg->key_freq_max) {
    p_rc->next_key_frame_forced = 1;
  } else {
    p_rc->next_key_frame_forced = 0;
  }

  // Sum the modified error of every frame in the kf group; this drives the
  // group's share of the remaining bit budget below.
  double kf_group_err = 0;
  for (i = 0; i < rc->frames_to_key; ++i) {
    const FIRSTPASS_STATS *this_stats =
        av1_firstpass_info_peek(&twopass->firstpass_info, i);
    if (this_stats != NULL) {
      // Accumulate kf group error.
      kf_group_err += calculate_modified_err_new(
          frame_info, &firstpass_info->total_stats, this_stats,
          oxcf->rc_cfg.vbrbias, twopass->modified_error_min,
          twopass->modified_error_max);
      ++p_rc->num_stats_used_for_kf_boost;
    }
  }

  // Calculate the number of bits that should be assigned to the kf group.
  if ((twopass->bits_left > 0 && twopass->modified_error_left > 0.0) ||
      (cpi->ppi->lap_enabled && oxcf->rc_cfg.mode != AOM_Q)) {
    // Maximum number of bits for a single normal frame (not key frame).
    const int max_bits = frame_max_bits(rc, oxcf);

    // Maximum number of bits allocated to the key frame group.
    int64_t max_grp_bits;

    if (oxcf->rc_cfg.vbr_corpus_complexity_lap) {
      kf_group_avg_error =
          get_kf_group_avg_error(twopass, &cpi->twopass_frame, &first_frame,
                                 start_position, rc->frames_to_key);
    }

    // Default allocation based on bits left and relative
    // complexity of the section.
    twopass->kf_group_bits =
        get_kf_group_bits(cpi, kf_group_err, kf_group_avg_error);
    // Clip based on maximum per frame rate defined by the user.
    max_grp_bits = (int64_t)max_bits * (int64_t)rc->frames_to_key;
    if (twopass->kf_group_bits > max_grp_bits)
      twopass->kf_group_bits = max_grp_bits;
  } else {
    twopass->kf_group_bits = 0;
  }
  twopass->kf_group_bits = AOMMAX(0, twopass->kf_group_bits);

  if (cpi->ppi->lap_enabled) {
    // In the case of single pass based on LAP, frames to key may have an
    // inaccurate value, and hence should be clipped to an appropriate
    // interval.
    frames_to_key_clipped =
        (int)(MAX_KF_BITS_INTERVAL_SINGLE_PASS * cpi->framerate);

    // This variable calculates the bits allocated to kf_group with a clipped
    // frames_to_key.
    if (rc->frames_to_key > frames_to_key_clipped) {
      kf_group_bits_clipped =
          (int64_t)((double)twopass->kf_group_bits * frames_to_key_clipped /
                    rc->frames_to_key);
    }
  }

  // Reset the first pass file position.
  reset_fpf_position(&cpi->twopass_frame, start_position);

  // Scan through the kf group collating various stats used to determine
  // how many bits to spend on it.
  boost_score = get_kf_boost_score(cpi, kf_raw_err, &zero_motion_accumulator,
                                   &sr_accumulator, 0);
  reset_fpf_position(&cpi->twopass_frame, start_position);
  // Store the zero motion percentage
  twopass->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);

  // Calculate a section intra ratio used in setting max loop filter.
  twopass->section_intra_rating = calculate_section_intra_ratio(
      start_position, twopass->stats_buf_ctx->stats_in_end, rc->frames_to_key);

  p_rc->kf_boost = (int)boost_score;

  if (cpi->ppi->lap_enabled) {
    if (oxcf->rc_cfg.mode == AOM_Q) {
      p_rc->kf_boost = get_projected_kf_boost(cpi);
    } else {
      // TODO(any): Explore using average frame stats for AOM_Q as well.
      // For LAP/VBR, supplement the boost with an average-stat based pass
      // since only a limited number of stats are available.
      boost_score = get_kf_boost_score(
          cpi, kf_raw_err, &zero_motion_accumulator, &sr_accumulator, 1);
      reset_fpf_position(&cpi->twopass_frame, start_position);
      p_rc->kf_boost += (int)boost_score;
    }
  }

  // Special case for static / slide show content but don't apply
  // if the kf group is very short.
  if ((zero_motion_accumulator > STATIC_KF_GROUP_FLOAT_THRESH) &&
      (rc->frames_to_key > 8)) {
    p_rc->kf_boost = AOMMAX(p_rc->kf_boost, MIN_STATIC_KF_BOOST);
  } else {
    // Apply various clamps for min and max boost
    p_rc->kf_boost = AOMMAX(p_rc->kf_boost, (rc->frames_to_key * 3));
    p_rc->kf_boost = AOMMAX(p_rc->kf_boost, MIN_KF_BOOST);
#ifdef STRICT_RC
    p_rc->kf_boost = AOMMIN(p_rc->kf_boost, MAX_KF_BOOST);
#endif
  }

  // Work out how many bits to allocate for the key frame itself.
  // In case of LAP enabled for VBR, if the frames_to_key value is
  // very high, we calculate the bits based on a clipped value of
  // frames_to_key.
  kf_bits = calculate_boost_bits(
      AOMMIN(rc->frames_to_key, frames_to_key_clipped) - 1, p_rc->kf_boost,
      AOMMIN(twopass->kf_group_bits, kf_group_bits_clipped));
  // printf("kf boost = %d kf_bits = %d kf_zeromotion_pct = %d\n",
  // p_rc->kf_boost,
  //        kf_bits, twopass->kf_zeromotion_pct);
  kf_bits = adjust_boost_bits_for_target_level(cpi, rc, kf_bits,
                                               twopass->kf_group_bits, 0);

  twopass->kf_group_bits -= kf_bits;

  // Save the bits to spend on the key frame.
  gf_group->bit_allocation[0] = kf_bits;
  gf_group->update_type[0] = KF_UPDATE;

  // Note the total error score of the kf group minus the key frame itself.
  if (cpi->ppi->lap_enabled)
    // As we don't have enough stats to know the actual error of the group,
    // we assume the complexity of each frame to be equal to 1, and set the
    // error as the number of frames in the group(minus the keyframe).
    twopass->kf_group_error_left = (double)(rc->frames_to_key - 1);
  else
    twopass->kf_group_error_left = kf_group_err - kf_mod_err;

  // Adjust the count of total modified error left.
  // The count of bits left is adjusted elsewhere based on real coded frame
  // sizes.
  twopass->modified_error_left -= kf_group_err;
}
3389
// Set to 1 to append per-GF-group ARF statistics to "arf.stt" (debug aid
// only; see the ARF_STATS_OUTPUT block in av1_get_second_pass_params).
#define ARF_STATS_OUTPUT 0
#if ARF_STATS_OUTPUT
// Running count of ARF groups written to the stats file.
unsigned int arf_count = 0;
#endif
3394
// Compute the target bandwidth, in bits per frame, for the remainder of
// the clip: the average frame bandwidth when LAP is enabled, otherwise the
// remaining two-pass bit budget spread over the remaining frames.
static int get_section_target_bandwidth(AV1_COMP *cpi) {
  AV1_COMMON *const cm = &cpi->common;
  CurrentFrame *const current_frame = &cm->current_frame;
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  int section_target_bandwidth;
  // Clamp to at least 1: if frame_number has caught up with (or passed) the
  // stats count, the unclamped value would be <= 0 and the division below
  // would be undefined behavior.
  const int frames_left =
      AOMMAX((int)(twopass->stats_buf_ctx->total_stats->count -
                   current_frame->frame_number),
             1);
  if (cpi->ppi->lap_enabled)
    section_target_bandwidth = (int)rc->avg_frame_bandwidth;
  else
    section_target_bandwidth = (int)(twopass->bits_left / frames_left);
  return section_target_bandwidth;
}
3409
// Derive per-frame two-pass parameters (average block energy, wavelet
// energy and content-type classification) from one first pass stats entry.
// No-op when the stats pointer is NULL.
static INLINE void set_twopass_params_based_on_fp_stats(
    AV1_COMP *cpi, const FIRSTPASS_STATS *this_frame_ptr) {
  if (this_frame_ptr == NULL) return;

  TWO_PASS_FRAME *twopass_frame = &cpi->twopass_frame;
  // NOTE(review): an earlier comment here mentioned a x256 rescale reversing
  // a (>> 8) applied during first pass accumulation; the current code takes
  // log1p of the raw intra_error with no such factor -- confirm which is
  // intended before relying on the absolute scale of mb_av_energy.
  twopass_frame->mb_av_energy = log1p(this_frame_ptr->intra_error);

  // Wavelet energy is only meaningful when the first pass produced valid
  // totals for it.
  const FIRSTPASS_STATS *const total_stats =
      cpi->ppi->twopass.stats_buf_ctx->total_stats;
  if (is_fp_wavelet_energy_invalid(total_stats) == 0) {
    twopass_frame->frame_avg_haar_energy =
        log1p(this_frame_ptr->frame_avg_wavelet_energy);
  }

  // Set the frame content type flag.
  if (this_frame_ptr->intra_skip_pct >= FC_ANIMATION_THRESH)
    twopass_frame->fr_content_type = FC_GRAPHICS_ANIMATION;
  else
    twopass_frame->fr_content_type = FC_NORMAL;
}
3432
// Consume the next first pass stats packet into *this_frame and refresh the
// per-frame two-pass parameters. On the very first frame (non-AOM_Q) it
// also seeds the section-wide quality estimate from the aggregate stats.
static void process_first_pass_stats(AV1_COMP *cpi,
                                     FIRSTPASS_STATS *this_frame) {
  AV1_COMMON *const cm = &cpi->common;
  CurrentFrame *const current_frame = &cm->current_frame;
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FIRSTPASS_STATS *total_stats = twopass->stats_buf_ctx->total_stats;

  const int is_very_first_frame =
      current_frame->frame_number == 0 && cpi->gf_frame_index == 0;

  if (cpi->oxcf.rc_cfg.mode != AOM_Q && is_very_first_frame && total_stats &&
      twopass->stats_buf_ctx->total_left_stats) {
    if (cpi->ppi->lap_enabled) {
      // With limited look-ahead, the stats accumulated so far stand in for
      // the whole remaining section.
      *twopass->stats_buf_ctx->total_left_stats = *total_stats;
    }
    // Special case code for first frame: derive an initial worst-quality
    // estimate from section-wide averages.
    const int section_target_bandwidth = get_section_target_bandwidth(cpi);
    FIRSTPASS_STATS *const left_stats = twopass->stats_buf_ctx->total_left_stats;
    const double section_length = left_stats->count;
    const double section_error = left_stats->coded_error / section_length;
    const double section_intra_skip =
        left_stats->intra_skip_pct / section_length;
    const double section_inactive_zone =
        (left_stats->inactive_zone_rows * 2) /
        ((double)cm->mi_params.mb_rows * section_length);
    const int tmp_q = get_twopass_worst_quality(
        cpi, section_error, section_intra_skip + section_inactive_zone,
        section_target_bandwidth);

    // Seed all quality trackers with the same initial estimate; key-frame
    // entries start halfway towards the best allowed quality.
    rc->active_worst_quality = tmp_q;
    rc->ni_av_qi = tmp_q;
    p_rc->last_q[INTER_FRAME] = tmp_q;
    p_rc->avg_q = av1_convert_qindex_to_q(tmp_q, cm->seq_params->bit_depth);
    p_rc->avg_frame_qindex[INTER_FRAME] = tmp_q;
    p_rc->last_q[KEY_FRAME] = (tmp_q + cpi->oxcf.rc_cfg.best_allowed_q) / 2;
    p_rc->avg_frame_qindex[KEY_FRAME] = p_rc->last_q[KEY_FRAME];
  }

  // Pull the next stats packet if any remain, advancing the read pointer.
  if (cpi->twopass_frame.stats_in < twopass->stats_buf_ctx->stats_in_end) {
    *this_frame = *cpi->twopass_frame.stats_in;
    ++cpi->twopass_frame.stats_in;
  }
  set_twopass_params_based_on_fp_stats(cpi, this_frame);
}
3483
// Propagate the current frame's GF-group bit allocation into the rate
// control state; in the no-stats (one-pass) case the target is applied to
// the frame immediately.
static void setup_target_rate(AV1_COMP *cpi) {
  RATE_CONTROL *const rc = &cpi->rc;
  GF_GROUP *const gf_group = &cpi->ppi->gf_group;
  const int target_rate = gf_group->bit_allocation[cpi->gf_frame_index];

  if (has_no_stats_stage(cpi)) {
    av1_rc_set_frame_target(cpi, target_rate, cpi->common.width,
                            cpi->common.height);
  }

  rc->base_frame_target = target_rate;
}
3497
// Mark frames that look like flashes. A frame is flagged when its successor
// predicts better from the second (older) reference than from the
// immediately previous frame, with a substantial second-ref usage.
void av1_mark_flashes(FIRSTPASS_STATS *first_stats,
                      FIRSTPASS_STATS *last_stats) {
  for (FIRSTPASS_STATS *cur = first_stats; cur < last_stats - 1; ++cur) {
    const FIRSTPASS_STATS *next = cur + 1;
    const int looks_like_flash = next->pcnt_second_ref > next->pcnt_inter &&
                                 next->pcnt_second_ref >= 0.5;
    cur->is_flash = looks_like_flash ? 1 : 0;
  }
  // The last frame is always treated as a non-flash.
  if (last_stats - 1 >= first_stats) {
    (last_stats - 1)->is_flash = 0;
  }
}
3516
3517 // Smooth-out the noise variance so it is more stable
3518 // Returns 0 on success, -1 on memory allocation failure.
3519 // TODO(bohanli): Use a better low-pass filter than averaging
// Smooth-out the noise variance so it is more stable.
// Applies a flash-aware moving average of width 2*HALF_FILT_LEN+1 over the
// noise_var field, clamping the window at the array boundaries.
// Returns 0 on success, -1 on memory allocation failure.
// TODO(bohanli): Use a better low-pass filter than averaging
static int smooth_filter_noise(FIRSTPASS_STATS *first_stats,
                               FIRSTPASS_STATS *last_stats) {
  const int len = (int)(last_stats - first_stats);
  // Nothing to smooth. This also avoids aom_malloc(0), whose NULL return
  // would otherwise be misreported as an allocation failure.
  if (len <= 0) return 0;
  double *smooth_noise = aom_malloc(len * sizeof(*smooth_noise));
  if (!smooth_noise) return -1;

  for (int i = 0; i < len; i++) {
    double total_noise = 0;
    double total_wt = 0;
    for (int j = -HALF_FILT_LEN; j <= HALF_FILT_LEN; j++) {
      // Clamp the window index to the valid range; flash frames carry
      // untrustworthy noise values and are excluded from the average.
      int idx = AOMMIN(AOMMAX(i + j, 0), len - 1);
      if (first_stats[idx].is_flash) continue;

      total_noise += first_stats[idx].noise_var;
      total_wt += 1.0;
    }
    if (total_wt > 0.01) {
      total_noise /= total_wt;
    } else {
      // Every frame in the window was a flash; keep the original value.
      total_noise = first_stats[i].noise_var;
    }
    smooth_noise[i] = total_noise;
  }

  // Write the filtered values back in a second pass so earlier outputs do
  // not feed into later windows.
  for (int i = 0; i < len; i++) {
    first_stats[i].noise_var = smooth_noise[i];
  }

  aom_free(smooth_noise);
  return 0;
}
3551
3552 // Estimate the noise variance of each frame from the first pass stats
// Estimate the noise variance of each frame from the first pass stats
void av1_estimate_noise(FIRSTPASS_STATS *first_stats,
                        FIRSTPASS_STATS *last_stats,
                        struct aom_internal_error_info *error_info) {
  FIRSTPASS_STATS *this_stats, *next_stats;
  double C1, C2, C3, noise;
  // Pass 1: estimate per-frame noise from correlation terms built out of
  // three consecutive frames' intra/coded/sr errors.
  for (this_stats = first_stats + 2; this_stats < last_stats; this_stats++) {
    this_stats->noise_var = 0.0;
    // flashes tend to have high correlation of innovations, so ignore them.
    if (this_stats->is_flash || (this_stats - 1)->is_flash ||
        (this_stats - 2)->is_flash)
      continue;

    C1 = (this_stats - 1)->intra_error *
         (this_stats->intra_error - this_stats->coded_error);
    C2 = (this_stats - 2)->intra_error *
         ((this_stats - 1)->intra_error - (this_stats - 1)->coded_error);
    C3 = (this_stats - 2)->intra_error *
         (this_stats->intra_error - this_stats->sr_coded_error);
    // A non-positive term makes the sqrt/ratio below meaningless; leave the
    // frame's noise at 0 (to be repaired by the copy passes further down).
    if (C1 <= 0 || C2 <= 0 || C3 <= 0) continue;
    C1 = sqrt(C1);
    C2 = sqrt(C2);
    C3 = sqrt(C3);

    // Floor the estimate at 0.01 so it stays strictly positive.
    noise = (this_stats - 1)->intra_error - C1 * C2 / C3;
    noise = AOMMAX(noise, 0.01);
    this_stats->noise_var = noise;
  }

  // Copy noise from the neighbor if the noise value is not trustworthy
  for (this_stats = first_stats + 2; this_stats < last_stats; this_stats++) {
    if (this_stats->is_flash || (this_stats - 1)->is_flash ||
        (this_stats - 2)->is_flash)
      continue;
    if (this_stats->noise_var < 1.0) {
      int found = 0;
      // TODO(bohanli): consider expanding to two directions at the same time
      // Search forward first for a trustworthy, non-flash estimate.
      for (next_stats = this_stats + 1; next_stats < last_stats; next_stats++) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash || next_stats->noise_var < 1.0)
          continue;
        found = 1;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
      if (found) continue;
      // Otherwise fall back to searching backward.
      for (next_stats = this_stats - 1; next_stats >= first_stats + 2;
           next_stats--) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash || next_stats->noise_var < 1.0)
          continue;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
    }
  }

  // copy the noise if this is a flash
  for (this_stats = first_stats + 2; this_stats < last_stats; this_stats++) {
    if (this_stats->is_flash || (this_stats - 1)->is_flash ||
        (this_stats - 2)->is_flash) {
      int found = 0;
      // Same two-direction search as above, but without the < 1.0 filter.
      for (next_stats = this_stats + 1; next_stats < last_stats; next_stats++) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash)
          continue;
        found = 1;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
      if (found) continue;
      for (next_stats = this_stats - 1; next_stats >= first_stats + 2;
           next_stats--) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash)
          continue;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
    }
  }

  // if we are at the first 2 frames, copy the noise
  for (this_stats = first_stats;
       this_stats < first_stats + 2 && (first_stats + 2) < last_stats;
       this_stats++) {
    this_stats->noise_var = (first_stats + 2)->noise_var;
  }

  if (smooth_filter_noise(first_stats, last_stats) == -1) {
    aom_internal_error(error_info, AOM_CODEC_MEM_ERROR,
                       "Error allocating buffers in smooth_filter_noise()");
  }
}
3646
3647 // Estimate correlation coefficient of each frame with its previous frame.
// Estimate correlation coefficient of each frame with its previous frame.
// Requires noise_var to be populated (see av1_estimate_noise()); the result
// is stored in cor_coeff, clipped to [0, 1], with the first frame fixed at 1.
void av1_estimate_coeff(FIRSTPASS_STATS *first_stats,
                        FIRSTPASS_STATS *last_stats) {
  FIRSTPASS_STATS *this_stats;
  for (this_stats = first_stats + 1; this_stats < last_stats; this_stats++) {
    // Cross term between the previous frame's energy and this frame's
    // prediction gain, floored at 0.001 for numerical safety.
    const double C =
        sqrt(AOMMAX((this_stats - 1)->intra_error *
                        (this_stats->intra_error - this_stats->coded_error),
                    0.001));
    const double cor_coeff =
        C /
        AOMMAX((this_stats - 1)->intra_error - this_stats->noise_var, 0.001);

    // Rescale by the ratio of the two frames' noise-adjusted energies.
    this_stats->cor_coeff =
        cor_coeff *
        sqrt(AOMMAX((this_stats - 1)->intra_error - this_stats->noise_var,
                    0.001) /
             AOMMAX(this_stats->intra_error - this_stats->noise_var, 0.001));
    // clip correlation coefficient.
    this_stats->cor_coeff = AOMMIN(AOMMAX(this_stats->cor_coeff, 0), 1);
  }
  // Only write the first entry when the range is non-empty; the previous
  // unconditional store wrote out of range for an empty stats span.
  if (first_stats < last_stats) first_stats->cor_coeff = 1.0;
}
3670
// Set up rate control and GOP parameters for the next frame in the second
// (final) encoding pass: handles forced/app key frames, defines new KF and
// GF/ARF groups at their boundaries, and installs the frame's bit target.
void av1_get_second_pass_params(AV1_COMP *cpi,
                                EncodeFrameParams *const frame_params,
                                unsigned int frame_flags) {
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  GF_GROUP *const gf_group = &cpi->ppi->gf_group;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;

  // Ducky-encode rate-control-lookahead mode: GOP structure is supplied
  // externally, so only frame type / visibility need to be set here.
  if (cpi->use_ducky_encode &&
      cpi->ducky_encode_info.frame_info.gop_mode == DUCKY_ENCODE_GOP_MODE_RCL) {
    frame_params->frame_type = gf_group->frame_type[cpi->gf_frame_index];
    frame_params->show_frame =
        !(gf_group->update_type[cpi->gf_frame_index] == ARF_UPDATE ||
          gf_group->update_type[cpi->gf_frame_index] == INTNL_ARF_UPDATE);
    if (cpi->gf_frame_index == 0) {
      av1_tf_info_reset(&cpi->ppi->tf_info);
      av1_tf_info_filtering(&cpi->ppi->tf_info, cpi, gf_group);
    }
    return;
  }

  const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
  int update_total_stats = 0;

  if (is_stat_consumption_stage(cpi) && !cpi->twopass_frame.stats_in) return;

  // Check forced key frames.
  const int frames_to_next_forced_key = detect_app_forced_key(cpi);
  if (frames_to_next_forced_key == 0) {
    rc->frames_to_key = 0;
    frame_flags &= FRAMEFLAGS_KEY;
  } else if (frames_to_next_forced_key > 0 &&
             frames_to_next_forced_key < rc->frames_to_key) {
    rc->frames_to_key = frames_to_next_forced_key;
  }

  assert(cpi->twopass_frame.stats_in != NULL);
  const int update_type = gf_group->update_type[cpi->gf_frame_index];
  frame_params->frame_type = gf_group->frame_type[cpi->gf_frame_index];

  // Mid-group, non-key frame: the existing GF group already carries the
  // allocation; just install the target rate.
  if (cpi->gf_frame_index < gf_group->size && !(frame_flags & FRAMEFLAGS_KEY)) {
    assert(cpi->gf_frame_index < gf_group->size);

    setup_target_rate(cpi);

    // If this is an arf frame then we dont want to read the stats file or
    // advance the input pointer as we already have what we need.
    if (update_type == ARF_UPDATE || update_type == INTNL_ARF_UPDATE) {
      const FIRSTPASS_STATS *const this_frame_ptr =
          read_frame_stats(twopass, &cpi->twopass_frame,
                           gf_group->arf_src_offset[cpi->gf_frame_index]);
      set_twopass_params_based_on_fp_stats(cpi, this_frame_ptr);
      return;
    }
  }

  if (oxcf->rc_cfg.mode == AOM_Q)
    rc->active_worst_quality = oxcf->rc_cfg.cq_level;

  // At a GF group boundary under LAP, re-check for scene cuts within the
  // look-ahead window and shorten frames_to_key accordingly.
  if (cpi->gf_frame_index == gf_group->size) {
    if (cpi->ppi->lap_enabled && cpi->ppi->p_rc.enable_scenecut_detection) {
      const int num_frames_to_detect_scenecut = MAX_GF_LENGTH_LAP + 1;
      const int frames_to_key = define_kf_interval(
          cpi, &twopass->firstpass_info, num_frames_to_detect_scenecut,
          /*search_start_idx=*/0);
      if (frames_to_key != -1)
        rc->frames_to_key = AOMMIN(rc->frames_to_key, frames_to_key);
    }
  }

  FIRSTPASS_STATS this_frame;
  av1_zero(this_frame);
  // Read this frame's first pass stats and refresh the derived parameters.
  if (is_stat_consumption_stage(cpi)) {
    if (cpi->gf_frame_index < gf_group->size || rc->frames_to_key == 0) {
      process_first_pass_stats(cpi, &this_frame);
      update_total_stats = 1;
    }
  } else {
    rc->active_worst_quality = oxcf->rc_cfg.cq_level;
  }

  // Keyframe and section processing.
  FIRSTPASS_STATS this_frame_copy;
  this_frame_copy = this_frame;
  if (rc->frames_to_key <= 0) {
    assert(rc->frames_to_key == 0);
    // Define next KF group and assign bits to it.
    frame_params->frame_type = KEY_FRAME;
    find_next_key_frame(cpi, &this_frame);
    this_frame = this_frame_copy;
  }

  if (rc->frames_to_fwd_kf <= 0)
    rc->frames_to_fwd_kf = oxcf->kf_cfg.fwd_kf_dist;

  // Define a new GF/ARF group. (Should always enter here for key frames).
  if (cpi->gf_frame_index == gf_group->size) {
    av1_tf_info_reset(&cpi->ppi->tf_info);
#if CONFIG_BITRATE_ACCURACY && !CONFIG_THREE_PASS
    vbr_rc_reset_gop_data(&cpi->vbr_rc_info);
#endif  // CONFIG_BITRATE_ACCURACY && !CONFIG_THREE_PASS
    // Upper bound on the GOP length: with a long enough lag, leave headroom
    // for temporal filtering; otherwise use the LAP maximum.
    int max_gop_length =
        (oxcf->gf_cfg.lag_in_frames >= 32)
            ? AOMMIN(MAX_GF_INTERVAL, oxcf->gf_cfg.lag_in_frames -
                                          oxcf->algo_cfg.arnr_max_frames / 2)
            : MAX_GF_LENGTH_LAP;

    // Handle forward key frame when enabled.
    if (oxcf->kf_cfg.fwd_kf_dist > 0)
      max_gop_length = AOMMIN(rc->frames_to_fwd_kf + 1, max_gop_length);

    // Use the provided gop size in low delay setting
    if (oxcf->gf_cfg.lag_in_frames == 0) max_gop_length = rc->max_gf_interval;

    // Limit the max gop length for the last gop in 1 pass setting.
    max_gop_length = AOMMIN(max_gop_length, rc->frames_to_key);

    // Identify regions if needed.
    // TODO(bohanli): identify regions for all stats available.
    if (rc->frames_since_key == 0 || rc->frames_since_key == 1 ||
        (p_rc->frames_till_regions_update - rc->frames_since_key <
             rc->frames_to_key &&
         p_rc->frames_till_regions_update - rc->frames_since_key <
             max_gop_length + 1)) {
      // how many frames we can analyze from this frame
      int rest_frames =
          AOMMIN(rc->frames_to_key, MAX_FIRSTPASS_ANALYSIS_FRAMES);
      rest_frames =
          AOMMIN(rest_frames, (int)(twopass->stats_buf_ctx->stats_in_end -
                                    cpi->twopass_frame.stats_in +
                                    (rc->frames_since_key == 0)));
      p_rc->frames_till_regions_update = rest_frames;

      int ret;
      if (cpi->ppi->lap_enabled) {
        // With limited look-ahead, (re-)derive flash flags, noise and
        // correlation estimates before region analysis.
        av1_mark_flashes(twopass->stats_buf_ctx->stats_in_start,
                         twopass->stats_buf_ctx->stats_in_end);
        av1_estimate_noise(twopass->stats_buf_ctx->stats_in_start,
                           twopass->stats_buf_ctx->stats_in_end,
                           cpi->common.error);
        av1_estimate_coeff(twopass->stats_buf_ctx->stats_in_start,
                           twopass->stats_buf_ctx->stats_in_end);
        ret = identify_regions(cpi->twopass_frame.stats_in, rest_frames,
                               (rc->frames_since_key == 0), p_rc->regions,
                               &p_rc->num_regions);
      } else {
        ret = identify_regions(
            cpi->twopass_frame.stats_in - (rc->frames_since_key == 0),
            rest_frames, 0, p_rc->regions, &p_rc->num_regions);
      }
      if (ret == -1) {
        aom_internal_error(cpi->common.error, AOM_CODEC_MEM_ERROR,
                           "Error allocating buffers in identify_regions");
      }
    }

    int cur_region_idx =
        find_regions_index(p_rc->regions, p_rc->num_regions,
                           rc->frames_since_key - p_rc->regions_offset);
    if ((cur_region_idx >= 0 &&
         p_rc->regions[cur_region_idx].type == SCENECUT_REGION) ||
        rc->frames_since_key == 0) {
      // If we start from a scenecut, then the last GOP's arf boost is not
      // needed for this GOP.
      cpi->ppi->gf_state.arf_gf_boost_lst = 0;
    }

    int need_gf_len = 1;
    if (cpi->third_pass_ctx && oxcf->pass == AOM_RC_THIRD_PASS) {
      // Third pass: the GF group length comes from the second-pass log
      // instead of being recomputed here.
      // set up bitstream to read
      if (!cpi->third_pass_ctx->input_file_name && oxcf->two_pass_output) {
        cpi->third_pass_ctx->input_file_name = oxcf->two_pass_output;
      }
      av1_open_second_pass_log(cpi, 1);
      THIRD_PASS_GOP_INFO *gop_info = &cpi->third_pass_ctx->gop_info;
      // Read in GOP information from the second pass file.
      av1_read_second_pass_gop_info(cpi->second_pass_log_stream, gop_info,
                                    cpi->common.error);
#if CONFIG_BITRATE_ACCURACY
      // TPL info is read to keep the log stream position in sync but is
      // otherwise discarded here.
      TPL_INFO *tpl_info;
      AOM_CHECK_MEM_ERROR(cpi->common.error, tpl_info,
                          aom_malloc(sizeof(*tpl_info)));
      av1_read_tpl_info(tpl_info, cpi->second_pass_log_stream,
                        cpi->common.error);
      aom_free(tpl_info);
#if CONFIG_THREE_PASS
      // TODO(angiebird): Put this part into a func
      cpi->vbr_rc_info.cur_gop_idx++;
#endif  // CONFIG_THREE_PASS
#endif  // CONFIG_BITRATE_ACCURACY
      // Read in third_pass_info from the bitstream.
      av1_set_gop_third_pass(cpi->third_pass_ctx);
      // Read in per-frame info from second-pass encoding
      av1_read_second_pass_per_frame_info(
          cpi->second_pass_log_stream, cpi->third_pass_ctx->frame_info,
          gop_info->num_frames, cpi->common.error);

      p_rc->cur_gf_index = 0;
      p_rc->gf_intervals[0] = cpi->third_pass_ctx->gop_info.gf_length;
      need_gf_len = 0;
    }

    if (need_gf_len) {
      // If we cannot obtain GF group length from second_pass_file
      // TODO(jingning): Resolve the redundant calls here.
      // NOTE(review): the "|| 1" makes this branch unconditional; kept as-is
      // pending the TODO above.
      if (rc->intervals_till_gf_calculate_due == 0 || 1) {
        calculate_gf_length(cpi, max_gop_length, MAX_NUM_GF_INTERVALS);
      }

      // For long GOPs with TPL enabled, trial-encode the long interval and
      // fall back to a 16-frame cap when a shorter interval scores better.
      if (max_gop_length > 16 && oxcf->algo_cfg.enable_tpl_model &&
          oxcf->gf_cfg.lag_in_frames >= 32 &&
          cpi->sf.tpl_sf.gop_length_decision_method != 3) {
        int this_idx = rc->frames_since_key +
                       p_rc->gf_intervals[p_rc->cur_gf_index] -
                       p_rc->regions_offset - 1;
        int this_region =
            find_regions_index(p_rc->regions, p_rc->num_regions, this_idx);
        int next_region =
            find_regions_index(p_rc->regions, p_rc->num_regions, this_idx + 1);
        // TODO(angiebird): Figure out why this_region and next_region are -1 in
        // unit test like AltRefFramePresenceTestLarge (aomedia:3134)
        int is_last_scenecut =
            p_rc->gf_intervals[p_rc->cur_gf_index] >= rc->frames_to_key ||
            (this_region != -1 &&
             p_rc->regions[this_region].type == SCENECUT_REGION) ||
            (next_region != -1 &&
             p_rc->regions[next_region].type == SCENECUT_REGION);

        int ori_gf_int = p_rc->gf_intervals[p_rc->cur_gf_index];

        if (p_rc->gf_intervals[p_rc->cur_gf_index] > 16 &&
            rc->min_gf_interval <= 16) {
          // The calculate_gf_length function is previously used with
          // max_gop_length = 32 with look-ahead gf intervals.
          define_gf_group(cpi, frame_params, 0);
          av1_tf_info_filtering(&cpi->ppi->tf_info, cpi, gf_group);
          this_frame = this_frame_copy;

          if (is_shorter_gf_interval_better(cpi, frame_params)) {
            // A shorter gf interval is better.
            // TODO(jingning): Remove redundant computations here.
            max_gop_length = 16;
            calculate_gf_length(cpi, max_gop_length, 1);
            if (is_last_scenecut &&
                (ori_gf_int - p_rc->gf_intervals[p_rc->cur_gf_index] < 4)) {
              p_rc->gf_intervals[p_rc->cur_gf_index] = ori_gf_int;
            }
          }
        }
      }
    }

    define_gf_group(cpi, frame_params, 0);

    if (gf_group->update_type[cpi->gf_frame_index] != ARF_UPDATE &&
        rc->frames_since_key > 0)
      process_first_pass_stats(cpi, &this_frame);

    // Final-pass group definition after any stats consumption above.
    define_gf_group(cpi, frame_params, 1);

    // write gop info if needed for third pass. Per-frame info is written after
    // each frame is encoded.
    av1_write_second_pass_gop_info(cpi);

    av1_tf_info_filtering(&cpi->ppi->tf_info, cpi, gf_group);

    rc->frames_till_gf_update_due = p_rc->baseline_gf_interval;
    assert(cpi->gf_frame_index == 0);
#if ARF_STATS_OUTPUT
    {
      FILE *fpfile;
      fpfile = fopen("arf.stt", "a");
      ++arf_count;
      fprintf(fpfile, "%10d %10d %10d %10d %10d\n",
              cpi->common.current_frame.frame_number,
              rc->frames_till_gf_update_due, cpi->ppi->p_rc.kf_boost, arf_count,
              p_rc->gfu_boost);

      fclose(fpfile);
    }
#endif
  }
  assert(cpi->gf_frame_index < gf_group->size);

  if (gf_group->update_type[cpi->gf_frame_index] == ARF_UPDATE ||
      gf_group->update_type[cpi->gf_frame_index] == INTNL_ARF_UPDATE) {
    // ARF: rewind and peek the stats at the arf source offset without
    // consuming them.
    reset_fpf_position(&cpi->twopass_frame, start_pos);

    const FIRSTPASS_STATS *const this_frame_ptr =
        read_frame_stats(twopass, &cpi->twopass_frame,
                         gf_group->arf_src_offset[cpi->gf_frame_index]);
    set_twopass_params_based_on_fp_stats(cpi, this_frame_ptr);
  } else {
    // Back up this frame's stats for updating total stats during post encode.
    cpi->twopass_frame.this_frame = update_total_stats ? start_pos : NULL;
  }

  frame_params->frame_type = gf_group->frame_type[cpi->gf_frame_index];
  setup_target_rate(cpi);
}
3973
// Initialize the two pass (stats consumption) state from the first pass
// stats buffer: marks flashes, estimates noise/coefficients, derives the
// clip frame rate and total bit budget, and precomputes the modified error
// bounds and totals used for bit allocation.
void av1_init_second_pass(AV1_COMP *cpi) {
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FRAME_INFO *const frame_info = &cpi->frame_info;

  // Nothing to initialize without first pass stats.
  if (!twopass->stats_buf_ctx->stats_in_end) return;

  av1_mark_flashes(twopass->stats_buf_ctx->stats_in_start,
                   twopass->stats_buf_ctx->stats_in_end);
  av1_estimate_noise(twopass->stats_buf_ctx->stats_in_start,
                     twopass->stats_buf_ctx->stats_in_end, cpi->common.error);
  av1_estimate_coeff(twopass->stats_buf_ctx->stats_in_start,
                     twopass->stats_buf_ctx->stats_in_end);

  // The entry at stats_in_end holds the accumulated totals for the clip.
  FIRSTPASS_STATS *const stats = twopass->stats_buf_ctx->total_stats;
  *stats = *twopass->stats_buf_ctx->stats_in_end;
  *twopass->stats_buf_ctx->total_left_stats = *stats;

  // Each frame can have a different duration, as the frame rate in the
  // source isn't guaranteed to be constant. The frame rate prior to the
  // first frame encoded in the second pass is a guess. However, the sum
  // duration is not: it is calculated from the actual durations of all
  // frames in the first pass.
  const double frame_rate = 10000000.0 * stats->count / stats->duration;
  av1_new_framerate(cpi, frame_rate);
  twopass->bits_left =
      (int64_t)(stats->duration * oxcf->rc_cfg.target_bandwidth / 10000000.0);

#if CONFIG_BITRATE_ACCURACY
  av1_vbr_rc_init(&cpi->vbr_rc_info, twopass->bits_left,
                  (int)round(stats->count));
#endif

#if CONFIG_RATECTRL_LOG
  rc_log_init(&cpi->rc_log);
#endif

  // Tracks how far behind the second reference update is lagging.
  twopass->sr_update_lag = 1;

  // Scan the first pass stats and accumulate a modified total error based
  // upon the bias/power function used to allocate bits.
  {
    const double avg_error =
        stats->coded_error / DOUBLE_DIVIDE_CHECK(stats->count);
    twopass->modified_error_min =
        (avg_error * oxcf->rc_cfg.vbrmin_section) / 100;
    twopass->modified_error_max =
        (avg_error * oxcf->rc_cfg.vbrmax_section) / 100;
    double total_modified_error = 0.0;
    for (const FIRSTPASS_STATS *s = cpi->twopass_frame.stats_in;
         s < twopass->stats_buf_ctx->stats_in_end; ++s) {
      total_modified_error +=
          calculate_modified_err(frame_info, twopass, oxcf, s);
    }
    twopass->modified_error_left = total_modified_error;
  }

  // Reset the vbr bits off target counters.
  cpi->ppi->p_rc.vbr_bits_off_target = 0;
  cpi->ppi->p_rc.vbr_bits_off_target_fast = 0;
  cpi->ppi->p_rc.rate_error_estimate = 0;

  // Static sequence monitor variables.
  twopass->kf_zeromotion_pct = 100;
  twopass->last_kfgroup_zeromotion_pct = 100;

  // Start from a neutral bits-per-macroblock correction factor, and seed
  // the ARF group target/actual bit counters so the initial bpm adjustment
  // is also neutral.
  twopass->bpm_factor = 1.0;
  twopass->rolling_arf_group_target_bits = 1;
  twopass->rolling_arf_group_actual_bits = 1;
}
4053
// Initialize two pass state for single pass encoding with look-ahead
// processing (LAP). No full first pass exists, so the bit budget and the
// modified error bounds are cleared rather than derived from stats.
void av1_init_single_pass_lap(AV1_COMP *cpi) {
  TWO_PASS *const twopass = &cpi->ppi->twopass;

  if (!twopass->stats_buf_ctx->stats_in_end) return;

  // Without a complete first pass there is no known bit budget or modified
  // error range to work from.
  twopass->bits_left = 0;
  twopass->modified_error_min = 0.0;
  twopass->modified_error_max = 0.0;
  twopass->modified_error_left = 0.0;

  // Tracks how far behind the second reference update is lagging.
  twopass->sr_update_lag = 1;

  // Reset the vbr bits off target counters.
  cpi->ppi->p_rc.vbr_bits_off_target = 0;
  cpi->ppi->p_rc.vbr_bits_off_target_fast = 0;
  cpi->ppi->p_rc.rate_error_estimate = 0;

  // Static sequence monitor variables.
  twopass->kf_zeromotion_pct = 100;
  twopass->last_kfgroup_zeromotion_pct = 100;

  // Start from a neutral bits-per-macroblock correction factor, and seed
  // the ARF group target/actual bit counters so the initial bpm adjustment
  // is also neutral.
  twopass->bpm_factor = 1.0;
  twopass->rolling_arf_group_target_bits = 1;
  twopass->rolling_arf_group_actual_bits = 1;
}
4084
4085 #define MINQ_ADJ_LIMIT 48
4086 #define MINQ_ADJ_LIMIT_CQ 20
4087 #define HIGH_UNDERSHOOT_RATIO 2
// Post-encode bookkeeping for two pass / LAP rate control. For the frame
// just coded this: advances (or rewinds) the first pass stats position,
// accumulates VBR bits-off-target and rolling ARF group bit counters,
// adapts extend_minq / extend_maxq when rate control is drifting, and
// merges the per-frame probability estimates (tx type, obmc, warped
// motion, switchable interp filter) from normal or frame-parallel encodes
// into the persistent frame probabilities.
void av1_twopass_postencode_update(AV1_COMP *cpi) {
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const RateControlCfg *const rc_cfg = &cpi->oxcf.rc_cfg;

  // Increment the stats_in pointer.
  if (is_stat_consumption_stage(cpi) &&
      !(cpi->use_ducky_encode && cpi->ducky_encode_info.frame_info.gop_mode ==
                                     DUCKY_ENCODE_GOP_MODE_RCL) &&
      (cpi->gf_frame_index < cpi->ppi->gf_group.size ||
       rc->frames_to_key == 0)) {
    const int update_type = cpi->ppi->gf_group.update_type[cpi->gf_frame_index];
    if (update_type != ARF_UPDATE && update_type != INTNL_ARF_UPDATE) {
      FIRSTPASS_STATS this_frame;
      assert(cpi->twopass_frame.stats_in >
             twopass->stats_buf_ctx->stats_in_start);
      // Step back one entry and re-consume this frame's stats.
      // NOTE(review): input_stats()/input_stats_lap() presumably advance
      // the position again — confirm against their definitions.
      --cpi->twopass_frame.stats_in;
      if (cpi->ppi->lap_enabled) {
        input_stats_lap(twopass, &cpi->twopass_frame, &this_frame);
      } else {
        input_stats(twopass, &cpi->twopass_frame, &this_frame);
      }
    } else if (cpi->ppi->lap_enabled) {
      cpi->twopass_frame.stats_in = twopass->stats_buf_ctx->stats_in_start;
    }
  }

  // VBR correction is done through p_rc->vbr_bits_off_target. Based on the
  // sign of this value, a limited % adjustment is made to the target rate
  // of subsequent frames, to try and push it back towards 0. This method
  // is designed to prevent extreme behaviour at the end of a clip
  // or group of frames.
  p_rc->vbr_bits_off_target += rc->base_frame_target - rc->projected_frame_size;
  twopass->bits_left = AOMMAX(twopass->bits_left - rc->base_frame_target, 0);

  if (cpi->do_update_vbr_bits_off_target_fast) {
    // Subtract current frame's fast_extra_bits.
    p_rc->vbr_bits_off_target_fast -= rc->frame_level_fast_extra_bits;
    rc->frame_level_fast_extra_bits = 0;
  }

  // Target vs actual bits for this arf group.
  twopass->rolling_arf_group_target_bits += rc->base_frame_target;
  twopass->rolling_arf_group_actual_bits += rc->projected_frame_size;

  // Calculate the pct rc error, clamped to +/-100%.
  if (p_rc->total_actual_bits) {
    p_rc->rate_error_estimate =
        (int)((p_rc->vbr_bits_off_target * 100) / p_rc->total_actual_bits);
    p_rc->rate_error_estimate = clamp(p_rc->rate_error_estimate, -100, 100);
  } else {
    p_rc->rate_error_estimate = 0;
  }

#if CONFIG_FPMT_TEST
  /* The variables temp_vbr_bits_off_target, temp_bits_left,
   * temp_rolling_arf_group_target_bits, temp_rolling_arf_group_actual_bits
   * temp_rate_error_estimate are introduced for quality simulation purpose,
   * it retains the value previous to the parallel encode frames. The
   * variables are updated based on the update flag.
   *
   * If there exist show_existing_frames between parallel frames, then to
   * retain the temp state do not update it. */
  const int simulate_parallel_frame =
      cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE;
  int show_existing_between_parallel_frames =
      (cpi->ppi->gf_group.update_type[cpi->gf_frame_index] ==
           INTNL_OVERLAY_UPDATE &&
       cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index + 1] == 2);

  if (cpi->do_frame_data_update && !show_existing_between_parallel_frames &&
      simulate_parallel_frame) {
    cpi->ppi->p_rc.temp_vbr_bits_off_target = p_rc->vbr_bits_off_target;
    cpi->ppi->p_rc.temp_bits_left = twopass->bits_left;
    cpi->ppi->p_rc.temp_rolling_arf_group_target_bits =
        twopass->rolling_arf_group_target_bits;
    cpi->ppi->p_rc.temp_rolling_arf_group_actual_bits =
        twopass->rolling_arf_group_actual_bits;
    cpi->ppi->p_rc.temp_rate_error_estimate = p_rc->rate_error_estimate;
  }
#endif
  // Update the active best quality pyramid: the qindex used at this frame's
  // pyramid level is propagated to all deeper levels.
  if (!rc->is_src_frame_alt_ref) {
    const int pyramid_level =
        cpi->ppi->gf_group.layer_depth[cpi->gf_frame_index];
    int i;
    for (i = pyramid_level; i <= MAX_ARF_LAYERS; ++i) {
      p_rc->active_best_quality[i] = cpi->common.quant_params.base_qindex;
#if CONFIG_TUNE_VMAF
      if (cpi->vmaf_info.original_qindex != -1 &&
          (cpi->oxcf.tune_cfg.tuning >= AOM_TUNE_VMAF_WITH_PREPROCESSING &&
           cpi->oxcf.tune_cfg.tuning <= AOM_TUNE_VMAF_NEG_MAX_GAIN)) {
        p_rc->active_best_quality[i] = cpi->vmaf_info.original_qindex;
      }
#endif
    }
  }

#if 0
  {
    AV1_COMMON *cm = &cpi->common;
    FILE *fpfile;
    fpfile = fopen("details.stt", "a");
    fprintf(fpfile,
            "%10d %10d %10d %10" PRId64 " %10" PRId64
            " %10d %10d %10d %10.4lf %10.4lf %10.4lf %10.4lf\n",
            cm->current_frame.frame_number, rc->base_frame_target,
            rc->projected_frame_size, rc->total_actual_bits,
            rc->vbr_bits_off_target, p_rc->rate_error_estimate,
            twopass->rolling_arf_group_target_bits,
            twopass->rolling_arf_group_actual_bits,
            (double)twopass->rolling_arf_group_actual_bits /
                (double)twopass->rolling_arf_group_target_bits,
            twopass->bpm_factor,
            av1_convert_qindex_to_q(cpi->common.quant_params.base_qindex,
                                    cm->seq_params->bit_depth),
            av1_convert_qindex_to_q(rc->active_worst_quality,
                                    cm->seq_params->bit_depth));
    fclose(fpfile);
  }
#endif

  if (cpi->common.current_frame.frame_type != KEY_FRAME) {
    twopass->kf_group_bits -= rc->base_frame_target;
    twopass->last_kfgroup_zeromotion_pct = twopass->kf_zeromotion_pct;
  }
  twopass->kf_group_bits = AOMMAX(twopass->kf_group_bits, 0);

  // If the rate control is drifting consider adjustment to min or maxq.
  if ((rc_cfg->mode != AOM_Q) && !cpi->rc.is_src_frame_alt_ref &&
      (p_rc->rolling_target_bits > 0)) {
    int minq_adj_limit;
    int maxq_adj_limit;
    minq_adj_limit =
        (rc_cfg->mode == AOM_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT);
    maxq_adj_limit = (rc->worst_quality - rc->active_worst_quality);

    // Undershoot
    if ((rc_cfg->under_shoot_pct < 100) &&
        (p_rc->rolling_actual_bits < p_rc->rolling_target_bits)) {
      int pct_error =
          ((p_rc->rolling_target_bits - p_rc->rolling_actual_bits) * 100) /
          p_rc->rolling_target_bits;

      if ((pct_error >= rc_cfg->under_shoot_pct) &&
          (p_rc->rate_error_estimate > 0)) {
        twopass->extend_minq += 1;
        twopass->extend_maxq -= 1;
      }

      // Overshoot
    } else if ((rc_cfg->over_shoot_pct < 100) &&
               (p_rc->rolling_actual_bits > p_rc->rolling_target_bits)) {
      int pct_error =
          ((p_rc->rolling_actual_bits - p_rc->rolling_target_bits) * 100) /
          p_rc->rolling_target_bits;

      pct_error = clamp(pct_error, 0, 100);
      if ((pct_error >= rc_cfg->over_shoot_pct) &&
          (p_rc->rate_error_estimate < 0)) {
        twopass->extend_maxq += 1;
        twopass->extend_minq -= 1;
      }
    }
    twopass->extend_minq =
        clamp(twopass->extend_minq, -minq_adj_limit, minq_adj_limit);
    twopass->extend_maxq = clamp(twopass->extend_maxq, 0, maxq_adj_limit);

    // If there is a big and unexpected undershoot then feed the extra
    // bits back in quickly. One situation where this may happen is if a
    // frame is unexpectedly almost perfectly predicted by the ARF or GF
    // but not very well predicted by the previous frame.
    if (!frame_is_kf_gf_arf(cpi) && !cpi->rc.is_src_frame_alt_ref) {
      int fast_extra_thresh = rc->base_frame_target / HIGH_UNDERSHOOT_RATIO;
      if (rc->projected_frame_size < fast_extra_thresh) {
        p_rc->vbr_bits_off_target_fast +=
            fast_extra_thresh - rc->projected_frame_size;
        p_rc->vbr_bits_off_target_fast = AOMMIN(p_rc->vbr_bits_off_target_fast,
                                                (4 * rc->avg_frame_bandwidth));
      }
    }

#if CONFIG_FPMT_TEST
    if (cpi->do_frame_data_update && !show_existing_between_parallel_frames &&
        simulate_parallel_frame) {
      cpi->ppi->p_rc.temp_vbr_bits_off_target_fast =
          p_rc->vbr_bits_off_target_fast;
      cpi->ppi->p_rc.temp_extend_minq = twopass->extend_minq;
      cpi->ppi->p_rc.temp_extend_maxq = twopass->extend_maxq;
    }
#endif
  }

  // Update the frame probabilities obtained from parallel encode frames
  FrameProbInfo *const frame_probs = &cpi->ppi->frame_probs;
#if CONFIG_FPMT_TEST
  /* The variable temp_active_best_quality is introduced only for quality
   * simulation purpose, it retains the value previous to the parallel
   * encode frames. The variable is updated based on the update flag.
   *
   * If there exist show_existing_frames between parallel frames, then to
   * retain the temp state do not update it. */
  if (cpi->do_frame_data_update && !show_existing_between_parallel_frames &&
      simulate_parallel_frame) {
    int i;
    const int pyramid_level =
        cpi->ppi->gf_group.layer_depth[cpi->gf_frame_index];
    if (!rc->is_src_frame_alt_ref) {
      for (i = pyramid_level; i <= MAX_ARF_LAYERS; ++i)
        cpi->ppi->p_rc.temp_active_best_quality[i] =
            p_rc->active_best_quality[i];
    }
  }

  // Select the probability buffers used when simulating parallel encodes:
  // averages go into temp_frame_probs_simulation, and temp_frame_probs is
  // only written when the simulation results are committed below.
  FrameProbInfo *const temp_frame_probs_simulation =
      simulate_parallel_frame ? &cpi->ppi->temp_frame_probs_simulation
                              : frame_probs;
  FrameProbInfo *const temp_frame_probs =
      simulate_parallel_frame ? &cpi->ppi->temp_frame_probs : NULL;
#endif
  int i, j, loop;
  // Sequentially do average on temp_frame_probs_simulation which holds
  // probabilities of last frame before parallel encode
  for (loop = 0; loop <= cpi->num_frame_recode; loop++) {
    // Sequentially update tx_type_probs
    if (cpi->do_update_frame_probs_txtype[loop] &&
        (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0)) {
      const FRAME_UPDATE_TYPE update_type =
          get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
      for (i = 0; i < TX_SIZES_ALL; i++) {
        // Average old and new probs; any rounding remainder is folded into
        // the j == 0 bucket so the row still sums to 1024.
        int left = 1024;

        for (j = TX_TYPES - 1; j >= 0; j--) {
          const int new_prob =
              cpi->frame_new_probs[loop].tx_type_probs[update_type][i][j];
#if CONFIG_FPMT_TEST
          int prob =
              (temp_frame_probs_simulation->tx_type_probs[update_type][i][j] +
               new_prob) >>
              1;
          left -= prob;
          if (j == 0) prob += left;
          temp_frame_probs_simulation->tx_type_probs[update_type][i][j] = prob;
#else
          int prob =
              (frame_probs->tx_type_probs[update_type][i][j] + new_prob) >> 1;
          left -= prob;
          if (j == 0) prob += left;
          frame_probs->tx_type_probs[update_type][i][j] = prob;
#endif
        }
      }
    }

    // Sequentially update obmc_probs
    if (cpi->do_update_frame_probs_obmc[loop] &&
        cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
      const FRAME_UPDATE_TYPE update_type =
          get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);

      for (i = 0; i < BLOCK_SIZES_ALL; i++) {
        const int new_prob =
            cpi->frame_new_probs[loop].obmc_probs[update_type][i];
#if CONFIG_FPMT_TEST
        temp_frame_probs_simulation->obmc_probs[update_type][i] =
            (temp_frame_probs_simulation->obmc_probs[update_type][i] +
             new_prob) >>
            1;
#else
        frame_probs->obmc_probs[update_type][i] =
            (frame_probs->obmc_probs[update_type][i] + new_prob) >> 1;
#endif
      }
    }

    // Sequentially update warped_probs
    if (cpi->do_update_frame_probs_warp[loop] &&
        cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
      const FRAME_UPDATE_TYPE update_type =
          get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
      const int new_prob = cpi->frame_new_probs[loop].warped_probs[update_type];
#if CONFIG_FPMT_TEST
      temp_frame_probs_simulation->warped_probs[update_type] =
          (temp_frame_probs_simulation->warped_probs[update_type] + new_prob) >>
          1;
#else
      frame_probs->warped_probs[update_type] =
          (frame_probs->warped_probs[update_type] + new_prob) >> 1;
#endif
    }

    // Sequentially update switchable_interp_probs
    if (cpi->do_update_frame_probs_interpfilter[loop] &&
        cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
      const FRAME_UPDATE_TYPE update_type =
          get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);

      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
        // Average old and new probs; any rounding remainder is folded into
        // the j == 0 bucket so the row still sums to 1536.
        int left = 1536;

        for (j = SWITCHABLE_FILTERS - 1; j >= 0; j--) {
          const int new_prob = cpi->frame_new_probs[loop]
                                   .switchable_interp_probs[update_type][i][j];
#if CONFIG_FPMT_TEST
          int prob = (temp_frame_probs_simulation
                          ->switchable_interp_probs[update_type][i][j] +
                      new_prob) >>
                     1;
          left -= prob;
          if (j == 0) prob += left;

          temp_frame_probs_simulation
              ->switchable_interp_probs[update_type][i][j] = prob;
#else
          int prob = (frame_probs->switchable_interp_probs[update_type][i][j] +
                      new_prob) >>
                     1;
          left -= prob;
          if (j == 0) prob += left;
          frame_probs->switchable_interp_probs[update_type][i][j] = prob;
#endif
        }
      }
    }
  }

#if CONFIG_FPMT_TEST
  // Copying temp_frame_probs_simulation to temp_frame_probs based on
  // the flag
  if (cpi->do_frame_data_update &&
      cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0 &&
      simulate_parallel_frame) {
    for (int update_type_idx = 0; update_type_idx < FRAME_UPDATE_TYPES;
         update_type_idx++) {
      for (i = 0; i < BLOCK_SIZES_ALL; i++) {
        temp_frame_probs->obmc_probs[update_type_idx][i] =
            temp_frame_probs_simulation->obmc_probs[update_type_idx][i];
      }
      temp_frame_probs->warped_probs[update_type_idx] =
          temp_frame_probs_simulation->warped_probs[update_type_idx];
      for (i = 0; i < TX_SIZES_ALL; i++) {
        for (j = 0; j < TX_TYPES; j++) {
          temp_frame_probs->tx_type_probs[update_type_idx][i][j] =
              temp_frame_probs_simulation->tx_type_probs[update_type_idx][i][j];
        }
      }
      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
        for (j = 0; j < SWITCHABLE_FILTERS; j++) {
          temp_frame_probs->switchable_interp_probs[update_type_idx][i][j] =
              temp_frame_probs_simulation
                  ->switchable_interp_probs[update_type_idx][i][j];
        }
      }
    }
  }
#endif
  // Update framerate obtained from parallel encode frames
  if (cpi->common.show_frame &&
      cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0)
    cpi->framerate = cpi->new_framerate;
#if CONFIG_FPMT_TEST
  // SIMULATION PURPOSE
  int show_existing_between_parallel_frames_cndn =
      (cpi->ppi->gf_group.update_type[cpi->gf_frame_index] ==
           INTNL_OVERLAY_UPDATE &&
       cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index + 1] == 2);
  if (cpi->common.show_frame && !show_existing_between_parallel_frames_cndn &&
      cpi->do_frame_data_update && simulate_parallel_frame)
    cpi->temp_framerate = cpi->framerate;
#endif
}
4461