1 /*
2 * Copyright (c) 2019, Alliance for Open Media. All rights reserved
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 */
11
12 /*!\defgroup gf_group_algo Golden Frame Group
13 * \ingroup high_level_algo
14 * Algorithms regarding determining the length of GF groups and defining GF
15 * group structures.
16 * @{
17 */
18 /*! @} - end defgroup gf_group_algo */
19
20 #include <stdint.h>
21
22 #include "config/aom_config.h"
23 #include "config/aom_scale_rtcd.h"
24
25 #include "aom/aom_codec.h"
26 #include "aom/aom_encoder.h"
27
28 #include "av1/common/av1_common_int.h"
29
30 #include "av1/encoder/encoder.h"
31 #include "av1/encoder/firstpass.h"
32 #include "av1/encoder/gop_structure.h"
33 #include "av1/encoder/pass2_strategy.h"
34 #include "av1/encoder/ratectrl.h"
35 #include "av1/encoder/rc_utils.h"
36 #include "av1/encoder/temporal_filter.h"
37 #include "av1/encoder/thirdpass.h"
38 #include "av1/encoder/tpl_model.h"
39 #include "av1/encoder/encode_strategy.h"
40
41 #define DEFAULT_KF_BOOST 2300
42 #define DEFAULT_GF_BOOST 2000
43 #define GROUP_ADAPTIVE_MAXQ 1
44
45 static void init_gf_stats(GF_GROUP_STATS *gf_stats);
46 static int define_gf_group_pass3(AV1_COMP *cpi, EncodeFrameParams *frame_params,
47 int is_final_pass);
48
49 // Calculate an active area of the image that discounts formatting
50 // bars and partially discounts other 0 energy areas.
51 #define MIN_ACTIVE_AREA 0.5
52 #define MAX_ACTIVE_AREA 1.0
calculate_active_area(const FRAME_INFO * frame_info,const FIRSTPASS_STATS * this_frame)53 static double calculate_active_area(const FRAME_INFO *frame_info,
54 const FIRSTPASS_STATS *this_frame) {
55 const double active_pct =
56 1.0 -
57 ((this_frame->intra_skip_pct / 2) +
58 ((this_frame->inactive_zone_rows * 2) / (double)frame_info->mb_rows));
59 return fclamp(active_pct, MIN_ACTIVE_AREA, MAX_ACTIVE_AREA);
60 }
61
62 // Calculate a modified Error used in distributing bits between easier and
63 // harder frames.
64 #define ACT_AREA_CORRECTION 0.5
calculate_modified_err_new(const FRAME_INFO * frame_info,const FIRSTPASS_STATS * total_stats,const FIRSTPASS_STATS * this_stats,int vbrbias,double modified_error_min,double modified_error_max)65 static double calculate_modified_err_new(const FRAME_INFO *frame_info,
66 const FIRSTPASS_STATS *total_stats,
67 const FIRSTPASS_STATS *this_stats,
68 int vbrbias, double modified_error_min,
69 double modified_error_max) {
70 if (total_stats == NULL) {
71 return 0;
72 }
73 const double av_weight = total_stats->weight / total_stats->count;
74 const double av_err =
75 (total_stats->coded_error * av_weight) / total_stats->count;
76 double modified_error =
77 av_err * pow(this_stats->coded_error * this_stats->weight /
78 DOUBLE_DIVIDE_CHECK(av_err),
79 vbrbias / 100.0);
80
81 // Correction for active area. Frames with a reduced active area
82 // (eg due to formatting bars) have a higher error per mb for the
83 // remaining active MBs. The correction here assumes that coding
84 // 0.5N blocks of complexity 2X is a little easier than coding N
85 // blocks of complexity X.
86 modified_error *=
87 pow(calculate_active_area(frame_info, this_stats), ACT_AREA_CORRECTION);
88
89 return fclamp(modified_error, modified_error_min, modified_error_max);
90 }
91
calculate_modified_err(const FRAME_INFO * frame_info,const TWO_PASS * twopass,const AV1EncoderConfig * oxcf,const FIRSTPASS_STATS * this_frame)92 static double calculate_modified_err(const FRAME_INFO *frame_info,
93 const TWO_PASS *twopass,
94 const AV1EncoderConfig *oxcf,
95 const FIRSTPASS_STATS *this_frame) {
96 const FIRSTPASS_STATS *total_stats = twopass->stats_buf_ctx->total_stats;
97 return calculate_modified_err_new(
98 frame_info, total_stats, this_frame, oxcf->rc_cfg.vbrbias,
99 twopass->modified_error_min, twopass->modified_error_max);
100 }
101
// Resets the first pass stats read pointer to a previously saved position
// within the in-memory stats buffer. (Despite the legacy "file" wording,
// this is a simple pointer assignment, not a file seek.)
static void reset_fpf_position(TWO_PASS_FRAME *p_frame,
                               const FIRSTPASS_STATS *position) {
  p_frame->stats_in = position;
}
108
// Copies the next first pass stats record into *fps and advances the read
// pointer. Returns EOF when no stats remain, 1 otherwise.
static int input_stats(TWO_PASS *p, TWO_PASS_FRAME *p_frame,
                       FIRSTPASS_STATS *fps) {
  if (p_frame->stats_in >= p->stats_buf_ctx->stats_in_end) return EOF;
  *fps = *p_frame->stats_in++;
  return 1;
}
117
// Look-ahead-processing variant of input_stats(): copies the current stats
// record into *fps, then shifts the remaining buffered stats down by one
// slot (discarding the consumed entry) instead of advancing the read
// pointer. Returns EOF when no stats remain, 1 otherwise.
static int input_stats_lap(TWO_PASS *p, TWO_PASS_FRAME *p_frame,
                           FIRSTPASS_STATS *fps) {
  if (p_frame->stats_in >= p->stats_buf_ctx->stats_in_end) return EOF;

  *fps = *p_frame->stats_in;
  /* Move old stats[0] out to accommodate for next frame stats */
  // Shift everything after the current record down one slot; the element
  // count is (entries between stats_in and stats_in_end) minus the one
  // just consumed.
  memmove(p->frame_stats_arr[0], p->frame_stats_arr[1],
          (p->stats_buf_ctx->stats_in_end - p_frame->stats_in - 1) *
              sizeof(FIRSTPASS_STATS));
  // One fewer buffered record is now available.
  p->stats_buf_ctx->stats_in_end--;
  return 1;
}
130
131 // Read frame stats at an offset from the current position.
read_frame_stats(const TWO_PASS * p,const TWO_PASS_FRAME * p_frame,int offset)132 static const FIRSTPASS_STATS *read_frame_stats(const TWO_PASS *p,
133 const TWO_PASS_FRAME *p_frame,
134 int offset) {
135 if ((offset >= 0 &&
136 p_frame->stats_in + offset >= p->stats_buf_ctx->stats_in_end) ||
137 (offset < 0 &&
138 p_frame->stats_in + offset < p->stats_buf_ctx->stats_in_start)) {
139 return NULL;
140 }
141
142 return &p_frame->stats_in[offset];
143 }
144
145 // This function returns the maximum target rate per frame.
frame_max_bits(const RATE_CONTROL * rc,const AV1EncoderConfig * oxcf)146 static int frame_max_bits(const RATE_CONTROL *rc,
147 const AV1EncoderConfig *oxcf) {
148 int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth *
149 (int64_t)oxcf->rc_cfg.vbrmax_section) /
150 100;
151 if (max_bits < 0)
152 max_bits = 0;
153 else if (max_bits > rc->max_frame_bandwidth)
154 max_bits = rc->max_frame_bandwidth;
155
156 return (int)max_bits;
157 }
158
159 static const double q_pow_term[(QINDEX_RANGE >> 5) + 1] = { 0.65, 0.70, 0.75,
160 0.80, 0.85, 0.90,
161 0.95, 0.95, 0.95 };
162 #define ERR_DIVISOR 96.0
calc_correction_factor(double err_per_mb,int q)163 static double calc_correction_factor(double err_per_mb, int q) {
164 const double error_term = err_per_mb / ERR_DIVISOR;
165 const int index = q >> 5;
166 // Adjustment to power term based on qindex
167 const double power_term =
168 q_pow_term[index] +
169 (((q_pow_term[index + 1] - q_pow_term[index]) * (q % 32)) / 32.0);
170 assert(error_term >= 0.0);
171 return fclamp(pow(error_term, power_term), 0.05, 5.0);
172 }
173
174 // Based on history adjust expectations of bits per macroblock.
// Based on history adjust expectations of bits per macroblock.
// Updates twopass->bpm_factor from (a) third-pass per-frame accounting when
// available and (b) the running rate error of the two-pass encode. The
// factor is always clamped to [1 - adj_limit, 1 + adj_limit].
static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;

  // Based on recent history adjust expectations of bits per macroblock.
  double damp_fac = AOMMAX(5.0, rate_err_tol / 10.0);
  double rate_err_factor = 1.0;
  // Tighter rate tolerance allows a wider adjustment band, floored at 0.20.
  const double adj_limit = AOMMAX(0.20, (double)(100 - rate_err_tol) / 200.0);
  const double min_fac = 1.0 - adj_limit;
  const double max_fac = 1.0 + adj_limit;

  // Third pass: derive the factor from actual-vs-allocated bits recorded by
  // the previous pass, but only move bpm_factor further away from 1.0.
  if (cpi->third_pass_ctx && cpi->third_pass_ctx->frame_info_count > 0) {
    int64_t actual_bits = 0;
    int64_t target_bits = 0;
    double factor = 0.0;
    int count = 0;
    for (int i = 0; i < cpi->third_pass_ctx->frame_info_count; i++) {
      actual_bits += cpi->third_pass_ctx->frame_info[i].actual_bits;
      target_bits += cpi->third_pass_ctx->frame_info[i].bits_allocated;
      factor += cpi->third_pass_ctx->frame_info[i].bpm_factor;
      count++;
    }

    if (count == 0) {
      factor = 1.0;
    } else {
      factor /= (double)count;
    }

    factor *= (double)actual_bits / DOUBLE_DIVIDE_CHECK((double)target_bits);

    // Only accept the new factor if it pushes further in the direction the
    // current bpm_factor already leans (over- or under-shoot).
    if ((twopass->bpm_factor <= 1 && factor < twopass->bpm_factor) ||
        (twopass->bpm_factor >= 1 && factor > twopass->bpm_factor)) {
      twopass->bpm_factor = factor;
      twopass->bpm_factor =
          AOMMAX(min_fac, AOMMIN(max_fac, twopass->bpm_factor));
    }
  }

  // Snapshot the rate-control state used below; under CONFIG_FPMT_TEST these
  // may be replaced by the temp_* copies when simulating parallel encode.
  int err_estimate = p_rc->rate_error_estimate;
  int64_t bits_left = twopass->bits_left;
  int64_t total_actual_bits = p_rc->total_actual_bits;
  int64_t bits_off_target = p_rc->vbr_bits_off_target;
  double rolling_arf_group_actual_bits =
      (double)twopass->rolling_arf_group_actual_bits;
  double rolling_arf_group_target_bits =
      (double)twopass->rolling_arf_group_target_bits;

#if CONFIG_FPMT_TEST
  const int is_parallel_frame =
      cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0 ? 1 : 0;
  const int simulate_parallel_frame =
      cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE
          ? is_parallel_frame
          : 0;
  total_actual_bits = simulate_parallel_frame ? p_rc->temp_total_actual_bits
                                              : p_rc->total_actual_bits;
  bits_off_target = simulate_parallel_frame ? p_rc->temp_vbr_bits_off_target
                                            : p_rc->vbr_bits_off_target;
  bits_left =
      simulate_parallel_frame ? p_rc->temp_bits_left : twopass->bits_left;
  rolling_arf_group_target_bits =
      (double)(simulate_parallel_frame
                   ? p_rc->temp_rolling_arf_group_target_bits
                   : twopass->rolling_arf_group_target_bits);
  rolling_arf_group_actual_bits =
      (double)(simulate_parallel_frame
                   ? p_rc->temp_rolling_arf_group_actual_bits
                   : twopass->rolling_arf_group_actual_bits);
  err_estimate = simulate_parallel_frame ? p_rc->temp_rate_error_estimate
                                         : p_rc->rate_error_estimate;
#endif

  if (p_rc->bits_off_target && total_actual_bits > 0) {
    if (cpi->ppi->lap_enabled) {
      // 1-pass lookahead: use the most recent ARF group's actual/target ratio.
      rate_err_factor = rolling_arf_group_actual_bits /
                        DOUBLE_DIVIDE_CHECK(rolling_arf_group_target_bits);
    } else {
      rate_err_factor = 1.0 - ((double)(bits_off_target) /
                               AOMMAX(total_actual_bits, bits_left));
    }
    rate_err_factor = AOMMAX(min_fac, AOMMIN(max_fac, rate_err_factor));

    // Adjustment is damped if this is 1 pass with look ahead processing
    // (as there are only ever a few frames of data) and for all but the first
    // GOP in normal two pass.
    if ((twopass->bpm_factor != 1.0) || cpi->ppi->lap_enabled) {
      rate_err_factor = 1.0 + ((rate_err_factor - 1.0) / damp_fac);
    }
  }

  // Is the rate control trending in the right direction. Only make
  // an adjustment if things are getting worse.
  if ((rate_err_factor < 1.0 && err_estimate >= 0) ||
      (rate_err_factor > 1.0 && err_estimate <= 0)) {
    twopass->bpm_factor *= rate_err_factor;
    twopass->bpm_factor = AOMMAX(min_fac, AOMMIN(max_fac, twopass->bpm_factor));
  }
}
274
// Returns the bits-per-mb numerator used when estimating rate at a given Q.
// Ramps linearly from 1200000 (rate_err_tol <= 25) up to 1500000
// (rate_err_tol >= 100).
static int qbpm_enumerator(int rate_err_tol) {
  int tol = rate_err_tol - 25;
  if (tol < 0) tol = 0;
  if (tol > 75) tol = 75;
  return 1200000 + (300000 * tol) / 75;
}
278
279 // Similar to find_qindex_by_rate() function in ratectrl.c, but includes
280 // calculation of a correction_factor.
// Binary search for the lowest qindex whose estimated (corrected) bits per
// mb does not exceed the desired rate. Similar to find_qindex_by_rate() in
// ratectrl.c but applies calc_correction_factor() at each probe.
static int find_qindex_by_rate_with_correction(
    int desired_bits_per_mb, aom_bit_depth_t bit_depth, double error_per_mb,
    double group_weight_factor, int rate_err_tol, int best_qindex,
    int worst_qindex) {
  assert(best_qindex <= worst_qindex);
  int low = best_qindex;
  int high = worst_qindex;

  // Estimated rate decreases monotonically as qindex rises, so a standard
  // lower-bound search applies.
  while (low < high) {
    const int mid = (low + high) >> 1;
    const double mid_factor = calc_correction_factor(error_per_mb, mid);
    const double q = av1_convert_qindex_to_q(mid, bit_depth);
    const int enumerator = qbpm_enumerator(rate_err_tol);
    const int mid_bits_per_mb =
        (int)((enumerator * mid_factor * group_weight_factor) / q);

    if (mid_bits_per_mb > desired_bits_per_mb)
      low = mid + 1;
    else
      high = mid;
  }
  return low;
}
305
306 /*!\brief Choose a target maximum Q for a group of frames
307 *
308 * \ingroup rate_control
309 *
310 * This function is used to estimate a suitable maximum Q for a
 * group of frames. Initially it is called to get a crude estimate
312 * for the whole clip. It is then called for each ARF/GF group to get
313 * a revised estimate for that group.
314 *
315 * \param[in] cpi Top-level encoder structure
316 * \param[in] av_frame_err The average per frame coded error score
317 * for frames making up this section/group.
318 * \param[in] inactive_zone Used to mask off /ignore part of the
319 * frame. The most common use case is where
320 * a wide format video (e.g. 16:9) is
321 * letter-boxed into a more square format.
322 * Here we want to ignore the bands at the
323 * top and bottom.
324 * \param[in] av_target_bandwidth The target bits per frame
325 *
326 * \return The maximum Q for frames in the group.
327 */
static int get_twopass_worst_quality(AV1_COMP *cpi, const double av_frame_err,
                                     double inactive_zone,
                                     int av_target_bandwidth) {
  const RATE_CONTROL *const rc = &cpi->rc;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const RateControlCfg *const rc_cfg = &oxcf->rc_cfg;
  // Keep strictly below 1.0 so the error normalization below cannot divide
  // by zero.
  inactive_zone = fclamp(inactive_zone, 0.0, 0.9999);

  if (av_target_bandwidth <= 0) {
    return rc->worst_quality;  // Highest value allowed
  } else {
    // When resizing is enabled use the initial mb count; otherwise the
    // current frame's mb count.
    const int num_mbs = (oxcf->resize_cfg.resize_mode != RESIZE_NONE)
                            ? cpi->initial_mbs
                            : cpi->common.mi_params.MBs;
    const int active_mbs = AOMMAX(1, num_mbs - (int)(num_mbs * inactive_zone));
    // Concentrate the average error onto the active portion of the frame.
    const double av_err_per_mb = av_frame_err / (1.0 - inactive_zone);
    const int target_norm_bits_per_mb =
        (int)((uint64_t)av_target_bandwidth << BPER_MB_NORMBITS) / active_mbs;
    // The tighter of the two shoot tolerances governs rate-error handling.
    int rate_err_tol = AOMMIN(rc_cfg->under_shoot_pct, rc_cfg->over_shoot_pct);

    // Update bpm correction factor based on previous GOP rate error.
    twopass_update_bpm_factor(cpi, rate_err_tol);

    // Try and pick a max Q that will be high enough to encode the
    // content at the given rate.
    int q = find_qindex_by_rate_with_correction(
        target_norm_bits_per_mb, cpi->common.seq_params->bit_depth,
        av_err_per_mb, cpi->ppi->twopass.bpm_factor, rate_err_tol,
        rc->best_quality, rc->worst_quality);

    // Restriction on active max q for constrained quality mode.
    if (rc_cfg->mode == AOM_CQ) q = AOMMAX(q, rc_cfg->cq_level);
    return q;
  }
}
363
364 #define INTRA_PART 0.005
365 #define DEFAULT_DECAY_LIMIT 0.75
366 #define LOW_SR_DIFF_TRHESH 0.01
367 #define NCOUNT_FRAME_II_THRESH 5.0
368 #define LOW_CODED_ERR_PER_MB 0.01
369
 370 /* This function considers how the quality of prediction may be deteriorating
 371  * with distance. It compares the coded error for the last frame and the
 372  * second reference frame (usually two frames old) and also applies a factor
 373  * based on the extent of INTRA coding.
 374  *
 375  * The decay factor is then used to reduce the contribution of frames further
 376  * from the alt-ref or golden frame, to the bitrate boost calculation for that
 377  * alt-ref or golden frame.
 378  */
get_sr_decay_rate(const FIRSTPASS_STATS * frame)379 static double get_sr_decay_rate(const FIRSTPASS_STATS *frame) {
380 double sr_diff = (frame->sr_coded_error - frame->coded_error);
381 double sr_decay = 1.0;
382 double modified_pct_inter;
383 double modified_pcnt_intra;
384
385 modified_pct_inter = frame->pcnt_inter;
386 if ((frame->coded_error > LOW_CODED_ERR_PER_MB) &&
387 ((frame->intra_error / DOUBLE_DIVIDE_CHECK(frame->coded_error)) <
388 (double)NCOUNT_FRAME_II_THRESH)) {
389 modified_pct_inter = frame->pcnt_inter - frame->pcnt_neutral;
390 }
391 modified_pcnt_intra = 100 * (1.0 - modified_pct_inter);
392
393 if ((sr_diff > LOW_SR_DIFF_TRHESH)) {
394 double sr_diff_part = ((sr_diff * 0.25) / frame->intra_error);
395 sr_decay = 1.0 - sr_diff_part - (INTRA_PART * modified_pcnt_intra);
396 }
397 return AOMMAX(sr_decay, DEFAULT_DECAY_LIMIT);
398 }
399
400 // This function gives an estimate of how badly we believe the prediction
401 // quality is decaying from frame to frame.
get_zero_motion_factor(const FIRSTPASS_STATS * frame)402 static double get_zero_motion_factor(const FIRSTPASS_STATS *frame) {
403 const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion;
404 double sr_decay = get_sr_decay_rate(frame);
405 return AOMMIN(sr_decay, zero_motion_pct);
406 }
407
408 #define DEFAULT_ZM_FACTOR 0.5
get_prediction_decay_rate(const FIRSTPASS_STATS * frame_stats)409 static double get_prediction_decay_rate(const FIRSTPASS_STATS *frame_stats) {
410 const double sr_decay_rate = get_sr_decay_rate(frame_stats);
411 double zero_motion_factor =
412 DEFAULT_ZM_FACTOR * (frame_stats->pcnt_inter - frame_stats->pcnt_motion);
413
414 // Clamp value to range 0.0 to 1.0
415 // This should happen anyway if input values are sensibly clamped but checked
416 // here just in case.
417 if (zero_motion_factor > 1.0)
418 zero_motion_factor = 1.0;
419 else if (zero_motion_factor < 0.0)
420 zero_motion_factor = 0.0;
421
422 return AOMMAX(zero_motion_factor,
423 (sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor)));
424 }
425
426 // Function to test for a condition where a complex transition is followed
427 // by a static section. For example in slide shows where there is a fade
428 // between slides. This is to help with more optimal kf and gf positioning.
// Returns 1 when a complex transition (e.g. a fade) is followed by a run of
// still_interval near-static frames, 0 otherwise. Helps position kf/gf
// boundaries in content such as slide shows.
static int detect_transition_to_still(const FIRSTPASS_INFO *firstpass_info,
                                      int next_stats_index,
                                      const int min_gf_interval,
                                      const int frame_interval,
                                      const int still_interval,
                                      const double loop_decay_rate,
                                      const double last_decay_rate) {
  // Break clause to detect very still sections after motion
  // For example a static image after a fade or other transition
  // instead of a clean scene cut.
  // Trigger condition: we are past the minimum gf interval, the current
  // decay rate says "static" (>= 0.999) while the previous one said
  // "moving" (< 0.9).
  if (frame_interval > min_gf_interval && loop_decay_rate >= 0.999 &&
      last_decay_rate < 0.9) {
    int stats_left =
        av1_firstpass_info_future_count(firstpass_info, next_stats_index);
    if (stats_left >= still_interval) {
      int j;
      // Look ahead a few frames to see if static condition persists...
      for (j = 0; j < still_interval; ++j) {
        const FIRSTPASS_STATS *stats =
            av1_firstpass_info_peek(firstpass_info, next_stats_index + j);
        // Any frame with meaningful motion breaks the run.
        if (stats->pcnt_inter - stats->pcnt_motion < 0.999) break;
      }
      // Only if it does do we signal a transition to still.
      // (j reached still_interval only when the loop ran to completion.)
      return j == still_interval;
    }
  }
  return 0;
}
457
458 // This function detects a flash through the high relative pcnt_second_ref
459 // score in the frame following a flash frame. The offset passed in should
460 // reflect this.
detect_flash(const TWO_PASS * twopass,const TWO_PASS_FRAME * twopass_frame,const int offset)461 static int detect_flash(const TWO_PASS *twopass,
462 const TWO_PASS_FRAME *twopass_frame, const int offset) {
463 const FIRSTPASS_STATS *const next_frame =
464 read_frame_stats(twopass, twopass_frame, offset);
465
466 // What we are looking for here is a situation where there is a
467 // brief break in prediction (such as a flash) but subsequent frames
468 // are reasonably well predicted by an earlier (pre flash) frame.
469 // The recovery after a flash is indicated by a high pcnt_second_ref
470 // compared to pcnt_inter.
471 return next_frame != NULL &&
472 next_frame->pcnt_second_ref > next_frame->pcnt_inter &&
473 next_frame->pcnt_second_ref >= 0.5;
474 }
475
476 // Update the motion related elements to the GF arf boost calculation.
// Update the motion related elements to the GF arf boost calculation.
static void accumulate_frame_motion_stats(const FIRSTPASS_STATS *stats,
                                          GF_GROUP_STATS *gf_stats, double f_w,
                                          double f_h) {
  const double pct = stats->pcnt_motion;

  // Accumulate Motion In/Out of frame stats.
  gf_stats->this_frame_mv_in_out = stats->mv_in_out_count * pct;
  gf_stats->mv_in_out_accumulator += gf_stats->this_frame_mv_in_out;
  gf_stats->abs_mv_in_out_accumulator += fabs(gf_stats->this_frame_mv_in_out);

  // Skip the uniformity measure when too few blocks carry motion.
  if (pct > 0.05) {
    // Ratio of abs(mv) / mv per axis: close to 1 for uniform motion
    // fields, large for random ones.
    const double mvr_ratio =
        fabs(stats->mvr_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVr));
    const double mvc_ratio =
        fabs(stats->mvc_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVc));
    const double mvr_bound = stats->mvr_abs * f_h;
    const double mvc_bound = stats->mvc_abs * f_w;

    gf_stats->mv_ratio_accumulator +=
        pct * (mvr_ratio < mvr_bound ? mvr_ratio : mvr_bound);
    gf_stats->mv_ratio_accumulator +=
        pct * (mvc_ratio < mvc_bound ? mvc_ratio : mvc_bound);
  }
}
503
// Accumulates one frame's error and inactive-area stats into the running
// GF group totals.
static void accumulate_this_frame_stats(const FIRSTPASS_STATS *stats,
                                        const double mod_frame_err,
                                        GF_GROUP_STATS *gf_stats) {
  // Bias-adjusted (modified) error total for the group.
  gf_stats->gf_group_err += mod_frame_err;
#if GROUP_ADAPTIVE_MAXQ
  // Raw coded error is tracked separately for group-adaptive max Q.
  gf_stats->gf_group_raw_error += stats->coded_error;
#endif
  gf_stats->gf_group_skip_pct += stats->intra_skip_pct;
  gf_stats->gf_group_inactive_zone_rows += stats->inactive_zone_rows;
}
514
// Accumulates the next frame's first pass stats into the GF group stats:
// motion stats, per-frame metric sums, and (when no flash is detected) the
// prediction-decay and zero-motion accumulators.
void av1_accumulate_next_frame_stats(const FIRSTPASS_STATS *stats,
                                     const int flash_detected,
                                     const int frames_since_key,
                                     const int cur_idx,
                                     GF_GROUP_STATS *gf_stats, int f_w,
                                     int f_h) {
  accumulate_frame_motion_stats(stats, gf_stats, f_w, f_h);
  // sum up the metric values of current gf group
  gf_stats->avg_sr_coded_error += stats->sr_coded_error;
  gf_stats->avg_pcnt_second_ref += stats->pcnt_second_ref;
  gf_stats->avg_new_mv_count += stats->new_mv_count;
  gf_stats->avg_wavelet_energy += stats->frame_avg_wavelet_energy;
  // Count frames with a non-trivial stdev so the later average divides only
  // by frames that contributed.
  if (fabs(stats->raw_error_stdev) > 0.000001) {
    gf_stats->non_zero_stdev_count++;
    gf_stats->avg_raw_err_stdev += stats->raw_error_stdev;
  }

  // Accumulate the effect of prediction quality decay
  // (skipped across flashes, which break prediction only briefly).
  if (!flash_detected) {
    gf_stats->last_loop_decay_rate = gf_stats->loop_decay_rate;
    gf_stats->loop_decay_rate = get_prediction_decay_rate(stats);

    gf_stats->decay_accumulator =
        gf_stats->decay_accumulator * gf_stats->loop_decay_rate;

    // Monitor for static sections.
    if ((frames_since_key + cur_idx - 1) > 1) {
      gf_stats->zero_motion_accumulator = AOMMIN(
          gf_stats->zero_motion_accumulator, get_zero_motion_factor(stats));
    }
  }
}
547
// Converts the accumulated GF group sums into per-frame averages.
static void average_gf_stats(const int total_frame, GF_GROUP_STATS *gf_stats) {
  if (total_frame) {
    const double divisor = (double)total_frame;
    gf_stats->avg_sr_coded_error /= divisor;
    gf_stats->avg_pcnt_second_ref /= divisor;
    gf_stats->avg_new_mv_count /= divisor;
    gf_stats->avg_wavelet_energy /= divisor;
  }

  // Average only over frames that contributed a non-zero stdev.
  if (gf_stats->non_zero_stdev_count)
    gf_stats->avg_raw_err_stdev /= gf_stats->non_zero_stdev_count;
}
559
560 #define BOOST_FACTOR 12.5
baseline_err_per_mb(const FRAME_INFO * frame_info)561 static double baseline_err_per_mb(const FRAME_INFO *frame_info) {
562 unsigned int screen_area = frame_info->frame_height * frame_info->frame_width;
563
564 // Use a different error per mb factor for calculating boost for
565 // different formats.
566 if (screen_area <= 640 * 360) {
567 return 500.0;
568 } else {
569 return 1000.0;
570 }
571 }
572
// Computes the boost contribution of one frame towards an ARF/GF boost,
// based on its inter error ratio, active area, recent average Q, and net
// motion in/out of the frame. Result is capped at max_boost (Q corrected).
static double calc_frame_boost(const PRIMARY_RATE_CONTROL *p_rc,
                               const FRAME_INFO *frame_info,
                               const FIRSTPASS_STATS *this_frame,
                               double this_frame_mv_in_out, double max_boost) {
  double frame_boost;
  // Recent average inter-frame Q, used to scale the boost.
  const double lq = av1_convert_qindex_to_q(p_rc->avg_frame_qindex[INTER_FRAME],
                                            frame_info->bit_depth);
  const double boost_q_correction = AOMMIN((0.5 + (lq * 0.015)), 1.5);
  const double active_area = calculate_active_area(frame_info, this_frame);

  // Underlying boost factor is based on inter error ratio.
  frame_boost = AOMMAX(baseline_err_per_mb(frame_info) * active_area,
                       this_frame->intra_error * active_area) /
                DOUBLE_DIVIDE_CHECK(this_frame->coded_error);
  frame_boost = frame_boost * BOOST_FACTOR * boost_q_correction;

  // Increase boost for frames where new data coming into frame (e.g. zoom out).
  // Slightly reduce boost if there is a net balance of motion out of the frame
  // (zoom in). The range for this_frame_mv_in_out is -1.0 to +1.0.
  if (this_frame_mv_in_out > 0.0)
    frame_boost += frame_boost * (this_frame_mv_in_out * 2.0);
  // In the extreme case the boost is halved.
  else
    frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);

  return AOMMIN(frame_boost, max_boost * boost_q_correction);
}
600
// Key-frame variant of calc_frame_boost(). *sr_accumulator is an in/out
// running total of (sr_coded_error - coded_error), tracking how quickly the
// coded error grows with distance from the key frame; it is added to this
// frame's coded error before the ratio is taken and updated on return.
static double calc_kf_frame_boost(const PRIMARY_RATE_CONTROL *p_rc,
                                  const FRAME_INFO *frame_info,
                                  const FIRSTPASS_STATS *this_frame,
                                  double *sr_accumulator, double max_boost) {
  double frame_boost;
  // Recent average inter-frame Q, used to scale the boost.
  const double lq = av1_convert_qindex_to_q(p_rc->avg_frame_qindex[INTER_FRAME],
                                            frame_info->bit_depth);
  const double boost_q_correction = AOMMIN((0.50 + (lq * 0.015)), 2.00);
  const double active_area = calculate_active_area(frame_info, this_frame);

  // Underlying boost factor is based on inter error ratio.
  frame_boost = AOMMAX(baseline_err_per_mb(frame_info) * active_area,
                       this_frame->intra_error * active_area) /
                DOUBLE_DIVIDE_CHECK(
                    (this_frame->coded_error + *sr_accumulator) * active_area);

  // Update the accumulator for second ref error difference.
  // This is intended to give an indication of how much the coded error is
  // increasing over time.
  *sr_accumulator += (this_frame->sr_coded_error - this_frame->coded_error);
  *sr_accumulator = AOMMAX(0.0, *sr_accumulator);

  // Q correction and scaling
  // The 40.0 value here is an experimentally derived baseline minimum.
  // This value is in line with the minimum per frame boost in the alt_ref
  // boost calculation.
  frame_boost = ((frame_boost + 40.0) * boost_q_correction);

  return AOMMIN(frame_boost, max_boost * boost_q_correction);
}
631
get_projected_gfu_boost(const PRIMARY_RATE_CONTROL * p_rc,int gfu_boost,int frames_to_project,int num_stats_used_for_gfu_boost)632 static int get_projected_gfu_boost(const PRIMARY_RATE_CONTROL *p_rc,
633 int gfu_boost, int frames_to_project,
634 int num_stats_used_for_gfu_boost) {
635 /*
636 * If frames_to_project is equal to num_stats_used_for_gfu_boost,
637 * it means that gfu_boost was calculated over frames_to_project to
638 * begin with(ie; all stats required were available), hence return
639 * the original boost.
640 */
641 if (num_stats_used_for_gfu_boost >= frames_to_project) return gfu_boost;
642
643 double min_boost_factor = sqrt(p_rc->baseline_gf_interval);
644 // Get the current tpl factor (number of frames = frames_to_project).
645 double tpl_factor = av1_get_gfu_boost_projection_factor(
646 min_boost_factor, MAX_GFUBOOST_FACTOR, frames_to_project);
647 // Get the tpl factor when number of frames = num_stats_used_for_prior_boost.
648 double tpl_factor_num_stats = av1_get_gfu_boost_projection_factor(
649 min_boost_factor, MAX_GFUBOOST_FACTOR, num_stats_used_for_gfu_boost);
650 int projected_gfu_boost =
651 (int)rint((tpl_factor * gfu_boost) / tpl_factor_num_stats);
652 return projected_gfu_boost;
653 }
654
655 #define GF_MAX_BOOST 90.0
656 #define GF_MIN_BOOST 50
657 #define MIN_DECAY_FACTOR 0.01
// Computes the boost for an ARF/GF at the given stats offset by accumulating
// per-frame boost contributions forward over f_frames and backward over
// b_frames, weighted by the prediction decay accumulator. Optionally
// projects the boost to the full window when only partial stats were
// available (project_gfu_boost). Returns the boost, floored at
// (b_frames + f_frames) * GF_MIN_BOOST.
int av1_calc_arf_boost(const TWO_PASS *twopass,
                       const TWO_PASS_FRAME *twopass_frame,
                       const PRIMARY_RATE_CONTROL *p_rc, FRAME_INFO *frame_info,
                       int offset, int f_frames, int b_frames,
                       int *num_fpstats_used, int *num_fpstats_required,
                       int project_gfu_boost) {
  int i;
  GF_GROUP_STATS gf_stats;
  init_gf_stats(&gf_stats);
  double boost_score = (double)NORMAL_BOOST;
  int arf_boost;
  int flash_detected = 0;
  if (num_fpstats_used) *num_fpstats_used = 0;

  // Search forward from the proposed arf/next gf position.
  for (i = 0; i < f_frames; ++i) {
    const FIRSTPASS_STATS *this_frame =
        read_frame_stats(twopass, twopass_frame, i + offset);
    if (this_frame == NULL) break;

    // Update the motion related elements to the boost calculation.
    accumulate_frame_motion_stats(this_frame, &gf_stats,
                                  frame_info->frame_width,
                                  frame_info->frame_height);

    // We want to discount the flash frame itself and the recovery
    // frame that follows as both will have poor scores.
    flash_detected = detect_flash(twopass, twopass_frame, i + offset) ||
                     detect_flash(twopass, twopass_frame, i + offset + 1);

    // Accumulate the effect of prediction quality decay.
    if (!flash_detected) {
      gf_stats.decay_accumulator *= get_prediction_decay_rate(this_frame);
      gf_stats.decay_accumulator = gf_stats.decay_accumulator < MIN_DECAY_FACTOR
                                       ? MIN_DECAY_FACTOR
                                       : gf_stats.decay_accumulator;
    }

    // Each frame contributes its (decay-weighted) boost to the total.
    boost_score +=
        gf_stats.decay_accumulator *
        calc_frame_boost(p_rc, frame_info, this_frame,
                         gf_stats.this_frame_mv_in_out, GF_MAX_BOOST);
    if (num_fpstats_used) (*num_fpstats_used)++;
  }

  arf_boost = (int)boost_score;

  // Reset for backward looking loop.
  boost_score = 0.0;
  init_gf_stats(&gf_stats);
  // Search backward towards last gf position.
  for (i = -1; i >= -b_frames; --i) {
    const FIRSTPASS_STATS *this_frame =
        read_frame_stats(twopass, twopass_frame, i + offset);
    if (this_frame == NULL) break;

    // Update the motion related elements to the boost calculation.
    accumulate_frame_motion_stats(this_frame, &gf_stats,
                                  frame_info->frame_width,
                                  frame_info->frame_height);

    // We want to discount the flash frame itself and the recovery
    // frame that follows as both will have poor scores.
    flash_detected = detect_flash(twopass, twopass_frame, i + offset) ||
                     detect_flash(twopass, twopass_frame, i + offset + 1);

    // Cumulative effect of prediction quality decay.
    if (!flash_detected) {
      gf_stats.decay_accumulator *= get_prediction_decay_rate(this_frame);
      gf_stats.decay_accumulator = gf_stats.decay_accumulator < MIN_DECAY_FACTOR
                                       ? MIN_DECAY_FACTOR
                                       : gf_stats.decay_accumulator;
    }

    boost_score +=
        gf_stats.decay_accumulator *
        calc_frame_boost(p_rc, frame_info, this_frame,
                         gf_stats.this_frame_mv_in_out, GF_MAX_BOOST);
    if (num_fpstats_used) (*num_fpstats_used)++;
  }
  arf_boost += (int)boost_score;

  // Optionally scale the boost up to the full window when stats ran out
  // before f_frames + b_frames were consumed.
  if (project_gfu_boost) {
    assert(num_fpstats_required != NULL);
    assert(num_fpstats_used != NULL);
    *num_fpstats_required = f_frames + b_frames;
    arf_boost = get_projected_gfu_boost(p_rc, arf_boost, *num_fpstats_required,
                                        *num_fpstats_used);
  }

  // Enforce a minimum per-frame boost.
  if (arf_boost < ((b_frames + f_frames) * GF_MIN_BOOST))
    arf_boost = ((b_frames + f_frames) * GF_MIN_BOOST);

  return arf_boost;
}
753
754 // Calculate a section intra ratio used in setting max loop filter.
calculate_section_intra_ratio(const FIRSTPASS_STATS * begin,const FIRSTPASS_STATS * end,int section_length)755 static int calculate_section_intra_ratio(const FIRSTPASS_STATS *begin,
756 const FIRSTPASS_STATS *end,
757 int section_length) {
758 const FIRSTPASS_STATS *s = begin;
759 double intra_error = 0.0;
760 double coded_error = 0.0;
761 int i = 0;
762
763 while (s < end && i < section_length) {
764 intra_error += s->intra_error;
765 coded_error += s->coded_error;
766 ++s;
767 ++i;
768 }
769
770 return (int)(intra_error / DOUBLE_DIVIDE_CHECK(coded_error));
771 }
772
773 /*!\brief Calculates the bit target for this GF/ARF group
774 *
775 * \ingroup rate_control
776 *
777 * Calculates the total bits to allocate in this GF/ARF group.
778 *
779 * \param[in] cpi Top-level encoder structure
780 * \param[in] gf_group_err Cumulative coded error score for the
781 * frames making up this group.
782 *
783 * \return The target total number of bits for this GF/ARF group.
784 */
calculate_total_gf_group_bits(AV1_COMP * cpi,double gf_group_err)785 static int64_t calculate_total_gf_group_bits(AV1_COMP *cpi,
786 double gf_group_err) {
787 const RATE_CONTROL *const rc = &cpi->rc;
788 const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
789 const TWO_PASS *const twopass = &cpi->ppi->twopass;
790 const int max_bits = frame_max_bits(rc, &cpi->oxcf);
791 int64_t total_group_bits;
792
793 // Calculate the bits to be allocated to the group as a whole.
794 if ((twopass->kf_group_bits > 0) && (twopass->kf_group_error_left > 0)) {
795 total_group_bits = (int64_t)(twopass->kf_group_bits *
796 (gf_group_err / twopass->kf_group_error_left));
797 } else {
798 total_group_bits = 0;
799 }
800
801 // Clamp odd edge cases.
802 total_group_bits = (total_group_bits < 0) ? 0
803 : (total_group_bits > twopass->kf_group_bits)
804 ? twopass->kf_group_bits
805 : total_group_bits;
806
807 // Clip based on user supplied data rate variability limit.
808 if (total_group_bits > (int64_t)max_bits * p_rc->baseline_gf_interval)
809 total_group_bits = (int64_t)max_bits * p_rc->baseline_gf_interval;
810
811 return total_group_bits;
812 }
813
// Calculate the number of bits to assign to boosted frames in a group.
static int calculate_boost_bits(int frame_count, int boost,
                                int64_t total_group_bits) {
  // Return 0 for invalid inputs (could arise e.g. through rounding errors).
  if (!boost || total_group_bits <= 0) return 0;

  // With no regular frames, hand the whole (capped) budget to the boosted
  // frame.
  if (frame_count <= 0) return (int)(AOMMIN(total_group_bits, INT_MAX));

  int allocation_chunks = (frame_count * 100) + boost;

  // Prevent overflow by scaling both boost and chunk count down together.
  if (boost > 1023) {
    const int divisor = boost >> 10;
    boost /= divisor;
    allocation_chunks /= divisor;
  }

  // Calculate the number of extra bits for use in the boosted frame or frames.
  const int64_t extra_bits =
      ((int64_t)boost * total_group_bits) / allocation_chunks;
  return AOMMAX((int)extra_bits, 0);
}
837
// Calculate the boost factor based on the number of bits assigned, i.e. the
// inverse of calculate_boost_bits().
static int calculate_boost_factor(int frame_count, int bits,
                                  int64_t total_group_bits) {
  const double remaining_bits = (double)(total_group_bits - bits);
  return (int)(100.0 * frame_count * bits / remaining_bits);
}
844
845 // Reduce the number of bits assigned to keyframe or arf if necessary, to
846 // prevent bitrate spikes that may break level constraints.
847 // frame_type: 0: keyframe; 1: arf.
adjust_boost_bits_for_target_level(const AV1_COMP * const cpi,RATE_CONTROL * const rc,int bits_assigned,int64_t group_bits,int frame_type)848 static int adjust_boost_bits_for_target_level(const AV1_COMP *const cpi,
849 RATE_CONTROL *const rc,
850 int bits_assigned,
851 int64_t group_bits,
852 int frame_type) {
853 const AV1_COMMON *const cm = &cpi->common;
854 const SequenceHeader *const seq_params = cm->seq_params;
855 PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
856 const int temporal_layer_id = cm->temporal_layer_id;
857 const int spatial_layer_id = cm->spatial_layer_id;
858 for (int index = 0; index < seq_params->operating_points_cnt_minus_1 + 1;
859 ++index) {
860 if (!is_in_operating_point(seq_params->operating_point_idc[index],
861 temporal_layer_id, spatial_layer_id)) {
862 continue;
863 }
864
865 const AV1_LEVEL target_level =
866 cpi->ppi->level_params.target_seq_level_idx[index];
867 if (target_level >= SEQ_LEVELS) continue;
868
869 assert(is_valid_seq_level_idx(target_level));
870
871 const double level_bitrate_limit = av1_get_max_bitrate_for_level(
872 target_level, seq_params->tier[0], seq_params->profile);
873 const int target_bits_per_frame =
874 (int)(level_bitrate_limit / cpi->framerate);
875 if (frame_type == 0) {
876 // Maximum bits for keyframe is 8 times the target_bits_per_frame.
877 const int level_enforced_max_kf_bits = target_bits_per_frame * 8;
878 if (bits_assigned > level_enforced_max_kf_bits) {
879 const int frames = rc->frames_to_key - 1;
880 p_rc->kf_boost = calculate_boost_factor(
881 frames, level_enforced_max_kf_bits, group_bits);
882 bits_assigned =
883 calculate_boost_bits(frames, p_rc->kf_boost, group_bits);
884 }
885 } else if (frame_type == 1) {
886 // Maximum bits for arf is 4 times the target_bits_per_frame.
887 const int level_enforced_max_arf_bits = target_bits_per_frame * 4;
888 if (bits_assigned > level_enforced_max_arf_bits) {
889 p_rc->gfu_boost =
890 calculate_boost_factor(p_rc->baseline_gf_interval,
891 level_enforced_max_arf_bits, group_bits);
892 bits_assigned = calculate_boost_bits(p_rc->baseline_gf_interval,
893 p_rc->gfu_boost, group_bits);
894 }
895 } else {
896 assert(0);
897 }
898 }
899
900 return bits_assigned;
901 }
902
903 // Allocate bits to each frame in a GF / ARF group
// Per-layer share of the extra ARF bits, indexed by pyramid layer depth;
// consumed by allocate_gf_group_bits() below. Values are empirical.
double layer_fraction[MAX_ARF_LAYERS + 1] = { 1.0, 0.70, 0.55, 0.60,
                                              0.60, 1.0, 1.0 };
static void allocate_gf_group_bits(GF_GROUP *gf_group,
                                   PRIMARY_RATE_CONTROL *const p_rc,
                                   RATE_CONTROL *const rc,
                                   int64_t gf_group_bits, int gf_arf_bits,
                                   int key_frame, int use_arf) {
  const int gf_group_size = gf_group->size;
  int64_t total_group_bits = gf_group_bits;
  int layer_frames[MAX_ARF_LAYERS + 1] = { 0 };
  int layer_extra_bits[MAX_ARF_LAYERS + 1] = { 0 };

  // For key frames the frame target rate is already set and it
  // is also the golden frame.
  // === [frame_index == 0] ===
  const int frame_index = key_frame ? 1 : 0;

  // Subtract the extra bits set aside for ARF frames from the Group Total
  if (use_arf) total_group_bits -= gf_arf_bits;

  const int num_frames =
      AOMMAX(1, p_rc->baseline_gf_interval - (rc->frames_since_key == 0));
  const int base_frame_bits = (int)(total_group_bits / num_frames);

  // Check the number of frames in each layer in case we have a
  // non standard group length.
  const int max_arf_layer = gf_group->max_layer_depth - 1;
  for (int idx = frame_index; idx < gf_group_size; ++idx) {
    const int type = gf_group->update_type[idx];
    if (type == ARF_UPDATE || type == INTNL_ARF_UPDATE)
      ++layer_frames[gf_group->layer_depth[idx]];
  }

  // Allocate extra bits to each ARF layer; the deepest layer takes whatever
  // remains of the ARF budget.
  for (int layer = 1; layer <= max_arf_layer; ++layer) {
    const double fraction =
        (layer == max_arf_layer) ? 1.0 : layer_fraction[layer];
    layer_extra_bits[layer] =
        (int)((gf_arf_bits * fraction) / AOMMAX(1, layer_frames[layer]));
    gf_arf_bits -= (int)(gf_arf_bits * fraction);
  }

  // Now combine ARF layer and baseline bits to give total bits for each frame.
  for (int idx = frame_index; idx < gf_group_size; ++idx) {
    switch (gf_group->update_type[idx]) {
      case ARF_UPDATE:
      case INTNL_ARF_UPDATE:
        gf_group->bit_allocation[idx] =
            base_frame_bits + layer_extra_bits[gf_group->layer_depth[idx]];
        break;
      case INTNL_OVERLAY_UPDATE:
      case OVERLAY_UPDATE: gf_group->bit_allocation[idx] = 0; break;
      default: gf_group->bit_allocation[idx] = base_frame_bits; break;
    }
  }

  // Set the frame following the current GOP to 0 bit allocation. For ARF
  // groups, this next frame will be overlay frame, which is the first frame
  // in the next GOP. For GF group, next GOP will overwrite the rate allocation.
  // Setting this frame to use 0 bit (of out the current GOP budget) will
  // simplify logics in reference frame management.
  if (gf_group_size < MAX_STATIC_GF_GROUP_LENGTH)
    gf_group->bit_allocation[gf_group_size] = 0;
}
971
972 // Returns true if KF group and GF group both are almost completely static.
is_almost_static(double gf_zero_motion,int kf_zero_motion,int is_lap_enabled)973 static INLINE int is_almost_static(double gf_zero_motion, int kf_zero_motion,
974 int is_lap_enabled) {
975 if (is_lap_enabled) {
976 /*
977 * when LAP enabled kf_zero_motion is not reliable, so use strict
978 * constraint on gf_zero_motion.
979 */
980 return (gf_zero_motion >= 0.999);
981 } else {
982 return (gf_zero_motion >= 0.995) &&
983 (kf_zero_motion >= STATIC_KF_GROUP_THRESH);
984 }
985 }
986
987 #define ARF_ABS_ZOOM_THRESH 4.4
detect_gf_cut(AV1_COMP * cpi,int frame_index,int cur_start,int flash_detected,int active_max_gf_interval,int active_min_gf_interval,GF_GROUP_STATS * gf_stats)988 static INLINE int detect_gf_cut(AV1_COMP *cpi, int frame_index, int cur_start,
989 int flash_detected, int active_max_gf_interval,
990 int active_min_gf_interval,
991 GF_GROUP_STATS *gf_stats) {
992 RATE_CONTROL *const rc = &cpi->rc;
993 TWO_PASS *const twopass = &cpi->ppi->twopass;
994 InitialDimensions *const initial_dimensions = &cpi->initial_dimensions;
995 // Motion breakout threshold for loop below depends on image size.
996 const double mv_ratio_accumulator_thresh =
997 (initial_dimensions->height + initial_dimensions->width) / 4.0;
998
999 if (!flash_detected) {
1000 // Break clause to detect very still sections after motion. For example,
1001 // a static image after a fade or other transition.
1002
1003 // TODO(angiebird): This is a temporary change, we will avoid using
1004 // twopass_frame.stats_in in the follow-up CL
1005 int index = (int)(cpi->twopass_frame.stats_in -
1006 twopass->stats_buf_ctx->stats_in_start);
1007 if (detect_transition_to_still(&twopass->firstpass_info, index,
1008 rc->min_gf_interval, frame_index - cur_start,
1009 5, gf_stats->loop_decay_rate,
1010 gf_stats->last_loop_decay_rate)) {
1011 return 1;
1012 }
1013 }
1014
1015 // Some conditions to breakout after min interval.
1016 if (frame_index - cur_start >= active_min_gf_interval &&
1017 // If possible don't break very close to a kf
1018 (rc->frames_to_key - frame_index >= rc->min_gf_interval) &&
1019 ((frame_index - cur_start) & 0x01) && !flash_detected &&
1020 (gf_stats->mv_ratio_accumulator > mv_ratio_accumulator_thresh ||
1021 gf_stats->abs_mv_in_out_accumulator > ARF_ABS_ZOOM_THRESH)) {
1022 return 1;
1023 }
1024
1025 // If almost totally static, we will not use the the max GF length later,
1026 // so we can continue for more frames.
1027 if (((frame_index - cur_start) >= active_max_gf_interval + 1) &&
1028 !is_almost_static(gf_stats->zero_motion_accumulator,
1029 twopass->kf_zeromotion_pct, cpi->ppi->lap_enabled)) {
1030 return 1;
1031 }
1032 return 0;
1033 }
1034
// Decide whether a shorter GF interval is preferable for the upcoming group,
// using TPL stats (and, for method 2, GF boost). Returns 1 to shorten the
// GF interval, 0 to keep it.
static int is_shorter_gf_interval_better(AV1_COMP *cpi,
                                         EncodeFrameParams *frame_params) {
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  int gop_length_decision_method = cpi->sf.tpl_sf.gop_length_decision_method;
  int shorten_gf_interval;

  av1_tpl_preload_rc_estimate(cpi, frame_params);

  if (gop_length_decision_method == 2) {
    // GF group length is decided based on GF boost and tpl stats of ARFs from
    // base layer, (base+1) layer.
    shorten_gf_interval =
        (p_rc->gfu_boost <
         p_rc->num_stats_used_for_gfu_boost * GF_MIN_BOOST * 1.4) &&
        !av1_tpl_setup_stats(cpi, 3, frame_params);
  } else {
    int do_complete_tpl = 1;
    GF_GROUP *const gf_group = &cpi->ppi->gf_group;
    int is_temporal_filter_enabled =
        (rc->frames_since_key > 0 && gf_group->arf_index > -1);

    if (gop_length_decision_method == 1) {
      // Check if tpl stats of ARFs from base layer, (base+1) layer,
      // (base+2) layer can decide the GF group length.
      int gop_length_eval = av1_tpl_setup_stats(cpi, 2, frame_params);

      if (gop_length_eval != 2) {
        // The partial stats were decisive; skip the full TPL pass below.
        do_complete_tpl = 0;
        shorten_gf_interval = !gop_length_eval;
      }
    }

    if (do_complete_tpl) {
      // Decide GF group length based on complete tpl stats.
      shorten_gf_interval = !av1_tpl_setup_stats(cpi, 1, frame_params);
      // Tpl stats is reused when the ARF is temporally filtered and GF
      // interval is not shortened.
      if (is_temporal_filter_enabled && !shorten_gf_interval) {
        cpi->skip_tpl_setup_stats = 1;
#if CONFIG_BITRATE_ACCURACY && !CONFIG_THREE_PASS
        assert(cpi->gf_frame_index == 0);
        av1_vbr_rc_update_q_index_list(&cpi->vbr_rc_info, &cpi->ppi->tpl_data,
                                       gf_group,
                                       cpi->common.seq_params->bit_depth);
#endif  // CONFIG_BITRATE_ACCURACY
      }
    }
  }
  return shorten_gf_interval;
}
1086
1087 #define MIN_SHRINK_LEN 6 // the minimum length of gf if we are shrinking
1088 #define SMOOTH_FILT_LEN 7
1089 #define HALF_FILT_LEN (SMOOTH_FILT_LEN / 2)
1090 #define WINDOW_SIZE 7
1091 #define HALF_WIN (WINDOW_SIZE / 2)
// A 7-tap gaussian smooth filter; taps are symmetric and sum to ~1.0.
const double smooth_filt[SMOOTH_FILT_LEN] = { 0.006, 0.061, 0.242, 0.383,
                                              0.242, 0.061, 0.006 };
1095
1096 // Smooth filter intra_error and coded_error in firstpass stats.
1097 // If stats[i].is_flash==1, the ith element should not be used in the filtering.
smooth_filter_stats(const FIRSTPASS_STATS * stats,int start_idx,int last_idx,double * filt_intra_err,double * filt_coded_err)1098 static void smooth_filter_stats(const FIRSTPASS_STATS *stats, int start_idx,
1099 int last_idx, double *filt_intra_err,
1100 double *filt_coded_err) {
1101 int i, j;
1102 for (i = start_idx; i <= last_idx; i++) {
1103 double total_wt = 0;
1104 for (j = -HALF_FILT_LEN; j <= HALF_FILT_LEN; j++) {
1105 int idx = AOMMIN(AOMMAX(i + j, start_idx), last_idx);
1106 if (stats[idx].is_flash) continue;
1107
1108 filt_intra_err[i] +=
1109 smooth_filt[j + HALF_FILT_LEN] * stats[idx].intra_error;
1110 total_wt += smooth_filt[j + HALF_FILT_LEN];
1111 }
1112 if (total_wt > 0.01) {
1113 filt_intra_err[i] /= total_wt;
1114 } else {
1115 filt_intra_err[i] = stats[i].intra_error;
1116 }
1117 }
1118 for (i = start_idx; i <= last_idx; i++) {
1119 double total_wt = 0;
1120 for (j = -HALF_FILT_LEN; j <= HALF_FILT_LEN; j++) {
1121 int idx = AOMMIN(AOMMAX(i + j, start_idx), last_idx);
1122 // Coded error involves idx and idx - 1.
1123 if (stats[idx].is_flash || (idx > 0 && stats[idx - 1].is_flash)) continue;
1124
1125 filt_coded_err[i] +=
1126 smooth_filt[j + HALF_FILT_LEN] * stats[idx].coded_error;
1127 total_wt += smooth_filt[j + HALF_FILT_LEN];
1128 }
1129 if (total_wt > 0.01) {
1130 filt_coded_err[i] /= total_wt;
1131 } else {
1132 filt_coded_err[i] = stats[i].coded_error;
1133 }
1134 }
1135 }
1136
// Calculate gradient
static void get_gradient(const double *values, int start, int last,
                         double *grad) {
  // Degenerate single-point range has no slope.
  if (start == last) {
    grad[start] = 0;
    return;
  }
  for (int i = start; i <= last; i++) {
    // Central difference; neighbor indices are clamped to [start, last] so
    // the endpoints fall back to a one-sided difference.
    const int prev = (i - 1 > start) ? i - 1 : start;
    const int next = (i + 1 < last) ? i + 1 : last;
    grad[i] = (values[next] - values[prev]) / (next - prev);
  }
}
1150
find_next_scenecut(const FIRSTPASS_STATS * const stats_start,int first,int last)1151 static int find_next_scenecut(const FIRSTPASS_STATS *const stats_start,
1152 int first, int last) {
1153 // Identify unstable areas caused by scenecuts.
1154 // Find the max and 2nd max coded error, and the average of the rest frames.
1155 // If there is only one frame that yields a huge coded error, it is likely a
1156 // scenecut.
1157 double this_ratio, max_prev_ratio, max_next_ratio, max_prev_coded,
1158 max_next_coded;
1159
1160 if (last - first == 0) return -1;
1161
1162 for (int i = first; i <= last; i++) {
1163 if (stats_start[i].is_flash || (i > 0 && stats_start[i - 1].is_flash))
1164 continue;
1165 double temp_intra = AOMMAX(stats_start[i].intra_error, 0.01);
1166 this_ratio = stats_start[i].coded_error / temp_intra;
1167 // find the avg ratio in the preceding neighborhood
1168 max_prev_ratio = 0;
1169 max_prev_coded = 0;
1170 for (int j = AOMMAX(first, i - HALF_WIN); j < i; j++) {
1171 if (stats_start[j].is_flash || (j > 0 && stats_start[j - 1].is_flash))
1172 continue;
1173 temp_intra = AOMMAX(stats_start[j].intra_error, 0.01);
1174 double temp_ratio = stats_start[j].coded_error / temp_intra;
1175 if (temp_ratio > max_prev_ratio) {
1176 max_prev_ratio = temp_ratio;
1177 }
1178 if (stats_start[j].coded_error > max_prev_coded) {
1179 max_prev_coded = stats_start[j].coded_error;
1180 }
1181 }
1182 // find the avg ratio in the following neighborhood
1183 max_next_ratio = 0;
1184 max_next_coded = 0;
1185 for (int j = i + 1; j <= AOMMIN(i + HALF_WIN, last); j++) {
1186 if (stats_start[i].is_flash || (i > 0 && stats_start[i - 1].is_flash))
1187 continue;
1188 temp_intra = AOMMAX(stats_start[j].intra_error, 0.01);
1189 double temp_ratio = stats_start[j].coded_error / temp_intra;
1190 if (temp_ratio > max_next_ratio) {
1191 max_next_ratio = temp_ratio;
1192 }
1193 if (stats_start[j].coded_error > max_next_coded) {
1194 max_next_coded = stats_start[j].coded_error;
1195 }
1196 }
1197
1198 if (max_prev_ratio < 0.001 && max_next_ratio < 0.001) {
1199 // the ratios are very small, only check a small fixed threshold
1200 if (this_ratio < 0.02) continue;
1201 } else {
1202 // check if this frame has a larger ratio than the neighborhood
1203 double max_sr = stats_start[i].sr_coded_error;
1204 if (i < last) max_sr = AOMMAX(max_sr, stats_start[i + 1].sr_coded_error);
1205 double max_sr_fr_ratio =
1206 max_sr / AOMMAX(stats_start[i].coded_error, 0.01);
1207
1208 if (max_sr_fr_ratio > 1.2) continue;
1209 if (this_ratio < 2 * AOMMAX(max_prev_ratio, max_next_ratio) &&
1210 stats_start[i].coded_error <
1211 2 * AOMMAX(max_prev_coded, max_next_coded)) {
1212 continue;
1213 }
1214 }
1215 return i;
1216 }
1217 return -1;
1218 }
1219
// Remove the region with index next_region.
// parameter merge: 0: merge with previous; 1: merge with next; 2:
// merge with both, take type from previous if possible
// After removing, next_region will be the index of the next region.
static void remove_region(int merge, REGIONS *regions, int *num_regions,
                          int *next_region) {
  int k = *next_region;
  assert(k < *num_regions);
  // Removing the only region empties the list.
  if (*num_regions == 1) {
    *num_regions = 0;
    return;
  }
  // The first/last region can only merge inward, whatever was requested.
  if (k == 0) {
    merge = 1;
  } else if (k == *num_regions - 1) {
    merge = 0;
  }
  // merge == 2 collapses this region and its successor into the predecessor,
  // so two entries disappear.
  int num_merge = (merge == 2) ? 2 : 1;
  switch (merge) {
    case 0:
      // Extend the previous region over the removed one.
      regions[k - 1].last = regions[k].last;
      *next_region = k;
      break;
    case 1:
      // Extend the next region backward over the removed one.
      regions[k + 1].start = regions[k].start;
      *next_region = k + 1;
      break;
    case 2:
      // Previous region absorbs both this one and the next.
      regions[k - 1].last = regions[k + 1].last;
      *next_region = k;
      break;
    default: assert(0);
  }
  *num_regions -= num_merge;
  // Shift the remaining regions down to fill the freed slot(s).
  for (k = *next_region - (merge == 1); k < *num_regions; k++) {
    regions[k] = regions[k + num_merge];
  }
}
1258
// Insert a region in the cur_region_idx. The start and last should both be in
// the current region. After insertion, the cur_region_idx will point to the
// last region that was splitted from the original region.
static void insert_region(int start, int last, REGION_TYPES type,
                          REGIONS *regions, int *num_regions,
                          int *cur_region_idx) {
  int k = *cur_region_idx;
  // Remember the original region's type and end so the remainder (if any)
  // can be re-created after the split.
  REGION_TYPES this_region_type = regions[k].type;
  int this_region_last = regions[k].last;
  // One extra region is needed for each side of [start, last] that does not
  // line up with the existing region's boundary.
  int num_add = (start != regions[k].start) + (last != regions[k].last);
  // move the following regions further to the back
  for (int r = *num_regions - 1; r > k; r--) {
    regions[r + num_add] = regions[r];
  }
  *num_regions += num_add;
  if (start > regions[k].start) {
    // Keep the leading part of the old region; open the new one at start.
    regions[k].last = start - 1;
    k++;
    regions[k].start = start;
  }
  regions[k].type = type;
  if (last < this_region_last) {
    // Split off the trailing remainder, restoring the original type.
    regions[k].last = last;
    k++;
    regions[k].start = last + 1;
    regions[k].last = this_region_last;
    regions[k].type = this_region_type;
  } else {
    regions[k].last = this_region_last;
  }
  *cur_region_idx = k;
}
1291
1292 // Get the average of stats inside a region.
analyze_region(const FIRSTPASS_STATS * stats,int k,REGIONS * regions)1293 static void analyze_region(const FIRSTPASS_STATS *stats, int k,
1294 REGIONS *regions) {
1295 int i;
1296 regions[k].avg_cor_coeff = 0;
1297 regions[k].avg_sr_fr_ratio = 0;
1298 regions[k].avg_intra_err = 0;
1299 regions[k].avg_coded_err = 0;
1300
1301 int check_first_sr = (k != 0);
1302
1303 for (i = regions[k].start; i <= regions[k].last; i++) {
1304 if (i > regions[k].start || check_first_sr) {
1305 double num_frames =
1306 (double)(regions[k].last - regions[k].start + check_first_sr);
1307 double max_coded_error =
1308 AOMMAX(stats[i].coded_error, stats[i - 1].coded_error);
1309 double this_ratio =
1310 stats[i].sr_coded_error / AOMMAX(max_coded_error, 0.001);
1311 regions[k].avg_sr_fr_ratio += this_ratio / num_frames;
1312 }
1313
1314 regions[k].avg_intra_err +=
1315 stats[i].intra_error / (double)(regions[k].last - regions[k].start + 1);
1316 regions[k].avg_coded_err +=
1317 stats[i].coded_error / (double)(regions[k].last - regions[k].start + 1);
1318
1319 regions[k].avg_cor_coeff +=
1320 AOMMAX(stats[i].cor_coeff, 0.001) /
1321 (double)(regions[k].last - regions[k].start + 1);
1322 regions[k].avg_noise_var +=
1323 AOMMAX(stats[i].noise_var, 0.001) /
1324 (double)(regions[k].last - regions[k].start + 1);
1325 }
1326 }
1327
1328 // Calculate the regions stats of every region.
get_region_stats(const FIRSTPASS_STATS * stats,REGIONS * regions,int num_regions)1329 static void get_region_stats(const FIRSTPASS_STATS *stats, REGIONS *regions,
1330 int num_regions) {
1331 for (int k = 0; k < num_regions; k++) {
1332 analyze_region(stats, k, regions);
1333 }
1334 }
1335
1336 // Find tentative stable regions
find_stable_regions(const FIRSTPASS_STATS * stats,const double * grad_coded,int this_start,int this_last,REGIONS * regions)1337 static int find_stable_regions(const FIRSTPASS_STATS *stats,
1338 const double *grad_coded, int this_start,
1339 int this_last, REGIONS *regions) {
1340 int i, j, k = 0;
1341 regions[k].start = this_start;
1342 for (i = this_start; i <= this_last; i++) {
1343 // Check mean and variance of stats in a window
1344 double mean_intra = 0.001, var_intra = 0.001;
1345 double mean_coded = 0.001, var_coded = 0.001;
1346 int count = 0;
1347 for (j = -HALF_WIN; j <= HALF_WIN; j++) {
1348 int idx = AOMMIN(AOMMAX(i + j, this_start), this_last);
1349 if (stats[idx].is_flash || (idx > 0 && stats[idx - 1].is_flash)) continue;
1350 mean_intra += stats[idx].intra_error;
1351 var_intra += stats[idx].intra_error * stats[idx].intra_error;
1352 mean_coded += stats[idx].coded_error;
1353 var_coded += stats[idx].coded_error * stats[idx].coded_error;
1354 count++;
1355 }
1356
1357 REGION_TYPES cur_type;
1358 if (count > 0) {
1359 mean_intra /= (double)count;
1360 var_intra /= (double)count;
1361 mean_coded /= (double)count;
1362 var_coded /= (double)count;
1363 int is_intra_stable = (var_intra / (mean_intra * mean_intra) < 1.03);
1364 int is_coded_stable = (var_coded / (mean_coded * mean_coded) < 1.04 &&
1365 fabs(grad_coded[i]) / mean_coded < 0.05) ||
1366 mean_coded / mean_intra < 0.05;
1367 int is_coded_small = mean_coded < 0.5 * mean_intra;
1368 cur_type = (is_intra_stable && is_coded_stable && is_coded_small)
1369 ? STABLE_REGION
1370 : HIGH_VAR_REGION;
1371 } else {
1372 cur_type = HIGH_VAR_REGION;
1373 }
1374
1375 // mark a new region if type changes
1376 if (i == regions[k].start) {
1377 // first frame in the region
1378 regions[k].type = cur_type;
1379 } else if (cur_type != regions[k].type) {
1380 // Append a new region
1381 regions[k].last = i - 1;
1382 regions[k + 1].start = i;
1383 regions[k + 1].type = cur_type;
1384 k++;
1385 }
1386 }
1387 regions[k].last = this_last;
1388 return k + 1;
1389 }
1390
1391 // Clean up regions that should be removed or merged.
cleanup_regions(REGIONS * regions,int * num_regions)1392 static void cleanup_regions(REGIONS *regions, int *num_regions) {
1393 int k = 0;
1394 while (k < *num_regions) {
1395 if ((k > 0 && regions[k - 1].type == regions[k].type &&
1396 regions[k].type != SCENECUT_REGION) ||
1397 regions[k].last < regions[k].start) {
1398 remove_region(0, regions, num_regions, &k);
1399 } else {
1400 k++;
1401 }
1402 }
1403 }
1404
1405 // Remove regions that are of type and shorter than length.
1406 // Merge it with its neighboring regions.
remove_short_regions(REGIONS * regions,int * num_regions,REGION_TYPES type,int length)1407 static void remove_short_regions(REGIONS *regions, int *num_regions,
1408 REGION_TYPES type, int length) {
1409 int k = 0;
1410 while (k < *num_regions && (*num_regions) > 1) {
1411 if ((regions[k].last - regions[k].start + 1 < length &&
1412 regions[k].type == type)) {
1413 // merge current region with the previous and next regions
1414 remove_region(2, regions, num_regions, &k);
1415 } else {
1416 k++;
1417 }
1418 }
1419 cleanup_regions(regions, num_regions);
1420 }
1421
// Refine the boundaries between stable and unstable regions by moving frames
// that statistically match the neighboring stable region, then merge regions
// whose average stats contradict their assigned type.
static void adjust_unstable_region_bounds(const FIRSTPASS_STATS *stats,
                                          REGIONS *regions, int *num_regions) {
  int i, j, k;
  // Remove regions that are too short. Likely noise.
  remove_short_regions(regions, num_regions, STABLE_REGION, HALF_WIN);
  remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);

  get_region_stats(stats, regions, *num_regions);

  // Adjust region boundaries. The thresholds are empirically obtained, but
  // overall the performance is not very sensitive to small changes to them.
  for (k = 0; k < *num_regions; k++) {
    if (regions[k].type == STABLE_REGION) continue;
    if (k > 0) {
      // Adjust previous boundary.
      // First find the average intra/coded error in the previous
      // neighborhood.
      double avg_intra_err = 0;
      const int starti = AOMMAX(regions[k - 1].last - WINDOW_SIZE + 1,
                                regions[k - 1].start + 1);
      const int lasti = regions[k - 1].last;
      int counti = 0;
      for (i = starti; i <= lasti; i++) {
        avg_intra_err += stats[i].intra_error;
        counti++;
      }
      if (counti > 0) {
        avg_intra_err = AOMMAX(avg_intra_err / (double)counti, 0.001);
        // NOTE(review): count_grad is initialized and tested below but never
        // modified, so its check is always true — looks like leftover from a
        // removed gradient criterion; confirm before relying on it.
        int count_coded = 0, count_grad = 0;
        for (j = lasti + 1; j <= regions[k].last; j++) {
          const int intra_close =
              fabs(stats[j].intra_error - avg_intra_err) / avg_intra_err < 0.1;
          const int coded_small = stats[j].coded_error / avg_intra_err < 0.1;
          const int coeff_close = stats[j].cor_coeff > 0.995;
          // Each poorly-correlated or high-error frame reduces the budget
          // for extending the stable region.
          if (!coeff_close || !coded_small) count_coded--;
          if (intra_close && count_coded >= 0 && count_grad >= 0) {
            // this frame probably belongs to the previous stable region
            regions[k - 1].last = j;
            regions[k].start = j + 1;
          } else {
            break;
          }
        }
      }
    }  // if k > 0
    if (k < *num_regions - 1) {
      // Adjust next boundary.
      // First find the average intra/coded error in the next neighborhood.
      double avg_intra_err = 0;
      const int starti = regions[k + 1].start;
      const int lasti = AOMMIN(regions[k + 1].last - 1,
                               regions[k + 1].start + WINDOW_SIZE - 1);
      int counti = 0;
      for (i = starti; i <= lasti; i++) {
        avg_intra_err += stats[i].intra_error;
        counti++;
      }
      if (counti > 0) {
        avg_intra_err = AOMMAX(avg_intra_err / (double)counti, 0.001);
        // At the boundary, coded error is large, but still the frame is stable
        int count_coded = 1, count_grad = 1;
        for (j = starti - 1; j >= regions[k].start; j--) {
          const int intra_close =
              fabs(stats[j].intra_error - avg_intra_err) / avg_intra_err < 0.1;
          const int coded_small =
              stats[j + 1].coded_error / avg_intra_err < 0.1;
          const int coeff_close = stats[j].cor_coeff > 0.995;
          if (!coeff_close || !coded_small) count_coded--;
          if (intra_close && count_coded >= 0 && count_grad >= 0) {
            // this frame probably belongs to the next stable region
            regions[k + 1].start = j;
            regions[k].last = j - 1;
          } else {
            break;
          }
        }
      }
    }  // if k < *num_regions - 1
  }  // end of loop over all regions

  cleanup_regions(regions, num_regions);
  remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);
  get_region_stats(stats, regions, *num_regions);

  // If a stable region has higher error than neighboring high var regions,
  // or if the stable region has a lower average correlation,
  // then it should be merged with them
  k = 0;
  while (k < *num_regions && (*num_regions) > 1) {
    if (regions[k].type == STABLE_REGION &&
        (regions[k].last - regions[k].start + 1) < 2 * WINDOW_SIZE &&
        ((k > 0 &&  // previous regions
          (regions[k].avg_coded_err > regions[k - 1].avg_coded_err * 1.01 ||
           regions[k].avg_cor_coeff < regions[k - 1].avg_cor_coeff * 0.999)) &&
         (k < *num_regions - 1 &&  // next region
          (regions[k].avg_coded_err > regions[k + 1].avg_coded_err * 1.01 ||
           regions[k].avg_cor_coeff < regions[k + 1].avg_cor_coeff * 0.999)))) {
      // merge current region with the previous and next regions
      remove_region(2, regions, num_regions, &k);
      analyze_region(stats, k - 1, regions);
    } else if (regions[k].type == HIGH_VAR_REGION &&
               (regions[k].last - regions[k].start + 1) < 2 * WINDOW_SIZE &&
               ((k > 0 &&  // previous regions
                 (regions[k].avg_coded_err <
                      regions[k - 1].avg_coded_err * 0.99 ||
                  regions[k].avg_cor_coeff >
                      regions[k - 1].avg_cor_coeff * 1.001)) &&
                (k < *num_regions - 1 &&  // next region
                 (regions[k].avg_coded_err <
                      regions[k + 1].avg_coded_err * 0.99 ||
                  regions[k].avg_cor_coeff >
                      regions[k + 1].avg_cor_coeff * 1.001)))) {
      // merge current region with the previous and next regions
      remove_region(2, regions, num_regions, &k);
      analyze_region(stats, k - 1, regions);
    } else {
      k++;
    }
  }

  remove_short_regions(regions, num_regions, STABLE_REGION, WINDOW_SIZE);
  remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);
}
1545
1546 // Identify blending regions.
// Identify blending regions.
// Scans each non-stable region for runs of frames with a consistent, large
// change in intra error (the signature of a cross-fade / blend), inserts
// BLENDING_REGION entries for those runs, then post-processes the result:
// demotes low-correlation blends to HIGH_VAR_REGION, absorbs short
// high-variance "dips" sitting inside a blend, and either merges or splits
// consecutive blending regions.
static void find_blending_regions(const FIRSTPASS_STATS *stats,
                                  REGIONS *regions, int *num_regions) {
  int i, k = 0;
  // Blending regions will have large content change, therefore will have a
  // large consistent change in intra error.
  int count_stable = 0;
  // Pass 1: walk every unstable region and mark monotone intra-error runs.
  while (k < *num_regions) {
    if (regions[k].type == STABLE_REGION) {
      k++;
      count_stable++;
      continue;
    }
    int dir = 0;  // sign of the current intra-error trend: -1, 0 or +1
    int start = 0, last;
    for (i = regions[k].start; i <= regions[k].last; i++) {
      // First mark the regions that has consistent large change of intra error.
      // Skip the very first frame of the first region: stats[i - 1] would be
      // out of range there.
      if (k == 0 && i == regions[k].start) continue;
      // Flash frames distort the intra-error gradient; ignore them.
      if (stats[i].is_flash || (i > 0 && stats[i - 1].is_flash)) continue;
      double grad = stats[i].intra_error - stats[i - 1].intra_error;
      // "Large" means more than 5% relative change in intra error.
      int large_change = fabs(grad) / AOMMAX(stats[i].intra_error, 0.01) > 0.05;
      int this_dir = 0;
      if (large_change) {
        this_dir = (grad > 0) ? 1 : -1;
      }
      // the current trend continues
      if (dir == this_dir) continue;
      if (dir != 0) {
        // Mark the end of a new large change group and add it
        last = i - 1;
        insert_region(start, last, BLENDING_REGION, regions, num_regions, &k);
      }
      dir = this_dir;
      // For the second frame of the first region, back the start up by one so
      // the (skipped) first frame is included in the run.
      if (k == 0 && i == regions[k].start + 1) {
        start = i - 1;
      } else {
        start = i;
      }
    }
    // Close out a trend that was still running at the end of the region.
    if (dir != 0) {
      last = regions[k].last;
      insert_region(start, last, BLENDING_REGION, regions, num_regions, &k);
    }
    k++;
  }

  // If the blending region has very low correlation, mark it as high variance
  // since we probably cannot benefit from it anyways.
  get_region_stats(stats, regions, *num_regions);
  for (k = 0; k < *num_regions; k++) {
    if (regions[k].type != BLENDING_REGION) continue;
    // Single-frame blends, low correlation, or a clip with no stable region
    // at all are treated as plain high variance.
    if (regions[k].last == regions[k].start || regions[k].avg_cor_coeff < 0.6 ||
        count_stable == 0)
      regions[k].type = HIGH_VAR_REGION;
  }
  get_region_stats(stats, regions, *num_regions);

  // It is possible for blending to result in a "dip" in intra error (first
  // decrease then increase). Therefore we need to find the dip and combine the
  // two regions.
  k = 1;
  while (k < *num_regions) {
    if (k < *num_regions - 1 && regions[k].type == HIGH_VAR_REGION) {
      // Check if this short high variance regions is actually in the middle of
      // a blending region.
      if (regions[k - 1].type == BLENDING_REGION &&
          regions[k + 1].type == BLENDING_REGION &&
          regions[k].last - regions[k].start < 3) {
        // Direction of intra error at the tail of the neighboring blends.
        int prev_dir = (stats[regions[k - 1].last].intra_error -
                        stats[regions[k - 1].last - 1].intra_error) > 0
                           ? 1
                           : -1;
        int next_dir = (stats[regions[k + 1].last].intra_error -
                        stats[regions[k + 1].last - 1].intra_error) > 0
                           ? 1
                           : -1;
        if (prev_dir < 0 && next_dir > 0) {
          // This is possibly a mid region of blending. Check the ratios
          double ratio_thres = AOMMIN(regions[k - 1].avg_sr_fr_ratio,
                                      regions[k + 1].avg_sr_fr_ratio) *
                               0.95;
          if (regions[k].avg_sr_fr_ratio > ratio_thres) {
            regions[k].type = BLENDING_REGION;
            // Merge the dip with both neighbors into one blending region.
            remove_region(2, regions, num_regions, &k);
            analyze_region(stats, k - 1, regions);
            continue;
          }
        }
      }
    }
    // Check if we have a pair of consecutive blending regions.
    if (regions[k - 1].type == BLENDING_REGION &&
        regions[k].type == BLENDING_REGION) {
      int prev_dir = (stats[regions[k - 1].last].intra_error -
                      stats[regions[k - 1].last - 1].intra_error) > 0
                         ? 1
                         : -1;
      int next_dir = (stats[regions[k].last].intra_error -
                      stats[regions[k].last - 1].intra_error) > 0
                         ? 1
                         : -1;

      // if both are too short, no need to check
      int total_length = regions[k].last - regions[k - 1].start + 1;
      if (total_length < 4) {
        regions[k - 1].type = HIGH_VAR_REGION;
        k++;
        continue;
      }

      int to_merge = 0;
      // A down-then-up pattern across the pair suggests one blend split in
      // two; decide by comparing the boundary frame's sr/fr ratio.
      if (prev_dir < 0 && next_dir > 0) {
        // In this case we check the last frame in the previous region.
        double prev_length =
            (double)(regions[k - 1].last - regions[k - 1].start + 1);
        double last_ratio, ratio_thres;
        if (prev_length < 2.01) {
          // if the previous region is very short
          double max_coded_error =
              AOMMAX(stats[regions[k - 1].last].coded_error,
                     stats[regions[k - 1].last - 1].coded_error);
          last_ratio = stats[regions[k - 1].last].sr_coded_error /
                       AOMMAX(max_coded_error, 0.001);
          ratio_thres = regions[k].avg_sr_fr_ratio * 0.95;
        } else {
          double max_coded_error =
              AOMMAX(stats[regions[k - 1].last].coded_error,
                     stats[regions[k - 1].last - 1].coded_error);
          last_ratio = stats[regions[k - 1].last].sr_coded_error /
                       AOMMAX(max_coded_error, 0.001);
          // Average ratio of the previous region excluding its last frame.
          double prev_ratio =
              (regions[k - 1].avg_sr_fr_ratio * prev_length - last_ratio) /
              (prev_length - 1.0);
          ratio_thres = AOMMIN(prev_ratio, regions[k].avg_sr_fr_ratio) * 0.95;
        }
        if (last_ratio > ratio_thres) {
          to_merge = 1;
        }
      }

      if (to_merge) {
        remove_region(0, regions, num_regions, &k);
        analyze_region(stats, k - 1, regions);
        continue;
      } else {
        // These are possibly two separate blending regions. Mark the boundary
        // frame as HIGH_VAR_REGION to separate the two.
        int prev_k = k - 1;
        insert_region(regions[prev_k].last, regions[prev_k].last,
                      HIGH_VAR_REGION, regions, num_regions, &prev_k);
        analyze_region(stats, prev_k, regions);
        k = prev_k + 1;
        analyze_region(stats, k, regions);
      }
    }
    k++;
  }
  cleanup_regions(regions, num_regions);
}
1705
1706 // Clean up decision for blendings. Remove blending regions that are too short.
1707 // Also if a very short high var region is between a blending and a stable
1708 // region, just merge it with one of them.
// Clean up decision for blendings. Remove blending regions that are too short.
// Also if a very short high var region is between a blending and a stable
// region, just merge it with one of them.
static void cleanup_blendings(REGIONS *regions, int *num_regions) {
  int idx = 0;
  while (idx < *num_regions && *num_regions > 1) {
    const REGIONS *const cur = &regions[idx];
    const int length = cur->last - cur->start + 1;
    const int short_blend = (cur->type == BLENDING_REGION) && (length < 5);
    const int short_hv = (cur->type == HIGH_VAR_REGION) && (length < 5);

    const int has_prev = (idx > 0);
    const int has_next = (idx < *num_regions - 1);
    const int stable_nbr =
        (has_prev && regions[idx - 1].type == STABLE_REGION) ||
        (has_next && regions[idx + 1].type == STABLE_REGION);
    const int blend_nbr =
        (has_prev && regions[idx - 1].type == BLENDING_REGION) ||
        (has_next && regions[idx + 1].type == BLENDING_REGION);
    const int nbr_count = has_prev + has_next;

    // Keep this region unless it is a short blend, or a short high-variance
    // region squeezed entirely between stable/blending neighbors.
    if (!(short_blend ||
          (short_hv && stable_nbr + blend_nbr >= nbr_count))) {
      idx++;
      continue;
    }

    // Remove this region. Decide whether to fold it into the previous or the
    // next region by picking the neighbor with the closer average correlation.
    const double diff_prev =
        has_prev ? fabs(cur->avg_cor_coeff - regions[idx - 1].avg_cor_coeff)
                 : 1;
    const double diff_next =
        has_next ? fabs(cur->avg_cor_coeff - regions[idx + 1].avg_cor_coeff)
                 : 1;
    // 0 merges with the previous region, 1 with the next.
    const int merge_dir = diff_prev > diff_next;
    remove_region(merge_dir, regions, num_regions, &idx);
  }
  cleanup_regions(regions, num_regions);
}
1747
// Partition the firstpass stats of [0, total_frames) into typed regions
// (stable / high variance / blending / scenecut), writing the result into
// `regions` and the region count into `*total_regions`. `offset` is added to
// the start/last frame indices of every emitted region.
void av1_identify_regions(const FIRSTPASS_STATS *const stats_start,
                          int total_frames, int offset, REGIONS *regions,
                          int *total_regions) {
  int k;
  if (total_frames <= 1) return;

  // store the initial decisions
  REGIONS *temp_regions =
      (REGIONS *)aom_malloc(total_frames * sizeof(temp_regions[0]));
  // buffers for filtered stats
  double *filt_intra_err =
      (double *)aom_calloc(total_frames, sizeof(*filt_intra_err));
  double *filt_coded_err =
      (double *)aom_calloc(total_frames, sizeof(*filt_coded_err));
  double *grad_coded = (double *)aom_calloc(total_frames, sizeof(*grad_coded));
  // Bail out on any allocation failure instead of dereferencing NULL below.
  // aom_free(NULL) is a no-op, so the partial allocations are safely released.
  if (!(temp_regions && filt_intra_err && filt_coded_err && grad_coded)) {
    aom_free(temp_regions);
    aom_free(filt_coded_err);
    aom_free(filt_intra_err);
    aom_free(grad_coded);
    return;
  }
  av1_zero_array(temp_regions, total_frames);

  int cur_region = 0, this_start = 0, this_last;

  int next_scenecut = -1;
  do {
    // first get the obvious scenecuts
    next_scenecut =
        find_next_scenecut(stats_start, this_start, total_frames - 1);
    this_last = (next_scenecut >= 0) ? (next_scenecut - 1) : total_frames - 1;

    // low-pass filter the needed stats
    smooth_filter_stats(stats_start, this_start, this_last, filt_intra_err,
                        filt_coded_err);
    get_gradient(filt_coded_err, this_start, this_last, grad_coded);

    // find tentative stable regions and unstable regions
    int num_regions = find_stable_regions(stats_start, grad_coded, this_start,
                                          this_last, temp_regions);

    adjust_unstable_region_bounds(stats_start, temp_regions, &num_regions);

    get_region_stats(stats_start, temp_regions, num_regions);

    // Try to identify blending regions in the unstable regions
    find_blending_regions(stats_start, temp_regions, &num_regions);
    cleanup_blendings(temp_regions, &num_regions);

    // The flash points should all be considered high variance points
    k = 0;
    while (k < num_regions) {
      if (temp_regions[k].type != STABLE_REGION) {
        k++;
        continue;
      }
      int start = temp_regions[k].start;
      int last = temp_regions[k].last;
      // Punch single-frame HIGH_VAR regions out of stable spans at flashes.
      for (int i = start; i <= last; i++) {
        if (stats_start[i].is_flash) {
          insert_region(i, i, HIGH_VAR_REGION, temp_regions, &num_regions, &k);
        }
      }
      k++;
    }
    cleanup_regions(temp_regions, &num_regions);

    // copy the regions in the scenecut group
    for (k = 0; k < num_regions; k++) {
      // Drop a degenerate (empty) trailing region instead of copying it.
      if (temp_regions[k].last < temp_regions[k].start &&
          k == num_regions - 1) {
        num_regions--;
        break;
      }
      regions[k + cur_region] = temp_regions[k];
    }
    cur_region += num_regions;

    // add the scenecut region
    if (next_scenecut > -1) {
      // add the scenecut region, and find the next scenecut
      regions[cur_region].type = SCENECUT_REGION;
      regions[cur_region].start = next_scenecut;
      regions[cur_region].last = next_scenecut;
      cur_region++;
      this_start = next_scenecut + 1;
    }
  } while (next_scenecut >= 0);

  *total_regions = cur_region;
  get_region_stats(stats_start, regions, *total_regions);

  for (k = 0; k < *total_regions; k++) {
    // If scenecuts are very minor, mark them as high variance.
    if (regions[k].type != SCENECUT_REGION ||
        regions[k].avg_cor_coeff *
                (1 - stats_start[regions[k].start].noise_var /
                         regions[k].avg_intra_err) <
            0.8) {
      continue;
    }
    regions[k].type = HIGH_VAR_REGION;
  }
  cleanup_regions(regions, total_regions);
  get_region_stats(stats_start, regions, *total_regions);

  // Shift frame indices into the caller's coordinate system.
  for (k = 0; k < *total_regions; k++) {
    regions[k].start += offset;
    regions[k].last += offset;
  }

  aom_free(temp_regions);
  aom_free(filt_coded_err);
  aom_free(filt_intra_err);
  aom_free(grad_coded);
}
1858
find_regions_index(const REGIONS * regions,int num_regions,int frame_idx)1859 static int find_regions_index(const REGIONS *regions, int num_regions,
1860 int frame_idx) {
1861 for (int k = 0; k < num_regions; k++) {
1862 if (regions[k].start <= frame_idx && regions[k].last >= frame_idx) {
1863 return k;
1864 }
1865 }
1866 return -1;
1867 }
1868
1869 /*!\brief Determine the length of future GF groups.
1870 *
1871 * \ingroup gf_group_algo
1872 * This function decides the gf group length of future frames in batch
1873 *
1874 * \param[in] cpi Top-level encoder structure
1875 * \param[in] max_gop_length Maximum length of the GF group
1876 * \param[in] max_intervals Maximum number of intervals to decide
1877 *
1878 * \remark Nothing is returned. Instead, cpi->ppi->rc.gf_intervals is
1879 * changed to store the decided GF group lengths.
1880 */
static void calculate_gf_length(AV1_COMP *cpi, int max_gop_length,
                                int max_intervals) {
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FIRSTPASS_STATS next_frame;
  const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
  // When at a key frame, `stats` is shifted back by one so that index 0 is
  // the key frame itself.
  const FIRSTPASS_STATS *const stats = start_pos - (rc->frames_since_key == 0);

  const int f_w = cpi->common.width;
  const int f_h = cpi->common.height;
  int i;

  int flash_detected;

  av1_zero(next_frame);

  // Without firstpass stats, fall back to the configured maximum interval.
  if (has_no_stats_stage(cpi)) {
    for (i = 0; i < MAX_NUM_GF_INTERVALS; i++) {
      p_rc->gf_intervals[i] = AOMMIN(rc->max_gf_interval, max_gop_length);
    }
    p_rc->cur_gf_index = 0;
    rc->intervals_till_gf_calculate_due = MAX_NUM_GF_INTERVALS;
    return;
  }

  // TODO(urvang): Try logic to vary min and max interval based on q.
  const int active_min_gf_interval = rc->min_gf_interval;
  const int active_max_gf_interval =
      AOMMIN(rc->max_gf_interval, max_gop_length);
  const int min_shrink_int = AOMMAX(MIN_SHRINK_LEN, active_min_gf_interval);

  i = (rc->frames_since_key == 0);
  max_intervals = cpi->ppi->lap_enabled ? 1 : max_intervals;
  int count_cuts = 1;
  // If cpi->gf_state.arf_gf_boost_lst is 0, we are starting with a KF or GF.
  int cur_start = -1 + !cpi->ppi->gf_state.arf_gf_boost_lst, cur_last;
  int cut_pos[MAX_NUM_GF_INTERVALS + 1] = { -1 };
  int cut_here;
  GF_GROUP_STATS gf_stats;
  init_gf_stats(&gf_stats);
  while (count_cuts < max_intervals + 1) {
    // reaches next key frame, break here
    if (i >= rc->frames_to_key) {
      cut_here = 2;
    } else if (i - cur_start >= rc->static_scene_max_gf_interval) {
      // reached maximum len, but nothing special yet (almost static)
      // let's look at the next interval
      cut_here = 1;
    } else if (EOF == input_stats(twopass, &cpi->twopass_frame, &next_frame)) {
      // reaches last frame, break
      cut_here = 2;
    } else {
      // Test for the case where there is a brief flash but the prediction
      // quality back to an earlier frame is then restored.
      flash_detected = detect_flash(twopass, &cpi->twopass_frame, 0);
      // TODO(bohanli): remove redundant accumulations here, or unify
      // this and the ones in define_gf_group
      av1_accumulate_next_frame_stats(&next_frame, flash_detected,
                                      rc->frames_since_key, i, &gf_stats, f_w,
                                      f_h);

      cut_here = detect_gf_cut(cpi, i, cur_start, flash_detected,
                               active_max_gf_interval, active_min_gf_interval,
                               &gf_stats);
    }
    if (cut_here) {
      cur_last = i - 1;  // the current last frame in the gf group
      int ori_last = cur_last;
      // The region frame idx does not start from the same frame as cur_start
      // and cur_last. Need to offset them.
      int offset = rc->frames_since_key - p_rc->regions_offset;
      REGIONS *regions = p_rc->regions;
      int num_regions = p_rc->num_regions;

      int scenecut_idx = -1;
      // only try shrinking if interval smaller than active_max_gf_interval
      if (cur_last - cur_start <= active_max_gf_interval &&
          cur_last > cur_start) {
        // find the region indices of where the first and last frame belong.
        int k_start =
            find_regions_index(regions, num_regions, cur_start + offset);
        int k_last =
            find_regions_index(regions, num_regions, cur_last + offset);
        if (cur_start + offset == 0) k_start = 0;

        // See if we have a scenecut in between
        for (int r = k_start + 1; r <= k_last; r++) {
          if (regions[r].type == SCENECUT_REGION &&
              regions[r].last - offset - cur_start > active_min_gf_interval) {
            scenecut_idx = r;
            break;
          }
        }

        // if the found scenecut is very close to the end, ignore it.
        // Guard against scenecut_idx == -1: without the check, the condition
        // would read regions[-1] (out-of-bounds, undefined behavior) when no
        // scenecut was found above.
        if (scenecut_idx != -1 &&
            regions[num_regions - 1].last - regions[scenecut_idx].last < 4) {
          scenecut_idx = -1;
        }

        if (scenecut_idx != -1) {
          // If we have a scenecut, then stop at it.
          // TODO(bohanli): add logic here to stop before the scenecut and for
          // the next gop start from the scenecut with GF
          int is_minor_sc =
              (regions[scenecut_idx].avg_cor_coeff *
                   (1 - stats[regions[scenecut_idx].start - offset].noise_var /
                            regions[scenecut_idx].avg_intra_err) >
               0.6);
          cur_last = regions[scenecut_idx].last - offset - !is_minor_sc;
        } else {
          int is_last_analysed = (k_last == num_regions - 1) &&
                                 (cur_last + offset == regions[k_last].last);
          int not_enough_regions =
              k_last - k_start <=
              1 + (regions[k_start].type == SCENECUT_REGION);
          // if we are very close to the end, then do not shrink since it may
          // introduce intervals that are too short
          if (!(is_last_analysed && not_enough_regions)) {
            const double arf_length_factor = 0.1;
            double best_score = 0;
            int best_j = -1;
            const int first_frame = regions[0].start - offset;
            const int last_frame = regions[num_regions - 1].last - offset;
            // score of how much the arf helps the whole GOP
            double base_score = 0.0;
            // Accumulate base_score in
            for (int j = cur_start + 1; j < cur_start + min_shrink_int; j++) {
              if (stats + j >= twopass->stats_buf_ctx->stats_in_end) break;
              base_score = (base_score + 1.0) * stats[j].cor_coeff;
            }
            int met_blending = 0;   // Whether we have met blending areas before
            int last_blending = 0;  // Whether the previous frame if blending
            for (int j = cur_start + min_shrink_int; j <= cur_last; j++) {
              if (stats + j >= twopass->stats_buf_ctx->stats_in_end) break;
              base_score = (base_score + 1.0) * stats[j].cor_coeff;
              int this_reg =
                  find_regions_index(regions, num_regions, j + offset);
              if (this_reg < 0) continue;
              // A GOP should include at most 1 blending region.
              if (regions[this_reg].type == BLENDING_REGION) {
                last_blending = 1;
                if (met_blending) {
                  break;
                } else {
                  base_score = 0;
                  continue;
                }
              } else {
                if (last_blending) met_blending = 1;
                last_blending = 0;
              }

              // Add the factor of how good the neighborhood is for this
              // candidate arf.
              double this_score = arf_length_factor * base_score;
              double temp_accu_coeff = 1.0;
              // following frames
              int count_f = 0;
              for (int n = j + 1; n <= j + 3 && n <= last_frame; n++) {
                if (stats + n >= twopass->stats_buf_ctx->stats_in_end) break;
                temp_accu_coeff *= stats[n].cor_coeff;
                this_score +=
                    temp_accu_coeff *
                    (1 - stats[n].noise_var /
                             AOMMAX(regions[this_reg].avg_intra_err, 0.001));
                count_f++;
              }
              // preceding frames
              temp_accu_coeff = 1.0;
              for (int n = j; n > j - 3 * 2 + count_f && n > first_frame; n--) {
                if (stats + n < twopass->stats_buf_ctx->stats_in_start) break;
                temp_accu_coeff *= stats[n].cor_coeff;
                this_score +=
                    temp_accu_coeff *
                    (1 - stats[n].noise_var /
                             AOMMAX(regions[this_reg].avg_intra_err, 0.001));
              }

              if (this_score > best_score) {
                best_score = this_score;
                best_j = j;
              }
            }

            // For blending areas, move one more frame in case we missed the
            // first blending frame.
            int best_reg =
                find_regions_index(regions, num_regions, best_j + offset);
            if (best_reg < num_regions - 1 && best_reg > 0) {
              if (regions[best_reg - 1].type == BLENDING_REGION &&
                  regions[best_reg + 1].type == BLENDING_REGION) {
                if (best_j + offset == regions[best_reg].start &&
                    best_j + offset < regions[best_reg].last) {
                  best_j += 1;
                } else if (best_j + offset == regions[best_reg].last &&
                           best_j + offset > regions[best_reg].start) {
                  best_j -= 1;
                }
              }
            }

            if (cur_last - best_j < 2) best_j = cur_last;
            if (best_j > 0 && best_score > 0.1) cur_last = best_j;
            // if cannot find anything, just cut at the original place.
          }
        }
      }
      cut_pos[count_cuts] = cur_last;
      count_cuts++;

      // reset pointers to the shrunken location
      cpi->twopass_frame.stats_in = start_pos + cur_last;
      cur_start = cur_last;
      int cur_region_idx =
          find_regions_index(regions, num_regions, cur_start + 1 + offset);
      if (cur_region_idx >= 0)
        if (regions[cur_region_idx].type == SCENECUT_REGION) cur_start++;

      i = cur_last;

      if (cut_here > 1 && cur_last == ori_last) break;

      // reset accumulators
      init_gf_stats(&gf_stats);
    }
    ++i;
  }

  // save intervals
  rc->intervals_till_gf_calculate_due = count_cuts - 1;
  for (int n = 1; n < count_cuts; n++) {
    p_rc->gf_intervals[n - 1] = cut_pos[n] - cut_pos[n - 1];
  }
  p_rc->cur_gf_index = 0;
  cpi->twopass_frame.stats_in = start_pos;
}
2118
// Clamp rc.frames_to_key so it never exceeds the frames actually available,
// either because the lookahead queue is flushing or because the total number
// of remaining frames is known.
static void correct_frames_to_key(AV1_COMP *cpi) {
  const int frames_left = cpi->ppi->frames_left;
  const int lookahead_size =
      (int)av1_lookahead_depth(cpi->ppi->lookahead, cpi->compressor_stage);
  const int pop_size =
      av1_lookahead_pop_sz(cpi->ppi->lookahead, cpi->compressor_stage);

  if (lookahead_size < pop_size) {
    // The queue is draining: in multi-pass encoding with frames remaining,
    // the queue depth should match the remaining frame count.
    assert(IMPLIES(cpi->oxcf.pass != AOM_RC_ONE_PASS && frames_left > 0,
                   lookahead_size == frames_left));
    cpi->rc.frames_to_key = AOMMIN(cpi->rc.frames_to_key, lookahead_size);
  } else if (frames_left > 0) {
    // Correct frames to key based on limit
    cpi->rc.frames_to_key = AOMMIN(cpi->rc.frames_to_key, frames_left);
  }
}
2134
2135 /*!\brief Define a GF group in one pass mode when no look ahead stats are
2136 * available.
2137 *
2138 * \ingroup gf_group_algo
2139 * This function defines the structure of a GF group, along with various
2140 * parameters regarding bit-allocation and quality setup in the special
2141 * case of one pass encoding where no lookahead stats are avialable.
2142 *
2143 * \param[in] cpi Top-level encoder structure
2144 *
2145 * \remark Nothing is returned. Instead, cpi->ppi->gf_group is changed.
2146 */
static void define_gf_group_pass0(AV1_COMP *cpi) {
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  GF_GROUP *const gf_group = &cpi->ppi->gf_group;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const GFConfig *const gf_cfg = &oxcf->gf_cfg;

  // Pick the group length: cyclic refresh has its own golden-update logic;
  // otherwise consume the next precomputed interval.
  if (oxcf->q_cfg.aq_mode == CYCLIC_REFRESH_AQ) {
    av1_cyclic_refresh_set_golden_update(cpi);
  } else {
    p_rc->baseline_gf_interval = p_rc->gf_intervals[p_rc->cur_gf_index];
    rc->intervals_till_gf_calculate_due--;
    p_rc->cur_gf_index++;
  }

  // correct frames_to_key when lookahead queue is flushing
  correct_frames_to_key(cpi);

  // The group must not extend past the next key frame.
  if (p_rc->baseline_gf_interval > rc->frames_to_key)
    p_rc->baseline_gf_interval = rc->frames_to_key;

  p_rc->gfu_boost = DEFAULT_GF_BOOST;
  p_rc->constrained_gf_group =
      (p_rc->baseline_gf_interval >= rc->frames_to_key) ? 1 : 0;

  gf_group->max_layer_depth_allowed = oxcf->gf_cfg.gf_max_pyr_height;

  // Rare case when the look-ahead is less than the target GOP length, can't
  // generate ARF frame.
  const int arf_unusable =
      p_rc->baseline_gf_interval > gf_cfg->lag_in_frames ||
      !is_altref_enabled(gf_cfg->lag_in_frames, gf_cfg->enable_auto_arf) ||
      p_rc->baseline_gf_interval < rc->min_gf_interval;
  if (arf_unusable) gf_group->max_layer_depth_allowed = 0;

  // Set up the structure of this Group-Of-Pictures (same as GF_GROUP)
  av1_gop_setup_structure(cpi);

  // Allocate bits to each of the frames in the GF group.
  // TODO(sarahparker) Extend this to work with pyramid structure.
  for (int idx = 0; idx < gf_group->size; ++idx) {
    const FRAME_UPDATE_TYPE update_type = gf_group->update_type[idx];
    const int is_key = (update_type == KF_UPDATE);
    int frame_target;
    if (oxcf->rc_cfg.mode == AOM_CBR) {
      frame_target =
          is_key ? av1_calc_iframe_target_size_one_pass_cbr(cpi)
                 : av1_calc_pframe_target_size_one_pass_cbr(cpi, update_type);
    } else {
      frame_target =
          is_key ? av1_calc_iframe_target_size_one_pass_vbr(cpi)
                 : av1_calc_pframe_target_size_one_pass_vbr(cpi, update_type);
    }
    gf_group->bit_allocation[idx] = frame_target;
  }
}
2205
// Record the chosen GF group length (the ARF position) as the baseline
// interval in the primary rate-control state.
static INLINE void set_baseline_gf_interval(PRIMARY_RATE_CONTROL *p_rc,
                                            int arf_position) {
  p_rc->baseline_gf_interval = arf_position;
}
2210
2211 // initialize GF_GROUP_STATS
// Reset the accumulated per-GF-group statistics to their starting values.
// Note: only the fields below are touched; any other members of
// GF_GROUP_STATS are intentionally left as-is.
static void init_gf_stats(GF_GROUP_STATS *gf_stats) {
  // Multiplicative decay/motion accumulators start from 1.0.
  gf_stats->decay_accumulator = 1.0;
  gf_stats->zero_motion_accumulator = 1.0;
  gf_stats->loop_decay_rate = 1.0;
  gf_stats->last_loop_decay_rate = 1.0;

  // Additive group-error accumulators start from zero.
  gf_stats->gf_group_err = 0.0;
  gf_stats->gf_group_raw_error = 0.0;
  gf_stats->gf_group_skip_pct = 0.0;
  gf_stats->gf_group_inactive_zone_rows = 0.0;

  // Motion-vector accumulators start from zero.
  gf_stats->mv_ratio_accumulator = 0.0;
  gf_stats->this_frame_mv_in_out = 0.0;
  gf_stats->mv_in_out_accumulator = 0.0;
  gf_stats->abs_mv_in_out_accumulator = 0.0;

  // Running averages and their counter start from zero.
  gf_stats->avg_sr_coded_error = 0.0;
  gf_stats->avg_pcnt_second_ref = 0.0;
  gf_stats->avg_new_mv_count = 0.0;
  gf_stats->avg_wavelet_energy = 0.0;
  gf_stats->avg_raw_err_stdev = 0.0;
  gf_stats->non_zero_stdev_count = 0;
}
2234
// Accumulate firstpass statistics over the frames of the upcoming GF group
// (whose length is read from p_rc->gf_intervals[p_rc->cur_gf_index]) into
// *gf_stats, and return the number of frames covered in *idx. The stats
// stream is read in two passes: first per-frame error accumulation, then
// next-frame (inter-frame) accumulation, with a rewind to start_pos between
// them. The stream position is left wherever the second pass ends.
static void accumulate_gop_stats(AV1_COMP *cpi, int is_intra_only, int f_w,
                                 int f_h, FIRSTPASS_STATS *next_frame,
                                 const FIRSTPASS_STATS *start_pos,
                                 GF_GROUP_STATS *gf_stats, int *idx) {
  int i, flash_detected;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  RATE_CONTROL *const rc = &cpi->rc;
  FRAME_INFO *frame_info = &cpi->frame_info;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;

  init_gf_stats(gf_stats);
  av1_zero(*next_frame);

  // If this is a key frame or the overlay from a previous arf then
  // the error score / cost of this frame has already been accounted for.
  i = is_intra_only;
  // get the determined gf group length from p_rc->gf_intervals
  // First pass over the group: accumulate modified error per frame.
  while (i < p_rc->gf_intervals[p_rc->cur_gf_index]) {
    // read in the next frame
    if (EOF == input_stats(twopass, &cpi->twopass_frame, next_frame)) break;
    // Accumulate error score of frames in this gf group.
    double mod_frame_err =
        calculate_modified_err(frame_info, twopass, oxcf, next_frame);
    // accumulate stats for this frame
    accumulate_this_frame_stats(next_frame, mod_frame_err, gf_stats);
    ++i;
  }

  // Rewind so the second pass re-reads the same frames.
  reset_fpf_position(&cpi->twopass_frame, start_pos);

  i = is_intra_only;
  // Prime next_frame with the first stats entry before the loop below.
  input_stats(twopass, &cpi->twopass_frame, next_frame);
  // Second pass: accumulate inter-frame (motion/decay) statistics.
  while (i < p_rc->gf_intervals[p_rc->cur_gf_index]) {
    // read in the next frame
    if (EOF == input_stats(twopass, &cpi->twopass_frame, next_frame)) break;

    // Test for the case where there is a brief flash but the prediction
    // quality back to an earlier frame is then restored.
    flash_detected = detect_flash(twopass, &cpi->twopass_frame, 0);

    // accumulate stats for next frame
    av1_accumulate_next_frame_stats(next_frame, flash_detected,
                                    rc->frames_since_key, i, gf_stats, f_w,
                                    f_h);

    ++i;
  }

  // Average the accumulators over the full interval length.
  i = p_rc->gf_intervals[p_rc->cur_gf_index];
  average_gf_stats(i, gf_stats);

  *idx = i;
}
2289
// Commit a decided GOP length of `idx` frames to the rate-control state.
static void update_gop_length(RATE_CONTROL *rc, PRIMARY_RATE_CONTROL *p_rc,
                              int idx, int is_final_pass) {
  // Only the final pass consumes one of the precomputed GF intervals.
  if (is_final_pass) {
    --rc->intervals_till_gf_calculate_due;
    ++p_rc->cur_gf_index;
  }

  // Was the group length constrained by the requirement for a new KF?
  p_rc->constrained_gf_group = (idx >= rc->frames_to_key);

  set_baseline_gf_interval(p_rc, idx);
  rc->frames_till_gf_update_due = p_rc->baseline_gf_interval;
}
2303
2304 #define MAX_GF_BOOST 5400
2305 #define REDUCE_GF_LENGTH_THRESH 4
2306 #define REDUCE_GF_LENGTH_TO_KEY_THRESH 9
2307 #define REDUCE_GF_LENGTH_BY 1
// Compute the boost and bit budget for the GF group just defined:
// - gfu_boost (with or without an alt-ref frame),
// - the total bit allocation for the group (gf_group_bits),
// - an adaptive max-Q estimate for the group (when GROUP_ADAPTIVE_MAXQ),
// then distribute the bits across the group via av1_gop_bit_allocation().
// `i` is the group length, `start_pos` the stats position at the group start;
// the stats stream position is restored to start_pos on all paths.
static void set_gop_bits_boost(AV1_COMP *cpi, int i, int is_intra_only,
                               int is_final_pass, int use_alt_ref,
                               int alt_offset, const FIRSTPASS_STATS *start_pos,
                               GF_GROUP_STATS *gf_stats) {
  // Should we use the alternate reference frame.
  AV1_COMMON *const cm = &cpi->common;
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  GF_GROUP *gf_group = &cpi->ppi->gf_group;
  FRAME_INFO *frame_info = &cpi->frame_info;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const RateControlCfg *const rc_cfg = &oxcf->rc_cfg;

  // Number of frames the boost calculation extends over (intra frame, if
  // any, is excluded).
  int ext_len = i - is_intra_only;
  if (use_alt_ref) {
    // Forward frames are capped by the distance to the next key frame.
    const int forward_frames = (rc->frames_to_key - i >= ext_len)
                                   ? ext_len
                                   : AOMMAX(0, rc->frames_to_key - i);

    // Calculate the boost for alt ref.
    p_rc->gfu_boost = av1_calc_arf_boost(
        twopass, &cpi->twopass_frame, p_rc, frame_info, alt_offset,
        forward_frames, ext_len, &p_rc->num_stats_used_for_gfu_boost,
        &p_rc->num_stats_required_for_gfu_boost, cpi->ppi->lap_enabled);
  } else {
    // No alt-ref: boost the golden frame instead, capped at MAX_GF_BOOST.
    reset_fpf_position(&cpi->twopass_frame, start_pos);
    p_rc->gfu_boost = AOMMIN(
        MAX_GF_BOOST,
        av1_calc_arf_boost(
            twopass, &cpi->twopass_frame, p_rc, frame_info, alt_offset, ext_len,
            0, &p_rc->num_stats_used_for_gfu_boost,
            &p_rc->num_stats_required_for_gfu_boost, cpi->ppi->lap_enabled));
  }

#define LAST_ALR_BOOST_FACTOR 0.2f
  p_rc->arf_boost_factor = 1.0;
  if (use_alt_ref && !is_lossless_requested(rc_cfg)) {
    // Reduce the boost of altref in the last gf group
    if (rc->frames_to_key - ext_len == REDUCE_GF_LENGTH_BY ||
        rc->frames_to_key - ext_len == 0) {
      p_rc->arf_boost_factor = LAST_ALR_BOOST_FACTOR;
    }
  }

  // Reset the file position.
  reset_fpf_position(&cpi->twopass_frame, start_pos);
  if (cpi->ppi->lap_enabled) {
    // Since we don't have enough stats to know the actual error of the
    // gf group, we assume error of each frame to be equal to 1 and set
    // the error of the group as baseline_gf_interval.
    gf_stats->gf_group_err = p_rc->baseline_gf_interval;
  }
  // Calculate the bits to be allocated to the gf/arf group as a whole
  p_rc->gf_group_bits =
      calculate_total_gf_group_bits(cpi, gf_stats->gf_group_err);

#if GROUP_ADAPTIVE_MAXQ
  // Calculate an estimate of the maxq needed for the group.
  // We are more aggressive about correcting for sections
  // where there could be significant overshoot than for easier
  // sections where we do not wish to risk creating an overshoot
  // of the allocated bit budget.
  if ((rc_cfg->mode != AOM_Q) && (p_rc->baseline_gf_interval > 1) &&
      is_final_pass) {
    const int vbr_group_bits_per_frame =
        (int)(p_rc->gf_group_bits / p_rc->baseline_gf_interval);
    const double group_av_err =
        gf_stats->gf_group_raw_error / p_rc->baseline_gf_interval;
    const double group_av_skip_pct =
        gf_stats->gf_group_skip_pct / p_rc->baseline_gf_interval;
    const double group_av_inactive_zone =
        ((gf_stats->gf_group_inactive_zone_rows * 2) /
         (p_rc->baseline_gf_interval * (double)cm->mi_params.mb_rows));

    int tmp_q;
    tmp_q = get_twopass_worst_quality(
        cpi, group_av_err, (group_av_skip_pct + group_av_inactive_zone),
        vbr_group_bits_per_frame);
    // Never allow active_worst_quality to drop below half its prior value.
    rc->active_worst_quality = AOMMAX(tmp_q, rc->active_worst_quality >> 1);
  }
#endif

  // Adjust KF group bits and error remaining.
  if (is_final_pass) twopass->kf_group_error_left -= gf_stats->gf_group_err;

  // Reset the file position.
  reset_fpf_position(&cpi->twopass_frame, start_pos);

  // Calculate a section intra ratio used in setting max loop filter.
  if (rc->frames_since_key != 0) {
    twopass->section_intra_rating = calculate_section_intra_ratio(
        start_pos, twopass->stats_buf_ctx->stats_in_end,
        p_rc->baseline_gf_interval);
  }

  av1_gop_bit_allocation(cpi, rc, gf_group, rc->frames_since_key == 0,
                         use_alt_ref, p_rc->gf_group_bits);

  // TODO(jingning): Generalize this condition.
  if (is_final_pass) {
    cpi->ppi->gf_state.arf_gf_boost_lst = use_alt_ref;

    // Reset rolling actual and target bits counters for ARF groups.
    twopass->rolling_arf_group_target_bits = 1;
    twopass->rolling_arf_group_actual_bits = 1;
  }
#if CONFIG_BITRATE_ACCURACY
  if (is_final_pass) {
    av1_vbr_rc_set_gop_bit_budget(&cpi->vbr_rc_info,
                                  p_rc->baseline_gf_interval);
  }
#endif
}
2422
2423 /*!\brief Define a GF group.
2424 *
2425 * \ingroup gf_group_algo
2426 * This function defines the structure of a GF group, along with various
2427 * parameters regarding bit-allocation and quality setup.
2428 *
2429 * \param[in] cpi Top-level encoder structure
2430 * \param[in] frame_params Structure with frame parameters
2431 * \param[in] is_final_pass Whether this is the final pass for the
2432 * GF group, or a trial (non-zero)
2433 *
2434 * \remark Nothing is returned. Instead, cpi->ppi->gf_group is changed.
2435 */
static void define_gf_group(AV1_COMP *cpi, EncodeFrameParams *frame_params,
                            int is_final_pass) {
  AV1_COMMON *const cm = &cpi->common;
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FIRSTPASS_STATS next_frame;
  const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
  GF_GROUP *gf_group = &cpi->ppi->gf_group;
  const GFConfig *const gf_cfg = &oxcf->gf_cfg;
  const RateControlCfg *const rc_cfg = &oxcf->rc_cfg;
  const int f_w = cm->width;
  const int f_h = cm->height;
  int i;
  const int is_intra_only = rc->frames_since_key == 0;

  // Internal (within-group) alt-refs are only possible with a GF pyramid
  // deeper than one level.
  cpi->ppi->internal_altref_allowed = (gf_cfg->gf_max_pyr_height > 1);

  // Reset the GF group data structures unless this is a key
  // frame in which case it will already have been done.
  if (!is_intra_only) {
    av1_zero(cpi->ppi->gf_group);
    cpi->gf_frame_index = 0;
  }

  // One-pass / no-stats encoding: use the simplified pass-0 group definition.
  if (has_no_stats_stage(cpi)) {
    define_gf_group_pass0(cpi);
    return;
  }

  // Third pass: try to derive the group from the second-pass bitstream. A
  // non-zero return signals a conflict with the current configuration, in
  // which case the third-pass context is dropped and we fall through to the
  // regular two-pass path below.
  if (cpi->third_pass_ctx && oxcf->pass == AOM_RC_THIRD_PASS) {
    int ret = define_gf_group_pass3(cpi, frame_params, is_final_pass);
    if (ret == 0) return;

    av1_free_thirdpass_ctx(cpi->third_pass_ctx);
    cpi->third_pass_ctx = NULL;
  }

  // correct frames_to_key when lookahead queue is emptying
  if (cpi->ppi->lap_enabled) {
    correct_frames_to_key(cpi);
  }

  // Accumulate first-pass stats over the candidate group; on return, i holds
  // the proposed group length.
  GF_GROUP_STATS gf_stats;
  accumulate_gop_stats(cpi, is_intra_only, f_w, f_h, &next_frame, start_pos,
                       &gf_stats, &i);

  const int can_disable_arf = !gf_cfg->gf_min_pyr_height;

  // If this is a key frame or the overlay from a previous arf then
  // the error score / cost of this frame has already been accounted for.
  const int active_min_gf_interval = rc->min_gf_interval;

  // Disable internal ARFs for "still" gf groups.
  // zero_motion_accumulator: minimum percentage of (0,0) motion;
  // avg_sr_coded_error: average of the SSE per pixel of each frame;
  // avg_raw_err_stdev: average of the standard deviation of (0,0)
  // motion error per block of each frame.
  const int can_disable_internal_arfs = gf_cfg->gf_min_pyr_height <= 1;
  if (can_disable_internal_arfs &&
      gf_stats.zero_motion_accumulator > MIN_ZERO_MOTION &&
      gf_stats.avg_sr_coded_error < MAX_SR_CODED_ERROR &&
      gf_stats.avg_raw_err_stdev < MAX_RAW_ERR_VAR) {
    cpi->ppi->internal_altref_allowed = 0;
  }

  // Decide whether this group gets an alt-ref frame. When ARFs may be
  // disabled entirely, additionally require the content not to be almost
  // static and the group to reach MIN_GF_INTERVAL.
  int use_alt_ref;
  if (can_disable_arf) {
    use_alt_ref =
        !is_almost_static(gf_stats.zero_motion_accumulator,
                          twopass->kf_zeromotion_pct, cpi->ppi->lap_enabled) &&
        p_rc->use_arf_in_this_kf_group && (i < gf_cfg->lag_in_frames) &&
        (i >= MIN_GF_INTERVAL);
  } else {
    use_alt_ref = p_rc->use_arf_in_this_kf_group &&
                  (i < gf_cfg->lag_in_frames) && (i > 2);
  }
  if (use_alt_ref) {
    gf_group->max_layer_depth_allowed = gf_cfg->gf_max_pyr_height;
  } else {
    gf_group->max_layer_depth_allowed = 0;
  }

  int alt_offset = 0;
  // The length reduction strategy is tweaked for certain cases, and doesn't
  // work well for certain other cases.
  const int allow_gf_length_reduction =
      ((rc_cfg->mode == AOM_Q && rc_cfg->cq_level <= 128) ||
       !cpi->ppi->internal_altref_allowed) &&
      !is_lossless_requested(rc_cfg);

  if (allow_gf_length_reduction && use_alt_ref) {
    // adjust length of this gf group if one of the following condition met
    // 1: only one overlay frame left and this gf is too long
    // 2: next gf group is too short to have arf compared to the current gf

    // maximum length of next gf group
    const int next_gf_len = rc->frames_to_key - i;
    const int single_overlay_left =
        next_gf_len == 0 && i > REDUCE_GF_LENGTH_THRESH;
    // the next gf is probably going to have a ARF but it will be shorter than
    // this gf
    const int unbalanced_gf =
        i > REDUCE_GF_LENGTH_TO_KEY_THRESH &&
        next_gf_len + 1 < REDUCE_GF_LENGTH_TO_KEY_THRESH &&
        next_gf_len + 1 >= rc->min_gf_interval;

    if (single_overlay_left || unbalanced_gf) {
      const int roll_back = REDUCE_GF_LENGTH_BY;
      // Reduce length only if active_min_gf_interval will be respected later.
      if (i - roll_back >= active_min_gf_interval + 1) {
        alt_offset = -roll_back;
        i -= roll_back;
        if (is_final_pass) rc->intervals_till_gf_calculate_due = 0;
        p_rc->gf_intervals[p_rc->cur_gf_index] -= roll_back;
        // Group length changed: rewind and re-accumulate the stats over the
        // shortened interval.
        reset_fpf_position(&cpi->twopass_frame, start_pos);
        accumulate_gop_stats(cpi, is_intra_only, f_w, f_h, &next_frame,
                             start_pos, &gf_stats, &i);
      }
    }
  }

  update_gop_length(rc, p_rc, i, is_final_pass);

  // Set up the structure of this Group-Of-Pictures (same as GF_GROUP)
  av1_gop_setup_structure(cpi);

  // Compute boosts and distribute the bit budget across the group.
  set_gop_bits_boost(cpi, i, is_intra_only, is_final_pass, use_alt_ref,
                     alt_offset, start_pos, &gf_stats);

  frame_params->frame_type =
      rc->frames_since_key == 0 ? KEY_FRAME : INTER_FRAME;
  // ARF and internal-ARF frames are coded but not shown at this point.
  frame_params->show_frame =
      !(gf_group->update_type[cpi->gf_frame_index] == ARF_UPDATE ||
        gf_group->update_type[cpi->gf_frame_index] == INTNL_ARF_UPDATE);
}
2573
/*!\brief Define a GF group for the third pass.
2575 *
2576 * \ingroup gf_group_algo
2577 * This function defines the structure of a GF group for the third pass, along
2578 * with various parameters regarding bit-allocation and quality setup based on
2579 * the two-pass bitstream.
2580 * Much of the function still uses the strategies used for the second pass and
2581 * relies on first pass statistics. It is expected that over time these portions
2582 * would be replaced with strategies specific to the third pass.
2583 *
2584 * \param[in] cpi Top-level encoder structure
2585 * \param[in] frame_params Structure with frame parameters
2586 * \param[in] is_final_pass Whether this is the final pass for the
2587 * GF group, or a trial (non-zero)
2588 *
2589 * \return 0: Success;
2590 * -1: There are conflicts between the bitstream and current config
2591 * The values in cpi->ppi->gf_group are also changed.
2592 */
static int define_gf_group_pass3(AV1_COMP *cpi, EncodeFrameParams *frame_params,
                                 int is_final_pass) {
  if (!cpi->third_pass_ctx) return -1;
  AV1_COMMON *const cm = &cpi->common;
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  FIRSTPASS_STATS next_frame;
  const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
  GF_GROUP *gf_group = &cpi->ppi->gf_group;
  const GFConfig *const gf_cfg = &oxcf->gf_cfg;
  const int frame_width = cm->width;
  const int frame_height = cm->height;
  const int is_intra_only = rc->frames_since_key == 0;

  // Internal (pyramid) alt-refs need a pyramid deeper than one level.
  cpi->ppi->internal_altref_allowed = gf_cfg->gf_max_pyr_height > 1;

  // A key frame has already reset these structures; otherwise do it here.
  if (!is_intra_only) {
    av1_zero(cpi->ppi->gf_group);
    cpi->gf_frame_index = 0;
  }

  // Gather first-pass stats for the candidate group; gop_length receives the
  // proposed group length.
  int gop_length;
  GF_GROUP_STATS gf_stats;
  accumulate_gop_stats(cpi, is_intra_only, frame_width, frame_height,
                       &next_frame, start_pos, &gf_stats, &gop_length);

  const int arf_may_be_disabled = !gf_cfg->gf_min_pyr_height;

  // TODO(any): set cpi->ppi->internal_altref_allowed accordingly;

  // The two-pass bitstream dictates the ARF decision; report a conflict when
  // it says "no ARF" but the configuration does not permit disabling ARFs.
  const int use_alt_ref = av1_check_use_arf(cpi->third_pass_ctx);
  if (!use_alt_ref && !arf_may_be_disabled) return -1;
  gf_group->max_layer_depth_allowed =
      use_alt_ref ? gf_cfg->gf_max_pyr_height : 0;

  update_gop_length(rc, p_rc, gop_length, is_final_pass);

  // Set up the structure of this Group-Of-Pictures (same as GF_GROUP)
  av1_gop_setup_structure(cpi);

  // Compute boosts and distribute the bit budget across the group.
  set_gop_bits_boost(cpi, gop_length, is_intra_only, is_final_pass, use_alt_ref,
                     0, start_pos, &gf_stats);

  // Frame type and visibility come straight from the parsed bitstream.
  frame_params->frame_type = cpi->third_pass_ctx->frame_info[0].frame_type;
  frame_params->show_frame = cpi->third_pass_ctx->frame_info[0].is_show_frame;
  return 0;
}
2646
2647 // #define FIXED_ARF_BITS
2648 #ifdef FIXED_ARF_BITS
2649 #define ARF_BITS_FRACTION 0.75
2650 #endif
void av1_gop_bit_allocation(const AV1_COMP *cpi, RATE_CONTROL *const rc,
                            GF_GROUP *gf_group, int is_key_frame, int use_arf,
                            int64_t gf_group_bits) {
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  // Work out the extra bits granted to the boosted (ARF/GF) frame(s).
#ifdef FIXED_ARF_BITS
  int boost_bits = (int)(ARF_BITS_FRACTION * gf_group_bits);
#else
  // When the group starts on a key frame, that frame's bits are accounted in
  // the KF group, so drop it from the interval length here.
  const int interval_frames =
      p_rc->baseline_gf_interval - (rc->frames_since_key == 0);
  int boost_bits =
      calculate_boost_bits(interval_frames, p_rc->gfu_boost, gf_group_bits);
#endif

  boost_bits = adjust_boost_bits_for_target_level(cpi, rc, boost_bits,
                                                  gf_group_bits, 1);

  // Spread the group budget over every frame in the GF group.
  allocate_gf_group_bits(gf_group, p_rc, rc, gf_group_bits, boost_bits,
                         is_key_frame, use_arf);
}
2671
2672 // Minimum % intra coding observed in first pass (1.0 = 100%)
2673 #define MIN_INTRA_LEVEL 0.25
2674 // Minimum ratio between the % of intra coding and inter coding in the first
2675 // pass after discounting neutral blocks (discounting neutral blocks in this
2676 // way helps catch scene cuts in clips with very flat areas or letter box
// format clips with image padding).
2678 #define INTRA_VS_INTER_THRESH 2.0
2679 // Hard threshold where the first pass chooses intra for almost all blocks.
2680 // In such a case even if the frame is not a scene cut coding a key frame
2681 // may be a good option.
2682 #define VERY_LOW_INTER_THRESH 0.05
2683 // Maximum threshold for the relative ratio of intra error score vs best
2684 // inter error score.
2685 #define KF_II_ERR_THRESHOLD 1.9
2686 // In real scene cuts there is almost always a sharp change in the intra
2687 // or inter error score.
2688 #define ERR_CHANGE_THRESHOLD 0.4
// For real scene cuts we expect an improvement in the intra inter error
2690 // ratio in the next frame.
2691 #define II_IMPROVEMENT_THRESHOLD 3.5
2692 #define KF_II_MAX 128.0
2693 // Intra / Inter threshold very low
2694 #define VERY_LOW_II 1.5
2695 // Clean slide transitions we expect a sharp single frame spike in error.
2696 #define ERROR_SPIKE 5.0
2697
2698 // Slide show transition detection.
2699 // Tests for case where there is very low error either side of the current frame
2700 // but much higher just for this frame. This can help detect key frames in
2701 // slide shows even where the slides are pictures of different sizes.
2702 // Also requires that intra and inter errors are very similar to help eliminate
2703 // harmful false positives.
2704 // It will not help if the transition is a fade or other multi-frame effect.
slide_transition(const FIRSTPASS_STATS * this_frame,const FIRSTPASS_STATS * last_frame,const FIRSTPASS_STATS * next_frame)2705 static int slide_transition(const FIRSTPASS_STATS *this_frame,
2706 const FIRSTPASS_STATS *last_frame,
2707 const FIRSTPASS_STATS *next_frame) {
2708 return (this_frame->intra_error < (this_frame->coded_error * VERY_LOW_II)) &&
2709 (this_frame->coded_error > (last_frame->coded_error * ERROR_SPIKE)) &&
2710 (this_frame->coded_error > (next_frame->coded_error * ERROR_SPIKE));
2711 }
2712
2713 // Threshold for use of the lagging second reference frame. High second ref
2714 // usage may point to a transient event like a flash or occlusion rather than
2715 // a real scene cut.
2716 // We adapt the threshold based on number of frames in this key-frame group so
2717 // far.
static double get_second_ref_usage_thresh(int frame_count_so_far) {
  // Threshold ramps linearly from the minimum up to min + max_delta as the
  // key-frame group grows, saturating once adapt_upto frames have passed.
  const int adapt_upto = 32;
  const double min_second_ref_usage_thresh = 0.085;
  const double second_ref_usage_thresh_max_delta = 0.035;
  if (frame_count_so_far >= adapt_upto)
    return min_second_ref_usage_thresh + second_ref_usage_thresh_max_delta;
  const double ramp = (double)frame_count_so_far / (adapt_upto - 1);
  return min_second_ref_usage_thresh +
         ramp * second_ref_usage_thresh_max_delta;
}
2729
// Tests whether the frame at this_stats_index is a viable key-frame
// candidate: first against the primary scene-cut criteria (second-ref usage,
// intra/inter balance, error-change thresholds), then by checking that it
// would predict the following frames well enough. Returns 1 if viable.
static int test_candidate_kf(const FIRSTPASS_INFO *firstpass_info,
                             int this_stats_index, int frame_count_so_far,
                             enum aom_rc_mode rc_mode, int scenecut_mode,
                             int num_mbs) {
  const FIRSTPASS_STATS *last_stats =
      av1_firstpass_info_peek(firstpass_info, this_stats_index - 1);
  const FIRSTPASS_STATS *this_stats =
      av1_firstpass_info_peek(firstpass_info, this_stats_index);
  const FIRSTPASS_STATS *next_stats =
      av1_firstpass_info_peek(firstpass_info, this_stats_index + 1);
  // The tests below need the previous, current and next frames' stats.
  if (last_stats == NULL || this_stats == NULL || next_stats == NULL) {
    return 0;
  }

  int is_viable_kf = 0;
  double pcnt_intra = 1.0 - this_stats->pcnt_inter;
  // Inter usage after discounting neutral blocks (helps catch scene cuts in
  // flat / letter-boxed content).
  double modified_pcnt_inter =
      this_stats->pcnt_inter - this_stats->pcnt_neutral;
  const double second_ref_usage_thresh =
      get_second_ref_usage_thresh(frame_count_so_far);
  int frames_to_test_after_candidate_key = SCENE_CUT_KEY_TEST_INTERVAL;
  int count_for_tolerable_prediction = 3;

  // We do "-1" because the candidate key is not counted.
  int stats_after_this_stats =
      av1_firstpass_info_future_count(firstpass_info, this_stats_index) - 1;

  // Mode 1 (e.g. low-latency lookahead) uses a shorter look-forward window
  // and a relaxed prediction-count requirement.
  if (scenecut_mode == ENABLE_SCENECUT_MODE_1) {
    if (stats_after_this_stats < 3) {
      return 0;
    } else {
      frames_to_test_after_candidate_key = 3;
      count_for_tolerable_prediction = 1;
    }
  }
  // Make sure we have enough stats after the candidate key.
  frames_to_test_after_candidate_key =
      AOMMIN(frames_to_test_after_candidate_key, stats_after_this_stats);

  // Does the frame satisfy the primary criteria of a key frame?
  // See above for an explanation of the test criteria.
  // If so, then examine how well it predicts subsequent frames.
  if (IMPLIES(rc_mode == AOM_Q, frame_count_so_far >= 3) &&
      (this_stats->pcnt_second_ref < second_ref_usage_thresh) &&
      (next_stats->pcnt_second_ref < second_ref_usage_thresh) &&
      ((this_stats->pcnt_inter < VERY_LOW_INTER_THRESH) ||
       slide_transition(this_stats, last_stats, next_stats) ||
       ((pcnt_intra > MIN_INTRA_LEVEL) &&
        (pcnt_intra > (INTRA_VS_INTER_THRESH * modified_pcnt_inter)) &&
        ((this_stats->intra_error /
          DOUBLE_DIVIDE_CHECK(this_stats->coded_error)) <
         KF_II_ERR_THRESHOLD) &&
        ((fabs(last_stats->coded_error - this_stats->coded_error) /
              DOUBLE_DIVIDE_CHECK(this_stats->coded_error) >
          ERR_CHANGE_THRESHOLD) ||
         (fabs(last_stats->intra_error - this_stats->intra_error) /
              DOUBLE_DIVIDE_CHECK(this_stats->intra_error) >
          ERR_CHANGE_THRESHOLD) ||
         ((next_stats->intra_error /
           DOUBLE_DIVIDE_CHECK(next_stats->coded_error)) >
          II_IMPROVEMENT_THRESHOLD))))) {
    int i;
    double boost_score = 0.0;
    double old_boost_score = 0.0;
    double decay_accumulator = 1.0;

    // Examine how well the key frame predicts subsequent frames.
    for (i = 1; i <= frames_to_test_after_candidate_key; ++i) {
      // Get the next frame details
      const FIRSTPASS_STATS *local_next_frame =
          av1_firstpass_info_peek(firstpass_info, this_stats_index + i);
      // Intra/inter error ratio, capped at KF_II_MAX.
      double next_iiratio =
          (BOOST_FACTOR * local_next_frame->intra_error /
           DOUBLE_DIVIDE_CHECK(local_next_frame->coded_error));

      if (next_iiratio > KF_II_MAX) next_iiratio = KF_II_MAX;

      // Cumulative effect of decay in prediction quality.
      if (local_next_frame->pcnt_inter > 0.85)
        decay_accumulator *= local_next_frame->pcnt_inter;
      else
        decay_accumulator *= (0.85 + local_next_frame->pcnt_inter) / 2.0;

      // Keep a running total.
      boost_score += (decay_accumulator * next_iiratio);

      // Test various breakout clauses.
      // TODO(any): Test of intra error should be normalized to an MB.
      if ((local_next_frame->pcnt_inter < 0.05) || (next_iiratio < 1.5) ||
          (((local_next_frame->pcnt_inter - local_next_frame->pcnt_neutral) <
            0.20) &&
           (next_iiratio < 3.0)) ||
          ((boost_score - old_boost_score) < 3.0) ||
          (local_next_frame->intra_error < (200.0 / (double)num_mbs))) {
        break;
      }

      old_boost_score = boost_score;
    }

    // If there is tolerable prediction for at least the next 3 frames then
    // break out else discard this potential key frame and move on
    if (boost_score > 30.0 && (i > count_for_tolerable_prediction)) {
      is_viable_kf = 1;
    } else {
      is_viable_kf = 0;
    }
  }
  return is_viable_kf;
}
2840
2841 #define FRAMES_TO_CHECK_DECAY 8
2842 #define KF_MIN_FRAME_BOOST 80.0
2843 #define KF_MAX_FRAME_BOOST 128.0
2844 #define MIN_KF_BOOST 600 // Minimum boost for non-static KF interval
2845 #define MAX_KF_BOOST 3200
2846 #define MIN_STATIC_KF_BOOST 5400 // Minimum boost for static KF interval
2847
// Returns the distance (in frames) to the next application-forced key frame
// pending in the lookahead queue, as reported by is_forced_keyframe_pending().
static int detect_app_forced_key(AV1_COMP *cpi) {
  return is_forced_keyframe_pending(cpi->ppi->lookahead,
                                    cpi->ppi->lookahead->max_sz,
                                    cpi->compressor_stage);
}
2853
static int get_projected_kf_boost(AV1_COMP *cpi) {
  const int frames_to_key = cpi->rc.frames_to_key;
  const int stats_used = cpi->ppi->p_rc.num_stats_used_for_kf_boost;
  // If the stats already cover the whole key-frame interval, the stored
  // boost reflects every frame and needs no projection.
  if (stats_used >= frames_to_key) return cpi->ppi->p_rc.kf_boost;

  // Otherwise scale the boost from the partially-observed interval up to the
  // full interval via the ratio of the two projection factors.
  const double full_factor =
      av1_get_kf_boost_projection_factor(frames_to_key);
  const double partial_factor =
      av1_get_kf_boost_projection_factor(stats_used);
  return (int)rint((full_factor * cpi->ppi->p_rc.kf_boost) / partial_factor);
}
2872
2873 /*!\brief Determine the location of the next key frame
2874 *
2875 * \ingroup gf_group_algo
2876 * This function decides the placement of the next key frame when a
2877 * scenecut is detected or the maximum key frame distance is reached.
2878 *
2879 * \param[in] cpi Top-level encoder structure
2880 * \param[in] firstpass_info struct for firstpass info
2881 * \param[in] num_frames_to_detect_scenecut Maximum lookahead frames.
2882 * \param[in] search_start_idx the start index for searching key frame.
2883 * Set it to one if we already know the
2884 * current frame is key frame. Otherwise,
2885 * set it to zero.
2886 *
2887 * \return Number of frames to the next key including the current frame.
2888 */
static int define_kf_interval(AV1_COMP *cpi,
                              const FIRSTPASS_INFO *firstpass_info,
                              int num_frames_to_detect_scenecut,
                              int search_start_idx) {
  const TWO_PASS *const twopass = &cpi->ppi->twopass;
  const RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const KeyFrameCfg *const kf_cfg = &oxcf->kf_cfg;
  // Rolling window of the last FRAMES_TO_CHECK_DECAY per-frame decay rates.
  double recent_loop_decay[FRAMES_TO_CHECK_DECAY];
  double decay_accumulator = 1.0;
  int i = 0, j;
  int frames_to_key = search_start_idx;
  int frames_since_key = rc->frames_since_key + 1;
  int scenecut_detected = 0;

  // -1 means no application-forced key frame is pending.
  int num_frames_to_next_key = detect_app_forced_key(cpi);

  // With scene-cut detection disabled, the answer is either the forced-key
  // distance or the previously computed frames_to_key.
  if (num_frames_to_detect_scenecut == 0) {
    if (num_frames_to_next_key != -1)
      return num_frames_to_next_key;
    else
      return rc->frames_to_key;
  }

  // Never search past a pending application-forced key frame.
  if (num_frames_to_next_key != -1)
    num_frames_to_detect_scenecut =
        AOMMIN(num_frames_to_detect_scenecut, num_frames_to_next_key);

  // Initialize the decay rates for the recent frames to check
  for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j) recent_loop_decay[j] = 1.0;

  i = 0;
  // Under dynamic resize use the initial MB count so stats stay comparable.
  const int num_mbs = (oxcf->resize_cfg.resize_mode != RESIZE_NONE)
                          ? cpi->initial_mbs
                          : cpi->common.mi_params.MBs;
  const int future_stats_count =
      av1_firstpass_info_future_count(firstpass_info, 0);
  // Scan forward one frame at a time until a scene cut is found or a limit
  // (available stats / detection window) is hit.
  while (frames_to_key < future_stats_count &&
         frames_to_key < num_frames_to_detect_scenecut) {
    // Provided that we are not at the end of the file...
    if ((cpi->ppi->p_rc.enable_scenecut_detection > 0) && kf_cfg->auto_key &&
        frames_to_key + 1 < future_stats_count) {
      double loop_decay_rate;

      // Check for a scene cut.
      if (frames_since_key >= kf_cfg->key_freq_min) {
        scenecut_detected = test_candidate_kf(
            &twopass->firstpass_info, frames_to_key, frames_since_key,
            oxcf->rc_cfg.mode, cpi->ppi->p_rc.enable_scenecut_detection,
            num_mbs);
        if (scenecut_detected) {
          break;
        }
      }

      // How fast is the prediction quality decaying?
      const FIRSTPASS_STATS *next_stats =
          av1_firstpass_info_peek(firstpass_info, frames_to_key + 1);
      loop_decay_rate = get_prediction_decay_rate(next_stats);

      // We want to know something about the recent past... rather than
      // as used elsewhere where we are concerned with decay in prediction
      // quality since the last GF or KF.
      recent_loop_decay[i % FRAMES_TO_CHECK_DECAY] = loop_decay_rate;
      decay_accumulator = 1.0;
      for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j)
        decay_accumulator *= recent_loop_decay[j];

      // Special check for transition or high motion followed by a
      // static scene.
      if (frames_since_key >= kf_cfg->key_freq_min) {
        scenecut_detected = detect_transition_to_still(
            firstpass_info, frames_to_key + 1, rc->min_gf_interval, i,
            kf_cfg->key_freq_max - i, loop_decay_rate, decay_accumulator);
        if (scenecut_detected) {
          // In the case of transition followed by a static scene, the key frame
          // could be a good predictor for the following frames, therefore we
          // do not use an arf.
          p_rc->use_arf_in_this_kf_group = 0;
          break;
        }
      }

      // Step on to the next frame.
      ++frames_to_key;
      ++frames_since_key;

      // If we don't have a real key frame within the next two
      // key_freq_max intervals then break out of the loop.
      if (frames_to_key >= 2 * kf_cfg->key_freq_max) {
        break;
      }
    } else {
      ++frames_to_key;
      ++frames_since_key;
    }
    ++i;
  }
  // In LAP mode without a detected scene cut, fall back to the forced-key
  // distance (which may be -1 when none is pending).
  if (cpi->ppi->lap_enabled && !scenecut_detected)
    frames_to_key = num_frames_to_next_key;

  return frames_to_key;
}
2993
// Returns the mean per-frame coded error over the key-frame group, reading
// up to frames_to_key stats starting from start_position.
static double get_kf_group_avg_error(TWO_PASS *twopass,
                                     TWO_PASS_FRAME *twopass_frame,
                                     const FIRSTPASS_STATS *first_frame,
                                     const FIRSTPASS_STATS *start_position,
                                     int frames_to_key) {
  FIRSTPASS_STATS frame_stats = *first_frame;
  double total_coded_error = 0.0;
  int idx;

  reset_fpf_position(twopass_frame, start_position);

  for (idx = 0; idx < frames_to_key; ++idx) {
    total_coded_error += frame_stats.coded_error;
    if (input_stats(twopass, twopass_frame, &frame_stats) == EOF) break;
  }
  // idx + 1 frames contributed to the sum; never average over more than
  // frames_to_key.
  const int frame_count = AOMMIN(idx + 1, frames_to_key);

  return total_coded_error / frame_count;
}
3015
// Computes the total bit budget for the key-frame group.
static int64_t get_kf_group_bits(AV1_COMP *cpi, double kf_group_err,
                                 double kf_group_avg_error) {
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;

  if (!cpi->ppi->lap_enabled) {
    // Two-pass: this group's share of the remaining bits, in proportion to
    // its share of the remaining modified error.
    return (int64_t)(twopass->bits_left *
                     (kf_group_err / twopass->modified_error_left));
  }

  // LAP: start from the nominal per-frame bandwidth over the interval.
  int64_t group_bits = (int64_t)rc->frames_to_key * rc->avg_frame_bandwidth;
  if (cpi->oxcf.rc_cfg.vbr_corpus_complexity_lap) {
    // Scale by this group's complexity relative to the configured corpus
    // average (the config value is stored scaled by 10).
    double vbr_corpus_complexity_lap =
        cpi->oxcf.rc_cfg.vbr_corpus_complexity_lap / 10.0;
    group_bits = (int64_t)(group_bits *
                           (kf_group_avg_error / vbr_corpus_complexity_lap));
  }
  return group_bits;
}
3037
// Accumulates first-pass stats over the remaining frames of the key-frame
// group into avg_frame_stat and converts the totals to per-frame averages.
// Returns the number of frames accumulated (totals are left un-averaged when
// fewer than 2 frames are available).
static int calc_avg_stats(AV1_COMP *cpi, FIRSTPASS_STATS *avg_frame_stat) {
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FIRSTPASS_STATS frame_stats;
  av1_zero(frame_stats);
  int frame_count = 0;
  // Accumulate total stat using available number of stats.
  for (frame_count = 0; frame_count < (rc->frames_to_key - 1); ++frame_count) {
    if (EOF == input_stats(twopass, &cpi->twopass_frame, &frame_stats)) break;
    av1_accumulate_stats(avg_frame_stat, &frame_stats);
  }

  if (frame_count < 2) {
    return frame_count;
  }

  // Turn each accumulated total into a per-frame average.
#define AVERAGE_FIELD(field) \
  (avg_frame_stat->field = avg_frame_stat->field / frame_count)
  AVERAGE_FIELD(weight);
  AVERAGE_FIELD(intra_error);
  AVERAGE_FIELD(frame_avg_wavelet_energy);
  AVERAGE_FIELD(coded_error);
  AVERAGE_FIELD(sr_coded_error);
  AVERAGE_FIELD(pcnt_inter);
  AVERAGE_FIELD(pcnt_motion);
  AVERAGE_FIELD(pcnt_second_ref);
  AVERAGE_FIELD(pcnt_neutral);
  AVERAGE_FIELD(intra_skip_pct);
  AVERAGE_FIELD(inactive_zone_rows);
  AVERAGE_FIELD(inactive_zone_cols);
  AVERAGE_FIELD(MVr);
  AVERAGE_FIELD(mvr_abs);
  AVERAGE_FIELD(MVc);
  AVERAGE_FIELD(mvc_abs);
  AVERAGE_FIELD(MVrv);
  AVERAGE_FIELD(MVcv);
  AVERAGE_FIELD(mv_in_out_count);
  AVERAGE_FIELD(new_mv_count);
  AVERAGE_FIELD(count);
  AVERAGE_FIELD(duration);
#undef AVERAGE_FIELD

  return frame_count;
}
3084
// Computes the key-frame boost score by summing per-frame boosts over the
// KF group, weighted by a zero-motion factor and damped by the running
// second-ref (sr) error accumulator.
static double get_kf_boost_score(AV1_COMP *cpi, double kf_raw_err,
                                 double *zero_motion_accumulator,
                                 double *sr_accumulator, int use_avg_stat) {
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FRAME_INFO *const frame_info = &cpi->frame_info;
  FIRSTPASS_STATS frame_stat;
  av1_zero(frame_stat);
  int i = 0, num_stat_used = 0;
  double boost_score = 0.0;
  // In AOM_Q mode cap the per-frame boost relative to the interval length;
  // otherwise use the fixed maximum.
  const double kf_max_boost =
      cpi->oxcf.rc_cfg.mode == AOM_Q
          ? AOMMIN(AOMMAX(rc->frames_to_key * 2.0, KF_MIN_FRAME_BOOST),
                   KF_MAX_FRAME_BOOST)
          : KF_MAX_FRAME_BOOST;

  // Calculate the average using available number of stats.
  if (use_avg_stat) num_stat_used = calc_avg_stats(cpi, &frame_stat);

  // When use_avg_stat is set, every remaining iteration reuses the averaged
  // frame_stat; otherwise each iteration reads the next frame's stats.
  for (i = num_stat_used; i < (rc->frames_to_key - 1); ++i) {
    if (!use_avg_stat &&
        EOF == input_stats(twopass, &cpi->twopass_frame, &frame_stat))
      break;

    // Monitor for static sections.
    // For the first frame in kf group, the second ref indicator is invalid.
    if (i > 0) {
      *zero_motion_accumulator =
          AOMMIN(*zero_motion_accumulator, get_zero_motion_factor(&frame_stat));
    } else {
      *zero_motion_accumulator = frame_stat.pcnt_inter - frame_stat.pcnt_motion;
    }

    // Not all frames in the group are necessarily used in calculating boost.
    if ((*sr_accumulator < (kf_raw_err * 1.50)) &&
        (i <= rc->max_gf_interval * 2)) {
      double frame_boost;
      double zm_factor;

      // Factor 0.75-1.25 based on how much of frame is static.
      zm_factor = (0.75 + (*zero_motion_accumulator / 2.0));

      // The sr accumulator is not meaningful for the first two frames.
      if (i < 2) *sr_accumulator = 0.0;
      frame_boost =
          calc_kf_frame_boost(&cpi->ppi->p_rc, frame_info, &frame_stat,
                              sr_accumulator, kf_max_boost);
      boost_score += frame_boost * zm_factor;
    }
  }
  return boost_score;
}
3136
/*!\brief Interval (in seconds) to which the key-frame distance is clipped
 * when look-ahead processing (LAP) is enabled.
 */
3139 #define MAX_KF_BITS_INTERVAL_SINGLE_PASS 5
3140
3141 /*!\brief Determine the next key frame group
3142 *
3143 * \ingroup gf_group_algo
3144 * This function decides the placement of the next key frame, and
3145 * calculates the bit allocation of the KF group and the keyframe itself.
3146 *
3147 * \param[in] cpi Top-level encoder structure
3148 * \param[in] this_frame Pointer to first pass stats
3149 */
static void find_next_key_frame(AV1_COMP *cpi, FIRSTPASS_STATS *this_frame) {
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  GF_GROUP *const gf_group = &cpi->ppi->gf_group;
  FRAME_INFO *const frame_info = &cpi->frame_info;
  AV1_COMMON *const cm = &cpi->common;
  CurrentFrame *const current_frame = &cm->current_frame;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const KeyFrameCfg *const kf_cfg = &oxcf->kf_cfg;
  // Keep an unmodified copy of the entry stats; rescans below restart from
  // this frame.
  const FIRSTPASS_STATS first_frame = *this_frame;
  FIRSTPASS_STATS next_frame;
  const FIRSTPASS_INFO *firstpass_info = &twopass->firstpass_info;
  av1_zero(next_frame);

  rc->frames_since_key = 0;
  // Use arfs if possible.
  p_rc->use_arf_in_this_kf_group = is_altref_enabled(
      oxcf->gf_cfg.lag_in_frames, oxcf->gf_cfg.enable_auto_arf);

  // Reset the GF group data structures.
  av1_zero(*gf_group);
  cpi->gf_frame_index = 0;

  // KF is always a GF so clear frames till next gf counter.
  rc->frames_till_gf_update_due = 0;

  // With no first pass stats available only the KF placement can be set;
  // boost falls back to a fixed default.
  if (has_no_stats_stage(cpi)) {
    int num_frames_to_app_forced_key = detect_app_forced_key(cpi);
    p_rc->this_key_frame_forced =
        current_frame->frame_number != 0 && rc->frames_to_key == 0;
    if (num_frames_to_app_forced_key != -1)
      rc->frames_to_key = num_frames_to_app_forced_key;
    else
      rc->frames_to_key = AOMMAX(1, kf_cfg->key_freq_max);
    correct_frames_to_key(cpi);
    p_rc->kf_boost = DEFAULT_KF_BOOST;
    gf_group->update_type[0] = KF_UPDATE;
    return;
  }
  int i;
  const FIRSTPASS_STATS *const start_position = cpi->twopass_frame.stats_in;
  int kf_bits = 0;
  double zero_motion_accumulator = 1.0;
  double boost_score = 0.0;
  double kf_raw_err = 0.0;
  double kf_mod_err = 0.0;
  double sr_accumulator = 0.0;
  double kf_group_avg_error = 0.0;
  int frames_to_key, frames_to_key_clipped = INT_MAX;
  int64_t kf_group_bits_clipped = INT64_MAX;

  // Is this a forced key frame by interval.
  p_rc->this_key_frame_forced = p_rc->next_key_frame_forced;

  twopass->kf_group_bits = 0;        // Total bits available to kf group
  twopass->kf_group_error_left = 0;  // Group modified error score.

  kf_raw_err = this_frame->intra_error;
  kf_mod_err = calculate_modified_err(frame_info, twopass, oxcf, this_frame);

  // We assume the current frame is a key frame and we are looking for the next
  // key frame. Therefore search_start_idx = 1
  frames_to_key = define_kf_interval(cpi, firstpass_info, kf_cfg->key_freq_max,
                                     /*search_start_idx=*/1);

  // define_kf_interval() returns -1 when no natural break was found within
  // the allowed range; fall back to the configured maximum.
  if (frames_to_key != -1) {
    rc->frames_to_key = AOMMIN(kf_cfg->key_freq_max, frames_to_key);
  } else {
    rc->frames_to_key = kf_cfg->key_freq_max;
  }

  if (cpi->ppi->lap_enabled) correct_frames_to_key(cpi);

  // If there is a max kf interval set by the user we must obey it.
  // We already breakout of the loop above at 2x max.
  // This code centers the extra kf if the actual natural interval
  // is between 1x and 2x.
  if (kf_cfg->auto_key && rc->frames_to_key > kf_cfg->key_freq_max) {
    FIRSTPASS_STATS tmp_frame = first_frame;

    rc->frames_to_key /= 2;

    // Reset to the start of the group.
    reset_fpf_position(&cpi->twopass_frame, start_position);
    // Rescan to get the correct error data for the forced kf group.
    for (i = 0; i < rc->frames_to_key; ++i) {
      if (EOF == input_stats(twopass, &cpi->twopass_frame, &tmp_frame)) break;
    }
    p_rc->next_key_frame_forced = 1;
  } else if ((cpi->twopass_frame.stats_in ==
                  twopass->stats_buf_ctx->stats_in_end &&
              is_stat_consumption_stage_twopass(cpi)) ||
             rc->frames_to_key >= kf_cfg->key_freq_max) {
    p_rc->next_key_frame_forced = 1;
  } else {
    p_rc->next_key_frame_forced = 0;
  }

  // Accumulate the modified error over every frame of the KF group; this
  // drives the group's share of the remaining bit budget.
  double kf_group_err = 0;
  for (i = 0; i < rc->frames_to_key; ++i) {
    const FIRSTPASS_STATS *this_stats =
        av1_firstpass_info_peek(&twopass->firstpass_info, i);
    if (this_stats != NULL) {
      // Accumulate kf group error.
      kf_group_err += calculate_modified_err_new(
          frame_info, &firstpass_info->total_stats, this_stats,
          oxcf->rc_cfg.vbrbias, twopass->modified_error_min,
          twopass->modified_error_max);
      ++p_rc->num_stats_used_for_kf_boost;
    }
  }

  // Calculate the number of bits that should be assigned to the kf group.
  if ((twopass->bits_left > 0 && twopass->modified_error_left > 0.0) ||
      (cpi->ppi->lap_enabled && oxcf->rc_cfg.mode != AOM_Q)) {
    // Maximum number of bits for a single normal frame (not key frame).
    const int max_bits = frame_max_bits(rc, oxcf);

    // Maximum number of bits allocated to the key frame group.
    int64_t max_grp_bits;

    if (oxcf->rc_cfg.vbr_corpus_complexity_lap) {
      kf_group_avg_error =
          get_kf_group_avg_error(twopass, &cpi->twopass_frame, &first_frame,
                                 start_position, rc->frames_to_key);
    }

    // Default allocation based on bits left and relative
    // complexity of the section.
    twopass->kf_group_bits =
        get_kf_group_bits(cpi, kf_group_err, kf_group_avg_error);
    // Clip based on maximum per frame rate defined by the user.
    max_grp_bits = (int64_t)max_bits * (int64_t)rc->frames_to_key;
    if (twopass->kf_group_bits > max_grp_bits)
      twopass->kf_group_bits = max_grp_bits;
  } else {
    twopass->kf_group_bits = 0;
  }
  twopass->kf_group_bits = AOMMAX(0, twopass->kf_group_bits);

  if (cpi->ppi->lap_enabled) {
    // In the case of single pass based on LAP, frames to key may have an
    // inaccurate value, and hence should be clipped to an appropriate
    // interval.
    frames_to_key_clipped =
        (int)(MAX_KF_BITS_INTERVAL_SINGLE_PASS * cpi->framerate);

    // This variable calculates the bits allocated to kf_group with a clipped
    // frames_to_key.
    if (rc->frames_to_key > frames_to_key_clipped) {
      kf_group_bits_clipped =
          (int64_t)((double)twopass->kf_group_bits * frames_to_key_clipped /
                    rc->frames_to_key);
    }
  }

  // Reset the first pass file position.
  reset_fpf_position(&cpi->twopass_frame, start_position);

  // Scan through the kf group collating various stats used to determine
  // how many bits to spend on it.
  boost_score = get_kf_boost_score(cpi, kf_raw_err, &zero_motion_accumulator,
                                   &sr_accumulator, 0);
  reset_fpf_position(&cpi->twopass_frame, start_position);
  // Store the zero motion percentage
  twopass->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);

  // Calculate a section intra ratio used in setting max loop filter.
  twopass->section_intra_rating = calculate_section_intra_ratio(
      start_position, twopass->stats_buf_ctx->stats_in_end, rc->frames_to_key);

  p_rc->kf_boost = (int)boost_score;

  if (cpi->ppi->lap_enabled) {
    if (oxcf->rc_cfg.mode == AOM_Q) {
      p_rc->kf_boost = get_projected_kf_boost(cpi);
    } else {
      // TODO(any): Explore using average frame stats for AOM_Q as well.
      boost_score = get_kf_boost_score(
          cpi, kf_raw_err, &zero_motion_accumulator, &sr_accumulator, 1);
      reset_fpf_position(&cpi->twopass_frame, start_position);
      p_rc->kf_boost += (int)boost_score;
    }
  }

  // Special case for static / slide show content but don't apply
  // if the kf group is very short.
  if ((zero_motion_accumulator > STATIC_KF_GROUP_FLOAT_THRESH) &&
      (rc->frames_to_key > 8)) {
    p_rc->kf_boost = AOMMAX(p_rc->kf_boost, MIN_STATIC_KF_BOOST);
  } else {
    // Apply various clamps for min and max boost
    p_rc->kf_boost = AOMMAX(p_rc->kf_boost, (rc->frames_to_key * 3));
    p_rc->kf_boost = AOMMAX(p_rc->kf_boost, MIN_KF_BOOST);
#ifdef STRICT_RC
    p_rc->kf_boost = AOMMIN(p_rc->kf_boost, MAX_KF_BOOST);
#endif
  }

  // Work out how many bits to allocate for the key frame itself.
  // In case of LAP enabled for VBR, if the frames_to_key value is
  // very high, we calculate the bits based on a clipped value of
  // frames_to_key.
  kf_bits = calculate_boost_bits(
      AOMMIN(rc->frames_to_key, frames_to_key_clipped) - 1, p_rc->kf_boost,
      AOMMIN(twopass->kf_group_bits, kf_group_bits_clipped));
  // printf("kf boost = %d kf_bits = %d kf_zeromotion_pct = %d\n",
  //        p_rc->kf_boost,
  //        kf_bits, twopass->kf_zeromotion_pct);
  kf_bits = adjust_boost_bits_for_target_level(cpi, rc, kf_bits,
                                               twopass->kf_group_bits, 0);

  twopass->kf_group_bits -= kf_bits;

  // Save the bits to spend on the key frame.
  gf_group->bit_allocation[0] = kf_bits;
  gf_group->update_type[0] = KF_UPDATE;

  // Note the total error score of the kf group minus the key frame itself.
  if (cpi->ppi->lap_enabled)
    // As we don't have enough stats to know the actual error of the group,
    // we assume the complexity of each frame to be equal to 1, and set the
    // error as the number of frames in the group(minus the keyframe).
    twopass->kf_group_error_left = (double)(rc->frames_to_key - 1);
  else
    twopass->kf_group_error_left = kf_group_err - kf_mod_err;

  // Adjust the count of total modified error left.
  // The count of bits left is adjusted elsewhere based on real coded frame
  // sizes.
  twopass->modified_error_left -= kf_group_err;
}
3383
3384 #define ARF_STATS_OUTPUT 0
3385 #if ARF_STATS_OUTPUT
3386 unsigned int arf_count = 0;
3387 #endif
3388
/*!\brief Compute the target bandwidth (bits per frame) for the remaining
 * section of the clip.
 *
 * In LAP mode the running average frame bandwidth is used directly;
 * otherwise the remaining two-pass bit budget is divided evenly across the
 * frames left to encode.
 *
 * \param[in] cpi  Top-level encoder structure
 * \return Section target bandwidth in bits per frame.
 */
static int get_section_target_bandwidth(AV1_COMP *cpi) {
  AV1_COMMON *const cm = &cpi->common;
  CurrentFrame *const current_frame = &cm->current_frame;
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  int section_target_bandwidth;
  const int frames_left = (int)(twopass->stats_buf_ctx->total_stats->count -
                                current_frame->frame_number);
  if (cpi->ppi->lap_enabled) {
    section_target_bandwidth = (int)rc->avg_frame_bandwidth;
  } else if (frames_left > 0) {
    section_target_bandwidth = (int)(twopass->bits_left / frames_left);
  } else {
    // Guard against division by zero when the current frame number has
    // reached (or passed) the first pass frame count.
    section_target_bandwidth = 0;
  }
  return section_target_bandwidth;
}
3403
/*!\brief Derive per-frame two-pass parameters from a first pass stats record.
 *
 * Sets the frame's average MB energy, average wavelet (haar) energy and
 * content-type flag on cpi->twopass_frame. A NULL stats pointer is a no-op.
 */
static INLINE void set_twopass_params_based_on_fp_stats(
    AV1_COMP *cpi, const FIRSTPASS_STATS *this_frame_ptr) {
  if (this_frame_ptr == NULL) return;

  TWO_PASS_FRAME *twopass_frame = &cpi->twopass_frame;
  // Average MB energy on a log scale (+1.0 keeps the argument positive).
  // NOTE(review): an earlier comment here claimed a multiplication by 256
  // reversing a (>> 8) scaling, but no such scaling appears below —
  // presumably stale; confirm against the first pass accumulation code.
  twopass_frame->mb_av_energy = log((this_frame_ptr->intra_error) + 1.0);

  const FIRSTPASS_STATS *const total_stats =
      cpi->ppi->twopass.stats_buf_ctx->total_stats;
  // Only use the wavelet energy when the first pass produced a valid value.
  if (is_fp_wavelet_energy_invalid(total_stats) == 0) {
    twopass_frame->frame_avg_haar_energy =
        log((this_frame_ptr->frame_avg_wavelet_energy) + 1.0);
  }

  // Set the frame content type flag.
  if (this_frame_ptr->intra_skip_pct >= FC_ANIMATION_THRESH)
    twopass_frame->fr_content_type = FC_GRAPHICS_ANIMATION;
  else
    twopass_frame->fr_content_type = FC_NORMAL;
}
3426
/*!\brief Consume the next first pass stats record and update rate control.
 *
 * On the very first frame (non-AOM_Q modes, with totals available) this also
 * seeds the rate control state with an initial worst-case quality estimate
 * derived from the section-wide first pass totals. It then copies the next
 * stats record into \p this_frame, advances the stats read pointer, and sets
 * the per-frame two-pass parameters.
 */
static void process_first_pass_stats(AV1_COMP *cpi,
                                     FIRSTPASS_STATS *this_frame) {
  AV1_COMMON *const cm = &cpi->common;
  CurrentFrame *const current_frame = &cm->current_frame;
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FIRSTPASS_STATS *total_stats = twopass->stats_buf_ctx->total_stats;

  if (cpi->oxcf.rc_cfg.mode != AOM_Q && current_frame->frame_number == 0 &&
      cpi->gf_frame_index == 0 && total_stats &&
      twopass->stats_buf_ctx->total_left_stats) {
    if (cpi->ppi->lap_enabled) {
      /*
       * Accumulate total_stats using available limited number of stats,
       * and assign it to total_left_stats.
       */
      *twopass->stats_buf_ctx->total_left_stats = *total_stats;
    }
    // Special case code for first frame.
    const int section_target_bandwidth = get_section_target_bandwidth(cpi);
    const double section_length =
        twopass->stats_buf_ctx->total_left_stats->count;
    // Per-frame averages over the remaining section.
    const double section_error =
        twopass->stats_buf_ctx->total_left_stats->coded_error / section_length;
    const double section_intra_skip =
        twopass->stats_buf_ctx->total_left_stats->intra_skip_pct /
        section_length;
    const double section_inactive_zone =
        (twopass->stats_buf_ctx->total_left_stats->inactive_zone_rows * 2) /
        ((double)cm->mi_params.mb_rows * section_length);
    const int tmp_q = get_twopass_worst_quality(
        cpi, section_error, section_intra_skip + section_inactive_zone,
        section_target_bandwidth);

    // Seed all quality trackers with the section estimate; the key frame
    // entries are biased towards the best allowed quality.
    rc->active_worst_quality = tmp_q;
    rc->ni_av_qi = tmp_q;
    p_rc->last_q[INTER_FRAME] = tmp_q;
    p_rc->avg_q = av1_convert_qindex_to_q(tmp_q, cm->seq_params->bit_depth);
    p_rc->avg_frame_qindex[INTER_FRAME] = tmp_q;
    p_rc->last_q[KEY_FRAME] = (tmp_q + cpi->oxcf.rc_cfg.best_allowed_q) / 2;
    p_rc->avg_frame_qindex[KEY_FRAME] = p_rc->last_q[KEY_FRAME];
  }

  // Copy out the next stats record (if any remain) and advance the pointer.
  if (cpi->twopass_frame.stats_in < twopass->stats_buf_ctx->stats_in_end) {
    *this_frame = *cpi->twopass_frame.stats_in;
    ++cpi->twopass_frame.stats_in;
  }
  set_twopass_params_based_on_fp_stats(cpi, this_frame);
}
3477
/*!\brief Set the base target rate for the current frame from the GF group
 * bit allocation.
 */
static void setup_target_rate(AV1_COMP *cpi) {
  GF_GROUP *const gf_group = &cpi->ppi->gf_group;
  RATE_CONTROL *const rc = &cpi->rc;

  const int frame_target = gf_group->bit_allocation[cpi->gf_frame_index];

  // Without first pass stats the target must be pushed into the rate
  // control state right away.
  if (has_no_stats_stage(cpi)) {
    av1_rc_set_frame_target(cpi, frame_target, cpi->common.width,
                            cpi->common.height);
  }

  rc->base_frame_target = frame_target;
}
3491
/*!\brief Flag frames that look like flashes in a range of first pass stats.
 *
 * A frame is marked as a flash when the frame that follows it finds the
 * second (older) reference clearly more useful than the immediately
 * previous frame — i.e. the previous frame held a transient event.
 */
void av1_mark_flashes(FIRSTPASS_STATS *first_stats,
                      FIRSTPASS_STATS *last_stats) {
  for (FIRSTPASS_STATS *stats = first_stats; stats < last_stats - 1; ++stats) {
    const FIRSTPASS_STATS *const following = stats + 1;
    stats->is_flash = (following->pcnt_second_ref > following->pcnt_inter &&
                       following->pcnt_second_ref >= 0.5);
  }
  // The final frame is always treated as a non-flash.
  if (last_stats - 1 >= first_stats) {
    (last_stats - 1)->is_flash = 0;
  }
}
3510
// Estimate the noise variance of each frame from the first pass stats.
// Pass 1 computes a raw estimate per frame; passes 2 and 3 patch up frames
// whose estimate is untrustworthy (too small, or near a flash) by copying
// from the nearest trustworthy neighbor; finally the first two frames
// (which have no estimate of their own) inherit from the third.
void av1_estimate_noise(FIRSTPASS_STATS *first_stats,
                        FIRSTPASS_STATS *last_stats) {
  FIRSTPASS_STATS *this_stats, *next_stats;
  double C1, C2, C3, noise;
  for (this_stats = first_stats + 2; this_stats < last_stats; this_stats++) {
    this_stats->noise_var = 0.0;
    // flashes tend to have high correlation of innovations, so ignore them.
    if (this_stats->is_flash || (this_stats - 1)->is_flash ||
        (this_stats - 2)->is_flash)
      continue;

    // Cross-correlation style terms built from the intra / coded /
    // second-reference errors of this frame and its two predecessors.
    C1 = (this_stats - 1)->intra_error *
         (this_stats->intra_error - this_stats->coded_error);
    C2 = (this_stats - 2)->intra_error *
         ((this_stats - 1)->intra_error - (this_stats - 1)->coded_error);
    C3 = (this_stats - 2)->intra_error *
         (this_stats->intra_error - this_stats->sr_coded_error);
    // All terms must be positive: sqrt needs non-negative input and C3 is
    // used as a divisor below.
    if (C1 <= 0 || C2 <= 0 || C3 <= 0) continue;
    C1 = sqrt(C1);
    C2 = sqrt(C2);
    C3 = sqrt(C3);

    noise = (this_stats - 1)->intra_error - C1 * C2 / C3;
    // Clamp to a small positive floor.
    noise = AOMMAX(noise, 0.01);
    this_stats->noise_var = noise;
  }

  // Copy noise from the neighbor if the noise value is not trustworthy
  for (this_stats = first_stats + 2; this_stats < last_stats; this_stats++) {
    if (this_stats->is_flash || (this_stats - 1)->is_flash ||
        (this_stats - 2)->is_flash)
      continue;
    if (this_stats->noise_var < 1.0) {
      int found = 0;
      // TODO(bohanli): consider expanding to two directions at the same time
      // Search forward first for a trustworthy, non-flash estimate.
      for (next_stats = this_stats + 1; next_stats < last_stats; next_stats++) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash || next_stats->noise_var < 1.0)
          continue;
        found = 1;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
      if (found) continue;
      // Otherwise search backward (estimates only exist from index 2 on).
      for (next_stats = this_stats - 1; next_stats >= first_stats + 2;
           next_stats--) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash || next_stats->noise_var < 1.0)
          continue;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
    }
  }

  // copy the noise if this is a flash
  for (this_stats = first_stats + 2; this_stats < last_stats; this_stats++) {
    if (this_stats->is_flash || (this_stats - 1)->is_flash ||
        (this_stats - 2)->is_flash) {
      int found = 0;
      for (next_stats = this_stats + 1; next_stats < last_stats; next_stats++) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash)
          continue;
        found = 1;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
      if (found) continue;
      for (next_stats = this_stats - 1; next_stats >= first_stats + 2;
           next_stats--) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash)
          continue;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
    }
  }

  // if we are at the first 2 frames, copy the noise
  for (this_stats = first_stats;
       this_stats < first_stats + 2 && (first_stats + 2) < last_stats;
       this_stats++) {
    this_stats->noise_var = (first_stats + 2)->noise_var;
  }
}
3599
3600 // Estimate correlation coefficient of each frame with its previous frame.
av1_estimate_coeff(FIRSTPASS_STATS * first_stats,FIRSTPASS_STATS * last_stats)3601 void av1_estimate_coeff(FIRSTPASS_STATS *first_stats,
3602 FIRSTPASS_STATS *last_stats) {
3603 FIRSTPASS_STATS *this_stats;
3604 for (this_stats = first_stats + 1; this_stats < last_stats; this_stats++) {
3605 const double C =
3606 sqrt(AOMMAX((this_stats - 1)->intra_error *
3607 (this_stats->intra_error - this_stats->coded_error),
3608 0.001));
3609 const double cor_coeff =
3610 C /
3611 AOMMAX((this_stats - 1)->intra_error - this_stats->noise_var, 0.001);
3612
3613 this_stats->cor_coeff =
3614 cor_coeff *
3615 sqrt(AOMMAX((this_stats - 1)->intra_error - this_stats->noise_var,
3616 0.001) /
3617 AOMMAX(this_stats->intra_error - this_stats->noise_var, 0.001));
3618 // clip correlation coefficient.
3619 this_stats->cor_coeff = AOMMIN(AOMMAX(this_stats->cor_coeff, 0), 1);
3620 }
3621 first_stats->cor_coeff = 1.0;
3622 }
3623
// Per-frame entry point of two pass (and LAP) rate control: detects key
// frame and GF group boundaries, defines new KF / GF groups as needed, and
// sets the frame type and target rate for the frame about to be encoded.
void av1_get_second_pass_params(AV1_COMP *cpi,
                                EncodeFrameParams *const frame_params,
                                unsigned int frame_flags) {
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  GF_GROUP *const gf_group = &cpi->ppi->gf_group;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;

  // In externally-driven (rate control library) GOP mode the structure is
  // already decided; only frame type and visibility need to be set.
  if (cpi->use_ducky_encode &&
      cpi->ducky_encode_info.frame_info.gop_mode == DUCKY_ENCODE_GOP_MODE_RCL) {
    frame_params->frame_type = gf_group->frame_type[cpi->gf_frame_index];
    frame_params->show_frame =
        !(gf_group->update_type[cpi->gf_frame_index] == ARF_UPDATE ||
          gf_group->update_type[cpi->gf_frame_index] == INTNL_ARF_UPDATE);
    return;
  }

  const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
  int update_total_stats = 0;

  if (is_stat_consumption_stage(cpi) && !cpi->twopass_frame.stats_in) return;

  // Check forced key frames.
  const int frames_to_next_forced_key = detect_app_forced_key(cpi);
  if (frames_to_next_forced_key == 0) {
    rc->frames_to_key = 0;
    frame_flags &= FRAMEFLAGS_KEY;
  } else if (frames_to_next_forced_key > 0 &&
             frames_to_next_forced_key < rc->frames_to_key) {
    rc->frames_to_key = frames_to_next_forced_key;
  }

  assert(cpi->twopass_frame.stats_in != NULL);
  const int update_type = gf_group->update_type[cpi->gf_frame_index];
  frame_params->frame_type = gf_group->frame_type[cpi->gf_frame_index];

  // If we are still inside the current GF group and no key frame is forced,
  // the frame's parameters are already decided.
  if (cpi->gf_frame_index < gf_group->size && !(frame_flags & FRAMEFLAGS_KEY)) {
    assert(cpi->gf_frame_index < gf_group->size);

    setup_target_rate(cpi);

    // If this is an arf frame then we dont want to read the stats file or
    // advance the input pointer as we already have what we need.
    if (update_type == ARF_UPDATE || update_type == INTNL_ARF_UPDATE) {
      const FIRSTPASS_STATS *const this_frame_ptr =
          read_frame_stats(twopass, &cpi->twopass_frame,
                           gf_group->arf_src_offset[cpi->gf_frame_index]);
      set_twopass_params_based_on_fp_stats(cpi, this_frame_ptr);
      return;
    }
  }

  if (oxcf->rc_cfg.mode == AOM_Q)
    rc->active_worst_quality = oxcf->rc_cfg.cq_level;
  FIRSTPASS_STATS this_frame;
  av1_zero(this_frame);
  // call above fn
  if (is_stat_consumption_stage(cpi)) {
    if (cpi->gf_frame_index < gf_group->size || rc->frames_to_key == 0) {
      process_first_pass_stats(cpi, &this_frame);
      update_total_stats = 1;
    }
  } else {
    rc->active_worst_quality = oxcf->rc_cfg.cq_level;
  }

  // At a GF group boundary in LAP mode, optionally pull the key frame
  // closer if scene-cut detection finds a break within the look-ahead.
  if (cpi->gf_frame_index == gf_group->size) {
    if (cpi->ppi->lap_enabled && cpi->ppi->p_rc.enable_scenecut_detection) {
      const int num_frames_to_detect_scenecut = MAX_GF_LENGTH_LAP + 1;
      const int frames_to_key = define_kf_interval(
          cpi, &twopass->firstpass_info, num_frames_to_detect_scenecut,
          /*search_start_idx=*/0);
      if (frames_to_key != -1)
        rc->frames_to_key = AOMMIN(rc->frames_to_key, frames_to_key);
    }
  }

  // Keyframe and section processing.
  // Keep a copy of the entry stats; find_next_key_frame() advances the
  // stats stream and this_frame is restored afterwards.
  FIRSTPASS_STATS this_frame_copy;
  this_frame_copy = this_frame;
  if (rc->frames_to_key <= 0) {
    assert(rc->frames_to_key == 0);
    // Define next KF group and assign bits to it.
    frame_params->frame_type = KEY_FRAME;
    find_next_key_frame(cpi, &this_frame);
    this_frame = this_frame_copy;
  }

  if (rc->frames_to_fwd_kf <= 0)
    rc->frames_to_fwd_kf = oxcf->kf_cfg.fwd_kf_dist;

  // Define a new GF/ARF group. (Should always enter here for key frames).
  if (cpi->gf_frame_index == gf_group->size) {
    av1_tf_info_reset(&cpi->ppi->tf_info);
#if CONFIG_BITRATE_ACCURACY && !CONFIG_THREE_PASS
    vbr_rc_reset_gop_data(&cpi->vbr_rc_info);
#endif  // CONFIG_BITRATE_ACCURACY
    // Upper bound on GOP length, limited by the available look-ahead
    // (minus temporal filtering lookahead) when the lag is large enough.
    int max_gop_length =
        (oxcf->gf_cfg.lag_in_frames >= 32)
            ? AOMMIN(MAX_GF_INTERVAL, oxcf->gf_cfg.lag_in_frames -
                                          oxcf->algo_cfg.arnr_max_frames / 2)
            : MAX_GF_LENGTH_LAP;

    // Handle forward key frame when enabled.
    if (oxcf->kf_cfg.fwd_kf_dist > 0)
      max_gop_length = AOMMIN(rc->frames_to_fwd_kf + 1, max_gop_length);

    // Use the provided gop size in low delay setting
    if (oxcf->gf_cfg.lag_in_frames == 0) max_gop_length = rc->max_gf_interval;

    // Limit the max gop length for the last gop in 1 pass setting.
    max_gop_length = AOMMIN(max_gop_length, rc->frames_to_key);

    // Identify regions if needed.
    // TODO(bohanli): identify regions for all stats available.
    if (rc->frames_since_key == 0 || rc->frames_since_key == 1 ||
        (p_rc->frames_till_regions_update - rc->frames_since_key <
             rc->frames_to_key &&
         p_rc->frames_till_regions_update - rc->frames_since_key <
             max_gop_length + 1)) {
      // how many frames we can analyze from this frame
      int rest_frames =
          AOMMIN(rc->frames_to_key, MAX_FIRSTPASS_ANALYSIS_FRAMES);
      rest_frames =
          AOMMIN(rest_frames, (int)(twopass->stats_buf_ctx->stats_in_end -
                                    cpi->twopass_frame.stats_in +
                                    (rc->frames_since_key == 0)));
      p_rc->frames_till_regions_update = rest_frames;

      if (cpi->ppi->lap_enabled) {
        // In LAP mode new stats keep arriving, so flash / noise /
        // correlation estimates must be refreshed before region analysis.
        av1_mark_flashes(twopass->stats_buf_ctx->stats_in_start,
                         twopass->stats_buf_ctx->stats_in_end);
        av1_estimate_noise(twopass->stats_buf_ctx->stats_in_start,
                           twopass->stats_buf_ctx->stats_in_end);
        av1_estimate_coeff(twopass->stats_buf_ctx->stats_in_start,
                           twopass->stats_buf_ctx->stats_in_end);
        av1_identify_regions(cpi->twopass_frame.stats_in, rest_frames,
                             (rc->frames_since_key == 0), p_rc->regions,
                             &p_rc->num_regions);
      } else {
        av1_identify_regions(
            cpi->twopass_frame.stats_in - (rc->frames_since_key == 0),
            rest_frames, 0, p_rc->regions, &p_rc->num_regions);
      }
    }

    int cur_region_idx =
        find_regions_index(p_rc->regions, p_rc->num_regions,
                           rc->frames_since_key - p_rc->regions_offset);
    if ((cur_region_idx >= 0 &&
         p_rc->regions[cur_region_idx].type == SCENECUT_REGION) ||
        rc->frames_since_key == 0) {
      // If we start from a scenecut, then the last GOP's arf boost is not
      // needed for this GOP.
      cpi->ppi->gf_state.arf_gf_boost_lst = 0;
    }

    int need_gf_len = 1;
    if (cpi->third_pass_ctx && oxcf->pass == AOM_RC_THIRD_PASS) {
      // In the third pass the GF length comes from the second pass log,
      // so local GF-length calculation is skipped (need_gf_len = 0).
      // set up bitstream to read
      if (!cpi->third_pass_ctx->input_file_name && oxcf->two_pass_output) {
        cpi->third_pass_ctx->input_file_name = oxcf->two_pass_output;
      }
      av1_open_second_pass_log(cpi, 1);
      THIRD_PASS_GOP_INFO *gop_info = &cpi->third_pass_ctx->gop_info;
      // Read in GOP information from the second pass file.
      av1_read_second_pass_gop_info(cpi->second_pass_log_stream, gop_info,
                                    cpi->common.error);
#if CONFIG_BITRATE_ACCURACY
      TPL_INFO *tpl_info;
      AOM_CHECK_MEM_ERROR(cpi->common.error, tpl_info,
                          aom_malloc(sizeof(*tpl_info)));
      av1_read_tpl_info(tpl_info, cpi->second_pass_log_stream,
                        cpi->common.error);
      aom_free(tpl_info);
#if CONFIG_THREE_PASS
      // TODO(angiebird): Put this part into a func
      cpi->vbr_rc_info.cur_gop_idx++;
#endif  // CONFIG_THREE_PASS
#endif  // CONFIG_BITRATE_ACCURACY
      // Read in third_pass_info from the bitstream.
      av1_set_gop_third_pass(cpi->third_pass_ctx);
      // Read in per-frame info from second-pass encoding
      av1_read_second_pass_per_frame_info(
          cpi->second_pass_log_stream, cpi->third_pass_ctx->frame_info,
          gop_info->num_frames, cpi->common.error);

      p_rc->cur_gf_index = 0;
      p_rc->gf_intervals[0] = cpi->third_pass_ctx->gop_info.gf_length;
      need_gf_len = 0;
    }

    if (need_gf_len) {
      // If we cannot obtain GF group length from second_pass_file
      // TODO(jingning): Resolve the redundant calls here.
      // NOTE(review): the "|| 1" makes this unconditional — see TODO above.
      if (rc->intervals_till_gf_calculate_due == 0 || 1) {
        calculate_gf_length(cpi, max_gop_length, MAX_NUM_GF_INTERVALS);
      }

      // For long GOPs, trial-encode decision: compare the long GF interval
      // against a shortened (<=16) alternative using the TPL model.
      if (max_gop_length > 16 && oxcf->algo_cfg.enable_tpl_model &&
          oxcf->gf_cfg.lag_in_frames >= 32 &&
          cpi->sf.tpl_sf.gop_length_decision_method != 3) {
        int this_idx = rc->frames_since_key +
                       p_rc->gf_intervals[p_rc->cur_gf_index] -
                       p_rc->regions_offset - 1;
        int this_region =
            find_regions_index(p_rc->regions, p_rc->num_regions, this_idx);
        int next_region =
            find_regions_index(p_rc->regions, p_rc->num_regions, this_idx + 1);
        // TODO(angiebird): Figure out why this_region and next_region are -1 in
        // unit test like AltRefFramePresenceTestLarge (aomedia:3134)
        int is_last_scenecut =
            p_rc->gf_intervals[p_rc->cur_gf_index] >= rc->frames_to_key ||
            (this_region != -1 &&
             p_rc->regions[this_region].type == SCENECUT_REGION) ||
            (next_region != -1 &&
             p_rc->regions[next_region].type == SCENECUT_REGION);

        int ori_gf_int = p_rc->gf_intervals[p_rc->cur_gf_index];

        if (p_rc->gf_intervals[p_rc->cur_gf_index] > 16 &&
            rc->min_gf_interval <= 16) {
          // The calculate_gf_length function is previously used with
          // max_gop_length = 32 with look-ahead gf intervals.
          define_gf_group(cpi, frame_params, 0);
          av1_tf_info_filtering(&cpi->ppi->tf_info, cpi, gf_group);
          this_frame = this_frame_copy;

          if (is_shorter_gf_interval_better(cpi, frame_params)) {
            // A shorter gf interval is better.
            // TODO(jingning): Remove redundant computations here.
            max_gop_length = 16;
            calculate_gf_length(cpi, max_gop_length, 1);
            // Keep the long interval when shortening would leave a tiny
            // tail GOP right before a scenecut / key frame.
            if (is_last_scenecut &&
                (ori_gf_int - p_rc->gf_intervals[p_rc->cur_gf_index] < 4)) {
              p_rc->gf_intervals[p_rc->cur_gf_index] = ori_gf_int;
            }
          }
        }
      }
    }

    define_gf_group(cpi, frame_params, 0);

    if (gf_group->update_type[cpi->gf_frame_index] != ARF_UPDATE &&
        rc->frames_since_key > 0)
      process_first_pass_stats(cpi, &this_frame);

    // Final pass of GF group definition with the updated stats position.
    define_gf_group(cpi, frame_params, 1);

    // write gop info if needed for third pass. Per-frame info is written after
    // each frame is encoded.
    av1_write_second_pass_gop_info(cpi);

    av1_tf_info_filtering(&cpi->ppi->tf_info, cpi, gf_group);

    rc->frames_till_gf_update_due = p_rc->baseline_gf_interval;
    assert(cpi->gf_frame_index == 0);
#if ARF_STATS_OUTPUT
    {
      FILE *fpfile;
      fpfile = fopen("arf.stt", "a");
      ++arf_count;
      fprintf(fpfile, "%10d %10d %10d %10d %10d\n",
              cpi->common.current_frame.frame_number,
              rc->frames_till_gf_update_due, cpi->ppi->p_rc.kf_boost, arf_count,
              p_rc->gfu_boost);

      fclose(fpfile);
    }
#endif
  }
  assert(cpi->gf_frame_index < gf_group->size);

  if (gf_group->update_type[cpi->gf_frame_index] == ARF_UPDATE ||
      gf_group->update_type[cpi->gf_frame_index] == INTNL_ARF_UPDATE) {
    // ARF frames: rewind and fetch the stats of the source frame the ARF
    // points at, without consuming stats from the stream.
    reset_fpf_position(&cpi->twopass_frame, start_pos);

    const FIRSTPASS_STATS *const this_frame_ptr =
        read_frame_stats(twopass, &cpi->twopass_frame,
                         gf_group->arf_src_offset[cpi->gf_frame_index]);
    set_twopass_params_based_on_fp_stats(cpi, this_frame_ptr);
  } else {
    // Back up this frame's stats for updating total stats during post encode.
    cpi->twopass_frame.this_frame = update_total_stats ? start_pos : NULL;
  }

  frame_params->frame_type = gf_group->frame_type[cpi->gf_frame_index];
  setup_target_rate(cpi);
}
3915
/*!\brief Initialize second (final) pass rate control state.
 *
 * Runs the whole-clip pre-analysis passes over the first pass stats
 * (flash detection, noise and coefficient estimation), derives the clip
 * frame rate and global bit budget from the accumulated totals, and seeds
 * the modified-error accounting used for per-frame bit allocation.
 *
 * \param[in,out] cpi  Top level encoder instance; no-op if no first pass
 *                     stats are available.
 */
void av1_init_second_pass(AV1_COMP *cpi) {
  const AV1EncoderConfig *const enc_cfg = &cpi->oxcf;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FRAME_INFO *const frame_info = &cpi->frame_info;

  if (!twopass->stats_buf_ctx->stats_in_end) return;

  // Whole-clip pre-analysis over the first pass stats buffer.
  av1_mark_flashes(twopass->stats_buf_ctx->stats_in_start,
                   twopass->stats_buf_ctx->stats_in_end);
  av1_estimate_noise(twopass->stats_buf_ctx->stats_in_start,
                     twopass->stats_buf_ctx->stats_in_end);
  av1_estimate_coeff(twopass->stats_buf_ctx->stats_in_start,
                     twopass->stats_buf_ctx->stats_in_end);

  // The entry at stats_in_end carries the accumulated totals for the clip.
  FIRSTPASS_STATS *const totals = twopass->stats_buf_ctx->total_stats;
  *totals = *twopass->stats_buf_ctx->stats_in_end;
  *twopass->stats_buf_ctx->total_left_stats = *totals;

  // Each frame can have a different duration, as the frame rate in the
  // source isn't guaranteed to be constant. The frame rate prior to the
  // first frame encoded in the second pass is a guess. However, the sum
  // duration is not: it is calculated from the actual durations of all
  // frames seen in the first pass.
  const double clip_frame_rate = 10000000.0 * totals->count / totals->duration;
  av1_new_framerate(cpi, clip_frame_rate);
  twopass->bits_left = (int64_t)(totals->duration *
                                 enc_cfg->rc_cfg.target_bandwidth / 10000000.0);

#if CONFIG_BITRATE_ACCURACY
  av1_vbr_rc_init(&cpi->vbr_rc_info, twopass->bits_left,
                  (int)round(totals->count));
#endif

#if CONFIG_RATECTRL_LOG
  rc_log_init(&cpi->rc_log);
#endif

  // This variable monitors how far behind the second ref update is lagging.
  twopass->sr_update_lag = 1;

  // Scan the first pass file and calculate a modified total error based
  // upon the bias/power function used to allocate bits.
  {
    const double avg_error =
        totals->coded_error / DOUBLE_DIVIDE_CHECK(totals->count);
    twopass->modified_error_min =
        (avg_error * enc_cfg->rc_cfg.vbrmin_section) / 100;
    twopass->modified_error_max =
        (avg_error * enc_cfg->rc_cfg.vbrmax_section) / 100;
    double total_modified_error = 0.0;
    for (const FIRSTPASS_STATS *s = cpi->twopass_frame.stats_in;
         s < twopass->stats_buf_ctx->stats_in_end; ++s) {
      total_modified_error +=
          calculate_modified_err(frame_info, twopass, enc_cfg, s);
    }
    twopass->modified_error_left = total_modified_error;
  }

  // Reset the vbr bits off target counters.
  cpi->ppi->p_rc.vbr_bits_off_target = 0;
  cpi->ppi->p_rc.vbr_bits_off_target_fast = 0;
  cpi->ppi->p_rc.rate_error_estimate = 0;

  // Static sequence monitor variables.
  twopass->kf_zeromotion_pct = 100;
  twopass->last_kfgroup_zeromotion_pct = 100;

  // Initialize bits per macro_block estimate correction factor, and give
  // the ARF-group actual/target bit counters a neutral starting ratio.
  twopass->bpm_factor = 1.0;
  twopass->rolling_arf_group_target_bits = 1;
  twopass->rolling_arf_group_actual_bits = 1;
}
3995
/*!\brief Initialize rate control state for single pass with look-ahead
 * processing (LAP).
 *
 * Unlike the full second pass there is no complete stats file, so the
 * global bit budget and modified-error totals are simply zeroed; only the
 * per-sequence monitors and correction factors are seeded.
 *
 * \param[in,out] cpi  Top level encoder instance; no-op if no stats
 *                     buffer is available.
 */
void av1_init_single_pass_lap(AV1_COMP *cpi) {
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;

  if (!twopass->stats_buf_ctx->stats_in_end) return;

  // This variable monitors how far behind the second ref update is lagging.
  twopass->sr_update_lag = 1;

  // No whole-clip stats: no global bit budget or modified error bounds.
  twopass->bits_left = 0;
  twopass->modified_error_min = 0.0;
  twopass->modified_error_max = 0.0;
  twopass->modified_error_left = 0.0;

  // Reset the vbr bits off target counters.
  p_rc->vbr_bits_off_target = 0;
  p_rc->vbr_bits_off_target_fast = 0;
  p_rc->rate_error_estimate = 0;

  // Static sequence monitor variables.
  twopass->kf_zeromotion_pct = 100;
  twopass->last_kfgroup_zeromotion_pct = 100;

  // Initialize bits per macro_block estimate correction factor, and give
  // the ARF-group actual/target bit counters a neutral starting ratio.
  twopass->bpm_factor = 1.0;
  twopass->rolling_arf_group_target_bits = 1;
  twopass->rolling_arf_group_actual_bits = 1;
}
4026
// Limits on the cumulative min-Q adjustment (extend_minq) applied when rate
// control is drifting; the constrained-quality (CQ) mode uses a tighter cap.
#define MINQ_ADJ_LIMIT 48
#define MINQ_ADJ_LIMIT_CQ 20
// A frame counts as a big undershoot when the target is more than this
// multiple of the projected coded size.
#define HIGH_UNDERSHOOT_RATIO 2
/*!\brief Per-frame post-encode bookkeeping for two-pass rate control.
 *
 * Advances the first pass stats read position, accumulates VBR bits-off-
 * target and ARF-group target/actual bit counters, updates the rate error
 * estimate and the active-best-quality pyramid, adjusts min/max Q extension
 * when rate control drifts, and sequentially folds in frame probability
 * updates gathered from (possibly parallel) encoded frames.
 *
 * \param[in,out] cpi  Top level encoder instance.
 */
void av1_twopass_postencode_update(AV1_COMP *cpi) {
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const RateControlCfg *const rc_cfg = &cpi->oxcf.rc_cfg;

  // Increment the stats_in pointer. Skipped for ARF/internal-ARF frames,
  // whose stats were consumed via arf_src_offset, and for ducky-encode RCL
  // GOP mode which manages stats externally.
  if (is_stat_consumption_stage(cpi) &&
      !(cpi->use_ducky_encode && cpi->ducky_encode_info.frame_info.gop_mode ==
                                     DUCKY_ENCODE_GOP_MODE_RCL) &&
      (cpi->gf_frame_index < cpi->ppi->gf_group.size ||
       rc->frames_to_key == 0)) {
    const int update_type = cpi->ppi->gf_group.update_type[cpi->gf_frame_index];
    if (update_type != ARF_UPDATE && update_type != INTNL_ARF_UPDATE) {
      FIRSTPASS_STATS this_frame;
      assert(cpi->twopass_frame.stats_in >
             twopass->stats_buf_ctx->stats_in_start);
      --cpi->twopass_frame.stats_in;
      if (cpi->ppi->lap_enabled) {
        input_stats_lap(twopass, &cpi->twopass_frame, &this_frame);
      } else {
        input_stats(twopass, &cpi->twopass_frame, &this_frame);
      }
    } else if (cpi->ppi->lap_enabled) {
      cpi->twopass_frame.stats_in = twopass->stats_buf_ctx->stats_in_start;
    }
  }

  // VBR correction is done through p_rc->vbr_bits_off_target. Based on the
  // sign of this value, a limited % adjustment is made to the target rate
  // of subsequent frames, to try and push it back towards 0. This method
  // is designed to prevent extreme behaviour at the end of a clip
  // or group of frames.
  p_rc->vbr_bits_off_target += rc->base_frame_target - rc->projected_frame_size;
  twopass->bits_left = AOMMAX(twopass->bits_left - rc->base_frame_target, 0);

  if (cpi->do_update_vbr_bits_off_target_fast) {
    // Subtract current frame's fast_extra_bits.
    p_rc->vbr_bits_off_target_fast -= rc->frame_level_fast_extra_bits;
    rc->frame_level_fast_extra_bits = 0;
  }

  // Target vs actual bits for this arf group.
  twopass->rolling_arf_group_target_bits += rc->base_frame_target;
  twopass->rolling_arf_group_actual_bits += rc->projected_frame_size;

  // Calculate the pct rc error, clamped to +/-100%.
  if (p_rc->total_actual_bits) {
    p_rc->rate_error_estimate =
        (int)((p_rc->vbr_bits_off_target * 100) / p_rc->total_actual_bits);
    p_rc->rate_error_estimate = clamp(p_rc->rate_error_estimate, -100, 100);
  } else {
    p_rc->rate_error_estimate = 0;
  }

#if CONFIG_FPMT_TEST
  /* The variables temp_vbr_bits_off_target, temp_bits_left,
   * temp_rolling_arf_group_target_bits, temp_rolling_arf_group_actual_bits
   * temp_rate_error_estimate are introduced for quality simulation purpose,
   * it retains the value previous to the parallel encode frames. The
   * variables are updated based on the update flag.
   *
   * If there exist show_existing_frames between parallel frames, then to
   * retain the temp state do not update it. */
  const int simulate_parallel_frame =
      cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE;
  int show_existing_between_parallel_frames =
      (cpi->ppi->gf_group.update_type[cpi->gf_frame_index] ==
           INTNL_OVERLAY_UPDATE &&
       cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index + 1] == 2);

  if (cpi->do_frame_data_update && !show_existing_between_parallel_frames &&
      simulate_parallel_frame) {
    cpi->ppi->p_rc.temp_vbr_bits_off_target = p_rc->vbr_bits_off_target;
    cpi->ppi->p_rc.temp_bits_left = twopass->bits_left;
    cpi->ppi->p_rc.temp_rolling_arf_group_target_bits =
        twopass->rolling_arf_group_target_bits;
    cpi->ppi->p_rc.temp_rolling_arf_group_actual_bits =
        twopass->rolling_arf_group_actual_bits;
    cpi->ppi->p_rc.temp_rate_error_estimate = p_rc->rate_error_estimate;
  }
#endif
  // Update the active best quality pyramid: this frame's base Q index
  // becomes the floor for its own layer and all deeper layers.
  if (!rc->is_src_frame_alt_ref) {
    const int pyramid_level =
        cpi->ppi->gf_group.layer_depth[cpi->gf_frame_index];
    int i;
    for (i = pyramid_level; i <= MAX_ARF_LAYERS; ++i) {
      p_rc->active_best_quality[i] = cpi->common.quant_params.base_qindex;
#if CONFIG_TUNE_VMAF
      // When tuning for VMAF, record the pre-adjustment Q index instead.
      if (cpi->vmaf_info.original_qindex != -1 &&
          (cpi->oxcf.tune_cfg.tuning >= AOM_TUNE_VMAF_WITH_PREPROCESSING &&
           cpi->oxcf.tune_cfg.tuning <= AOM_TUNE_VMAF_NEG_MAX_GAIN)) {
        p_rc->active_best_quality[i] = cpi->vmaf_info.original_qindex;
      }
#endif
    }
  }

  // Developer-only rate control trace, disabled by default.
#if 0
  {
    AV1_COMMON *cm = &cpi->common;
    FILE *fpfile;
    fpfile = fopen("details.stt", "a");
    fprintf(fpfile,
            "%10d %10d %10d %10" PRId64 " %10" PRId64
            " %10d %10d %10d %10.4lf %10.4lf %10.4lf %10.4lf\n",
            cm->current_frame.frame_number, rc->base_frame_target,
            rc->projected_frame_size, rc->total_actual_bits,
            rc->vbr_bits_off_target, p_rc->rate_error_estimate,
            twopass->rolling_arf_group_target_bits,
            twopass->rolling_arf_group_actual_bits,
            (double)twopass->rolling_arf_group_actual_bits /
                (double)twopass->rolling_arf_group_target_bits,
            twopass->bpm_factor,
            av1_convert_qindex_to_q(cpi->common.quant_params.base_qindex,
                                    cm->seq_params->bit_depth),
            av1_convert_qindex_to_q(rc->active_worst_quality,
                                    cm->seq_params->bit_depth));
    fclose(fpfile);
  }
#endif

  // Charge this frame's target against the key frame group bit budget.
  if (cpi->common.current_frame.frame_type != KEY_FRAME) {
    twopass->kf_group_bits -= rc->base_frame_target;
    twopass->last_kfgroup_zeromotion_pct = twopass->kf_zeromotion_pct;
  }
  twopass->kf_group_bits = AOMMAX(twopass->kf_group_bits, 0);

  // If the rate control is drifting consider adjustment to min or maxq.
  if ((rc_cfg->mode != AOM_Q) && !cpi->rc.is_src_frame_alt_ref) {
    int maxq_adj_limit;
    int minq_adj_limit;
    maxq_adj_limit = rc->worst_quality - rc->active_worst_quality;
    minq_adj_limit =
        (rc_cfg->mode == AOM_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT);
    // Undershoot.
    if (p_rc->rate_error_estimate > rc_cfg->under_shoot_pct) {
      --twopass->extend_maxq;
      if (p_rc->rolling_target_bits >= p_rc->rolling_actual_bits)
        ++twopass->extend_minq;
      // Overshoot.
    } else if (p_rc->rate_error_estimate < -rc_cfg->over_shoot_pct) {
      --twopass->extend_minq;
      if (p_rc->rolling_target_bits < p_rc->rolling_actual_bits)
        ++twopass->extend_maxq;
    } else {
      // Adjustment for extreme local overshoot.
      if (rc->projected_frame_size > (2 * rc->base_frame_target) &&
          rc->projected_frame_size > (2 * rc->avg_frame_bandwidth))
        ++twopass->extend_maxq;
      // Unwind undershoot or overshoot adjustment.
      if (p_rc->rolling_target_bits < p_rc->rolling_actual_bits)
        --twopass->extend_minq;
      else if (p_rc->rolling_target_bits > p_rc->rolling_actual_bits)
        --twopass->extend_maxq;
    }
    twopass->extend_minq = clamp(twopass->extend_minq, 0, minq_adj_limit);
    twopass->extend_maxq = clamp(twopass->extend_maxq, 0, maxq_adj_limit);

    // If there is a big and unexpected undershoot then feed the extra
    // bits back in quickly. One situation where this may happen is if a
    // frame is unexpectedly almost perfectly predicted by the ARF or GF
    // but not very well predicted by the previous frame.
    if (!frame_is_kf_gf_arf(cpi) && !cpi->rc.is_src_frame_alt_ref) {
      int fast_extra_thresh = rc->base_frame_target / HIGH_UNDERSHOOT_RATIO;
      if (rc->projected_frame_size < fast_extra_thresh) {
        p_rc->vbr_bits_off_target_fast +=
            fast_extra_thresh - rc->projected_frame_size;
        p_rc->vbr_bits_off_target_fast = AOMMIN(p_rc->vbr_bits_off_target_fast,
                                                (4 * rc->avg_frame_bandwidth));

        // Fast adaptation of minQ if necessary to use up the extra bits.
        if (rc->avg_frame_bandwidth) {
          twopass->extend_minq_fast = (int)(p_rc->vbr_bits_off_target_fast * 8 /
                                            rc->avg_frame_bandwidth);
        }
        twopass->extend_minq_fast = AOMMIN(
            twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq);
      } else if (p_rc->vbr_bits_off_target_fast) {
        twopass->extend_minq_fast = AOMMIN(
            twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq);
      } else {
        twopass->extend_minq_fast = 0;
      }
    }

#if CONFIG_FPMT_TEST
    if (cpi->do_frame_data_update && !show_existing_between_parallel_frames &&
        simulate_parallel_frame) {
      cpi->ppi->p_rc.temp_vbr_bits_off_target_fast =
          p_rc->vbr_bits_off_target_fast;
      cpi->ppi->p_rc.temp_extend_minq = twopass->extend_minq;
      cpi->ppi->p_rc.temp_extend_maxq = twopass->extend_maxq;
      cpi->ppi->p_rc.temp_extend_minq_fast = twopass->extend_minq_fast;
    }
#endif
  }

  // Update the frame probabilities obtained from parallel encode frames
  FrameProbInfo *const frame_probs = &cpi->ppi->frame_probs;
#if CONFIG_FPMT_TEST
  /* The variable temp_active_best_quality is introduced only for quality
   * simulation purpose, it retains the value previous to the parallel
   * encode frames. The variable is updated based on the update flag.
   *
   * If there exist show_existing_frames between parallel frames, then to
   * retain the temp state do not update it. */
  if (cpi->do_frame_data_update && !show_existing_between_parallel_frames &&
      simulate_parallel_frame) {
    int i;
    const int pyramid_level =
        cpi->ppi->gf_group.layer_depth[cpi->gf_frame_index];
    if (!rc->is_src_frame_alt_ref) {
      for (i = pyramid_level; i <= MAX_ARF_LAYERS; ++i)
        cpi->ppi->p_rc.temp_active_best_quality[i] =
            p_rc->active_best_quality[i];
    }
  }

  // Update the frame probabilities obtained from parallel encode frames
  FrameProbInfo *const temp_frame_probs_simulation =
      simulate_parallel_frame ? &cpi->ppi->temp_frame_probs_simulation
                              : frame_probs;
  FrameProbInfo *const temp_frame_probs =
      simulate_parallel_frame ? &cpi->ppi->temp_frame_probs : NULL;
#endif
  int i, j, loop;
  // Sequentially do average on temp_frame_probs_simulation which holds
  // probabilities of last frame before parallel encode
  for (loop = 0; loop <= cpi->num_frame_recode; loop++) {
    // Sequentially update tx_type_probs. The per-context probabilities are
    // averaged with the new frame's; the final entry (j == 0) absorbs the
    // rounding remainder so each row still sums to 1024.
    if (cpi->do_update_frame_probs_txtype[loop] &&
        (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0)) {
      const FRAME_UPDATE_TYPE update_type =
          get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
      for (i = 0; i < TX_SIZES_ALL; i++) {
        int left = 1024;

        for (j = TX_TYPES - 1; j >= 0; j--) {
          const int new_prob =
              cpi->frame_new_probs[loop].tx_type_probs[update_type][i][j];
#if CONFIG_FPMT_TEST
          int prob =
              (temp_frame_probs_simulation->tx_type_probs[update_type][i][j] +
               new_prob) >>
              1;
          left -= prob;
          if (j == 0) prob += left;
          temp_frame_probs_simulation->tx_type_probs[update_type][i][j] = prob;
#else
          int prob =
              (frame_probs->tx_type_probs[update_type][i][j] + new_prob) >> 1;
          left -= prob;
          if (j == 0) prob += left;
          frame_probs->tx_type_probs[update_type][i][j] = prob;
#endif
        }
      }
    }

    // Sequentially update obmc_probs
    if (cpi->do_update_frame_probs_obmc[loop] &&
        cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
      const FRAME_UPDATE_TYPE update_type =
          get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);

      for (i = 0; i < BLOCK_SIZES_ALL; i++) {
        const int new_prob =
            cpi->frame_new_probs[loop].obmc_probs[update_type][i];
#if CONFIG_FPMT_TEST
        temp_frame_probs_simulation->obmc_probs[update_type][i] =
            (temp_frame_probs_simulation->obmc_probs[update_type][i] +
             new_prob) >>
            1;
#else
        frame_probs->obmc_probs[update_type][i] =
            (frame_probs->obmc_probs[update_type][i] + new_prob) >> 1;
#endif
      }
    }

    // Sequentially update warped_probs
    if (cpi->do_update_frame_probs_warp[loop] &&
        cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
      const FRAME_UPDATE_TYPE update_type =
          get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
      const int new_prob = cpi->frame_new_probs[loop].warped_probs[update_type];
#if CONFIG_FPMT_TEST
      temp_frame_probs_simulation->warped_probs[update_type] =
          (temp_frame_probs_simulation->warped_probs[update_type] + new_prob) >>
          1;
#else
      frame_probs->warped_probs[update_type] =
          (frame_probs->warped_probs[update_type] + new_prob) >> 1;
#endif
    }

    // Sequentially update switchable_interp_probs. As with tx_type_probs,
    // the j == 0 entry absorbs the rounding remainder (row total 1536).
    if (cpi->do_update_frame_probs_interpfilter[loop] &&
        cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
      const FRAME_UPDATE_TYPE update_type =
          get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);

      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
        int left = 1536;

        for (j = SWITCHABLE_FILTERS - 1; j >= 0; j--) {
          const int new_prob = cpi->frame_new_probs[loop]
                                   .switchable_interp_probs[update_type][i][j];
#if CONFIG_FPMT_TEST
          int prob = (temp_frame_probs_simulation
                          ->switchable_interp_probs[update_type][i][j] +
                      new_prob) >>
                     1;
          left -= prob;
          if (j == 0) prob += left;

          temp_frame_probs_simulation
              ->switchable_interp_probs[update_type][i][j] = prob;
#else
          int prob = (frame_probs->switchable_interp_probs[update_type][i][j] +
                      new_prob) >>
                     1;
          left -= prob;
          if (j == 0) prob += left;
          frame_probs->switchable_interp_probs[update_type][i][j] = prob;
#endif
        }
      }
    }
  }

#if CONFIG_FPMT_TEST
  // Copying temp_frame_probs_simulation to temp_frame_probs based on
  // the flag
  if (cpi->do_frame_data_update &&
      cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0 &&
      simulate_parallel_frame) {
    for (int update_type_idx = 0; update_type_idx < FRAME_UPDATE_TYPES;
         update_type_idx++) {
      for (i = 0; i < BLOCK_SIZES_ALL; i++) {
        temp_frame_probs->obmc_probs[update_type_idx][i] =
            temp_frame_probs_simulation->obmc_probs[update_type_idx][i];
      }
      temp_frame_probs->warped_probs[update_type_idx] =
          temp_frame_probs_simulation->warped_probs[update_type_idx];
      for (i = 0; i < TX_SIZES_ALL; i++) {
        for (j = 0; j < TX_TYPES; j++) {
          temp_frame_probs->tx_type_probs[update_type_idx][i][j] =
              temp_frame_probs_simulation->tx_type_probs[update_type_idx][i][j];
        }
      }
      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
        for (j = 0; j < SWITCHABLE_FILTERS; j++) {
          temp_frame_probs->switchable_interp_probs[update_type_idx][i][j] =
              temp_frame_probs_simulation
                  ->switchable_interp_probs[update_type_idx][i][j];
        }
      }
    }
  }
#endif
  // Update framerate obtained from parallel encode frames
  if (cpi->common.show_frame &&
      cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0)
    cpi->framerate = cpi->new_framerate;
#if CONFIG_FPMT_TEST
  // SIMULATION PURPOSE
  int show_existing_between_parallel_frames_cndn =
      (cpi->ppi->gf_group.update_type[cpi->gf_frame_index] ==
           INTNL_OVERLAY_UPDATE &&
       cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index + 1] == 2);
  if (cpi->common.show_frame && !show_existing_between_parallel_frames_cndn &&
      cpi->do_frame_data_update && simulate_parallel_frame)
    cpi->temp_framerate = cpi->framerate;
#endif
}
4408