1 /*
2  * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #include <math.h>
12 #include <stdio.h>
13 #include <limits.h>
14 
15 #include "./vp9_rtcd.h"
16 #include "./vpx_config.h"
17 #include "./vpx_dsp_rtcd.h"
18 #include "./vpx_scale_rtcd.h"
19 #include "vpx_dsp/psnr.h"
20 #include "vpx_dsp/vpx_dsp_common.h"
21 #include "vpx_dsp/vpx_filter.h"
22 #if CONFIG_INTERNAL_STATS
23 #include "vpx_dsp/ssim.h"
24 #endif
25 #include "vpx_ports/mem.h"
26 #include "vpx_ports/system_state.h"
27 #include "vpx_ports/vpx_timer.h"
28 
29 #include "vp9/common/vp9_alloccommon.h"
30 #include "vp9/common/vp9_filter.h"
31 #include "vp9/common/vp9_idct.h"
32 #if CONFIG_VP9_POSTPROC
33 #include "vp9/common/vp9_postproc.h"
34 #endif
35 #include "vp9/common/vp9_reconinter.h"
36 #include "vp9/common/vp9_reconintra.h"
37 #include "vp9/common/vp9_tile_common.h"
38 
39 #include "vp9/encoder/vp9_alt_ref_aq.h"
40 #include "vp9/encoder/vp9_aq_360.h"
41 #include "vp9/encoder/vp9_aq_complexity.h"
42 #include "vp9/encoder/vp9_aq_cyclicrefresh.h"
43 #include "vp9/encoder/vp9_aq_variance.h"
44 #include "vp9/encoder/vp9_bitstream.h"
45 #include "vp9/encoder/vp9_context_tree.h"
46 #include "vp9/encoder/vp9_encodeframe.h"
47 #include "vp9/encoder/vp9_encodemv.h"
48 #include "vp9/encoder/vp9_encoder.h"
49 #include "vp9/encoder/vp9_extend.h"
50 #include "vp9/encoder/vp9_ethread.h"
51 #include "vp9/encoder/vp9_firstpass.h"
52 #include "vp9/encoder/vp9_mbgraph.h"
53 #include "vp9/encoder/vp9_multi_thread.h"
54 #include "vp9/encoder/vp9_noise_estimate.h"
55 #include "vp9/encoder/vp9_picklpf.h"
56 #include "vp9/encoder/vp9_ratectrl.h"
57 #include "vp9/encoder/vp9_rd.h"
58 #include "vp9/encoder/vp9_resize.h"
59 #include "vp9/encoder/vp9_segmentation.h"
60 #include "vp9/encoder/vp9_skin_detection.h"
61 #include "vp9/encoder/vp9_speed_features.h"
62 #include "vp9/encoder/vp9_svc_layercontext.h"
63 #include "vp9/encoder/vp9_temporal_filter.h"
64 
65 #define AM_SEGMENT_ID_INACTIVE 7
66 #define AM_SEGMENT_ID_ACTIVE 0
67 
68 #define ALTREF_HIGH_PRECISION_MV 1     // Whether to use high precision mv
69                                        //  for altref computation.
70 #define HIGH_PRECISION_MV_QTHRESH 200  // Q threshold for high precision
71                                        // mv. Choose a very high value for
72                                        // now so that HIGH_PRECISION is always
73                                        // chosen.
74 // #define OUTPUT_YUV_REC
75 
76 #define FRAME_SIZE_FACTOR 128  // empirical params for context model threshold
77 #define FRAME_RATE_FACTOR 8
78 
79 #ifdef OUTPUT_YUV_DENOISED
80 FILE *yuv_denoised_file = NULL;
81 #endif
82 #ifdef OUTPUT_YUV_SKINMAP
83 FILE *yuv_skinmap_file = NULL;
84 #endif
85 #ifdef OUTPUT_YUV_REC
86 FILE *yuv_rec_file;
87 #endif
88 
89 #if 0
90 FILE *framepsnr;
91 FILE *kf_list;
92 FILE *keyfile;
93 #endif
94 
95 #ifdef ENABLE_KF_DENOISE
96 // Test condition for spatial denoise of source.
97 static int is_spatial_denoise_enabled(VP9_COMP *cpi) {
98   VP9_COMMON *const cm = &cpi->common;
99   const VP9EncoderConfig *const oxcf = &cpi->oxcf;
100 
101   return (oxcf->pass != 1) && !is_lossless_requested(&cpi->oxcf) &&
102          frame_is_intra_only(cm);
103 }
104 #endif
105 
106 // Compute the adaptive threshold used to decide whether recoding can be skipped.
107 static int compute_context_model_thresh(const VP9_COMP *const cpi) {
108   const VP9_COMMON *const cm = &cpi->common;
109   const VP9EncoderConfig *const oxcf = &cpi->oxcf;
110   const int frame_size = (cm->width * cm->height) >> 10;
111   const int bitrate = (int)(oxcf->target_bandwidth >> 10);
112   const int qindex_factor = cm->base_qindex + (MAXQ >> 1);
113 
114   // This equation makes the threshold adaptive to frame size.
115   // Coding gain obtained by recoding comes from alternate frames of large
116   // content change. We skip recoding if the difference of previous and current
117   // frame context probability model is less than a certain threshold.
118   // The first component is the most critical part to guarantee adaptivity.
119 // Other parameters are estimated based on a typical HD-resolution setting,
120 // e.g. frame_size = 1920x1080, bitrate = 8000, qindex_factor < 50.
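  // Worked example (editorial addition, values assumed rather than taken from
  // the source): for 1920x1080, frame_size = (1920 * 1080) >> 10 = 2025; with
  // bitrate = 8000 and qindex_factor = 300 the threshold is
  // ((128 * 2025 - 8 * 8000) * 300) >> 9 = (195200 * 300) >> 9 = 114375.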
121   const int thresh =
122       ((FRAME_SIZE_FACTOR * frame_size - FRAME_RATE_FACTOR * bitrate) *
123        qindex_factor) >>
124       9;
125 
126   return thresh;
127 }
128 
129 // Compute the total cost difference between the current and previous
130 // frame context probability models.
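// Editorial note: each probability table below is handled with the same
// pattern -- accumulate count[j] * (pre_prob[j] - cur_prob[j]) for all but
// the last symbol, then weight the last symbol's count by the difference of
// the complementary probabilities (MAX_PROB - p) of the final tree node.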
131 static int compute_context_model_diff(const VP9_COMMON *const cm) {
132   const FRAME_CONTEXT *const pre_fc =
133       &cm->frame_contexts[cm->frame_context_idx];
134   const FRAME_CONTEXT *const cur_fc = cm->fc;
135   const FRAME_COUNTS *counts = &cm->counts;
136   vpx_prob pre_last_prob, cur_last_prob;
137   int diff = 0;
138   int i, j, k, l, m, n;
139 
140   // y_mode_prob
141   for (i = 0; i < BLOCK_SIZE_GROUPS; ++i) {
142     for (j = 0; j < INTRA_MODES - 1; ++j) {
143       diff += (int)counts->y_mode[i][j] *
144               (pre_fc->y_mode_prob[i][j] - cur_fc->y_mode_prob[i][j]);
145     }
146     pre_last_prob = MAX_PROB - pre_fc->y_mode_prob[i][INTRA_MODES - 2];
147     cur_last_prob = MAX_PROB - cur_fc->y_mode_prob[i][INTRA_MODES - 2];
148 
149     diff += (int)counts->y_mode[i][INTRA_MODES - 1] *
150             (pre_last_prob - cur_last_prob);
151   }
152 
153   // uv_mode_prob
154   for (i = 0; i < INTRA_MODES; ++i) {
155     for (j = 0; j < INTRA_MODES - 1; ++j) {
156       diff += (int)counts->uv_mode[i][j] *
157               (pre_fc->uv_mode_prob[i][j] - cur_fc->uv_mode_prob[i][j]);
158     }
159     pre_last_prob = MAX_PROB - pre_fc->uv_mode_prob[i][INTRA_MODES - 2];
160     cur_last_prob = MAX_PROB - cur_fc->uv_mode_prob[i][INTRA_MODES - 2];
161 
162     diff += (int)counts->uv_mode[i][INTRA_MODES - 1] *
163             (pre_last_prob - cur_last_prob);
164   }
165 
166   // partition_prob
167   for (i = 0; i < PARTITION_CONTEXTS; ++i) {
168     for (j = 0; j < PARTITION_TYPES - 1; ++j) {
169       diff += (int)counts->partition[i][j] *
170               (pre_fc->partition_prob[i][j] - cur_fc->partition_prob[i][j]);
171     }
172     pre_last_prob = MAX_PROB - pre_fc->partition_prob[i][PARTITION_TYPES - 2];
173     cur_last_prob = MAX_PROB - cur_fc->partition_prob[i][PARTITION_TYPES - 2];
174 
175     diff += (int)counts->partition[i][PARTITION_TYPES - 1] *
176             (pre_last_prob - cur_last_prob);
177   }
178 
179   // coef_probs
180   for (i = 0; i < TX_SIZES; ++i) {
181     for (j = 0; j < PLANE_TYPES; ++j) {
182       for (k = 0; k < REF_TYPES; ++k) {
183         for (l = 0; l < COEF_BANDS; ++l) {
184           for (m = 0; m < BAND_COEFF_CONTEXTS(l); ++m) {
185             for (n = 0; n < UNCONSTRAINED_NODES; ++n) {
186               diff += (int)counts->coef[i][j][k][l][m][n] *
187                       (pre_fc->coef_probs[i][j][k][l][m][n] -
188                        cur_fc->coef_probs[i][j][k][l][m][n]);
189             }
190 
191             pre_last_prob =
192                 MAX_PROB -
193                 pre_fc->coef_probs[i][j][k][l][m][UNCONSTRAINED_NODES - 1];
194             cur_last_prob =
195                 MAX_PROB -
196                 cur_fc->coef_probs[i][j][k][l][m][UNCONSTRAINED_NODES - 1];
197 
198             diff += (int)counts->coef[i][j][k][l][m][UNCONSTRAINED_NODES] *
199                     (pre_last_prob - cur_last_prob);
200           }
201         }
202       }
203     }
204   }
205 
206   // switchable_interp_prob
207   for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) {
208     for (j = 0; j < SWITCHABLE_FILTERS - 1; ++j) {
209       diff += (int)counts->switchable_interp[i][j] *
210               (pre_fc->switchable_interp_prob[i][j] -
211                cur_fc->switchable_interp_prob[i][j]);
212     }
213     pre_last_prob =
214         MAX_PROB - pre_fc->switchable_interp_prob[i][SWITCHABLE_FILTERS - 2];
215     cur_last_prob =
216         MAX_PROB - cur_fc->switchable_interp_prob[i][SWITCHABLE_FILTERS - 2];
217 
218     diff += (int)counts->switchable_interp[i][SWITCHABLE_FILTERS - 1] *
219             (pre_last_prob - cur_last_prob);
220   }
221 
222   // inter_mode_probs
223   for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
224     for (j = 0; j < INTER_MODES - 1; ++j) {
225       diff += (int)counts->inter_mode[i][j] *
226               (pre_fc->inter_mode_probs[i][j] - cur_fc->inter_mode_probs[i][j]);
227     }
228     pre_last_prob = MAX_PROB - pre_fc->inter_mode_probs[i][INTER_MODES - 2];
229     cur_last_prob = MAX_PROB - cur_fc->inter_mode_probs[i][INTER_MODES - 2];
230 
231     diff += (int)counts->inter_mode[i][INTER_MODES - 1] *
232             (pre_last_prob - cur_last_prob);
233   }
234 
235   // intra_inter_prob
236   for (i = 0; i < INTRA_INTER_CONTEXTS; ++i) {
237     diff += (int)counts->intra_inter[i][0] *
238             (pre_fc->intra_inter_prob[i] - cur_fc->intra_inter_prob[i]);
239 
240     pre_last_prob = MAX_PROB - pre_fc->intra_inter_prob[i];
241     cur_last_prob = MAX_PROB - cur_fc->intra_inter_prob[i];
242 
243     diff += (int)counts->intra_inter[i][1] * (pre_last_prob - cur_last_prob);
244   }
245 
246   // comp_inter_prob
247   for (i = 0; i < COMP_INTER_CONTEXTS; ++i) {
248     diff += (int)counts->comp_inter[i][0] *
249             (pre_fc->comp_inter_prob[i] - cur_fc->comp_inter_prob[i]);
250 
251     pre_last_prob = MAX_PROB - pre_fc->comp_inter_prob[i];
252     cur_last_prob = MAX_PROB - cur_fc->comp_inter_prob[i];
253 
254     diff += (int)counts->comp_inter[i][1] * (pre_last_prob - cur_last_prob);
255   }
256 
257   // single_ref_prob
258   for (i = 0; i < REF_CONTEXTS; ++i) {
259     for (j = 0; j < 2; ++j) {
260       diff += (int)counts->single_ref[i][j][0] *
261               (pre_fc->single_ref_prob[i][j] - cur_fc->single_ref_prob[i][j]);
262 
263       pre_last_prob = MAX_PROB - pre_fc->single_ref_prob[i][j];
264       cur_last_prob = MAX_PROB - cur_fc->single_ref_prob[i][j];
265 
266       diff +=
267           (int)counts->single_ref[i][j][1] * (pre_last_prob - cur_last_prob);
268     }
269   }
270 
271   // comp_ref_prob
272   for (i = 0; i < REF_CONTEXTS; ++i) {
273     diff += (int)counts->comp_ref[i][0] *
274             (pre_fc->comp_ref_prob[i] - cur_fc->comp_ref_prob[i]);
275 
276     pre_last_prob = MAX_PROB - pre_fc->comp_ref_prob[i];
277     cur_last_prob = MAX_PROB - cur_fc->comp_ref_prob[i];
278 
279     diff += (int)counts->comp_ref[i][1] * (pre_last_prob - cur_last_prob);
280   }
281 
282   // tx_probs
283   for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
284     // p32x32
285     for (j = 0; j < TX_SIZES - 1; ++j) {
286       diff += (int)counts->tx.p32x32[i][j] *
287               (pre_fc->tx_probs.p32x32[i][j] - cur_fc->tx_probs.p32x32[i][j]);
288     }
289     pre_last_prob = MAX_PROB - pre_fc->tx_probs.p32x32[i][TX_SIZES - 2];
290     cur_last_prob = MAX_PROB - cur_fc->tx_probs.p32x32[i][TX_SIZES - 2];
291 
292     diff += (int)counts->tx.p32x32[i][TX_SIZES - 1] *
293             (pre_last_prob - cur_last_prob);
294 
295     // p16x16
296     for (j = 0; j < TX_SIZES - 2; ++j) {
297       diff += (int)counts->tx.p16x16[i][j] *
298               (pre_fc->tx_probs.p16x16[i][j] - cur_fc->tx_probs.p16x16[i][j]);
299     }
300     pre_last_prob = MAX_PROB - pre_fc->tx_probs.p16x16[i][TX_SIZES - 3];
301     cur_last_prob = MAX_PROB - cur_fc->tx_probs.p16x16[i][TX_SIZES - 3];
302 
303     diff += (int)counts->tx.p16x16[i][TX_SIZES - 2] *
304             (pre_last_prob - cur_last_prob);
305 
306     // p8x8
307     for (j = 0; j < TX_SIZES - 3; ++j) {
308       diff += (int)counts->tx.p8x8[i][j] *
309               (pre_fc->tx_probs.p8x8[i][j] - cur_fc->tx_probs.p8x8[i][j]);
310     }
311     pre_last_prob = MAX_PROB - pre_fc->tx_probs.p8x8[i][TX_SIZES - 4];
312     cur_last_prob = MAX_PROB - cur_fc->tx_probs.p8x8[i][TX_SIZES - 4];
313 
314     diff +=
315         (int)counts->tx.p8x8[i][TX_SIZES - 3] * (pre_last_prob - cur_last_prob);
316   }
317 
318   // skip_probs
319   for (i = 0; i < SKIP_CONTEXTS; ++i) {
320     diff += (int)counts->skip[i][0] *
321             (pre_fc->skip_probs[i] - cur_fc->skip_probs[i]);
322 
323     pre_last_prob = MAX_PROB - pre_fc->skip_probs[i];
324     cur_last_prob = MAX_PROB - cur_fc->skip_probs[i];
325 
326     diff += (int)counts->skip[i][1] * (pre_last_prob - cur_last_prob);
327   }
328 
329   // mv
330   for (i = 0; i < MV_JOINTS - 1; ++i) {
331     diff += (int)counts->mv.joints[i] *
332             (pre_fc->nmvc.joints[i] - cur_fc->nmvc.joints[i]);
333   }
334   pre_last_prob = MAX_PROB - pre_fc->nmvc.joints[MV_JOINTS - 2];
335   cur_last_prob = MAX_PROB - cur_fc->nmvc.joints[MV_JOINTS - 2];
336 
337   diff +=
338       (int)counts->mv.joints[MV_JOINTS - 1] * (pre_last_prob - cur_last_prob);
339 
340   for (i = 0; i < 2; ++i) {
341     const nmv_component_counts *nmv_count = &counts->mv.comps[i];
342     const nmv_component *pre_nmv_prob = &pre_fc->nmvc.comps[i];
343     const nmv_component *cur_nmv_prob = &cur_fc->nmvc.comps[i];
344 
345     // sign
346     diff += (int)nmv_count->sign[0] * (pre_nmv_prob->sign - cur_nmv_prob->sign);
347 
348     pre_last_prob = MAX_PROB - pre_nmv_prob->sign;
349     cur_last_prob = MAX_PROB - cur_nmv_prob->sign;
350 
351     diff += (int)nmv_count->sign[1] * (pre_last_prob - cur_last_prob);
352 
353     // classes
354     for (j = 0; j < MV_CLASSES - 1; ++j) {
355       diff += (int)nmv_count->classes[j] *
356               (pre_nmv_prob->classes[j] - cur_nmv_prob->classes[j]);
357     }
358     pre_last_prob = MAX_PROB - pre_nmv_prob->classes[MV_CLASSES - 2];
359     cur_last_prob = MAX_PROB - cur_nmv_prob->classes[MV_CLASSES - 2];
360 
361     diff += (int)nmv_count->classes[MV_CLASSES - 1] *
362             (pre_last_prob - cur_last_prob);
363 
364     // class0
365     for (j = 0; j < CLASS0_SIZE - 1; ++j) {
366       diff += (int)nmv_count->class0[j] *
367               (pre_nmv_prob->class0[j] - cur_nmv_prob->class0[j]);
368     }
369     pre_last_prob = MAX_PROB - pre_nmv_prob->class0[CLASS0_SIZE - 2];
370     cur_last_prob = MAX_PROB - cur_nmv_prob->class0[CLASS0_SIZE - 2];
371 
372     diff += (int)nmv_count->class0[CLASS0_SIZE - 1] *
373             (pre_last_prob - cur_last_prob);
374 
375     // bits
376     for (j = 0; j < MV_OFFSET_BITS; ++j) {
377       diff += (int)nmv_count->bits[j][0] *
378               (pre_nmv_prob->bits[j] - cur_nmv_prob->bits[j]);
379 
380       pre_last_prob = MAX_PROB - pre_nmv_prob->bits[j];
381       cur_last_prob = MAX_PROB - cur_nmv_prob->bits[j];
382 
383       diff += (int)nmv_count->bits[j][1] * (pre_last_prob - cur_last_prob);
384     }
385 
386     // class0_fp
387     for (j = 0; j < CLASS0_SIZE; ++j) {
388       for (k = 0; k < MV_FP_SIZE - 1; ++k) {
389         diff += (int)nmv_count->class0_fp[j][k] *
390                 (pre_nmv_prob->class0_fp[j][k] - cur_nmv_prob->class0_fp[j][k]);
391       }
392       pre_last_prob = MAX_PROB - pre_nmv_prob->class0_fp[j][MV_FP_SIZE - 2];
393       cur_last_prob = MAX_PROB - cur_nmv_prob->class0_fp[j][MV_FP_SIZE - 2];
394 
395       diff += (int)nmv_count->class0_fp[j][MV_FP_SIZE - 1] *
396               (pre_last_prob - cur_last_prob);
397     }
398 
399     // fp
400     for (j = 0; j < MV_FP_SIZE - 1; ++j) {
401       diff +=
402           (int)nmv_count->fp[j] * (pre_nmv_prob->fp[j] - cur_nmv_prob->fp[j]);
403     }
404     pre_last_prob = MAX_PROB - pre_nmv_prob->fp[MV_FP_SIZE - 2];
405     cur_last_prob = MAX_PROB - cur_nmv_prob->fp[MV_FP_SIZE - 2];
406 
407     diff +=
408         (int)nmv_count->fp[MV_FP_SIZE - 1] * (pre_last_prob - cur_last_prob);
409 
410     // class0_hp
411     diff += (int)nmv_count->class0_hp[0] *
412             (pre_nmv_prob->class0_hp - cur_nmv_prob->class0_hp);
413 
414     pre_last_prob = MAX_PROB - pre_nmv_prob->class0_hp;
415     cur_last_prob = MAX_PROB - cur_nmv_prob->class0_hp;
416 
417     diff += (int)nmv_count->class0_hp[1] * (pre_last_prob - cur_last_prob);
418 
419     // hp
420     diff += (int)nmv_count->hp[0] * (pre_nmv_prob->hp - cur_nmv_prob->hp);
421 
422     pre_last_prob = MAX_PROB - pre_nmv_prob->hp;
423     cur_last_prob = MAX_PROB - cur_nmv_prob->hp;
424 
425     diff += (int)nmv_count->hp[1] * (pre_last_prob - cur_last_prob);
426   }
427 
428   return -diff;
429 }
430 
431 // Test for whether to calculate metrics for the frame.
432 static int is_psnr_calc_enabled(VP9_COMP *cpi) {
433   VP9_COMMON *const cm = &cpi->common;
434   const VP9EncoderConfig *const oxcf = &cpi->oxcf;
435 
436   return cpi->b_calculate_psnr && (oxcf->pass != 1) && cm->show_frame;
437 }
438 
439 /* clang-format off */
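// Column legend (editorial annotation; field order inferred from the
// Vp9LevelSpec checks in vp9_get_level() below): level, max_luma_sample_rate,
// max_luma_picture_size, average_bitrate, max_cpb_size, compression_ratio,
// max_col_tiles, min_altref_distance, max_ref_frame_buffers.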
440 const Vp9LevelSpec vp9_level_defs[VP9_LEVELS] = {
441   { LEVEL_1,   829440,      36864,    200,    400,    2, 1,  4,  8 },
442   { LEVEL_1_1, 2764800,     73728,    800,    1000,   2, 1,  4,  8 },
443   { LEVEL_2,   4608000,     122880,   1800,   1500,   2, 1,  4,  8 },
444   { LEVEL_2_1, 9216000,     245760,   3600,   2800,   2, 2,  4,  8 },
445   { LEVEL_3,   20736000,    552960,   7200,   6000,   2, 4,  4,  8 },
446   { LEVEL_3_1, 36864000,    983040,   12000,  10000,  2, 4,  4,  8 },
447   { LEVEL_4,   83558400,    2228224,  18000,  16000,  4, 4,  4,  8 },
448   { LEVEL_4_1, 160432128,   2228224,  30000,  18000,  4, 4,  5,  6 },
449   { LEVEL_5,   311951360,   8912896,  60000,  36000,  6, 8,  6,  4 },
450   { LEVEL_5_1, 588251136,   8912896,  120000, 46000,  8, 8,  10, 4 },
451   // TODO(huisu): update max_cpb_size for level 5_2 ~ 6_2 when
452   // they are finalized (currently tentative).
453   { LEVEL_5_2, 1176502272,  8912896,  180000, 90000,  8, 8,  10, 4 },
454   { LEVEL_6,   1176502272,  35651584, 180000, 90000,  8, 16, 10, 4 },
455   { LEVEL_6_1, 2353004544u, 35651584, 240000, 180000, 8, 16, 10, 4 },
456   { LEVEL_6_2, 4706009088u, 35651584, 480000, 360000, 8, 16, 10, 4 },
457 };
458 /* clang-format on */
459 
460 static const char *level_fail_messages[TARGET_LEVEL_FAIL_IDS] =
461     { "The average bit-rate is too high.",
462       "The picture size is too large.",
463       "The luma sample rate is too large.",
464       "The CPB size is too large.",
465       "The compression ratio is too small",
466       "Too many column tiles are used.",
467       "The alt-ref distance is too small.",
468       "Too many reference buffers are used." };
469 
470 static INLINE void Scale2Ratio(VPX_SCALING mode, int *hr, int *hs) {
471   switch (mode) {
472     case NORMAL:
473       *hr = 1;
474       *hs = 1;
475       break;
476     case FOURFIVE:
477       *hr = 4;
478       *hs = 5;
479       break;
480     case THREEFIVE:
481       *hr = 3;
482       *hs = 5;
483       break;
484     case ONETWO:
485       *hr = 1;
486       *hs = 2;
487       break;
488     default:
489       *hr = 1;
490       *hs = 1;
491       assert(0);
492       break;
493   }
494 }
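// Editorial example: Scale2Ratio(FOURFIVE, &hr, &hs) yields hr = 4, hs = 5,
// i.e. a 4/5 scaling ratio; NORMAL and any unrecognized mode fall back to
// 1:1 (with an assert in debug builds for the unrecognized case).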
495 
496 // Mark all inactive blocks as active. Other segmentation features may be set
497 // so memset cannot be used; instead, only inactive blocks should be reset.
498 static void suppress_active_map(VP9_COMP *cpi) {
499   unsigned char *const seg_map = cpi->segmentation_map;
500 
501   if (cpi->active_map.enabled || cpi->active_map.update) {
502     const int rows = cpi->common.mi_rows;
503     const int cols = cpi->common.mi_cols;
504     int i;
505 
506     for (i = 0; i < rows * cols; ++i)
507       if (seg_map[i] == AM_SEGMENT_ID_INACTIVE)
508         seg_map[i] = AM_SEGMENT_ID_ACTIVE;
509   }
510 }
511 
512 static void apply_active_map(VP9_COMP *cpi) {
513   struct segmentation *const seg = &cpi->common.seg;
514   unsigned char *const seg_map = cpi->segmentation_map;
515   const unsigned char *const active_map = cpi->active_map.map;
516   int i;
517 
518   assert(AM_SEGMENT_ID_ACTIVE == CR_SEGMENT_ID_BASE);
519 
520   if (frame_is_intra_only(&cpi->common)) {
521     cpi->active_map.enabled = 0;
522     cpi->active_map.update = 1;
523   }
524 
525   if (cpi->active_map.update) {
526     if (cpi->active_map.enabled) {
527       for (i = 0; i < cpi->common.mi_rows * cpi->common.mi_cols; ++i)
528         if (seg_map[i] == AM_SEGMENT_ID_ACTIVE) seg_map[i] = active_map[i];
529       vp9_enable_segmentation(seg);
530       vp9_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
531       vp9_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
532       // Setting the data to -MAX_LOOP_FILTER will result in the computed loop
533       // filter level being zero regardless of the value of seg->abs_delta.
534       vp9_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
535                       -MAX_LOOP_FILTER);
536     } else {
537       vp9_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
538       vp9_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
539       if (seg->enabled) {
540         seg->update_data = 1;
541         seg->update_map = 1;
542       }
543     }
544     cpi->active_map.update = 0;
545   }
546 }
547 
548 static void init_level_info(Vp9LevelInfo *level_info) {
549   Vp9LevelStats *const level_stats = &level_info->level_stats;
550   Vp9LevelSpec *const level_spec = &level_info->level_spec;
551 
552   memset(level_stats, 0, sizeof(*level_stats));
553   memset(level_spec, 0, sizeof(*level_spec));
554   level_spec->level = LEVEL_UNKNOWN;
555   level_spec->min_altref_distance = INT_MAX;
556 }
557 
558 VP9_LEVEL vp9_get_level(const Vp9LevelSpec *const level_spec) {
559   int i;
560   const Vp9LevelSpec *this_level;
561 
562   vpx_clear_system_state();
563 
564   for (i = 0; i < VP9_LEVELS; ++i) {
565     this_level = &vp9_level_defs[i];
566     if ((double)level_spec->max_luma_sample_rate >
567             (double)this_level->max_luma_sample_rate *
568                 (1 + SAMPLE_RATE_GRACE_P) ||
569         level_spec->max_luma_picture_size > this_level->max_luma_picture_size ||
570         level_spec->average_bitrate > this_level->average_bitrate ||
571         level_spec->max_cpb_size > this_level->max_cpb_size ||
572         level_spec->compression_ratio < this_level->compression_ratio ||
573         level_spec->max_col_tiles > this_level->max_col_tiles ||
574         level_spec->min_altref_distance < this_level->min_altref_distance ||
575         level_spec->max_ref_frame_buffers > this_level->max_ref_frame_buffers)
576       continue;
577     break;
578   }
579   return (i == VP9_LEVELS) ? LEVEL_UNKNOWN : vp9_level_defs[i].level;
580 }
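// Editorial example (values assumed): a 1920x1080 @ 30fps stream has a luma
// sample rate of 1920 * 1080 * 30 = 62208000 and a picture size of 2073600,
// both within the LEVEL_4 row of vp9_level_defs above, so the loop stops at
// LEVEL_4 provided the bitrate, CPB, tile and reference constraints are also
// met.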
581 
582 int vp9_set_active_map(VP9_COMP *cpi, unsigned char *new_map_16x16, int rows,
583                        int cols) {
584   if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
585     unsigned char *const active_map_8x8 = cpi->active_map.map;
586     const int mi_rows = cpi->common.mi_rows;
587     const int mi_cols = cpi->common.mi_cols;
588     cpi->active_map.update = 1;
589     if (new_map_16x16) {
590       int r, c;
591       for (r = 0; r < mi_rows; ++r) {
592         for (c = 0; c < mi_cols; ++c) {
593           active_map_8x8[r * mi_cols + c] =
594               new_map_16x16[(r >> 1) * cols + (c >> 1)]
595                   ? AM_SEGMENT_ID_ACTIVE
596                   : AM_SEGMENT_ID_INACTIVE;
597         }
598       }
599       cpi->active_map.enabled = 1;
600     } else {
601       cpi->active_map.enabled = 0;
602     }
603     return 0;
604   } else {
605     return -1;
606   }
607 }
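// Hypothetical usage sketch (editorial addition, not part of the original
// source): an application encoding 1280x720 (mb_rows = 45, mb_cols = 80)
// could mark the bottom half of the frame as inactive with a 16x16-unit map,
// where a nonzero entry means "active":
//
//   unsigned char map[45 * 80];
//   memset(map, 1, sizeof(map));                  /* top half active */
//   memset(map + 22 * 80, 0, (45 - 22) * 80);     /* rows 22..44 inactive */
//   if (vp9_set_active_map(cpi, map, 45, 80) != 0) {
//     /* dimensions did not match mb_rows/mb_cols */
//   }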
608 
609 int vp9_get_active_map(VP9_COMP *cpi, unsigned char *new_map_16x16, int rows,
610                        int cols) {
611   if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols &&
612       new_map_16x16) {
613     unsigned char *const seg_map_8x8 = cpi->segmentation_map;
614     const int mi_rows = cpi->common.mi_rows;
615     const int mi_cols = cpi->common.mi_cols;
616     memset(new_map_16x16, !cpi->active_map.enabled, rows * cols);
617     if (cpi->active_map.enabled) {
618       int r, c;
619       for (r = 0; r < mi_rows; ++r) {
620         for (c = 0; c < mi_cols; ++c) {
621           // Cyclic refresh segments are considered active despite not having
622           // AM_SEGMENT_ID_ACTIVE
623           new_map_16x16[(r >> 1) * cols + (c >> 1)] |=
624               seg_map_8x8[r * mi_cols + c] != AM_SEGMENT_ID_INACTIVE;
625         }
626       }
627     }
628     return 0;
629   } else {
630     return -1;
631   }
632 }
633 
634 void vp9_set_high_precision_mv(VP9_COMP *cpi, int allow_high_precision_mv) {
635   MACROBLOCK *const mb = &cpi->td.mb;
636   cpi->common.allow_high_precision_mv = allow_high_precision_mv;
637   if (cpi->common.allow_high_precision_mv) {
638     mb->mvcost = mb->nmvcost_hp;
639     mb->mvsadcost = mb->nmvsadcost_hp;
640   } else {
641     mb->mvcost = mb->nmvcost;
642     mb->mvsadcost = mb->nmvsadcost;
643   }
644 }
645 
646 static void setup_frame(VP9_COMP *cpi) {
647   VP9_COMMON *const cm = &cpi->common;
648   // Set up entropy context depending on frame type. The decoder mandates
649   // the use of the default context, index 0, for keyframes and inter
650   // frames where the error_resilient_mode or intra_only flag is set. For
651   // other inter-frames the encoder currently uses only two contexts;
652   // context 1 for ALTREF frames and context 0 for the others.
653   if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
654     vp9_setup_past_independence(cm);
655   } else {
656     if (!cpi->use_svc) cm->frame_context_idx = cpi->refresh_alt_ref_frame;
657   }
658 
659   if (cm->frame_type == KEY_FRAME) {
660     if (!is_two_pass_svc(cpi)) cpi->refresh_golden_frame = 1;
661     cpi->refresh_alt_ref_frame = 1;
662     vp9_zero(cpi->interp_filter_selected);
663   } else {
664     *cm->fc = cm->frame_contexts[cm->frame_context_idx];
665     vp9_zero(cpi->interp_filter_selected[0]);
666   }
667 }
668 
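// Editorial note: cm->mip is allocated with a one-unit border on the top and
// left; "+ cm->mi_stride + 1" therefore skips one border row and one border
// column so that cm->mi points at the first visible mode-info unit.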
669 static void vp9_enc_setup_mi(VP9_COMMON *cm) {
670   int i;
671   cm->mi = cm->mip + cm->mi_stride + 1;
672   memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
673   cm->prev_mi = cm->prev_mip + cm->mi_stride + 1;
674   // Clear top border row
675   memset(cm->prev_mip, 0, sizeof(*cm->prev_mip) * cm->mi_stride);
676   // Clear left border column
677   for (i = 1; i < cm->mi_rows + 1; ++i)
678     memset(&cm->prev_mip[i * cm->mi_stride], 0, sizeof(*cm->prev_mip));
679 
680   cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
681   cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
682 
683   memset(cm->mi_grid_base, 0,
684          cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
685 }
686 
687 static int vp9_enc_alloc_mi(VP9_COMMON *cm, int mi_size) {
688   cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
689   if (!cm->mip) return 1;
690   cm->prev_mip = vpx_calloc(mi_size, sizeof(*cm->prev_mip));
691   if (!cm->prev_mip) return 1;
692   cm->mi_alloc_size = mi_size;
693 
694   cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
695   if (!cm->mi_grid_base) return 1;
696   cm->prev_mi_grid_base =
697       (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
698   if (!cm->prev_mi_grid_base) return 1;
699 
700   return 0;
701 }
702 
703 static void vp9_enc_free_mi(VP9_COMMON *cm) {
704   vpx_free(cm->mip);
705   cm->mip = NULL;
706   vpx_free(cm->prev_mip);
707   cm->prev_mip = NULL;
708   vpx_free(cm->mi_grid_base);
709   cm->mi_grid_base = NULL;
710   vpx_free(cm->prev_mi_grid_base);
711   cm->prev_mi_grid_base = NULL;
712 }
713 
714 static void vp9_swap_mi_and_prev_mi(VP9_COMMON *cm) {
715   // Current mip will be the prev_mip for the next frame.
716   MODE_INFO **temp_base = cm->prev_mi_grid_base;
717   MODE_INFO *temp = cm->prev_mip;
718   cm->prev_mip = cm->mip;
719   cm->mip = temp;
720 
721   // Update the upper left visible macroblock ptrs.
722   cm->mi = cm->mip + cm->mi_stride + 1;
723   cm->prev_mi = cm->prev_mip + cm->mi_stride + 1;
724 
725   cm->prev_mi_grid_base = cm->mi_grid_base;
726   cm->mi_grid_base = temp_base;
727   cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
728   cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
729 }
730 
731 void vp9_initialize_enc(void) {
732   static volatile int init_done = 0;
733 
734   if (!init_done) {
735     vp9_rtcd();
736     vpx_dsp_rtcd();
737     vpx_scale_rtcd();
738     vp9_init_intra_predictors();
739     vp9_init_me_luts();
740     vp9_rc_init_minq_luts();
741     vp9_entropy_mv_init();
742     vp9_temporal_filter_init();
743     init_done = 1;
744   }
745 }
746 
747 static void dealloc_compressor_data(VP9_COMP *cpi) {
748   VP9_COMMON *const cm = &cpi->common;
749   int i;
750 
751   vpx_free(cpi->mbmi_ext_base);
752   cpi->mbmi_ext_base = NULL;
753 
754   vpx_free(cpi->tile_data);
755   cpi->tile_data = NULL;
756 
757   vpx_free(cpi->segmentation_map);
758   cpi->segmentation_map = NULL;
759   vpx_free(cpi->coding_context.last_frame_seg_map_copy);
760   cpi->coding_context.last_frame_seg_map_copy = NULL;
761 
762   vpx_free(cpi->nmvcosts[0]);
763   vpx_free(cpi->nmvcosts[1]);
764   cpi->nmvcosts[0] = NULL;
765   cpi->nmvcosts[1] = NULL;
766 
767   vpx_free(cpi->nmvcosts_hp[0]);
768   vpx_free(cpi->nmvcosts_hp[1]);
769   cpi->nmvcosts_hp[0] = NULL;
770   cpi->nmvcosts_hp[1] = NULL;
771 
772   vpx_free(cpi->nmvsadcosts[0]);
773   vpx_free(cpi->nmvsadcosts[1]);
774   cpi->nmvsadcosts[0] = NULL;
775   cpi->nmvsadcosts[1] = NULL;
776 
777   vpx_free(cpi->nmvsadcosts_hp[0]);
778   vpx_free(cpi->nmvsadcosts_hp[1]);
779   cpi->nmvsadcosts_hp[0] = NULL;
780   cpi->nmvsadcosts_hp[1] = NULL;
781 
782   vpx_free(cpi->prev_partition);
783   cpi->prev_partition = NULL;
784 
785   vpx_free(cpi->prev_segment_id);
786   cpi->prev_segment_id = NULL;
787 
788   vpx_free(cpi->prev_variance_low);
789   cpi->prev_variance_low = NULL;
790 
791   vpx_free(cpi->copied_frame_cnt);
792   cpi->copied_frame_cnt = NULL;
793 
794   vpx_free(cpi->content_state_sb_fd);
795   cpi->content_state_sb_fd = NULL;
796 
797   vp9_cyclic_refresh_free(cpi->cyclic_refresh);
798   cpi->cyclic_refresh = NULL;
799 
800   vpx_free(cpi->active_map.map);
801   cpi->active_map.map = NULL;
802 
803   vpx_free(cpi->consec_zero_mv);
804   cpi->consec_zero_mv = NULL;
805 
806   vp9_free_ref_frame_buffers(cm->buffer_pool);
807 #if CONFIG_VP9_POSTPROC
808   vp9_free_postproc_buffers(cm);
809 #endif
810   vp9_free_context_buffers(cm);
811 
812   vpx_free_frame_buffer(&cpi->last_frame_uf);
813   vpx_free_frame_buffer(&cpi->scaled_source);
814   vpx_free_frame_buffer(&cpi->scaled_last_source);
815   vpx_free_frame_buffer(&cpi->alt_ref_buffer);
816 #ifdef ENABLE_KF_DENOISE
817   vpx_free_frame_buffer(&cpi->raw_unscaled_source);
818   vpx_free_frame_buffer(&cpi->raw_scaled_source);
819 #endif
820 
821   vp9_lookahead_destroy(cpi->lookahead);
822 
823   vpx_free(cpi->tile_tok[0][0]);
824   cpi->tile_tok[0][0] = 0;
825 
826   vpx_free(cpi->tplist[0][0]);
827   cpi->tplist[0][0] = NULL;
828 
829   vp9_free_pc_tree(&cpi->td);
830 
831   for (i = 0; i < cpi->svc.number_spatial_layers; ++i) {
832     LAYER_CONTEXT *const lc = &cpi->svc.layer_context[i];
833     vpx_free(lc->rc_twopass_stats_in.buf);
834     lc->rc_twopass_stats_in.buf = NULL;
835     lc->rc_twopass_stats_in.sz = 0;
836   }
837 
838   if (cpi->source_diff_var != NULL) {
839     vpx_free(cpi->source_diff_var);
840     cpi->source_diff_var = NULL;
841   }
842 
843   for (i = 0; i < MAX_LAG_BUFFERS; ++i) {
844     vpx_free_frame_buffer(&cpi->svc.scaled_frames[i]);
845   }
846   memset(&cpi->svc.scaled_frames[0], 0,
847          MAX_LAG_BUFFERS * sizeof(cpi->svc.scaled_frames[0]));
848 
849   vpx_free_frame_buffer(&cpi->svc.scaled_temp);
850   memset(&cpi->svc.scaled_temp, 0, sizeof(cpi->svc.scaled_temp));
851 
852   vpx_free_frame_buffer(&cpi->svc.empty_frame.img);
853   memset(&cpi->svc.empty_frame, 0, sizeof(cpi->svc.empty_frame));
854 
855   vp9_free_svc_cyclic_refresh(cpi);
856 }
857 
858 static void save_coding_context(VP9_COMP *cpi) {
859   CODING_CONTEXT *const cc = &cpi->coding_context;
860   VP9_COMMON *cm = &cpi->common;
861 
862   // Stores a snapshot of key state variables which can subsequently be
863   // restored with a call to vp9_restore_coding_context. These functions are
864   // intended for use in a re-code loop in vp9_compress_frame where the
865   // quantizer value is adjusted between loop iterations.
866   vp9_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
867 
868   memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
869          MV_VALS * sizeof(*cpi->nmvcosts[0]));
870   memcpy(cc->nmvcosts[1], cpi->nmvcosts[1],
871          MV_VALS * sizeof(*cpi->nmvcosts[1]));
872   memcpy(cc->nmvcosts_hp[0], cpi->nmvcosts_hp[0],
873          MV_VALS * sizeof(*cpi->nmvcosts_hp[0]));
874   memcpy(cc->nmvcosts_hp[1], cpi->nmvcosts_hp[1],
875          MV_VALS * sizeof(*cpi->nmvcosts_hp[1]));
876 
877   vp9_copy(cc->segment_pred_probs, cm->seg.pred_probs);
878 
879   memcpy(cpi->coding_context.last_frame_seg_map_copy, cm->last_frame_seg_map,
880          (cm->mi_rows * cm->mi_cols));
881 
882   vp9_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
883   vp9_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
884 
885   cc->fc = *cm->fc;
886 }
887 
888 static void restore_coding_context(VP9_COMP *cpi) {
889   CODING_CONTEXT *const cc = &cpi->coding_context;
890   VP9_COMMON *cm = &cpi->common;
891 
892   // Restore key state variables to the snapshot state stored in the
893   // previous call to vp9_save_coding_context.
894   vp9_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
895 
896   memcpy(cpi->nmvcosts[0], cc->nmvcosts[0], MV_VALS * sizeof(*cc->nmvcosts[0]));
897   memcpy(cpi->nmvcosts[1], cc->nmvcosts[1], MV_VALS * sizeof(*cc->nmvcosts[1]));
898   memcpy(cpi->nmvcosts_hp[0], cc->nmvcosts_hp[0],
899          MV_VALS * sizeof(*cc->nmvcosts_hp[0]));
900   memcpy(cpi->nmvcosts_hp[1], cc->nmvcosts_hp[1],
901          MV_VALS * sizeof(*cc->nmvcosts_hp[1]));
902 
903   vp9_copy(cm->seg.pred_probs, cc->segment_pred_probs);
904 
905   memcpy(cm->last_frame_seg_map, cpi->coding_context.last_frame_seg_map_copy,
906          (cm->mi_rows * cm->mi_cols));
907 
908   vp9_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
909   vp9_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);
910 
911   *cm->fc = cc->fc;
912 }
913 
914 static void configure_static_seg_features(VP9_COMP *cpi) {
915   VP9_COMMON *const cm = &cpi->common;
916   const RATE_CONTROL *const rc = &cpi->rc;
917   struct segmentation *const seg = &cm->seg;
918 
919   int high_q = (int)(rc->avg_q > 48.0);
920   int qi_delta;
921 
922   // Disable and clear down for KF
923   if (cm->frame_type == KEY_FRAME) {
924     // Clear down the global segmentation map
925     memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
926     seg->update_map = 0;
927     seg->update_data = 0;
928     cpi->static_mb_pct = 0;
929 
930     // Disable segmentation
931     vp9_disable_segmentation(seg);
932 
933     // Clear down the segment features.
934     vp9_clearall_segfeatures(seg);
935   } else if (cpi->refresh_alt_ref_frame) {
936     // If this is an alt ref frame
937     // Clear down the global segmentation map
938     memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
939     seg->update_map = 0;
940     seg->update_data = 0;
941     cpi->static_mb_pct = 0;
942 
943     // Disable segmentation and individual segment features by default
944     vp9_disable_segmentation(seg);
945     vp9_clearall_segfeatures(seg);
946 
947     // Scan frames from current to arf frame.
948     // This function re-enables segmentation if appropriate.
949     vp9_update_mbgraph_stats(cpi);
950 
951     // If segmentation was enabled set those features needed for the
952     // arf itself.
953     if (seg->enabled) {
954       seg->update_map = 1;
955       seg->update_data = 1;
956 
957       qi_delta =
958           vp9_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth);
959       vp9_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2);
960       vp9_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
961 
962       vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
963       vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
964 
965       // Where relevant assume segment data is delta data
966       seg->abs_delta = SEGMENT_DELTADATA;
967     }
968   } else if (seg->enabled) {
969     // All other frames if segmentation has been enabled
970 
971     // First normal frame in a valid gf or alt ref group
972     if (rc->frames_since_golden == 0) {
973       // Set up segment features for normal frames in an arf group
974       if (rc->source_alt_ref_active) {
975         seg->update_map = 0;
976         seg->update_data = 1;
977         seg->abs_delta = SEGMENT_DELTADATA;
978 
979         qi_delta =
980             vp9_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125, cm->bit_depth);
981         vp9_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2);
982         vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
983 
984         vp9_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
985         vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
986 
987         // Segment coding disabled for compred testing
988         if (high_q || (cpi->static_mb_pct == 100)) {
989           vp9_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
990           vp9_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
991           vp9_enable_segfeature(seg, 1, SEG_LVL_SKIP);
992         }
993       } else {
994         // Disable segmentation and clear down features if alt ref
995         // is not active for this group
996 
997         vp9_disable_segmentation(seg);
998 
999         memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
1000 
1001         seg->update_map = 0;
1002         seg->update_data = 0;
1003 
1004         vp9_clearall_segfeatures(seg);
1005       }
1006     } else if (rc->is_src_frame_alt_ref) {
1007       // Special case where we are coding over the top of a previous
1008       // alt ref frame.
1009       // Segment coding disabled for compred testing
1010 
1011       // Enable ref frame features for segment 0 as well
1012       vp9_enable_segfeature(seg, 0, SEG_LVL_REF_FRAME);
1013       vp9_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
1014 
1015       // All mbs should use ALTREF_FRAME
1016       vp9_clear_segdata(seg, 0, SEG_LVL_REF_FRAME);
1017       vp9_set_segdata(seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
1018       vp9_clear_segdata(seg, 1, SEG_LVL_REF_FRAME);
1019       vp9_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
1020 
1021       // Skip all MBs if high Q (0,0 mv and skip coeffs)
1022       if (high_q) {
1023         vp9_enable_segfeature(seg, 0, SEG_LVL_SKIP);
1024         vp9_enable_segfeature(seg, 1, SEG_LVL_SKIP);
1025       }
1026       // Enable data update
1027       seg->update_data = 1;
1028     } else {
1029       // All other frames.
1030 
1031       // No updates.. leave things as they are.
1032       seg->update_map = 0;
1033       seg->update_data = 0;
1034     }
1035   }
1036 }
1037 
1038 static void update_reference_segmentation_map(VP9_COMP *cpi) {
1039   VP9_COMMON *const cm = &cpi->common;
1040   MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible;
1041   uint8_t *cache_ptr = cm->last_frame_seg_map;
1042   int row, col;
1043 
1044   for (row = 0; row < cm->mi_rows; row++) {
1045     MODE_INFO **mi_8x8 = mi_8x8_ptr;
1046     uint8_t *cache = cache_ptr;
1047     for (col = 0; col < cm->mi_cols; col++, mi_8x8++, cache++)
1048       cache[0] = mi_8x8[0]->segment_id;
1049     mi_8x8_ptr += cm->mi_stride;
1050     cache_ptr += cm->mi_cols;
1051   }
1052 }
1053 
1054 static void alloc_raw_frame_buffers(VP9_COMP *cpi) {
1055   VP9_COMMON *cm = &cpi->common;
1056   const VP9EncoderConfig *oxcf = &cpi->oxcf;
1057 
1058   if (!cpi->lookahead)
1059     cpi->lookahead = vp9_lookahead_init(oxcf->width, oxcf->height,
1060                                         cm->subsampling_x, cm->subsampling_y,
1061 #if CONFIG_VP9_HIGHBITDEPTH
1062                                         cm->use_highbitdepth,
1063 #endif
1064                                         oxcf->lag_in_frames);
1065   if (!cpi->lookahead)
1066     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
1067                        "Failed to allocate lag buffers");
1068 
1069   // TODO(agrange) Check if ARF is enabled and skip allocation if not.
1070   if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, oxcf->width, oxcf->height,
1071                                cm->subsampling_x, cm->subsampling_y,
1072 #if CONFIG_VP9_HIGHBITDEPTH
1073                                cm->use_highbitdepth,
1074 #endif
1075                                VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
1076                                NULL, NULL, NULL))
1077     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
1078                        "Failed to allocate altref buffer");
1079 }
1080 
1081 static void alloc_util_frame_buffers(VP9_COMP *cpi) {
1082   VP9_COMMON *const cm = &cpi->common;
1083   if (vpx_realloc_frame_buffer(&cpi->last_frame_uf, cm->width, cm->height,
1084                                cm->subsampling_x, cm->subsampling_y,
1085 #if CONFIG_VP9_HIGHBITDEPTH
1086                                cm->use_highbitdepth,
1087 #endif
1088                                VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
1089                                NULL, NULL, NULL))
1090     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
1091                        "Failed to allocate last frame buffer");
1092 
1093   if (vpx_realloc_frame_buffer(&cpi->scaled_source, cm->width, cm->height,
1094                                cm->subsampling_x, cm->subsampling_y,
1095 #if CONFIG_VP9_HIGHBITDEPTH
1096                                cm->use_highbitdepth,
1097 #endif
1098                                VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
1099                                NULL, NULL, NULL))
1100     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
1101                        "Failed to allocate scaled source buffer");
1102 
1103   // For 1 pass cbr: allocate scaled_frame that may be used as an intermediate
1104   // buffer for a 2 stage down-sampling: two stages of 1:2 down-sampling for a
1105   // target of 1/4x1/4.
1106   if (is_one_pass_cbr_svc(cpi) && !cpi->svc.scaled_temp_is_alloc) {
1107     cpi->svc.scaled_temp_is_alloc = 1;
1108     if (vpx_realloc_frame_buffer(
1109             &cpi->svc.scaled_temp, cm->width >> 1, cm->height >> 1,
1110             cm->subsampling_x, cm->subsampling_y,
1111 #if CONFIG_VP9_HIGHBITDEPTH
1112             cm->use_highbitdepth,
1113 #endif
1114             VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, NULL, NULL, NULL))
1115       vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1116                          "Failed to allocate scaled_frame for svc ");
1117   }
1118 
1119   if (vpx_realloc_frame_buffer(&cpi->scaled_last_source, cm->width, cm->height,
1120                                cm->subsampling_x, cm->subsampling_y,
1121 #if CONFIG_VP9_HIGHBITDEPTH
1122                                cm->use_highbitdepth,
1123 #endif
1124                                VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
1125                                NULL, NULL, NULL))
1126     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
1127                        "Failed to allocate scaled last source buffer");
1128 #ifdef ENABLE_KF_DENOISE
1129   if (vpx_realloc_frame_buffer(&cpi->raw_unscaled_source, cm->width, cm->height,
1130                                cm->subsampling_x, cm->subsampling_y,
1131 #if CONFIG_VP9_HIGHBITDEPTH
1132                                cm->use_highbitdepth,
1133 #endif
1134                                VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
1135                                NULL, NULL, NULL))
1136     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
1137                        "Failed to allocate unscaled raw source frame buffer");
1138 
1139   if (vpx_realloc_frame_buffer(&cpi->raw_scaled_source, cm->width, cm->height,
1140                                cm->subsampling_x, cm->subsampling_y,
1141 #if CONFIG_VP9_HIGHBITDEPTH
1142                                cm->use_highbitdepth,
1143 #endif
1144                                VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
1145                                NULL, NULL, NULL))
1146     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
1147                        "Failed to allocate scaled raw source frame buffer");
1148 #endif
1149 }
1150 
1151 static int alloc_context_buffers_ext(VP9_COMP *cpi) {
1152   VP9_COMMON *cm = &cpi->common;
1153   int mi_size = cm->mi_cols * cm->mi_rows;
1154 
1155   cpi->mbmi_ext_base = vpx_calloc(mi_size, sizeof(*cpi->mbmi_ext_base));
1156   if (!cpi->mbmi_ext_base) return 1;
1157 
1158   return 0;
1159 }
1160 
1161 static void alloc_compressor_data(VP9_COMP *cpi) {
1162   VP9_COMMON *cm = &cpi->common;
1163   int sb_rows;
1164 
1165   vp9_alloc_context_buffers(cm, cm->width, cm->height);
1166 
1167   alloc_context_buffers_ext(cpi);
1168 
1169   vpx_free(cpi->tile_tok[0][0]);
1170 
1171   {
1172     unsigned int tokens = get_token_alloc(cm->mb_rows, cm->mb_cols);
1173     CHECK_MEM_ERROR(cm, cpi->tile_tok[0][0],
1174                     vpx_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
1175   }
1176 
1177   sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
1178   vpx_free(cpi->tplist[0][0]);
1179   CHECK_MEM_ERROR(
1180       cm, cpi->tplist[0][0],
1181       vpx_calloc(sb_rows * 4 * (1 << 6), sizeof(*cpi->tplist[0][0])));
1182 
1183   vp9_setup_pc_tree(&cpi->common, &cpi->td);
1184 }
1185 
1186 void vp9_new_framerate(VP9_COMP *cpi, double framerate) {
1187   cpi->framerate = framerate < 0.1 ? 30 : framerate;
1188   vp9_rc_update_framerate(cpi);
1189 }
1190 
1191 static void set_tile_limits(VP9_COMP *cpi) {
1192   VP9_COMMON *const cm = &cpi->common;
1193 
1194   int min_log2_tile_cols, max_log2_tile_cols;
1195   vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
1196 
1197   if (is_two_pass_svc(cpi) && (cpi->svc.encode_empty_frame_state == ENCODING ||
1198                                cpi->svc.number_spatial_layers > 1)) {
1199     cm->log2_tile_cols = 0;
1200     cm->log2_tile_rows = 0;
1201   } else {
1202     cm->log2_tile_cols =
1203         clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
1204     cm->log2_tile_rows = cpi->oxcf.tile_rows;
1205   }
1206 }
1207 
1208 static void update_frame_size(VP9_COMP *cpi) {
1209   VP9_COMMON *const cm = &cpi->common;
1210   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
1211 
1212   vp9_set_mb_mi(cm, cm->width, cm->height);
1213   vp9_init_context_buffers(cm);
1214   vp9_init_macroblockd(cm, xd, NULL);
1215   cpi->td.mb.mbmi_ext_base = cpi->mbmi_ext_base;
1216   memset(cpi->mbmi_ext_base, 0,
1217          cm->mi_rows * cm->mi_cols * sizeof(*cpi->mbmi_ext_base));
1218 
1219   set_tile_limits(cpi);
1220 
1221   if (is_two_pass_svc(cpi)) {
1222     if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, cm->width, cm->height,
1223                                  cm->subsampling_x, cm->subsampling_y,
1224 #if CONFIG_VP9_HIGHBITDEPTH
1225                                  cm->use_highbitdepth,
1226 #endif
1227                                  VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
1228                                  NULL, NULL, NULL))
1229       vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
1230                          "Failed to reallocate alt_ref_buffer");
1231   }
1232 }
1233 
1234 static void init_buffer_indices(VP9_COMP *cpi) {
1235   cpi->lst_fb_idx = 0;
1236   cpi->gld_fb_idx = 1;
1237   cpi->alt_fb_idx = 2;
1238 }
1239 
1240 static void init_level_constraint(LevelConstraint *lc) {
1241   lc->level_index = -1;
1242   lc->max_cpb_size = INT_MAX;
1243   lc->max_frame_size = INT_MAX;
1244   lc->rc_config_updated = 0;
1245   lc->fail_flag = 0;
1246 }
1247 
1248 static void set_level_constraint(LevelConstraint *ls, int8_t level_index) {
1249   vpx_clear_system_state();
1250   ls->level_index = level_index;
1251   if (level_index >= 0) {
1252     ls->max_cpb_size = vp9_level_defs[level_index].max_cpb_size * (double)1000;
1253   }
1254 }
1255 
1256 static void init_config(struct VP9_COMP *cpi, VP9EncoderConfig *oxcf) {
1257   VP9_COMMON *const cm = &cpi->common;
1258 
1259   cpi->oxcf = *oxcf;
1260   cpi->framerate = oxcf->init_framerate;
1261   cm->profile = oxcf->profile;
1262   cm->bit_depth = oxcf->bit_depth;
1263 #if CONFIG_VP9_HIGHBITDEPTH
1264   cm->use_highbitdepth = oxcf->use_highbitdepth;
1265 #endif
1266   cm->color_space = oxcf->color_space;
1267   cm->color_range = oxcf->color_range;
1268 
1269   cpi->target_level = oxcf->target_level;
1270   cpi->keep_level_stats = oxcf->target_level != LEVEL_MAX;
1271   set_level_constraint(&cpi->level_constraint,
1272                        get_level_index(cpi->target_level));
1273 
1274   cm->width = oxcf->width;
1275   cm->height = oxcf->height;
1276   alloc_compressor_data(cpi);
1277 
1278   cpi->svc.temporal_layering_mode = oxcf->temporal_layering_mode;
1279 
1280   // Single thread case: use counts in common.
1281   cpi->td.counts = &cm->counts;
1282 
1283   // Spatial scalability.
1284   cpi->svc.number_spatial_layers = oxcf->ss_number_layers;
1285   // Temporal scalability.
1286   cpi->svc.number_temporal_layers = oxcf->ts_number_layers;
1287 
1288   if ((cpi->svc.number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) ||
1289       ((cpi->svc.number_temporal_layers > 1 ||
1290         cpi->svc.number_spatial_layers > 1) &&
1291        cpi->oxcf.pass != 1)) {
1292     vp9_init_layer_context(cpi);
1293   }
1294 
1295   // change includes all joint functionality
1296   vp9_change_config(cpi, oxcf);
1297 
1298   cpi->static_mb_pct = 0;
1299   cpi->ref_frame_flags = 0;
1300 
1301   init_buffer_indices(cpi);
1302 
1303   vp9_noise_estimate_init(&cpi->noise_estimate, cm->width, cm->height);
1304 }
1305 
1306 static void set_rc_buffer_sizes(RATE_CONTROL *rc,
1307                                 const VP9EncoderConfig *oxcf) {
1308   const int64_t bandwidth = oxcf->target_bandwidth;
1309   const int64_t starting = oxcf->starting_buffer_level_ms;
1310   const int64_t optimal = oxcf->optimal_buffer_level_ms;
1311   const int64_t maximum = oxcf->maximum_buffer_size_ms;
1312 
1313   rc->starting_buffer_level = starting * bandwidth / 1000;
1314   rc->optimal_buffer_level =
1315       (optimal == 0) ? bandwidth / 8 : optimal * bandwidth / 1000;
1316   rc->maximum_buffer_size =
1317       (maximum == 0) ? bandwidth / 8 : maximum * bandwidth / 1000;
1318 }
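// Worked example (editorial, values assumed; target_bandwidth taken to be in
// bits per second): with target_bandwidth = 1000000 and
// starting_buffer_level_ms = 4000, starting_buffer_level = 4000 * 1000000 /
// 1000 = 4000000 bits; an optimal or maximum value of 0 defaults to
// bandwidth / 8 = 125000.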
1319 
1320 #if CONFIG_VP9_HIGHBITDEPTH
1321 #define HIGHBD_BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \
1322   cpi->fn_ptr[BT].sdf = SDF;                                           \
1323   cpi->fn_ptr[BT].sdaf = SDAF;                                         \
1324   cpi->fn_ptr[BT].vf = VF;                                             \
1325   cpi->fn_ptr[BT].svf = SVF;                                           \
1326   cpi->fn_ptr[BT].svaf = SVAF;                                         \
1327   cpi->fn_ptr[BT].sdx3f = SDX3F;                                       \
1328   cpi->fn_ptr[BT].sdx8f = SDX8F;                                       \
1329   cpi->fn_ptr[BT].sdx4df = SDX4DF;
1330 
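// Editorial note: the wrappers generated by the macros below rescale
// high-bitdepth SAD results back to an 8-bit scale -- per-pixel differences
// can be up to 4x larger at 10 bits and 16x larger at 12 bits, hence the
// ">> 2" and ">> 4" in the _bits10 and _bits12 variants.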
1331 #define MAKE_BFP_SAD_WRAPPER(fnname)                                           \
1332   static unsigned int fnname##_bits8(const uint8_t *src_ptr,                   \
1333                                      int source_stride,                        \
1334                                      const uint8_t *ref_ptr, int ref_stride) { \
1335     return fnname(src_ptr, source_stride, ref_ptr, ref_stride);                \
1336   }                                                                            \
1337   static unsigned int fnname##_bits10(                                         \
1338       const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
1339       int ref_stride) {                                                        \
1340     return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 2;           \
1341   }                                                                            \
1342   static unsigned int fnname##_bits12(                                         \
1343       const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
1344       int ref_stride) {                                                        \
1345     return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 4;           \
1346   }
1347 
1348 #define MAKE_BFP_SADAVG_WRAPPER(fnname)                                        \
1349   static unsigned int fnname##_bits8(                                          \
1350       const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
1351       int ref_stride, const uint8_t *second_pred) {                            \
1352     return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred);   \
1353   }                                                                            \
1354   static unsigned int fnname##_bits10(                                         \
1355       const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
1356       int ref_stride, const uint8_t *second_pred) {                            \
1357     return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) >> \
1358            2;                                                                  \
1359   }                                                                            \
1360   static unsigned int fnname##_bits12(                                         \
1361       const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
1362       int ref_stride, const uint8_t *second_pred) {                            \
1363     return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) >> \
1364            4;                                                                  \
1365   }
1366 
1367 #define MAKE_BFP_SAD3_WRAPPER(fnname)                                    \
1368   static void fnname##_bits8(const uint8_t *src_ptr, int source_stride,  \
1369                              const uint8_t *ref_ptr, int ref_stride,     \
1370                              unsigned int *sad_array) {                  \
1371     fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
1372   }                                                                      \
1373   static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \
1374                               const uint8_t *ref_ptr, int ref_stride,    \
1375                               unsigned int *sad_array) {                 \
1376     int i;                                                               \
1377     fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
1378     for (i = 0; i < 3; i++) sad_array[i] >>= 2;                          \
1379   }                                                                      \
1380   static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \
1381                               const uint8_t *ref_ptr, int ref_stride,    \
1382                               unsigned int *sad_array) {                 \
1383     int i;                                                               \
1384     fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
1385     for (i = 0; i < 3; i++) sad_array[i] >>= 4;                          \
1386   }
1387 
1388 #define MAKE_BFP_SAD8_WRAPPER(fnname)                                    \
1389   static void fnname##_bits8(const uint8_t *src_ptr, int source_stride,  \
1390                              const uint8_t *ref_ptr, int ref_stride,     \
1391                              unsigned int *sad_array) {                  \
1392     fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
1393   }                                                                      \
1394   static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \
1395                               const uint8_t *ref_ptr, int ref_stride,    \
1396                               unsigned int *sad_array) {                 \
1397     int i;                                                               \
1398     fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
1399     for (i = 0; i < 8; i++) sad_array[i] >>= 2;                          \
1400   }                                                                      \
1401   static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \
1402                               const uint8_t *ref_ptr, int ref_stride,    \
1403                               unsigned int *sad_array) {                 \
1404     int i;                                                               \
1405     fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
1406     for (i = 0; i < 8; i++) sad_array[i] >>= 4;                          \
1407   }
1408 #define MAKE_BFP_SAD4D_WRAPPER(fnname)                                        \
1409   static void fnname##_bits8(const uint8_t *src_ptr, int source_stride,       \
1410                              const uint8_t *const ref_ptr[], int ref_stride,  \
1411                              unsigned int *sad_array) {                       \
1412     fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);           \
1413   }                                                                           \
1414   static void fnname##_bits10(const uint8_t *src_ptr, int source_stride,      \
1415                               const uint8_t *const ref_ptr[], int ref_stride, \
1416                               unsigned int *sad_array) {                      \
1417     int i;                                                                    \
1418     fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);           \
1419     for (i = 0; i < 4; i++) sad_array[i] >>= 2;                               \
1420   }                                                                           \
1421   static void fnname##_bits12(const uint8_t *src_ptr, int source_stride,      \
1422                               const uint8_t *const ref_ptr[], int ref_stride, \
1423                               unsigned int *sad_array) {                      \
1424     int i;                                                                    \
1425     fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);           \
1426     for (i = 0; i < 4; i++) sad_array[i] >>= 4;                               \
1427   }
1428 
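// Illustration (not part of the build): the wrapper macros above presumably
// normalise high-bit-depth SAD values back to an 8-bit scale, since 10-bit
// pixels make SADs roughly 4x larger and 12-bit pixels roughly 16x larger
// than the 8-bit case. For example, MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x16)
// generates a 10-bit variant equivalent to:
//
//   static unsigned int vpx_highbd_sad32x16_bits10(
//       const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,
//       int ref_stride) {
//     // Right-shift by 2 to rescale the 10-bit SAD to 8-bit magnitude.
//     return vpx_highbd_sad32x16(src_ptr, source_stride, ref_ptr,
//                                ref_stride) >> 2;
//   }
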
1429 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x16)
1430 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x16_avg)
1431 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x16x4d)
1432 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x32)
1433 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x32_avg)
1434 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x32x4d)
1435 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x32)
1436 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x32_avg)
1437 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x32x4d)
1438 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x64)
1439 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x64_avg)
1440 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x64x4d)
1441 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x32)
1442 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x32_avg)
1443 MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad32x32x3)
1444 MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad32x32x8)
1445 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x32x4d)
1446 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x64)
1447 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x64_avg)
1448 MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad64x64x3)
1449 MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad64x64x8)
1450 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x64x4d)
1451 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x16)
1452 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x16_avg)
1453 MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad16x16x3)
1454 MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad16x16x8)
1455 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x16x4d)
1456 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x8)
1457 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x8_avg)
1458 MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad16x8x3)
1459 MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad16x8x8)
1460 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x8x4d)
1461 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x16)
1462 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x16_avg)
1463 MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad8x16x3)
1464 MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad8x16x8)
1465 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x16x4d)
1466 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x8)
1467 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x8_avg)
1468 MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad8x8x3)
1469 MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad8x8x8)
1470 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x8x4d)
1471 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x4)
1472 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x4_avg)
1473 MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad8x4x8)
1474 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x4x4d)
1475 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad4x8)
1476 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad4x8_avg)
1477 MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad4x8x8)
1478 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x8x4d)
1479 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad4x4)
1480 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad4x4_avg)
1481 MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad4x4x3)
1482 MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad4x4x8)
1483 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x4x4d)
1484 
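// Note on HIGHBD_BFP (an explanatory assumption, not original commentary):
// the macro is presumably defined earlier in this file and, analogously to
// the BFP macro used in vp9_create_compressor() below, fills the
// cpi->fn_ptr[] entry for the given block size with the bit-depth-specific
// SAD, SAD-avg, variance and sub-pixel variance wrappers generated above.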
1485 static void highbd_set_var_fns(VP9_COMP *const cpi) {
1486   VP9_COMMON *const cm = &cpi->common;
1487   if (cm->use_highbitdepth) {
1488     switch (cm->bit_depth) {
1489       case VPX_BITS_8:
1490         HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits8,
1491                    vpx_highbd_sad32x16_avg_bits8, vpx_highbd_8_variance32x16,
1492                    vpx_highbd_8_sub_pixel_variance32x16,
1493                    vpx_highbd_8_sub_pixel_avg_variance32x16, NULL, NULL,
1494                    vpx_highbd_sad32x16x4d_bits8)
1495 
1496         HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits8,
1497                    vpx_highbd_sad16x32_avg_bits8, vpx_highbd_8_variance16x32,
1498                    vpx_highbd_8_sub_pixel_variance16x32,
1499                    vpx_highbd_8_sub_pixel_avg_variance16x32, NULL, NULL,
1500                    vpx_highbd_sad16x32x4d_bits8)
1501 
1502         HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits8,
1503                    vpx_highbd_sad64x32_avg_bits8, vpx_highbd_8_variance64x32,
1504                    vpx_highbd_8_sub_pixel_variance64x32,
1505                    vpx_highbd_8_sub_pixel_avg_variance64x32, NULL, NULL,
1506                    vpx_highbd_sad64x32x4d_bits8)
1507 
1508         HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits8,
1509                    vpx_highbd_sad32x64_avg_bits8, vpx_highbd_8_variance32x64,
1510                    vpx_highbd_8_sub_pixel_variance32x64,
1511                    vpx_highbd_8_sub_pixel_avg_variance32x64, NULL, NULL,
1512                    vpx_highbd_sad32x64x4d_bits8)
1513 
1514         HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits8,
1515                    vpx_highbd_sad32x32_avg_bits8, vpx_highbd_8_variance32x32,
1516                    vpx_highbd_8_sub_pixel_variance32x32,
1517                    vpx_highbd_8_sub_pixel_avg_variance32x32,
1518                    vpx_highbd_sad32x32x3_bits8, vpx_highbd_sad32x32x8_bits8,
1519                    vpx_highbd_sad32x32x4d_bits8)
1520 
1521         HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits8,
1522                    vpx_highbd_sad64x64_avg_bits8, vpx_highbd_8_variance64x64,
1523                    vpx_highbd_8_sub_pixel_variance64x64,
1524                    vpx_highbd_8_sub_pixel_avg_variance64x64,
1525                    vpx_highbd_sad64x64x3_bits8, vpx_highbd_sad64x64x8_bits8,
1526                    vpx_highbd_sad64x64x4d_bits8)
1527 
1528         HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits8,
1529                    vpx_highbd_sad16x16_avg_bits8, vpx_highbd_8_variance16x16,
1530                    vpx_highbd_8_sub_pixel_variance16x16,
1531                    vpx_highbd_8_sub_pixel_avg_variance16x16,
1532                    vpx_highbd_sad16x16x3_bits8, vpx_highbd_sad16x16x8_bits8,
1533                    vpx_highbd_sad16x16x4d_bits8)
1534 
1535         HIGHBD_BFP(
1536             BLOCK_16X8, vpx_highbd_sad16x8_bits8, vpx_highbd_sad16x8_avg_bits8,
1537             vpx_highbd_8_variance16x8, vpx_highbd_8_sub_pixel_variance16x8,
1538             vpx_highbd_8_sub_pixel_avg_variance16x8, vpx_highbd_sad16x8x3_bits8,
1539             vpx_highbd_sad16x8x8_bits8, vpx_highbd_sad16x8x4d_bits8)
1540 
1541         HIGHBD_BFP(
1542             BLOCK_8X16, vpx_highbd_sad8x16_bits8, vpx_highbd_sad8x16_avg_bits8,
1543             vpx_highbd_8_variance8x16, vpx_highbd_8_sub_pixel_variance8x16,
1544             vpx_highbd_8_sub_pixel_avg_variance8x16, vpx_highbd_sad8x16x3_bits8,
1545             vpx_highbd_sad8x16x8_bits8, vpx_highbd_sad8x16x4d_bits8)
1546 
1547         HIGHBD_BFP(
1548             BLOCK_8X8, vpx_highbd_sad8x8_bits8, vpx_highbd_sad8x8_avg_bits8,
1549             vpx_highbd_8_variance8x8, vpx_highbd_8_sub_pixel_variance8x8,
1550             vpx_highbd_8_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits8,
1551             vpx_highbd_sad8x8x8_bits8, vpx_highbd_sad8x8x4d_bits8)
1552 
1553         HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits8,
1554                    vpx_highbd_sad8x4_avg_bits8, vpx_highbd_8_variance8x4,
1555                    vpx_highbd_8_sub_pixel_variance8x4,
1556                    vpx_highbd_8_sub_pixel_avg_variance8x4, NULL,
1557                    vpx_highbd_sad8x4x8_bits8, vpx_highbd_sad8x4x4d_bits8)
1558 
1559         HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits8,
1560                    vpx_highbd_sad4x8_avg_bits8, vpx_highbd_8_variance4x8,
1561                    vpx_highbd_8_sub_pixel_variance4x8,
1562                    vpx_highbd_8_sub_pixel_avg_variance4x8, NULL,
1563                    vpx_highbd_sad4x8x8_bits8, vpx_highbd_sad4x8x4d_bits8)
1564 
1565         HIGHBD_BFP(
1566             BLOCK_4X4, vpx_highbd_sad4x4_bits8, vpx_highbd_sad4x4_avg_bits8,
1567             vpx_highbd_8_variance4x4, vpx_highbd_8_sub_pixel_variance4x4,
1568             vpx_highbd_8_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits8,
1569             vpx_highbd_sad4x4x8_bits8, vpx_highbd_sad4x4x4d_bits8)
1570         break;
1571 
1572       case VPX_BITS_10:
1573         HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits10,
1574                    vpx_highbd_sad32x16_avg_bits10, vpx_highbd_10_variance32x16,
1575                    vpx_highbd_10_sub_pixel_variance32x16,
1576                    vpx_highbd_10_sub_pixel_avg_variance32x16, NULL, NULL,
1577                    vpx_highbd_sad32x16x4d_bits10)
1578 
1579         HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits10,
1580                    vpx_highbd_sad16x32_avg_bits10, vpx_highbd_10_variance16x32,
1581                    vpx_highbd_10_sub_pixel_variance16x32,
1582                    vpx_highbd_10_sub_pixel_avg_variance16x32, NULL, NULL,
1583                    vpx_highbd_sad16x32x4d_bits10)
1584 
1585         HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits10,
1586                    vpx_highbd_sad64x32_avg_bits10, vpx_highbd_10_variance64x32,
1587                    vpx_highbd_10_sub_pixel_variance64x32,
1588                    vpx_highbd_10_sub_pixel_avg_variance64x32, NULL, NULL,
1589                    vpx_highbd_sad64x32x4d_bits10)
1590 
1591         HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits10,
1592                    vpx_highbd_sad32x64_avg_bits10, vpx_highbd_10_variance32x64,
1593                    vpx_highbd_10_sub_pixel_variance32x64,
1594                    vpx_highbd_10_sub_pixel_avg_variance32x64, NULL, NULL,
1595                    vpx_highbd_sad32x64x4d_bits10)
1596 
1597         HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits10,
1598                    vpx_highbd_sad32x32_avg_bits10, vpx_highbd_10_variance32x32,
1599                    vpx_highbd_10_sub_pixel_variance32x32,
1600                    vpx_highbd_10_sub_pixel_avg_variance32x32,
1601                    vpx_highbd_sad32x32x3_bits10, vpx_highbd_sad32x32x8_bits10,
1602                    vpx_highbd_sad32x32x4d_bits10)
1603 
1604         HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits10,
1605                    vpx_highbd_sad64x64_avg_bits10, vpx_highbd_10_variance64x64,
1606                    vpx_highbd_10_sub_pixel_variance64x64,
1607                    vpx_highbd_10_sub_pixel_avg_variance64x64,
1608                    vpx_highbd_sad64x64x3_bits10, vpx_highbd_sad64x64x8_bits10,
1609                    vpx_highbd_sad64x64x4d_bits10)
1610 
1611         HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits10,
1612                    vpx_highbd_sad16x16_avg_bits10, vpx_highbd_10_variance16x16,
1613                    vpx_highbd_10_sub_pixel_variance16x16,
1614                    vpx_highbd_10_sub_pixel_avg_variance16x16,
1615                    vpx_highbd_sad16x16x3_bits10, vpx_highbd_sad16x16x8_bits10,
1616                    vpx_highbd_sad16x16x4d_bits10)
1617 
1618         HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits10,
1619                    vpx_highbd_sad16x8_avg_bits10, vpx_highbd_10_variance16x8,
1620                    vpx_highbd_10_sub_pixel_variance16x8,
1621                    vpx_highbd_10_sub_pixel_avg_variance16x8,
1622                    vpx_highbd_sad16x8x3_bits10, vpx_highbd_sad16x8x8_bits10,
1623                    vpx_highbd_sad16x8x4d_bits10)
1624 
1625         HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits10,
1626                    vpx_highbd_sad8x16_avg_bits10, vpx_highbd_10_variance8x16,
1627                    vpx_highbd_10_sub_pixel_variance8x16,
1628                    vpx_highbd_10_sub_pixel_avg_variance8x16,
1629                    vpx_highbd_sad8x16x3_bits10, vpx_highbd_sad8x16x8_bits10,
1630                    vpx_highbd_sad8x16x4d_bits10)
1631 
1632         HIGHBD_BFP(
1633             BLOCK_8X8, vpx_highbd_sad8x8_bits10, vpx_highbd_sad8x8_avg_bits10,
1634             vpx_highbd_10_variance8x8, vpx_highbd_10_sub_pixel_variance8x8,
1635             vpx_highbd_10_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits10,
1636             vpx_highbd_sad8x8x8_bits10, vpx_highbd_sad8x8x4d_bits10)
1637 
1638         HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits10,
1639                    vpx_highbd_sad8x4_avg_bits10, vpx_highbd_10_variance8x4,
1640                    vpx_highbd_10_sub_pixel_variance8x4,
1641                    vpx_highbd_10_sub_pixel_avg_variance8x4, NULL,
1642                    vpx_highbd_sad8x4x8_bits10, vpx_highbd_sad8x4x4d_bits10)
1643 
1644         HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits10,
1645                    vpx_highbd_sad4x8_avg_bits10, vpx_highbd_10_variance4x8,
1646                    vpx_highbd_10_sub_pixel_variance4x8,
1647                    vpx_highbd_10_sub_pixel_avg_variance4x8, NULL,
1648                    vpx_highbd_sad4x8x8_bits10, vpx_highbd_sad4x8x4d_bits10)
1649 
1650         HIGHBD_BFP(
1651             BLOCK_4X4, vpx_highbd_sad4x4_bits10, vpx_highbd_sad4x4_avg_bits10,
1652             vpx_highbd_10_variance4x4, vpx_highbd_10_sub_pixel_variance4x4,
1653             vpx_highbd_10_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits10,
1654             vpx_highbd_sad4x4x8_bits10, vpx_highbd_sad4x4x4d_bits10)
1655         break;
1656 
1657       case VPX_BITS_12:
1658         HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits12,
1659                    vpx_highbd_sad32x16_avg_bits12, vpx_highbd_12_variance32x16,
1660                    vpx_highbd_12_sub_pixel_variance32x16,
1661                    vpx_highbd_12_sub_pixel_avg_variance32x16, NULL, NULL,
1662                    vpx_highbd_sad32x16x4d_bits12)
1663 
1664         HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits12,
1665                    vpx_highbd_sad16x32_avg_bits12, vpx_highbd_12_variance16x32,
1666                    vpx_highbd_12_sub_pixel_variance16x32,
1667                    vpx_highbd_12_sub_pixel_avg_variance16x32, NULL, NULL,
1668                    vpx_highbd_sad16x32x4d_bits12)
1669 
1670         HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits12,
1671                    vpx_highbd_sad64x32_avg_bits12, vpx_highbd_12_variance64x32,
1672                    vpx_highbd_12_sub_pixel_variance64x32,
1673                    vpx_highbd_12_sub_pixel_avg_variance64x32, NULL, NULL,
1674                    vpx_highbd_sad64x32x4d_bits12)
1675 
1676         HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits12,
1677                    vpx_highbd_sad32x64_avg_bits12, vpx_highbd_12_variance32x64,
1678                    vpx_highbd_12_sub_pixel_variance32x64,
1679                    vpx_highbd_12_sub_pixel_avg_variance32x64, NULL, NULL,
1680                    vpx_highbd_sad32x64x4d_bits12)
1681 
1682         HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits12,
1683                    vpx_highbd_sad32x32_avg_bits12, vpx_highbd_12_variance32x32,
1684                    vpx_highbd_12_sub_pixel_variance32x32,
1685                    vpx_highbd_12_sub_pixel_avg_variance32x32,
1686                    vpx_highbd_sad32x32x3_bits12, vpx_highbd_sad32x32x8_bits12,
1687                    vpx_highbd_sad32x32x4d_bits12)
1688 
1689         HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits12,
1690                    vpx_highbd_sad64x64_avg_bits12, vpx_highbd_12_variance64x64,
1691                    vpx_highbd_12_sub_pixel_variance64x64,
1692                    vpx_highbd_12_sub_pixel_avg_variance64x64,
1693                    vpx_highbd_sad64x64x3_bits12, vpx_highbd_sad64x64x8_bits12,
1694                    vpx_highbd_sad64x64x4d_bits12)
1695 
1696         HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits12,
1697                    vpx_highbd_sad16x16_avg_bits12, vpx_highbd_12_variance16x16,
1698                    vpx_highbd_12_sub_pixel_variance16x16,
1699                    vpx_highbd_12_sub_pixel_avg_variance16x16,
1700                    vpx_highbd_sad16x16x3_bits12, vpx_highbd_sad16x16x8_bits12,
1701                    vpx_highbd_sad16x16x4d_bits12)
1702 
1703         HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits12,
1704                    vpx_highbd_sad16x8_avg_bits12, vpx_highbd_12_variance16x8,
1705                    vpx_highbd_12_sub_pixel_variance16x8,
1706                    vpx_highbd_12_sub_pixel_avg_variance16x8,
1707                    vpx_highbd_sad16x8x3_bits12, vpx_highbd_sad16x8x8_bits12,
1708                    vpx_highbd_sad16x8x4d_bits12)
1709 
1710         HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits12,
1711                    vpx_highbd_sad8x16_avg_bits12, vpx_highbd_12_variance8x16,
1712                    vpx_highbd_12_sub_pixel_variance8x16,
1713                    vpx_highbd_12_sub_pixel_avg_variance8x16,
1714                    vpx_highbd_sad8x16x3_bits12, vpx_highbd_sad8x16x8_bits12,
1715                    vpx_highbd_sad8x16x4d_bits12)
1716 
1717         HIGHBD_BFP(
1718             BLOCK_8X8, vpx_highbd_sad8x8_bits12, vpx_highbd_sad8x8_avg_bits12,
1719             vpx_highbd_12_variance8x8, vpx_highbd_12_sub_pixel_variance8x8,
1720             vpx_highbd_12_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits12,
1721             vpx_highbd_sad8x8x8_bits12, vpx_highbd_sad8x8x4d_bits12)
1722 
1723         HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits12,
1724                    vpx_highbd_sad8x4_avg_bits12, vpx_highbd_12_variance8x4,
1725                    vpx_highbd_12_sub_pixel_variance8x4,
1726                    vpx_highbd_12_sub_pixel_avg_variance8x4, NULL,
1727                    vpx_highbd_sad8x4x8_bits12, vpx_highbd_sad8x4x4d_bits12)
1728 
1729         HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits12,
1730                    vpx_highbd_sad4x8_avg_bits12, vpx_highbd_12_variance4x8,
1731                    vpx_highbd_12_sub_pixel_variance4x8,
1732                    vpx_highbd_12_sub_pixel_avg_variance4x8, NULL,
1733                    vpx_highbd_sad4x8x8_bits12, vpx_highbd_sad4x8x4d_bits12)
1734 
1735         HIGHBD_BFP(
1736             BLOCK_4X4, vpx_highbd_sad4x4_bits12, vpx_highbd_sad4x4_avg_bits12,
1737             vpx_highbd_12_variance4x4, vpx_highbd_12_sub_pixel_variance4x4,
1738             vpx_highbd_12_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits12,
1739             vpx_highbd_sad4x4x8_bits12, vpx_highbd_sad4x4x4d_bits12)
1740         break;
1741 
1742       default:
1743         assert(0 &&
1744                "cm->bit_depth should be VPX_BITS_8, "
1745                "VPX_BITS_10 or VPX_BITS_12");
1746     }
1747   }
1748 }
1749 #endif  // CONFIG_VP9_HIGHBITDEPTH
1750 
1751 static void realloc_segmentation_maps(VP9_COMP *cpi) {
1752   VP9_COMMON *const cm = &cpi->common;
1753 
1754   // Create the encoder segmentation map and set all entries to 0
1755   vpx_free(cpi->segmentation_map);
1756   CHECK_MEM_ERROR(cm, cpi->segmentation_map,
1757                   vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
1758 
1759   // Create a map used for cyclic background refresh.
1760   if (cpi->cyclic_refresh) vp9_cyclic_refresh_free(cpi->cyclic_refresh);
1761   CHECK_MEM_ERROR(cm, cpi->cyclic_refresh,
1762                   vp9_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols));
1763 
1764   // Create a map used to mark inactive areas.
1765   vpx_free(cpi->active_map.map);
1766   CHECK_MEM_ERROR(cm, cpi->active_map.map,
1767                   vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
1768 
1769   // And a placeholder structure for the coding context,
1770   // for use if we want to save and restore it.
1771   vpx_free(cpi->coding_context.last_frame_seg_map_copy);
1772   CHECK_MEM_ERROR(cm, cpi->coding_context.last_frame_seg_map_copy,
1773                   vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
1774 }
1775 
1776 static void alloc_copy_partition_data(VP9_COMP *cpi) {
1777   VP9_COMMON *const cm = &cpi->common;
1778   if (cpi->prev_partition == NULL) {
1779     CHECK_MEM_ERROR(cm, cpi->prev_partition,
1780                     (BLOCK_SIZE *)vpx_calloc(cm->mi_stride * cm->mi_rows,
1781                                              sizeof(*cpi->prev_partition)));
1782   }
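  // Note (inferred, not from the original comments): mode-info (mi) units are
  // 8x8 pixels, so (cm->mi_stride >> 3) and (cm->mi_rows >> 3) size the maps
  // below at roughly one entry per 64x64 superblock; the extra "+ 1" row and
  // the "* 25" factor for prev_variance_low are taken as-is from the
  // allocations rather than derived here.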
1783   if (cpi->prev_segment_id == NULL) {
1784     CHECK_MEM_ERROR(
1785         cm, cpi->prev_segment_id,
1786         (int8_t *)vpx_calloc((cm->mi_stride >> 3) * ((cm->mi_rows >> 3) + 1),
1787                              sizeof(*cpi->prev_segment_id)));
1788   }
1789   if (cpi->prev_variance_low == NULL) {
1790     CHECK_MEM_ERROR(cm, cpi->prev_variance_low,
1791                     (uint8_t *)vpx_calloc(
1792                         (cm->mi_stride >> 3) * ((cm->mi_rows >> 3) + 1) * 25,
1793                         sizeof(*cpi->prev_variance_low)));
1794   }
1795   if (cpi->copied_frame_cnt == NULL) {
1796     CHECK_MEM_ERROR(
1797         cm, cpi->copied_frame_cnt,
1798         (uint8_t *)vpx_calloc((cm->mi_stride >> 3) * ((cm->mi_rows >> 3) + 1),
1799                               sizeof(*cpi->copied_frame_cnt)));
1800   }
1801 }
1802 
1803 void vp9_change_config(struct VP9_COMP *cpi, const VP9EncoderConfig *oxcf) {
1804   VP9_COMMON *const cm = &cpi->common;
1805   RATE_CONTROL *const rc = &cpi->rc;
1806   int last_w = cpi->oxcf.width;
1807   int last_h = cpi->oxcf.height;
1808 
1809   if (cm->profile != oxcf->profile) cm->profile = oxcf->profile;
1810   cm->bit_depth = oxcf->bit_depth;
1811   cm->color_space = oxcf->color_space;
1812   cm->color_range = oxcf->color_range;
1813 
1814   cpi->target_level = oxcf->target_level;
1815   cpi->keep_level_stats = oxcf->target_level != LEVEL_MAX;
1816   set_level_constraint(&cpi->level_constraint,
1817                        get_level_index(cpi->target_level));
1818 
1819   if (cm->profile <= PROFILE_1)
1820     assert(cm->bit_depth == VPX_BITS_8);
1821   else
1822     assert(cm->bit_depth > VPX_BITS_8);
1823 
1824   cpi->oxcf = *oxcf;
1825 #if CONFIG_VP9_HIGHBITDEPTH
1826   cpi->td.mb.e_mbd.bd = (int)cm->bit_depth;
1827 #endif  // CONFIG_VP9_HIGHBITDEPTH
1828 
1829   if ((oxcf->pass == 0) && (oxcf->rc_mode == VPX_Q)) {
1830     rc->baseline_gf_interval = FIXED_GF_INTERVAL;
1831   } else {
1832     rc->baseline_gf_interval = (MIN_GF_INTERVAL + MAX_GF_INTERVAL) / 2;
1833   }
1834 
1835   cpi->refresh_golden_frame = 0;
1836   cpi->refresh_last_frame = 1;
1837   cm->refresh_frame_context = 1;
1838   cm->reset_frame_context = 0;
1839 
1840   vp9_reset_segment_features(&cm->seg);
1841   vp9_set_high_precision_mv(cpi, 0);
1842 
1843   {
1844     int i;
1845 
1846     for (i = 0; i < MAX_SEGMENTS; i++)
1847       cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
1848   }
1849   cpi->encode_breakout = cpi->oxcf.encode_breakout;
1850 
1851   set_rc_buffer_sizes(rc, &cpi->oxcf);
1852 
1853   // Under a configuration change, where maximum_buffer_size may change,
1854   // keep buffer level clipped to the maximum allowed buffer size.
1855   rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size);
1856   rc->buffer_level = VPXMIN(rc->buffer_level, rc->maximum_buffer_size);
1857 
1858   // Set up the frame rate and related rate control parameters.
1859   vp9_new_framerate(cpi, cpi->framerate);
1860 
1861   // Set absolute upper and lower quality limits
1862   rc->worst_quality = cpi->oxcf.worst_allowed_q;
1863   rc->best_quality = cpi->oxcf.best_allowed_q;
1864 
1865   cm->interp_filter = cpi->sf.default_interp_filter;
1866 
1867   if (cpi->oxcf.render_width > 0 && cpi->oxcf.render_height > 0) {
1868     cm->render_width = cpi->oxcf.render_width;
1869     cm->render_height = cpi->oxcf.render_height;
1870   } else {
1871     cm->render_width = cpi->oxcf.width;
1872     cm->render_height = cpi->oxcf.height;
1873   }
1874   if (last_w != cpi->oxcf.width || last_h != cpi->oxcf.height) {
1875     cm->width = cpi->oxcf.width;
1876     cm->height = cpi->oxcf.height;
1877     cpi->external_resize = 1;
1878   }
1879 
1880   if (cpi->initial_width) {
1881     int new_mi_size = 0;
1882     vp9_set_mb_mi(cm, cm->width, cm->height);
1883     new_mi_size = cm->mi_stride * calc_mi_size(cm->mi_rows);
1884     if (cm->mi_alloc_size < new_mi_size) {
1885       vp9_free_context_buffers(cm);
1886       alloc_compressor_data(cpi);
1887       realloc_segmentation_maps(cpi);
1888       cpi->initial_width = cpi->initial_height = 0;
1889       cpi->external_resize = 0;
1890     } else if (cm->mi_alloc_size == new_mi_size &&
1891                (cpi->oxcf.width > last_w || cpi->oxcf.height > last_h)) {
1892       vp9_alloc_loop_filter(cm);
1893     }
1894   }
1895 
1896   if (cm->current_video_frame == 0 || last_w != cpi->oxcf.width ||
1897       last_h != cpi->oxcf.height)
1898     update_frame_size(cpi);
1899 
1900   if (last_w != cpi->oxcf.width || last_h != cpi->oxcf.height) {
1901     memset(cpi->consec_zero_mv, 0,
1902            cm->mi_rows * cm->mi_cols * sizeof(*cpi->consec_zero_mv));
1903     if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
1904       vp9_cyclic_refresh_reset_resize(cpi);
1905   }
1906 
1907   if ((cpi->svc.number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) ||
1908       ((cpi->svc.number_temporal_layers > 1 ||
1909         cpi->svc.number_spatial_layers > 1) &&
1910        cpi->oxcf.pass != 1)) {
1911     vp9_update_layer_context_change_config(cpi,
1912                                            (int)cpi->oxcf.target_bandwidth);
1913   }
1914 
1915   cpi->alt_ref_source = NULL;
1916   rc->is_src_frame_alt_ref = 0;
1917 
1918 #if 0
1919   // Experimental RD Code
1920   cpi->frame_distortion = 0;
1921   cpi->last_frame_distortion = 0;
1922 #endif
1923 
1924   set_tile_limits(cpi);
1925 
1926   cpi->ext_refresh_frame_flags_pending = 0;
1927   cpi->ext_refresh_frame_context_pending = 0;
1928 
1929 #if CONFIG_VP9_HIGHBITDEPTH
1930   highbd_set_var_fns(cpi);
1931 #endif
1932 
1933   vp9_set_row_mt(cpi);
1934 }
1935 
1936 #ifndef M_LOG2_E
1937 #define M_LOG2_E 0.693147180559945309417
1938 #endif
1939 #define log2f(x) (log(x) / (float)M_LOG2_E)
1940 
1941 /***********************************************************************
1942  * Read before modifying 'cal_nmvjointsadcost' or 'cal_nmvsadcosts'    *
1943  ***********************************************************************
1944  * The following 2 functions ('cal_nmvjointsadcost' and                *
1945  * 'cal_nmvsadcosts') are used to calculate cost lookup tables         *
1946  * used by 'vp9_diamond_search_sad'. The C implementation of the       *
1947  * function is generic, but the AVX intrinsics optimised version       *
1948  * relies on the following properties of the computed tables:          *
1949  * For cal_nmvjointsadcost:                                            *
1950  *   - mvjointsadcost[1] == mvjointsadcost[2] == mvjointsadcost[3]     *
1951  * For cal_nmvsadcosts:                                                *
1952  *   - For all i: mvsadcost[0][i] == mvsadcost[1][i]                   *
1953  *         (Equal costs for both components)                           *
1954  *   - For all i: mvsadcost[0][i] == mvsadcost[0][-i]                  *
1955  *         (Cost function is even)                                     *
1956  * If these do not hold, then the AVX optimised version of the         *
1957  * 'vp9_diamond_search_sad' function cannot be used as it is, in which *
1958  * case you can revert to using the C function instead.                *
1959  ***********************************************************************/
1960 
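#if 0
/* Hedged sketch, not compiled into the encoder: a consistency check for the
 * properties listed above, handy if either cost-table builder is ever
 * modified. The helper name is illustrative only and is not part of the
 * original code. Note that the mvsadcost pointers are biased by MV_MAX, so
 * negative indices down to -MV_MAX are valid. */
static void check_nmv_sad_cost_properties(const int *mvjointsadcost,
                                          int *const mvsadcost[2]) {
  int i;
  /* Required by the AVX version of vp9_diamond_search_sad. */
  assert(mvjointsadcost[1] == mvjointsadcost[2] &&
         mvjointsadcost[2] == mvjointsadcost[3]);
  for (i = 0; i <= MV_MAX; ++i) {
    assert(mvsadcost[0][i] == mvsadcost[1][i]);  /* equal component costs */
    assert(mvsadcost[0][i] == mvsadcost[0][-i]); /* even cost function */
  }
}
#endif
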
1961 static void cal_nmvjointsadcost(int *mvjointsadcost) {
1962   /*********************************************************************
1963    * Warning: Read the comments above before modifying this function   *
1964    *********************************************************************/
1965   mvjointsadcost[0] = 600;
1966   mvjointsadcost[1] = 300;
1967   mvjointsadcost[2] = 300;
1968   mvjointsadcost[3] = 300;
1969 }
1970 
1971 static void cal_nmvsadcosts(int *mvsadcost[2]) {
1972   /*********************************************************************
1973    * Warning: Read the comments above before modifying this function   *
1974    *********************************************************************/
1975   int i = 1;
1976 
1977   mvsadcost[0][0] = 0;
1978   mvsadcost[1][0] = 0;
1979 
1980   do {
1981     double z = 256 * (2 * (log2f(8 * i) + .6));
1982     mvsadcost[0][i] = (int)z;
1983     mvsadcost[1][i] = (int)z;
1984     mvsadcost[0][-i] = (int)z;
1985     mvsadcost[1][-i] = (int)z;
1986   } while (++i <= MV_MAX);
1987 }
1988 
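/* Worked example for the table above (and its high-precision variant below):
 * for i = 1, z = 256 * (2 * (log2(8 * 1) + 0.6)) = 256 * 2 * 3.6 = 1843.2,
 * so the stored cost is 1843 after truncation to int. */
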
1989 static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
1990   int i = 1;
1991 
1992   mvsadcost[0][0] = 0;
1993   mvsadcost[1][0] = 0;
1994 
1995   do {
1996     double z = 256 * (2 * (log2f(8 * i) + .6));
1997     mvsadcost[0][i] = (int)z;
1998     mvsadcost[1][i] = (int)z;
1999     mvsadcost[0][-i] = (int)z;
2000     mvsadcost[1][-i] = (int)z;
2001   } while (++i <= MV_MAX);
2002 }
2003 
2004 VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf,
2005                                 BufferPool *const pool) {
2006   unsigned int i;
2007   VP9_COMP *volatile const cpi = vpx_memalign(32, sizeof(VP9_COMP));
2008   VP9_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
2009 
2010   if (!cm) return NULL;
2011 
2012   vp9_zero(*cpi);
2013 
2014   if (setjmp(cm->error.jmp)) {
2015     cm->error.setjmp = 0;
2016     vp9_remove_compressor(cpi);
2017     return 0;
2018   }
2019 
2020   cm->error.setjmp = 1;
2021   cm->alloc_mi = vp9_enc_alloc_mi;
2022   cm->free_mi = vp9_enc_free_mi;
2023   cm->setup_mi = vp9_enc_setup_mi;
2024 
2025   CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
2026   CHECK_MEM_ERROR(
2027       cm, cm->frame_contexts,
2028       (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
2029 
2030   cpi->use_svc = 0;
2031   cpi->resize_state = ORIG;
2032   cpi->external_resize = 0;
2033   cpi->resize_avg_qp = 0;
2034   cpi->resize_buffer_underflow = 0;
2035   cpi->use_skin_detection = 0;
2036   cpi->common.buffer_pool = pool;
2037 
2038   cpi->force_update_segmentation = 0;
2039 
2040   init_config(cpi, oxcf);
2041   vp9_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
2042 
2043   cm->current_video_frame = 0;
2044   cpi->partition_search_skippable_frame = 0;
2045   cpi->tile_data = NULL;
2046 
2047   realloc_segmentation_maps(cpi);
2048 
2049   CHECK_MEM_ERROR(cm, cpi->alt_ref_aq, vp9_alt_ref_aq_create());
2050 
2051   CHECK_MEM_ERROR(
2052       cm, cpi->consec_zero_mv,
2053       vpx_calloc(cm->mi_rows * cm->mi_cols, sizeof(*cpi->consec_zero_mv)));
2054 
2055   CHECK_MEM_ERROR(cm, cpi->nmvcosts[0],
2056                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[0])));
2057   CHECK_MEM_ERROR(cm, cpi->nmvcosts[1],
2058                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[1])));
2059   CHECK_MEM_ERROR(cm, cpi->nmvcosts_hp[0],
2060                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[0])));
2061   CHECK_MEM_ERROR(cm, cpi->nmvcosts_hp[1],
2062                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[1])));
2063   CHECK_MEM_ERROR(cm, cpi->nmvsadcosts[0],
2064                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[0])));
2065   CHECK_MEM_ERROR(cm, cpi->nmvsadcosts[1],
2066                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[1])));
2067   CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[0],
2068                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[0])));
2069   CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[1],
2070                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[1])));
2071 
2072   for (i = 0; i < (sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]));
2073        i++) {
2074     CHECK_MEM_ERROR(
2075         cm, cpi->mbgraph_stats[i].mb_stats,
2076         vpx_calloc(cm->MBs * sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
2077   }
2078 
2079 #if CONFIG_FP_MB_STATS
2080   cpi->use_fp_mb_stats = 0;
2081   if (cpi->use_fp_mb_stats) {
2082     // a placeholder used to store the first-pass mb stats
2083     CHECK_MEM_ERROR(cm, cpi->twopass.frame_mb_stats_buf,
2084                     vpx_calloc(cm->MBs * sizeof(uint8_t), 1));
2085   } else {
2086     cpi->twopass.frame_mb_stats_buf = NULL;
2087   }
2088 #endif
2089 
2090   cpi->refresh_alt_ref_frame = 0;
2091   cpi->multi_arf_last_grp_enabled = 0;
2092 
2093   cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
2094 
2095   init_level_info(&cpi->level_info);
2096   init_level_constraint(&cpi->level_constraint);
2097 
2098 #if CONFIG_INTERNAL_STATS
2099   cpi->b_calculate_blockiness = 1;
2100   cpi->b_calculate_consistency = 1;
2101   cpi->total_inconsistency = 0;
2102   cpi->psnr.worst = 100.0;
2103   cpi->worst_ssim = 100.0;
2104 
2105   cpi->count = 0;
2106   cpi->bytes = 0;
2107 
2108   if (cpi->b_calculate_psnr) {
2109     cpi->total_sq_error = 0;
2110     cpi->total_samples = 0;
2111 
2112     cpi->totalp_sq_error = 0;
2113     cpi->totalp_samples = 0;
2114 
2115     cpi->tot_recode_hits = 0;
2116     cpi->summed_quality = 0;
2117     cpi->summed_weights = 0;
2118     cpi->summedp_quality = 0;
2119     cpi->summedp_weights = 0;
2120   }
2121 
2122   cpi->fastssim.worst = 100.0;
2123 
2124   cpi->psnrhvs.worst = 100.0;
2125 
2126   if (cpi->b_calculate_blockiness) {
2127     cpi->total_blockiness = 0;
2128     cpi->worst_blockiness = 0.0;
2129   }
2130 
2131   if (cpi->b_calculate_consistency) {
2132     CHECK_MEM_ERROR(cm, cpi->ssim_vars,
2133                     vpx_malloc(sizeof(*cpi->ssim_vars) * 4 *
2134                                cpi->common.mi_rows * cpi->common.mi_cols));
2135     cpi->worst_consistency = 100.0;
2136   }
2137 
2138 #endif
2139 
2140   cpi->first_time_stamp_ever = INT64_MAX;
2141 
2142   /*********************************************************************
2143    * Warning: Read the comments around 'cal_nmvjointsadcost' and       *
2144    * 'cal_nmvsadcosts' before modifying how these tables are computed. *
2145    *********************************************************************/
2146   cal_nmvjointsadcost(cpi->td.mb.nmvjointsadcost);
2147   cpi->td.mb.nmvcost[0] = &cpi->nmvcosts[0][MV_MAX];
2148   cpi->td.mb.nmvcost[1] = &cpi->nmvcosts[1][MV_MAX];
2149   cpi->td.mb.nmvsadcost[0] = &cpi->nmvsadcosts[0][MV_MAX];
2150   cpi->td.mb.nmvsadcost[1] = &cpi->nmvsadcosts[1][MV_MAX];
2151   cal_nmvsadcosts(cpi->td.mb.nmvsadcost);
2152 
2153   cpi->td.mb.nmvcost_hp[0] = &cpi->nmvcosts_hp[0][MV_MAX];
2154   cpi->td.mb.nmvcost_hp[1] = &cpi->nmvcosts_hp[1][MV_MAX];
2155   cpi->td.mb.nmvsadcost_hp[0] = &cpi->nmvsadcosts_hp[0][MV_MAX];
2156   cpi->td.mb.nmvsadcost_hp[1] = &cpi->nmvsadcosts_hp[1][MV_MAX];
2157   cal_nmvsadcosts_hp(cpi->td.mb.nmvsadcost_hp);
2158 
2159 #if CONFIG_VP9_TEMPORAL_DENOISING
2160 #ifdef OUTPUT_YUV_DENOISED
2161   yuv_denoised_file = fopen("denoised.yuv", "ab");
2162 #endif
2163 #endif
2164 #ifdef OUTPUT_YUV_SKINMAP
2165   yuv_skinmap_file = fopen("skinmap.yuv", "ab");
2166 #endif
2167 #ifdef OUTPUT_YUV_REC
2168   yuv_rec_file = fopen("rec.yuv", "wb");
2169 #endif
2170 
2171 #if 0
2172   framepsnr = fopen("framepsnr.stt", "a");
2173   kf_list = fopen("kf_list.stt", "w");
2174 #endif
2175 
2176   cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
2177 
2178   if (oxcf->pass == 1) {
2179     vp9_init_first_pass(cpi);
2180   } else if (oxcf->pass == 2) {
2181     const size_t packet_sz = sizeof(FIRSTPASS_STATS);
2182     const int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
2183 
2184     if (cpi->svc.number_spatial_layers > 1 ||
2185         cpi->svc.number_temporal_layers > 1) {
2186       FIRSTPASS_STATS *const stats = oxcf->two_pass_stats_in.buf;
2187       FIRSTPASS_STATS *stats_copy[VPX_SS_MAX_LAYERS] = { 0 };
2188       int i;
2189 
2190       for (i = 0; i < oxcf->ss_number_layers; ++i) {
2191         FIRSTPASS_STATS *const last_packet_for_layer =
2192             &stats[packets - oxcf->ss_number_layers + i];
2193         const int layer_id = (int)last_packet_for_layer->spatial_layer_id;
2194         const int packets_in_layer = (int)last_packet_for_layer->count + 1;
2195         if (layer_id >= 0 && layer_id < oxcf->ss_number_layers) {
2196           LAYER_CONTEXT *const lc = &cpi->svc.layer_context[layer_id];
2197 
2198           vpx_free(lc->rc_twopass_stats_in.buf);
2199 
2200           lc->rc_twopass_stats_in.sz = packets_in_layer * packet_sz;
2201           CHECK_MEM_ERROR(cm, lc->rc_twopass_stats_in.buf,
2202                           vpx_malloc(lc->rc_twopass_stats_in.sz));
2203           lc->twopass.stats_in_start = lc->rc_twopass_stats_in.buf;
2204           lc->twopass.stats_in = lc->twopass.stats_in_start;
2205           lc->twopass.stats_in_end =
2206               lc->twopass.stats_in_start + packets_in_layer - 1;
2207           stats_copy[layer_id] = lc->rc_twopass_stats_in.buf;
2208         }
2209       }
2210 
2211       for (i = 0; i < packets; ++i) {
2212         const int layer_id = (int)stats[i].spatial_layer_id;
2213         if (layer_id >= 0 && layer_id < oxcf->ss_number_layers &&
2214             stats_copy[layer_id] != NULL) {
2215           *stats_copy[layer_id] = stats[i];
2216           ++stats_copy[layer_id];
2217         }
2218       }
2219 
2220       vp9_init_second_pass_spatial_svc(cpi);
2221     } else {
2222 #if CONFIG_FP_MB_STATS
2223       if (cpi->use_fp_mb_stats) {
2224         const size_t psz = cpi->common.MBs * sizeof(uint8_t);
2225         const int ps = (int)(oxcf->firstpass_mb_stats_in.sz / psz);
2226 
2227         cpi->twopass.firstpass_mb_stats.mb_stats_start =
2228             oxcf->firstpass_mb_stats_in.buf;
2229         cpi->twopass.firstpass_mb_stats.mb_stats_end =
2230             cpi->twopass.firstpass_mb_stats.mb_stats_start +
2231             (ps - 1) * cpi->common.MBs * sizeof(uint8_t);
2232       }
2233 #endif
2234 
2235       cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
2236       cpi->twopass.stats_in = cpi->twopass.stats_in_start;
2237       cpi->twopass.stats_in_end = &cpi->twopass.stats_in[packets - 1];
2238 
2239       vp9_init_second_pass(cpi);
2240     }
2241   }
2242 
2243   vp9_set_speed_features_framesize_independent(cpi);
2244   vp9_set_speed_features_framesize_dependent(cpi);
2245 
2246   // Allocate memory to store variances for a frame.
2247   CHECK_MEM_ERROR(cm, cpi->source_diff_var, vpx_calloc(cm->MBs, sizeof(diff)));
2248   cpi->source_var_thresh = 0;
2249   cpi->frames_till_next_var_check = 0;
2250 
2251 #define BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \
2252   cpi->fn_ptr[BT].sdf = SDF;                                    \
2253   cpi->fn_ptr[BT].sdaf = SDAF;                                  \
2254   cpi->fn_ptr[BT].vf = VF;                                      \
2255   cpi->fn_ptr[BT].svf = SVF;                                    \
2256   cpi->fn_ptr[BT].svaf = SVAF;                                  \
2257   cpi->fn_ptr[BT].sdx3f = SDX3F;                                \
2258   cpi->fn_ptr[BT].sdx8f = SDX8F;                                \
2259   cpi->fn_ptr[BT].sdx4df = SDX4DF;
2260 
2261   BFP(BLOCK_32X16, vpx_sad32x16, vpx_sad32x16_avg, vpx_variance32x16,
2262       vpx_sub_pixel_variance32x16, vpx_sub_pixel_avg_variance32x16, NULL, NULL,
2263       vpx_sad32x16x4d)
2264 
2265   BFP(BLOCK_16X32, vpx_sad16x32, vpx_sad16x32_avg, vpx_variance16x32,
2266       vpx_sub_pixel_variance16x32, vpx_sub_pixel_avg_variance16x32, NULL, NULL,
2267       vpx_sad16x32x4d)
2268 
2269   BFP(BLOCK_64X32, vpx_sad64x32, vpx_sad64x32_avg, vpx_variance64x32,
2270       vpx_sub_pixel_variance64x32, vpx_sub_pixel_avg_variance64x32, NULL, NULL,
2271       vpx_sad64x32x4d)
2272 
2273   BFP(BLOCK_32X64, vpx_sad32x64, vpx_sad32x64_avg, vpx_variance32x64,
2274       vpx_sub_pixel_variance32x64, vpx_sub_pixel_avg_variance32x64, NULL, NULL,
2275       vpx_sad32x64x4d)
2276 
2277   BFP(BLOCK_32X32, vpx_sad32x32, vpx_sad32x32_avg, vpx_variance32x32,
2278       vpx_sub_pixel_variance32x32, vpx_sub_pixel_avg_variance32x32,
2279       vpx_sad32x32x3, vpx_sad32x32x8, vpx_sad32x32x4d)
2280 
2281   BFP(BLOCK_64X64, vpx_sad64x64, vpx_sad64x64_avg, vpx_variance64x64,
2282       vpx_sub_pixel_variance64x64, vpx_sub_pixel_avg_variance64x64,
2283       vpx_sad64x64x3, vpx_sad64x64x8, vpx_sad64x64x4d)
2284 
2285   BFP(BLOCK_16X16, vpx_sad16x16, vpx_sad16x16_avg, vpx_variance16x16,
2286       vpx_sub_pixel_variance16x16, vpx_sub_pixel_avg_variance16x16,
2287       vpx_sad16x16x3, vpx_sad16x16x8, vpx_sad16x16x4d)
2288 
2289   BFP(BLOCK_16X8, vpx_sad16x8, vpx_sad16x8_avg, vpx_variance16x8,
2290       vpx_sub_pixel_variance16x8, vpx_sub_pixel_avg_variance16x8, vpx_sad16x8x3,
2291       vpx_sad16x8x8, vpx_sad16x8x4d)
2292 
2293   BFP(BLOCK_8X16, vpx_sad8x16, vpx_sad8x16_avg, vpx_variance8x16,
2294       vpx_sub_pixel_variance8x16, vpx_sub_pixel_avg_variance8x16, vpx_sad8x16x3,
2295       vpx_sad8x16x8, vpx_sad8x16x4d)
2296 
2297   BFP(BLOCK_8X8, vpx_sad8x8, vpx_sad8x8_avg, vpx_variance8x8,
2298       vpx_sub_pixel_variance8x8, vpx_sub_pixel_avg_variance8x8, vpx_sad8x8x3,
2299       vpx_sad8x8x8, vpx_sad8x8x4d)
2300 
2301   BFP(BLOCK_8X4, vpx_sad8x4, vpx_sad8x4_avg, vpx_variance8x4,
2302       vpx_sub_pixel_variance8x4, vpx_sub_pixel_avg_variance8x4, NULL,
2303       vpx_sad8x4x8, vpx_sad8x4x4d)
2304 
2305   BFP(BLOCK_4X8, vpx_sad4x8, vpx_sad4x8_avg, vpx_variance4x8,
2306       vpx_sub_pixel_variance4x8, vpx_sub_pixel_avg_variance4x8, NULL,
2307       vpx_sad4x8x8, vpx_sad4x8x4d)
2308 
2309   BFP(BLOCK_4X4, vpx_sad4x4, vpx_sad4x4_avg, vpx_variance4x4,
2310       vpx_sub_pixel_variance4x4, vpx_sub_pixel_avg_variance4x4, vpx_sad4x4x3,
2311       vpx_sad4x4x8, vpx_sad4x4x4d)
2312 
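  /* Hedged usage sketch (illustrative names, not variables defined here):
   * once the table is populated, motion-search code elsewhere in the encoder
   * can compute a SAD through the per-block-size function pointers, e.g.
   *
   *   unsigned int sad = cpi->fn_ptr[BLOCK_16X16].sdf(src_buf, src_stride,
   *                                                   ref_buf, ref_stride);
   */
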
2313 #if CONFIG_VP9_HIGHBITDEPTH
2314   highbd_set_var_fns(cpi);
2315 #endif
2316 
2317   /* vp9_init_quantizer() is first called here. Add check in
2318    * vp9_frame_init_quantizer() so that vp9_init_quantizer is only
2319    * called later when needed. This will avoid unnecessary calls of
2320    * vp9_init_quantizer() for every frame.
2321    */
2322   vp9_init_quantizer(cpi);
2323 
2324   vp9_loop_filter_init(cm);
2325 
2326   cm->error.setjmp = 0;
2327 
2328   return cpi;
2329 }
2330 
2331 #if CONFIG_INTERNAL_STATS
2332 #define SNPRINT(H, T) snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T))
2333 
2334 #define SNPRINT2(H, T, V) \
2335   snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T), (V))
2336 #endif  // CONFIG_INTERNAL_STATS
2337 
2338 void vp9_remove_compressor(VP9_COMP *cpi) {
2339   VP9_COMMON *cm;
2340   unsigned int i;
2341   int t;
2342 
2343   if (!cpi) return;
2344 
2345   cm = &cpi->common;
2346   if (cm->current_video_frame > 0) {
2347 #if CONFIG_INTERNAL_STATS
2348     vpx_clear_system_state();
2349 
2350     if (cpi->oxcf.pass != 1) {
2351       char headings[512] = { 0 };
2352       char results[512] = { 0 };
2353       FILE *f = fopen("opsnr.stt", "a");
2354       double time_encoded =
2355           (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
2356           10000000.000;
2357       double total_encode_time =
2358           (cpi->time_receive_data + cpi->time_compress_data) / 1000.000;
2359       const double dr =
2360           (double)cpi->bytes * (double)8 / (double)1000 / time_encoded;
2361       const double peak = (double)((1 << cpi->oxcf.input_bit_depth) - 1);
2362       const double target_rate = (double)cpi->oxcf.target_bandwidth / 1000;
2363       const double rate_err = ((100.0 * (dr - target_rate)) / target_rate);
2364 
2365       if (cpi->b_calculate_psnr) {
2366         const double total_psnr = vpx_sse_to_psnr(
2367             (double)cpi->total_samples, peak, (double)cpi->total_sq_error);
2368         const double totalp_psnr = vpx_sse_to_psnr(
2369             (double)cpi->totalp_samples, peak, (double)cpi->totalp_sq_error);
2370         const double total_ssim =
2371             100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
2372         const double totalp_ssim =
2373             100 * pow(cpi->summedp_quality / cpi->summedp_weights, 8.0);
2374 
2375         snprintf(headings, sizeof(headings),
2376                  "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\t"
2377                  "VPXSSIM\tVPSSIMP\tFASTSIM\tPSNRHVS\t"
2378                  "WstPsnr\tWstSsim\tWstFast\tWstHVS");
2379         snprintf(results, sizeof(results),
2380                  "%7.2f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2381                  "%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2382                  "%7.3f\t%7.3f\t%7.3f\t%7.3f",
2383                  dr, cpi->psnr.stat[ALL] / cpi->count, total_psnr,
2384                  cpi->psnrp.stat[ALL] / cpi->count, totalp_psnr, total_ssim,
2385                  totalp_ssim, cpi->fastssim.stat[ALL] / cpi->count,
2386                  cpi->psnrhvs.stat[ALL] / cpi->count, cpi->psnr.worst,
2387                  cpi->worst_ssim, cpi->fastssim.worst, cpi->psnrhvs.worst);
2388 
2389         if (cpi->b_calculate_blockiness) {
2390           SNPRINT(headings, "\t  Block\tWstBlck");
2391           SNPRINT2(results, "\t%7.3f", cpi->total_blockiness / cpi->count);
2392           SNPRINT2(results, "\t%7.3f", cpi->worst_blockiness);
2393         }
2394 
2395         if (cpi->b_calculate_consistency) {
2396           double consistency =
2397               vpx_sse_to_psnr((double)cpi->totalp_samples, peak,
2398                               (double)cpi->total_inconsistency);
2399 
2400           SNPRINT(headings, "\tConsist\tWstCons");
2401           SNPRINT2(results, "\t%7.3f", consistency);
2402           SNPRINT2(results, "\t%7.3f", cpi->worst_consistency);
2403         }
2404 
2405         fprintf(f, "%s\t    Time\tRcErr\tAbsErr\n", headings);
2406         fprintf(f, "%s\t%8.0f\t%7.2f\t%7.2f\n", results, total_encode_time,
2407                 rate_err, fabs(rate_err));
2408       }
2409 
2410       fclose(f);
2411     }
2412 
2413 #endif
2414 
2415 #if 0
2416     {
2417       printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
2418       printf("\n_frames receive_data encode_sb_row compress_frame  Total\n");
2419       printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame,
2420              cpi->time_receive_data / 1000, cpi->time_encode_sb_row / 1000,
2421              cpi->time_compress_data / 1000,
2422              (cpi->time_receive_data + cpi->time_compress_data) / 1000);
2423     }
2424 #endif
2425   }
2426 
2427 #if CONFIG_VP9_TEMPORAL_DENOISING
2428   vp9_denoiser_free(&(cpi->denoiser));
2429 #endif
2430 
2431   for (t = 0; t < cpi->num_workers; ++t) {
2432     VPxWorker *const worker = &cpi->workers[t];
2433     EncWorkerData *const thread_data = &cpi->tile_thr_data[t];
2434 
2435     // Deallocate allocated threads.
2436     vpx_get_worker_interface()->end(worker);
2437 
2438     // Deallocate allocated thread data.
2439     if (t < cpi->num_workers - 1) {
2440       vpx_free(thread_data->td->counts);
2441       vp9_free_pc_tree(thread_data->td);
2442       vpx_free(thread_data->td);
2443     }
2444   }
2445   vpx_free(cpi->tile_thr_data);
2446   vpx_free(cpi->workers);
2447   vp9_row_mt_mem_dealloc(cpi);
2448 
2449   if (cpi->num_workers > 1) {
2450     vp9_loop_filter_dealloc(&cpi->lf_row_sync);
2451     vp9_bitstream_encode_tiles_buffer_dealloc(cpi);
2452   }
2453 
2454   vp9_alt_ref_aq_destroy(cpi->alt_ref_aq);
2455 
2456   dealloc_compressor_data(cpi);
2457 
2458   for (i = 0; i < sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]);
2459        ++i) {
2460     vpx_free(cpi->mbgraph_stats[i].mb_stats);
2461   }
2462 
2463 #if CONFIG_FP_MB_STATS
2464   if (cpi->use_fp_mb_stats) {
2465     vpx_free(cpi->twopass.frame_mb_stats_buf);
2466     cpi->twopass.frame_mb_stats_buf = NULL;
2467   }
2468 #endif
2469 
2470   vp9_remove_common(cm);
2471   vp9_free_ref_frame_buffers(cm->buffer_pool);
2472 #if CONFIG_VP9_POSTPROC
2473   vp9_free_postproc_buffers(cm);
2474 #endif
2475   vpx_free(cpi);
2476 
2477 #if CONFIG_VP9_TEMPORAL_DENOISING
2478 #ifdef OUTPUT_YUV_DENOISED
2479   fclose(yuv_denoised_file);
2480 #endif
2481 #endif
2482 #ifdef OUTPUT_YUV_SKINMAP
2483   fclose(yuv_skinmap_file);
2484 #endif
2485 #ifdef OUTPUT_YUV_REC
2486   fclose(yuv_rec_file);
2487 #endif
2488 
2489 #if 0
2490 
2491   if (keyfile)
2492     fclose(keyfile);
2493 
2494   if (framepsnr)
2495     fclose(framepsnr);
2496 
2497   if (kf_list)
2498     fclose(kf_list);
2499 
2500 #endif
2501 }
2502 
2503 static void generate_psnr_packet(VP9_COMP *cpi) {
2504   struct vpx_codec_cx_pkt pkt;
2505   int i;
2506   PSNR_STATS psnr;
2507 #if CONFIG_VP9_HIGHBITDEPTH
2508   vpx_calc_highbd_psnr(cpi->raw_source_frame, cpi->common.frame_to_show, &psnr,
2509                        cpi->td.mb.e_mbd.bd, cpi->oxcf.input_bit_depth);
2510 #else
2511   vpx_calc_psnr(cpi->raw_source_frame, cpi->common.frame_to_show, &psnr);
2512 #endif
2513 
2514   for (i = 0; i < 4; ++i) {
2515     pkt.data.psnr.samples[i] = psnr.samples[i];
2516     pkt.data.psnr.sse[i] = psnr.sse[i];
2517     pkt.data.psnr.psnr[i] = psnr.psnr[i];
2518   }
2519   pkt.kind = VPX_CODEC_PSNR_PKT;
2520   if (cpi->use_svc)
2521     cpi->svc
2522         .layer_context[cpi->svc.spatial_layer_id *
2523                        cpi->svc.number_temporal_layers]
2524         .psnr_pkt = pkt.data.psnr;
2525   else
2526     vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
2527 }
2528 
2529 int vp9_use_as_reference(VP9_COMP *cpi, int ref_frame_flags) {
2530   if (ref_frame_flags > 7) return -1;
2531 
2532   cpi->ref_frame_flags = ref_frame_flags;
2533   return 0;
2534 }
2535 
2536 void vp9_update_reference(VP9_COMP *cpi, int ref_frame_flags) {
2537   cpi->ext_refresh_golden_frame = (ref_frame_flags & VP9_GOLD_FLAG) != 0;
2538   cpi->ext_refresh_alt_ref_frame = (ref_frame_flags & VP9_ALT_FLAG) != 0;
2539   cpi->ext_refresh_last_frame = (ref_frame_flags & VP9_LAST_FLAG) != 0;
2540   cpi->ext_refresh_frame_flags_pending = 1;
2541 }
2542 
2543 static YV12_BUFFER_CONFIG *get_vp9_ref_frame_buffer(
2544     VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag) {
2545   MV_REFERENCE_FRAME ref_frame = NONE;
2546   if (ref_frame_flag == VP9_LAST_FLAG)
2547     ref_frame = LAST_FRAME;
2548   else if (ref_frame_flag == VP9_GOLD_FLAG)
2549     ref_frame = GOLDEN_FRAME;
2550   else if (ref_frame_flag == VP9_ALT_FLAG)
2551     ref_frame = ALTREF_FRAME;
2552 
2553   return ref_frame == NONE ? NULL : get_ref_frame_buffer(cpi, ref_frame);
2554 }
2555 
2556 int vp9_copy_reference_enc(VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag,
2557                            YV12_BUFFER_CONFIG *sd) {
2558   YV12_BUFFER_CONFIG *cfg = get_vp9_ref_frame_buffer(cpi, ref_frame_flag);
2559   if (cfg) {
2560     vp8_yv12_copy_frame(cfg, sd);
2561     return 0;
2562   } else {
2563     return -1;
2564   }
2565 }
2566 
2567 int vp9_set_reference_enc(VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag,
2568                           YV12_BUFFER_CONFIG *sd) {
2569   YV12_BUFFER_CONFIG *cfg = get_vp9_ref_frame_buffer(cpi, ref_frame_flag);
2570   if (cfg) {
2571     vp8_yv12_copy_frame(sd, cfg);
2572     return 0;
2573   } else {
2574     return -1;
2575   }
2576 }
2577 
2578 int vp9_update_entropy(VP9_COMP *cpi, int update) {
2579   cpi->ext_refresh_frame_context = update;
2580   cpi->ext_refresh_frame_context_pending = 1;
2581   return 0;
2582 }
2583 
2584 #if defined(OUTPUT_YUV_DENOISED) || defined(OUTPUT_YUV_SKINMAP)
2585 // The denoiser buffer is allocated as a YUV 440 buffer. This function writes it
2586 // as YUV 420. We simply use the top-left pixels of the UV buffers, since we do
2587 // not denoise the UV channels at this time. If ever we implement UV channel
2588 // denoising we will have to modify this.
2589 void vp9_write_yuv_frame_420(YV12_BUFFER_CONFIG *s, FILE *f) {
2590   uint8_t *src = s->y_buffer;
2591   int h = s->y_height;
2592 
2593   do {
2594     fwrite(src, s->y_width, 1, f);
2595     src += s->y_stride;
2596   } while (--h);
2597 
2598   src = s->u_buffer;
2599   h = s->uv_height;
2600 
2601   do {
2602     fwrite(src, s->uv_width, 1, f);
2603     src += s->uv_stride;
2604   } while (--h);
2605 
2606   src = s->v_buffer;
2607   h = s->uv_height;
2608 
2609   do {
2610     fwrite(src, s->uv_width, 1, f);
2611     src += s->uv_stride;
2612   } while (--h);
2613 }
2614 #endif
2615 
2616 #ifdef OUTPUT_YUV_REC
2617 void vp9_write_yuv_rec_frame(VP9_COMMON *cm) {
2618   YV12_BUFFER_CONFIG *s = cm->frame_to_show;
2619   uint8_t *src = s->y_buffer;
2620   int h = cm->height;
2621 
2622 #if CONFIG_VP9_HIGHBITDEPTH
2623   if (s->flags & YV12_FLAG_HIGHBITDEPTH) {
2624     uint16_t *src16 = CONVERT_TO_SHORTPTR(s->y_buffer);
2625 
2626     do {
2627       fwrite(src16, s->y_width, 2, yuv_rec_file);
2628       src16 += s->y_stride;
2629     } while (--h);
2630 
2631     src16 = CONVERT_TO_SHORTPTR(s->u_buffer);
2632     h = s->uv_height;
2633 
2634     do {
2635       fwrite(src16, s->uv_width, 2, yuv_rec_file);
2636       src16 += s->uv_stride;
2637     } while (--h);
2638 
2639     src16 = CONVERT_TO_SHORTPTR(s->v_buffer);
2640     h = s->uv_height;
2641 
2642     do {
2643       fwrite(src16, s->uv_width, 2, yuv_rec_file);
2644       src16 += s->uv_stride;
2645     } while (--h);
2646 
2647     fflush(yuv_rec_file);
2648     return;
2649   }
2650 #endif  // CONFIG_VP9_HIGHBITDEPTH
2651 
2652   do {
2653     fwrite(src, s->y_width, 1, yuv_rec_file);
2654     src += s->y_stride;
2655   } while (--h);
2656 
2657   src = s->u_buffer;
2658   h = s->uv_height;
2659 
2660   do {
2661     fwrite(src, s->uv_width, 1, yuv_rec_file);
2662     src += s->uv_stride;
2663   } while (--h);
2664 
2665   src = s->v_buffer;
2666   h = s->uv_height;
2667 
2668   do {
2669     fwrite(src, s->uv_width, 1, yuv_rec_file);
2670     src += s->uv_stride;
2671   } while (--h);
2672 
2673   fflush(yuv_rec_file);
2674 }
2675 #endif
2676 
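// Encoder-side resize of all three planes using the arbitrary-ratio resizer
// (vp9_resize_plane), followed by border extension. The output is only used
// internally, so it does not need to match any normative scaling path.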
2677 #if CONFIG_VP9_HIGHBITDEPTH
2678 static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
2679                                                 YV12_BUFFER_CONFIG *dst,
2680                                                 int bd) {
2681 #else
2682 static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
2683                                                 YV12_BUFFER_CONFIG *dst) {
2684 #endif  // CONFIG_VP9_HIGHBITDEPTH
2685   // TODO(dkovalev): replace YV12_BUFFER_CONFIG with vpx_image_t
2686   int i;
2687   const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
2688                                    src->v_buffer };
2689   const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
2690   const int src_widths[3] = { src->y_crop_width, src->uv_crop_width,
2691                               src->uv_crop_width };
2692   const int src_heights[3] = { src->y_crop_height, src->uv_crop_height,
2693                                src->uv_crop_height };
2694   uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
2695   const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
2696   const int dst_widths[3] = { dst->y_crop_width, dst->uv_crop_width,
2697                               dst->uv_crop_width };
2698   const int dst_heights[3] = { dst->y_crop_height, dst->uv_crop_height,
2699                                dst->uv_crop_height };
2700 
2701   for (i = 0; i < MAX_MB_PLANE; ++i) {
2702 #if CONFIG_VP9_HIGHBITDEPTH
2703     if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
2704       vp9_highbd_resize_plane(srcs[i], src_heights[i], src_widths[i],
2705                               src_strides[i], dsts[i], dst_heights[i],
2706                               dst_widths[i], dst_strides[i], bd);
2707     } else {
2708       vp9_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
2709                        dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
2710     }
2711 #else
2712     vp9_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
2713                      dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
2714 #endif  // CONFIG_VP9_HIGHBITDEPTH
2715   }
2716   vpx_extend_frame_borders(dst);
2717 }
2718 
2719 #if CONFIG_VP9_HIGHBITDEPTH
2720 static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
2721                                    YV12_BUFFER_CONFIG *dst, int bd,
2722                                    INTERP_FILTER filter_type,
2723                                    int phase_scaler) {
2724   const int src_w = src->y_crop_width;
2725   const int src_h = src->y_crop_height;
2726   const int dst_w = dst->y_crop_width;
2727   const int dst_h = dst->y_crop_height;
2728   const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
2729                                    src->v_buffer };
2730   const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
2731   uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
2732   const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
2733   const InterpKernel *const kernel = vp9_filter_kernels[filter_type];
2734   int x, y, i;
2735 
2736   for (i = 0; i < MAX_MB_PLANE; ++i) {
2737     const int factor = (i == 0 || i == 3 ? 1 : 2);
2738     const int src_stride = src_strides[i];
2739     const int dst_stride = dst_strides[i];
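    // Scale the plane in 16x16 luma (8x8 chroma) blocks. x_q4 / y_q4 are the
    // source position of each block in 1/16-pel units: the low 4 bits select
    // the subpel filter phase, and 16 * src_w / dst_w (resp. src_h / dst_h)
    // is the per-output-pixel source step passed to the convolution.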
2740     for (y = 0; y < dst_h; y += 16) {
2741       const int y_q4 = y * (16 / factor) * src_h / dst_h + phase_scaler;
2742       for (x = 0; x < dst_w; x += 16) {
2743         const int x_q4 = x * (16 / factor) * src_w / dst_w + phase_scaler;
2744         const uint8_t *src_ptr = srcs[i] +
2745                                  (y / factor) * src_h / dst_h * src_stride +
2746                                  (x / factor) * src_w / dst_w;
2747         uint8_t *dst_ptr = dsts[i] + (y / factor) * dst_stride + (x / factor);
2748 
2749         if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
2750           vpx_highbd_convolve8(CONVERT_TO_SHORTPTR(src_ptr), src_stride,
2751                                CONVERT_TO_SHORTPTR(dst_ptr), dst_stride,
2752                                kernel[x_q4 & 0xf], 16 * src_w / dst_w,
2753                                kernel[y_q4 & 0xf], 16 * src_h / dst_h,
2754                                16 / factor, 16 / factor, bd);
2755         } else {
2756           vpx_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride,
2757                         kernel[x_q4 & 0xf], 16 * src_w / dst_w,
2758                         kernel[y_q4 & 0xf], 16 * src_h / dst_h, 16 / factor,
2759                         16 / factor);
2760         }
2761       }
2762     }
2763   }
2764 
2765   vpx_extend_frame_borders(dst);
2766 }
2767 #endif  // CONFIG_VP9_HIGHBITDEPTH
2768 
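// Decide whether a kf/gf/arf frame should trigger a dynamic resolution
// scale-down: only when the group is currently unscaled, q has reached the
// maximum for its rf level, and the projected size is well above the target.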
2769 static int scale_down(VP9_COMP *cpi, int q) {
2770   RATE_CONTROL *const rc = &cpi->rc;
2771   GF_GROUP *const gf_group = &cpi->twopass.gf_group;
2772   int scale = 0;
2773   assert(frame_is_kf_gf_arf(cpi));
2774 
2775   if (rc->frame_size_selector == UNSCALED &&
2776       q >= rc->rf_level_maxq[gf_group->rf_level[gf_group->index]]) {
2777     const int max_size_thresh =
2778         (int)(rate_thresh_mult[SCALE_STEP1] *
2779               VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth));
2780     scale = rc->projected_frame_size > max_size_thresh ? 1 : 0;
2781   }
2782   return scale;
2783 }
2784 
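// A large rate miss: the projected frame size is more than 1.5x the high
// limit or less than half the low limit.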
2785 static int big_rate_miss(VP9_COMP *cpi, int high_limit, int low_limit) {
2786   const RATE_CONTROL *const rc = &cpi->rc;
2787 
2788   return (rc->projected_frame_size > ((high_limit * 3) / 2)) ||
2789          (rc->projected_frame_size < (low_limit / 2));
2790 }
2791 
2792 // Test whether, in two-pass mode, this is the first inter frame in the GF group.
2793 static int two_pass_first_group_inter(VP9_COMP *cpi) {
2794   TWO_PASS *const twopass = &cpi->twopass;
2795   GF_GROUP *const gf_group = &twopass->gf_group;
2796   if ((cpi->oxcf.pass == 2) &&
2797       (gf_group->index == gf_group->first_inter_index)) {
2798     return 1;
2799   } else {
2800     return 0;
2801   }
2802 }
2803 
2804 // Function to test for conditions that indicate we should loop
2805 // back and recode a frame.
2806 static int recode_loop_test(VP9_COMP *cpi, int high_limit, int low_limit, int q,
2807                             int maxq, int minq) {
2808   const RATE_CONTROL *const rc = &cpi->rc;
2809   const VP9EncoderConfig *const oxcf = &cpi->oxcf;
2810   const int frame_is_kfgfarf = frame_is_kf_gf_arf(cpi);
2811   int force_recode = 0;
2812 
2813   if ((rc->projected_frame_size >= rc->max_frame_bandwidth) ||
2814       big_rate_miss(cpi, high_limit, low_limit) ||
2815       (cpi->sf.recode_loop == ALLOW_RECODE) ||
2816       (two_pass_first_group_inter(cpi) &&
2817        (cpi->sf.recode_loop == ALLOW_RECODE_FIRST)) ||
2818       (frame_is_kfgfarf && (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF))) {
2819     if (frame_is_kfgfarf && (oxcf->resize_mode == RESIZE_DYNAMIC) &&
2820         scale_down(cpi, q)) {
2821       // Code this group at a lower resolution.
2822       cpi->resize_pending = 1;
2823       return 1;
2824     }
2825     // Force recode if projected_frame_size > max_frame_bandwidth
2826     if (rc->projected_frame_size >= rc->max_frame_bandwidth) return 1;
2827 
2828     // TODO(agrange) high_limit could be greater than the scale-down threshold.
2829     if ((rc->projected_frame_size > high_limit && q < maxq) ||
2830         (rc->projected_frame_size < low_limit && q > minq)) {
2831       force_recode = 1;
2832     } else if (cpi->oxcf.rc_mode == VPX_CQ) {
2833       // Deal with frame undershoot and whether or not we are
2834       // below the automatically set cq level.
2835       if (q > oxcf->cq_level &&
2836           rc->projected_frame_size < ((rc->this_frame_target * 7) >> 3)) {
2837         force_recode = 1;
2838       }
2839     }
2840   }
2841   return force_recode;
2842 }
2843 
2844 void vp9_update_reference_frames(VP9_COMP *cpi) {
2845   VP9_COMMON *const cm = &cpi->common;
2846   BufferPool *const pool = cm->buffer_pool;
2847 
2848   // At this point the new frame has been encoded.
2849   // If any buffer copy / swapping is signaled it should be done here.
2850   if (cm->frame_type == KEY_FRAME) {
2851     ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
2852                cm->new_fb_idx);
2853     ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx],
2854                cm->new_fb_idx);
2855   } else if (vp9_preserve_existing_gf(cpi)) {
2856     // We have decided to preserve the previously existing golden frame as our
2857     // new ARF frame. However, in the short term in function
2858     // vp9_get_refresh_mask() we left it in the GF slot and, if
2859     // we're updating the GF with the current decoded frame, we save it to the
2860     // ARF slot instead.
2861     // We now have to update the ARF with the current frame and swap gld_fb_idx
2862     // and alt_fb_idx so that, overall, we've stored the old GF in the new ARF
2863     // slot and, if we're updating the GF, the current frame becomes the new GF.
2864     int tmp;
2865 
2866     ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx],
2867                cm->new_fb_idx);
2868 
2869     tmp = cpi->alt_fb_idx;
2870     cpi->alt_fb_idx = cpi->gld_fb_idx;
2871     cpi->gld_fb_idx = tmp;
2872 
2873     if (is_two_pass_svc(cpi)) {
2874       cpi->svc.layer_context[0].gold_ref_idx = cpi->gld_fb_idx;
2875       cpi->svc.layer_context[0].alt_ref_idx = cpi->alt_fb_idx;
2876     }
2877   } else { /* For non key/golden frames */
2878     if (cpi->refresh_alt_ref_frame) {
2879       int arf_idx = cpi->alt_fb_idx;
2880       if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
2881         const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
2882         arf_idx = gf_group->arf_update_idx[gf_group->index];
2883       }
2884 
2885       ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
2886       memcpy(cpi->interp_filter_selected[ALTREF_FRAME],
2887              cpi->interp_filter_selected[0],
2888              sizeof(cpi->interp_filter_selected[0]));
2889     }
2890 
2891     if (cpi->refresh_golden_frame) {
2892       ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
2893                  cm->new_fb_idx);
2894       if (!cpi->rc.is_src_frame_alt_ref)
2895         memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
2896                cpi->interp_filter_selected[0],
2897                sizeof(cpi->interp_filter_selected[0]));
2898       else
2899         memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
2900                cpi->interp_filter_selected[ALTREF_FRAME],
2901                sizeof(cpi->interp_filter_selected[ALTREF_FRAME]));
2902     }
2903   }
2904 
2905   if (cpi->refresh_last_frame) {
2906     ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->lst_fb_idx],
2907                cm->new_fb_idx);
2908     if (!cpi->rc.is_src_frame_alt_ref)
2909       memcpy(cpi->interp_filter_selected[LAST_FRAME],
2910              cpi->interp_filter_selected[0],
2911              sizeof(cpi->interp_filter_selected[0]));
2912   }
2913 #if CONFIG_VP9_TEMPORAL_DENOISING
2914   if (cpi->oxcf.noise_sensitivity > 0 && denoise_svc(cpi) &&
2915       cpi->denoiser.denoising_level > kDenLowLow) {
2916     int svc_base_is_key = 0;
2917     if (cpi->use_svc) {
2918       int layer = LAYER_IDS_TO_IDX(cpi->svc.spatial_layer_id,
2919                                    cpi->svc.temporal_layer_id,
2920                                    cpi->svc.number_temporal_layers);
2921       LAYER_CONTEXT *lc = &cpi->svc.layer_context[layer];
2922       svc_base_is_key = lc->is_key_frame;
2923     }
2924     vp9_denoiser_update_frame_info(
2925         &cpi->denoiser, *cpi->Source, cpi->common.frame_type,
2926         cpi->refresh_alt_ref_frame, cpi->refresh_golden_frame,
2927         cpi->refresh_last_frame, cpi->resize_pending, svc_base_is_key);
2928   }
2929 #endif
2930   if (is_one_pass_cbr_svc(cpi)) {
2931     // Keep track of frame index for each reference frame.
2932     SVC *const svc = &cpi->svc;
2933     if (cm->frame_type == KEY_FRAME) {
2934       svc->ref_frame_index[cpi->lst_fb_idx] = svc->current_superframe;
2935       svc->ref_frame_index[cpi->gld_fb_idx] = svc->current_superframe;
2936       svc->ref_frame_index[cpi->alt_fb_idx] = svc->current_superframe;
2937     } else {
2938       if (cpi->refresh_last_frame)
2939         svc->ref_frame_index[cpi->lst_fb_idx] = svc->current_superframe;
2940       if (cpi->refresh_golden_frame)
2941         svc->ref_frame_index[cpi->gld_fb_idx] = svc->current_superframe;
2942       if (cpi->refresh_alt_ref_frame)
2943         svc->ref_frame_index[cpi->alt_fb_idx] = svc->current_superframe;
2944     }
2945   }
2946 }
2947 
2948 static void loopfilter_frame(VP9_COMP *cpi, VP9_COMMON *cm) {
2949   MACROBLOCKD *xd = &cpi->td.mb.e_mbd;
2950   struct loopfilter *lf = &cm->lf;
2951 
2952   const int is_reference_frame =
2953       (cm->frame_type == KEY_FRAME || cpi->refresh_last_frame ||
2954        cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame);
2955 
2956   if (xd->lossless) {
2957     lf->filter_level = 0;
2958     lf->last_filt_level = 0;
2959   } else {
2960     struct vpx_usec_timer timer;
2961 
2962     vpx_clear_system_state();
2963 
2964     vpx_usec_timer_start(&timer);
2965 
2966     if (!cpi->rc.is_src_frame_alt_ref) {
2967       if ((cpi->common.frame_type == KEY_FRAME) &&
2968           (!cpi->rc.this_key_frame_forced)) {
2969         lf->last_filt_level = 0;
2970       }
2971       vp9_pick_filter_level(cpi->Source, cpi, cpi->sf.lpf_pick);
2972       lf->last_filt_level = lf->filter_level;
2973     } else {
2974       lf->filter_level = 0;
2975     }
2976 
2977     vpx_usec_timer_mark(&timer);
2978     cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
2979   }
2980 
2981   if (lf->filter_level > 0 && is_reference_frame) {
2982     vp9_build_mask_frame(cm, lf->filter_level, 0);
2983 
2984     if (cpi->num_workers > 1)
2985       vp9_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane,
2986                                lf->filter_level, 0, 0, cpi->workers,
2987                                cpi->num_workers, &cpi->lf_row_sync);
2988     else
2989       vp9_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
2990   }
2991 
2992   vpx_extend_frame_inner_borders(cm->frame_to_show);
2993 }
2994 
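// Make sure the frame buffer owns an MV array large enough for the current
// mi_rows x mi_cols grid, (re)allocating it when missing or too small.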
2995 static INLINE void alloc_frame_mvs(VP9_COMMON *const cm, int buffer_idx) {
2996   RefCntBuffer *const new_fb_ptr = &cm->buffer_pool->frame_bufs[buffer_idx];
2997   if (new_fb_ptr->mvs == NULL || new_fb_ptr->mi_rows < cm->mi_rows ||
2998       new_fb_ptr->mi_cols < cm->mi_cols) {
2999     vpx_free(new_fb_ptr->mvs);
3000     CHECK_MEM_ERROR(cm, new_fb_ptr->mvs,
3001                     (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
3002                                          sizeof(*new_fb_ptr->mvs)));
3003     new_fb_ptr->mi_rows = cm->mi_rows;
3004     new_fb_ptr->mi_cols = cm->mi_cols;
3005   }
3006 }
3007 
3008 void vp9_scale_references(VP9_COMP *cpi) {
3009   VP9_COMMON *cm = &cpi->common;
3010   MV_REFERENCE_FRAME ref_frame;
3011   const VP9_REFFRAME ref_mask[3] = { VP9_LAST_FLAG, VP9_GOLD_FLAG,
3012                                      VP9_ALT_FLAG };
3013 
3014   for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
3015     // ref_frame is 1-based (LAST_FRAME == 1), so subtract 1 to index ref_mask.
3016     if (cpi->ref_frame_flags & ref_mask[ref_frame - 1]) {
3017       BufferPool *const pool = cm->buffer_pool;
3018       const YV12_BUFFER_CONFIG *const ref =
3019           get_ref_frame_buffer(cpi, ref_frame);
3020 
3021       if (ref == NULL) {
3022         cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
3023         continue;
3024       }
3025 
3026 #if CONFIG_VP9_HIGHBITDEPTH
3027       if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
3028         RefCntBuffer *new_fb_ptr = NULL;
3029         int force_scaling = 0;
3030         int new_fb = cpi->scaled_ref_idx[ref_frame - 1];
3031         if (new_fb == INVALID_IDX) {
3032           new_fb = get_free_fb(cm);
3033           force_scaling = 1;
3034         }
3035         if (new_fb == INVALID_IDX) return;
3036         new_fb_ptr = &pool->frame_bufs[new_fb];
3037         if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
3038             new_fb_ptr->buf.y_crop_height != cm->height) {
3039           if (vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
3040                                        cm->subsampling_x, cm->subsampling_y,
3041                                        cm->use_highbitdepth,
3042                                        VP9_ENC_BORDER_IN_PIXELS,
3043                                        cm->byte_alignment, NULL, NULL, NULL))
3044             vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
3045                                "Failed to allocate frame buffer");
3046           scale_and_extend_frame(ref, &new_fb_ptr->buf, (int)cm->bit_depth,
3047                                  EIGHTTAP, 0);
3048           cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
3049           alloc_frame_mvs(cm, new_fb);
3050         }
3051 #else
3052       if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
3053         RefCntBuffer *new_fb_ptr = NULL;
3054         int force_scaling = 0;
3055         int new_fb = cpi->scaled_ref_idx[ref_frame - 1];
3056         if (new_fb == INVALID_IDX) {
3057           new_fb = get_free_fb(cm);
3058           force_scaling = 1;
3059         }
3060         if (new_fb == INVALID_IDX) return;
3061         new_fb_ptr = &pool->frame_bufs[new_fb];
3062         if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
3063             new_fb_ptr->buf.y_crop_height != cm->height) {
3064           if (vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
3065                                        cm->subsampling_x, cm->subsampling_y,
3066                                        VP9_ENC_BORDER_IN_PIXELS,
3067                                        cm->byte_alignment, NULL, NULL, NULL))
3068             vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
3069                                "Failed to allocate frame buffer");
3070           vp9_scale_and_extend_frame(ref, &new_fb_ptr->buf, EIGHTTAP, 0);
3071           cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
3072           alloc_frame_mvs(cm, new_fb);
3073         }
3074 #endif  // CONFIG_VP9_HIGHBITDEPTH
3075       } else {
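        // No rescaling needed. For one-pass non-SVC, first release any stale
        // scaled copy, then point scaled_ref_idx at the original reference
        // buffer and take a reference on it.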
3076         int buf_idx;
3077         RefCntBuffer *buf = NULL;
3078         if (cpi->oxcf.pass == 0 && !cpi->use_svc) {
3079           // Check for release of scaled reference.
3080           buf_idx = cpi->scaled_ref_idx[ref_frame - 1];
3081           buf = (buf_idx != INVALID_IDX) ? &pool->frame_bufs[buf_idx] : NULL;
3082           if (buf != NULL) {
3083             --buf->ref_count;
3084             cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
3085           }
3086         }
3087         buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
3088         buf = &pool->frame_bufs[buf_idx];
3089         buf->buf.y_crop_width = ref->y_crop_width;
3090         buf->buf.y_crop_height = ref->y_crop_height;
3091         cpi->scaled_ref_idx[ref_frame - 1] = buf_idx;
3092         ++buf->ref_count;
3093       }
3094     } else {
3095       if (cpi->oxcf.pass != 0 || cpi->use_svc)
3096         cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
3097     }
3098   }
3099 }
3100 
3101 static void release_scaled_references(VP9_COMP *cpi) {
3102   VP9_COMMON *cm = &cpi->common;
3103   int i;
3104   if (cpi->oxcf.pass == 0 && !cpi->use_svc) {
3105     // Only release scaled references under certain conditions:
3106     // if reference will be updated, or if scaled reference has same resolution.
3107     int refresh[3];
3108     refresh[0] = (cpi->refresh_last_frame) ? 1 : 0;
3109     refresh[1] = (cpi->refresh_golden_frame) ? 1 : 0;
3110     refresh[2] = (cpi->refresh_alt_ref_frame) ? 1 : 0;
3111     for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
3112       const int idx = cpi->scaled_ref_idx[i - 1];
3113       RefCntBuffer *const buf =
3114           idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[idx] : NULL;
3115       const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, i);
3116       if (buf != NULL &&
3117           (refresh[i - 1] || (buf->buf.y_crop_width == ref->y_crop_width &&
3118                               buf->buf.y_crop_height == ref->y_crop_height))) {
3119         --buf->ref_count;
3120         cpi->scaled_ref_idx[i - 1] = INVALID_IDX;
3121       }
3122     }
3123   } else {
3124     for (i = 0; i < MAX_REF_FRAMES; ++i) {
3125       const int idx = cpi->scaled_ref_idx[i];
3126       RefCntBuffer *const buf =
3127           idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[idx] : NULL;
3128       if (buf != NULL) {
3129         --buf->ref_count;
3130         cpi->scaled_ref_idx[i] = INVALID_IDX;
3131       }
3132     }
3133   }
3134 }
3135 
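// Collapse the full token counts into the model token set: ZERO and ONE are
// kept, every token from TWO upwards (below EOB) is merged into TWO_TOKEN,
// and EOB maps to EOB_MODEL_TOKEN.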
3136 static void full_to_model_count(unsigned int *model_count,
3137                                 unsigned int *full_count) {
3138   int n;
3139   model_count[ZERO_TOKEN] = full_count[ZERO_TOKEN];
3140   model_count[ONE_TOKEN] = full_count[ONE_TOKEN];
3141   model_count[TWO_TOKEN] = full_count[TWO_TOKEN];
3142   for (n = THREE_TOKEN; n < EOB_TOKEN; ++n)
3143     model_count[TWO_TOKEN] += full_count[n];
3144   model_count[EOB_MODEL_TOKEN] = full_count[EOB_TOKEN];
3145 }
3146 
3147 static void full_to_model_counts(vp9_coeff_count_model *model_count,
3148                                  vp9_coeff_count *full_count) {
3149   int i, j, k, l;
3150 
3151   for (i = 0; i < PLANE_TYPES; ++i)
3152     for (j = 0; j < REF_TYPES; ++j)
3153       for (k = 0; k < COEF_BANDS; ++k)
3154         for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
3155           full_to_model_count(model_count[i][j][k][l], full_count[i][j][k][l]);
3156 }
3157 
3158 #if 0 && CONFIG_INTERNAL_STATS
3159 static void output_frame_level_debug_stats(VP9_COMP *cpi) {
3160   VP9_COMMON *const cm = &cpi->common;
3161   FILE *const f = fopen("tmp.stt", cm->current_video_frame ? "a" : "w");
3162   int64_t recon_err;
3163 
3164   vpx_clear_system_state();
3165 
3166 #if CONFIG_VP9_HIGHBITDEPTH
3167   if (cm->use_highbitdepth) {
3168     recon_err = vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
3169   } else {
3170     recon_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
3171   }
3172 #else
3173   recon_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
3174 #endif  // CONFIG_VP9_HIGHBITDEPTH
3175 
3176 
3177   if (cpi->twopass.total_left_stats.coded_error != 0.0) {
3178     double dc_quant_devisor;
3179 #if CONFIG_VP9_HIGHBITDEPTH
3180     switch (cm->bit_depth) {
3181       case VPX_BITS_8:
3182         dc_quant_devisor = 4.0;
3183         break;
3184       case VPX_BITS_10:
3185         dc_quant_devisor = 16.0;
3186         break;
3187       case VPX_BITS_12:
3188         dc_quant_devisor = 64.0;
3189         break;
3190       default:
3191         assert(0 && "bit_depth must be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
3192         break;
3193     }
3194 #else
3195     dc_quant_devisor = 4.0;
3196 #endif
3197 
3198     fprintf(f, "%10u %dx%d %d %d %10d %10d %10d %10d"
3199        "%10"PRId64" %10"PRId64" %5d %5d %10"PRId64" "
3200        "%10"PRId64" %10"PRId64" %10d "
3201        "%7.2lf %7.2lf %7.2lf %7.2lf %7.2lf"
3202         "%6d %6d %5d %5d %5d "
3203         "%10"PRId64" %10.3lf"
3204         "%10lf %8u %10"PRId64" %10d %10d %10d %10d %10d\n",
3205         cpi->common.current_video_frame,
3206         cm->width, cm->height,
3207         cpi->rc.source_alt_ref_pending,
3208         cpi->rc.source_alt_ref_active,
3209         cpi->rc.this_frame_target,
3210         cpi->rc.projected_frame_size,
3211         cpi->rc.projected_frame_size / cpi->common.MBs,
3212         (cpi->rc.projected_frame_size - cpi->rc.this_frame_target),
3213         cpi->rc.vbr_bits_off_target,
3214         cpi->rc.vbr_bits_off_target_fast,
3215         cpi->twopass.extend_minq,
3216         cpi->twopass.extend_minq_fast,
3217         cpi->rc.total_target_vs_actual,
3218         (cpi->rc.starting_buffer_level - cpi->rc.bits_off_target),
3219         cpi->rc.total_actual_bits, cm->base_qindex,
3220         vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth),
3221         (double)vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth) /
3222             dc_quant_devisor,
3223         vp9_convert_qindex_to_q(cpi->twopass.active_worst_quality,
3224                                 cm->bit_depth),
3225         cpi->rc.avg_q,
3226         vp9_convert_qindex_to_q(cpi->oxcf.cq_level, cm->bit_depth),
3227         cpi->refresh_last_frame, cpi->refresh_golden_frame,
3228         cpi->refresh_alt_ref_frame, cm->frame_type, cpi->rc.gfu_boost,
3229         cpi->twopass.bits_left,
3230         cpi->twopass.total_left_stats.coded_error,
3231         cpi->twopass.bits_left /
3232             (1 + cpi->twopass.total_left_stats.coded_error),
3233         cpi->tot_recode_hits, recon_err, cpi->rc.kf_boost,
3234         cpi->twopass.kf_zeromotion_pct,
3235         cpi->twopass.fr_content_type,
3236         cm->lf.filter_level,
3237         cm->seg.aq_av_offset);
3238   }
3239   fclose(f);
3240 
3241   if (0) {
3242     FILE *const fmodes = fopen("Modes.stt", "a");
3243     int i;
3244 
3245     fprintf(fmodes, "%6d:%1d:%1d:%1d ", cpi->common.current_video_frame,
3246             cm->frame_type, cpi->refresh_golden_frame,
3247             cpi->refresh_alt_ref_frame);
3248 
3249     for (i = 0; i < MAX_MODES; ++i)
3250       fprintf(fmodes, "%5d ", cpi->mode_chosen_counts[i]);
3251 
3252     fprintf(fmodes, "\n");
3253 
3254     fclose(fmodes);
3255   }
3256 }
3257 #endif
3258 
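// Set the starting motion search range from the frame resolution and, when
// auto step size is enabled, from the largest mv magnitude seen in the
// previous frame.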
3259 static void set_mv_search_params(VP9_COMP *cpi) {
3260   const VP9_COMMON *const cm = &cpi->common;
3261   const unsigned int max_mv_def = VPXMIN(cm->width, cm->height);
3262 
3263   // Default based on max resolution.
3264   cpi->mv_step_param = vp9_init_search_range(max_mv_def);
3265 
3266   if (cpi->sf.mv.auto_mv_step_size) {
3267     if (frame_is_intra_only(cm)) {
3268       // Initialize max_mv_magnitude for use in the first INTER frame
3269       // after a key/intra-only frame.
3270       cpi->max_mv_magnitude = max_mv_def;
3271     } else {
3272       if (cm->show_frame) {
3273         // Allow mv_steps to correspond to twice the max mv magnitude found
3274         // in the previous frame, capped by the default max_mv_magnitude based
3275         // on resolution.
3276         cpi->mv_step_param = vp9_init_search_range(
3277             VPXMIN(max_mv_def, 2 * cpi->max_mv_magnitude));
3278       }
3279       cpi->max_mv_magnitude = 0;
3280     }
3281   }
3282 }
3283 
3284 static void set_size_independent_vars(VP9_COMP *cpi) {
3285   vp9_set_speed_features_framesize_independent(cpi);
3286   vp9_set_rd_speed_thresholds(cpi);
3287   vp9_set_rd_speed_thresholds_sub8x8(cpi);
3288   cpi->common.interp_filter = cpi->sf.default_interp_filter;
3289 }
3290 
3291 static void set_size_dependent_vars(VP9_COMP *cpi, int *q, int *bottom_index,
3292                                     int *top_index) {
3293   VP9_COMMON *const cm = &cpi->common;
3294   const VP9EncoderConfig *const oxcf = &cpi->oxcf;
3295 
3296   // Setup variables that depend on the dimensions of the frame.
3297   vp9_set_speed_features_framesize_dependent(cpi);
3298 
3299   // Decide q and q bounds.
3300   *q = vp9_rc_pick_q_and_bounds(cpi, bottom_index, top_index);
3301 
3302   if (!frame_is_intra_only(cm)) {
3303     vp9_set_high_precision_mv(cpi, (*q) < HIGH_PRECISION_MV_QTHRESH);
3304   }
3305 
3306   // Configure experimental use of segmentation for enhanced coding of
3307   // static regions if indicated.
3308   // Only allowed in the second pass of a two pass encode, as it requires
3309   // lagged coding, and if the relevant speed feature flag is set.
3310   if (oxcf->pass == 2 && cpi->sf.static_segmentation)
3311     configure_static_seg_features(cpi);
3312 
3313 #if CONFIG_VP9_POSTPROC && !(CONFIG_VP9_TEMPORAL_DENOISING)
3314   if (oxcf->noise_sensitivity > 0) {
3315     int l = 0;
3316     switch (oxcf->noise_sensitivity) {
3317       case 1: l = 20; break;
3318       case 2: l = 40; break;
3319       case 3: l = 60; break;
3320       case 4:
3321       case 5: l = 100; break;
3322       case 6: l = 150; break;
3323     }
3324     if (!cpi->common.postproc_state.limits) {
3325       cpi->common.postproc_state.limits =
3326           vpx_calloc(cpi->un_scaled_source->y_width,
3327                      sizeof(*cpi->common.postproc_state.limits));
3328     }
3329     vp9_denoise(cpi->Source, cpi->Source, l, cpi->common.postproc_state.limits);
3330   }
3331 #endif  // CONFIG_VP9_POSTPROC
3332 }
3333 
3334 #if CONFIG_VP9_TEMPORAL_DENOISING
3335 static void setup_denoiser_buffer(VP9_COMP *cpi) {
3336   VP9_COMMON *const cm = &cpi->common;
3337   if (cpi->oxcf.noise_sensitivity > 0 &&
3338       !cpi->denoiser.frame_buffer_initialized) {
3339     if (vp9_denoiser_alloc(&cpi->denoiser, cm->width, cm->height,
3340                            cm->subsampling_x, cm->subsampling_y,
3341 #if CONFIG_VP9_HIGHBITDEPTH
3342                            cm->use_highbitdepth,
3343 #endif
3344                            VP9_ENC_BORDER_IN_PIXELS))
3345       vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
3346                          "Failed to allocate denoiser");
3347   }
3348 }
3349 #endif
3350 
3351 static void init_motion_estimation(VP9_COMP *cpi) {
3352   int y_stride = cpi->scaled_source.y_stride;
3353 
3354   if (cpi->sf.mv.search_method == NSTEP) {
3355     vp9_init3smotion_compensation(&cpi->ss_cfg, y_stride);
3356   } else if (cpi->sf.mv.search_method == DIAMOND) {
3357     vp9_init_dsmotion_compensation(&cpi->ss_cfg, y_stride);
3358   }
3359 }
3360 
3361 static void set_frame_size(VP9_COMP *cpi) {
3362   int ref_frame;
3363   VP9_COMMON *const cm = &cpi->common;
3364   VP9EncoderConfig *const oxcf = &cpi->oxcf;
3365   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
3366 
3367   if (oxcf->pass == 2 && oxcf->rc_mode == VPX_VBR &&
3368       ((oxcf->resize_mode == RESIZE_FIXED && cm->current_video_frame == 0) ||
3369        (oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) {
3370     calculate_coded_size(cpi, &oxcf->scaled_frame_width,
3371                          &oxcf->scaled_frame_height);
3372 
3373     // There has been a change in frame size.
3374     vp9_set_size_literal(cpi, oxcf->scaled_frame_width,
3375                          oxcf->scaled_frame_height);
3376   }
3377 
3378   if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR && !cpi->use_svc &&
3379       oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending != 0) {
3380     oxcf->scaled_frame_width =
3381         (oxcf->width * cpi->resize_scale_num) / cpi->resize_scale_den;
3382     oxcf->scaled_frame_height =
3383         (oxcf->height * cpi->resize_scale_num) / cpi->resize_scale_den;
3384     // There has been a change in frame size.
3385     vp9_set_size_literal(cpi, oxcf->scaled_frame_width,
3386                          oxcf->scaled_frame_height);
3387 
3388     // TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
3389     set_mv_search_params(cpi);
3390 
3391     vp9_noise_estimate_init(&cpi->noise_estimate, cm->width, cm->height);
3392 #if CONFIG_VP9_TEMPORAL_DENOISING
3393     // Reset the denoiser on the resized frame.
3394     if (cpi->oxcf.noise_sensitivity > 0) {
3395       vp9_denoiser_free(&(cpi->denoiser));
3396       setup_denoiser_buffer(cpi);
3397       // Dynamic resize is only triggered for non-SVC, so we can force a
3398       // golden frame update here as a temporary fix for the denoiser.
3399       cpi->refresh_golden_frame = 1;
3400     }
3401 #endif
3402   }
3403 
3404   if ((oxcf->pass == 2) &&
3405       (!cpi->use_svc || (is_two_pass_svc(cpi) &&
3406                          cpi->svc.encode_empty_frame_state != ENCODING))) {
3407     vp9_set_target_rate(cpi);
3408   }
3409 
3410   alloc_frame_mvs(cm, cm->new_fb_idx);
3411 
3412   // Reset the frame pointers to the current frame size.
3413   if (vpx_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
3414                                cm->subsampling_x, cm->subsampling_y,
3415 #if CONFIG_VP9_HIGHBITDEPTH
3416                                cm->use_highbitdepth,
3417 #endif
3418                                VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
3419                                NULL, NULL, NULL))
3420     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
3421                        "Failed to allocate frame buffer");
3422 
3423   alloc_util_frame_buffers(cpi);
3424   init_motion_estimation(cpi);
3425 
3426   for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
3427     RefBuffer *const ref_buf = &cm->frame_refs[ref_frame - 1];
3428     const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
3429 
3430     ref_buf->idx = buf_idx;
3431 
3432     if (buf_idx != INVALID_IDX) {
3433       YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[buf_idx].buf;
3434       ref_buf->buf = buf;
3435 #if CONFIG_VP9_HIGHBITDEPTH
3436       vp9_setup_scale_factors_for_frame(
3437           &ref_buf->sf, buf->y_crop_width, buf->y_crop_height, cm->width,
3438           cm->height, (buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0);
3439 #else
3440       vp9_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width,
3441                                         buf->y_crop_height, cm->width,
3442                                         cm->height);
3443 #endif  // CONFIG_VP9_HIGHBITDEPTH
3444       if (vp9_is_scaled(&ref_buf->sf)) vpx_extend_frame_borders(buf);
3445     } else {
3446       ref_buf->buf = NULL;
3447     }
3448   }
3449 
3450   set_ref_ptrs(cm, xd, LAST_FRAME, LAST_FRAME);
3451 }
3452 
3453 static void encode_without_recode_loop(VP9_COMP *cpi, size_t *size,
3454                                        uint8_t *dest) {
3455   VP9_COMMON *const cm = &cpi->common;
3456   int q = 0, bottom_index = 0, top_index = 0;  // Dummy variables.
3457   const INTERP_FILTER filter_scaler =
3458       (is_one_pass_cbr_svc(cpi))
3459           ? cpi->svc.downsample_filter_type[cpi->svc.spatial_layer_id]
3460           : EIGHTTAP;
3461   const int phase_scaler =
3462       (is_one_pass_cbr_svc(cpi))
3463           ? cpi->svc.downsample_filter_phase[cpi->svc.spatial_layer_id]
3464           : 0;
3465 
3466   // Flag to check if it's valid to compute the source sad (used for
3467   // scene detection and for superblock content state in CBR mode).
3468   // The flag may get reset below based on SVC or resizing state.
3469   cpi->compute_source_sad_onepass =
3470       cpi->oxcf.mode == REALTIME && cpi->oxcf.speed >= 5 && cm->show_frame;
3471 
3472   vpx_clear_system_state();
3473 
3474   set_frame_size(cpi);
3475 
3476   if (is_one_pass_cbr_svc(cpi) &&
3477       cpi->un_scaled_source->y_width == cm->width << 2 &&
3478       cpi->un_scaled_source->y_height == cm->height << 2 &&
3479       cpi->svc.scaled_temp.y_width == cm->width << 1 &&
3480       cpi->svc.scaled_temp.y_height == cm->height << 1) {
3481     // For svc, if it is a 1/4x1/4 downscaling, do a two-stage scaling to take
3482     // advantage of the 1:2 optimized scaler. In the process, the 1/2x1/2
3483     // result will be saved in scaled_temp and might be used later.
3484     const INTERP_FILTER filter_scaler2 = cpi->svc.downsample_filter_type[1];
3485     const int phase_scaler2 = cpi->svc.downsample_filter_phase[1];
3486     cpi->Source = vp9_svc_twostage_scale(
3487         cm, cpi->un_scaled_source, &cpi->scaled_source, &cpi->svc.scaled_temp,
3488         filter_scaler, phase_scaler, filter_scaler2, phase_scaler2);
3489     cpi->svc.scaled_one_half = 1;
3490   } else if (is_one_pass_cbr_svc(cpi) &&
3491              cpi->un_scaled_source->y_width == cm->width << 1 &&
3492              cpi->un_scaled_source->y_height == cm->height << 1 &&
3493              cpi->svc.scaled_one_half) {
3494     // If the spatial layer is 1/2x1/2 and the scaling is already done in the
3495     // two-stage scaling, use the result directly.
3496     cpi->Source = &cpi->svc.scaled_temp;
3497     cpi->svc.scaled_one_half = 0;
3498   } else {
3499     cpi->Source = vp9_scale_if_required(
3500         cm, cpi->un_scaled_source, &cpi->scaled_source, (cpi->oxcf.pass == 0),
3501         filter_scaler, phase_scaler);
3502   }
3503   // Unfiltered raw source used in metrics calculation if the source
3504   // has been filtered.
3505   if (is_psnr_calc_enabled(cpi)) {
3506 #ifdef ENABLE_KF_DENOISE
3507     if (is_spatial_denoise_enabled(cpi)) {
3508       cpi->raw_source_frame = vp9_scale_if_required(
3509           cm, &cpi->raw_unscaled_source, &cpi->raw_scaled_source,
3510           (cpi->oxcf.pass == 0), EIGHTTAP, phase_scaler);
3511     } else {
3512       cpi->raw_source_frame = cpi->Source;
3513     }
3514 #else
3515     cpi->raw_source_frame = cpi->Source;
3516 #endif
3517   }
3518 
3519   if ((cpi->use_svc &&
3520        (cpi->svc.spatial_layer_id < cpi->svc.number_spatial_layers - 1 ||
3521         cpi->svc.current_superframe < 1)) ||
3522       cpi->resize_pending || cpi->resize_state || cpi->external_resize ||
3523       cpi->resize_state != ORIG) {
3524     cpi->compute_source_sad_onepass = 0;
3525     if (cpi->content_state_sb_fd != NULL)
3526       memset(cpi->content_state_sb_fd, 0,
3527              (cm->mi_stride >> 3) * ((cm->mi_rows >> 3) + 1) *
3528                  sizeof(*cpi->content_state_sb_fd));
3529   }
3530 
3531   // Avoid scaling last_source unless it's needed.
3532   // Last source is needed if avg_source_sad() is used, or if
3533   // partition_search_type == SOURCE_VAR_BASED_PARTITION, or if noise
3534   // estimation is enabled.
3535   if (cpi->unscaled_last_source != NULL &&
3536       (cpi->oxcf.content == VP9E_CONTENT_SCREEN ||
3537        (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_VBR &&
3538         cpi->oxcf.mode == REALTIME && cpi->oxcf.speed >= 5) ||
3539        cpi->sf.partition_search_type == SOURCE_VAR_BASED_PARTITION ||
3540        (cpi->noise_estimate.enabled && !cpi->oxcf.noise_sensitivity) ||
3541        cpi->compute_source_sad_onepass))
3542     cpi->Last_Source = vp9_scale_if_required(
3543         cm, cpi->unscaled_last_source, &cpi->scaled_last_source,
3544         (cpi->oxcf.pass == 0), EIGHTTAP, 0);
3545 
3546   if (cpi->Last_Source == NULL ||
3547       cpi->Last_Source->y_width != cpi->Source->y_width ||
3548       cpi->Last_Source->y_height != cpi->Source->y_height)
3549     cpi->compute_source_sad_onepass = 0;
3550 
3551   if (cm->frame_type == KEY_FRAME || cpi->resize_pending != 0) {
3552     memset(cpi->consec_zero_mv, 0,
3553            cm->mi_rows * cm->mi_cols * sizeof(*cpi->consec_zero_mv));
3554   }
3555 
3556   vp9_update_noise_estimate(cpi);
3557 
3558   // Scene detection is used for VBR mode or screen-content case.
3559   // Make sure compute_source_sad_onepass is set (which handles SVC case
3560   // and dynamic resize).
3561   if (cpi->compute_source_sad_onepass &&
3562       (cpi->oxcf.rc_mode == VPX_VBR ||
3563        cpi->oxcf.content == VP9E_CONTENT_SCREEN))
3564     vp9_scene_detection_onepass(cpi);
3565 
3566   // For 1 pass CBR SVC, only ZEROMV is allowed for spatial reference frame
3567   // when svc->force_zero_mode_spatial_ref = 1. Under those conditions we can
3568   // avoid this frame-level upsampling (for non intra_only frames).
3569   if (frame_is_intra_only(cm) == 0 &&
3570       !(is_one_pass_cbr_svc(cpi) && cpi->svc.force_zero_mode_spatial_ref)) {
3571     vp9_scale_references(cpi);
3572   }
3573 
3574   set_size_independent_vars(cpi);
3575   set_size_dependent_vars(cpi, &q, &bottom_index, &top_index);
3576 
3577   if (cpi->sf.copy_partition_flag) alloc_copy_partition_data(cpi);
3578 
3579   if (cpi->oxcf.speed >= 5 && cpi->oxcf.pass == 0 &&
3580       cpi->oxcf.rc_mode == VPX_CBR &&
3581       cpi->oxcf.content != VP9E_CONTENT_SCREEN &&
3582       cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
3583     cpi->use_skin_detection = 1;
3584   }
3585 
3586   vp9_set_quantizer(cm, q);
3587   vp9_set_variance_partition_thresholds(cpi, q, 0);
3588 
3589   setup_frame(cpi);
3590 
3591   suppress_active_map(cpi);
3592 
3593   // Variance adaptive and in frame q adjustment experiments are mutually
3594   // exclusive.
3595   if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
3596     vp9_vaq_frame_setup(cpi);
3597   } else if (cpi->oxcf.aq_mode == EQUATOR360_AQ) {
3598     vp9_360aq_frame_setup(cpi);
3599   } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
3600     vp9_setup_in_frame_q_adj(cpi);
3601   } else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
3602     vp9_cyclic_refresh_setup(cpi);
3603   } else if (cpi->oxcf.aq_mode == LOOKAHEAD_AQ) {
3604     // Note: this can be pretty bad for rate control,
3605     // and still needs to be handled properly.
3606     vp9_alt_ref_aq_setup_map(cpi->alt_ref_aq, cpi);
3607   }
3608 
3609   apply_active_map(cpi);
3610 
3611   vp9_encode_frame(cpi);
3612 
3613   // Check if we should drop this frame because of high overshoot.
3614   // Only for frames where high temporal-source SAD is detected.
3615   if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR &&
3616       cpi->resize_state == ORIG && cm->frame_type != KEY_FRAME &&
3617       cpi->oxcf.content == VP9E_CONTENT_SCREEN &&
3618       cpi->rc.high_source_sad == 1) {
3619     int frame_size = 0;
3620     // Get an estimate of the encoded frame size.
3621     save_coding_context(cpi);
3622     vp9_pack_bitstream(cpi, dest, size);
3623     restore_coding_context(cpi);
3624     frame_size = (int)(*size) << 3;
3625     // Check if encoded frame will overshoot too much, and if so, set the q and
3626     // adjust some rate control parameters, and return to re-encode the frame.
3627     if (vp9_encodedframe_overshoot(cpi, frame_size, &q)) {
3628       vpx_clear_system_state();
3629       vp9_set_quantizer(cm, q);
3630       vp9_set_variance_partition_thresholds(cpi, q, 0);
3631       suppress_active_map(cpi);
3632       // Turn-off cyclic refresh for re-encoded frame.
3633       if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
3634         unsigned char *const seg_map = cpi->segmentation_map;
3635         memset(seg_map, 0, cm->mi_rows * cm->mi_cols);
3636         vp9_disable_segmentation(&cm->seg);
3637       }
3638       apply_active_map(cpi);
3639       vp9_encode_frame(cpi);
3640     }
3641   }
3642 
3643   // Update some stats from cyclic refresh, and check for golden frame update.
3644   if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled &&
3645       cm->frame_type != KEY_FRAME)
3646     vp9_cyclic_refresh_postencode(cpi);
3647 
3648   // Update the skip mb flag probabilities based on the distribution
3649   // seen in the last encoder iteration.
3650   // update_base_skip_probs(cpi);
3651   vpx_clear_system_state();
3652 }
3653 
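// Number of qindex steps to adjust by in the recode loop: proportional to the
// relative rate miss (rate_excess / rate_limit, rounded), capped at
// MAX_QSTEP_ADJ.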
3654 #define MAX_QSTEP_ADJ 4
3655 static int get_qstep_adj(int rate_excess, int rate_limit) {
3656   int qstep =
3657       rate_limit ? ((rate_excess + rate_limit / 2) / rate_limit) : INT_MAX;
3658   return VPXMIN(qstep, MAX_QSTEP_ADJ);
3659 }
3660 
3661 static void encode_with_recode_loop(VP9_COMP *cpi, size_t *size,
3662                                     uint8_t *dest) {
3663   VP9_COMMON *const cm = &cpi->common;
3664   RATE_CONTROL *const rc = &cpi->rc;
3665   int bottom_index, top_index;
3666   int loop_count = 0;
3667   int loop_at_this_size = 0;
3668   int loop = 0;
3669   int overshoot_seen = 0;
3670   int undershoot_seen = 0;
3671   int frame_over_shoot_limit;
3672   int frame_under_shoot_limit;
3673   int q = 0, q_low = 0, q_high = 0;
3674   int enable_acl;
3675 #ifdef AGGRESSIVE_VBR
3676   int qrange_adj = 1;
3677 #endif
3678 
3679   set_size_independent_vars(cpi);
3680 
3681   enable_acl = cpi->sf.allow_acl
3682                    ? (cm->frame_type == KEY_FRAME) || (cm->show_frame == 0)
3683                    : 0;
3684 
3685   do {
3686     vpx_clear_system_state();
3687 
3688     set_frame_size(cpi);
3689 
3690     if (loop_count == 0 || cpi->resize_pending != 0) {
3691       set_size_dependent_vars(cpi, &q, &bottom_index, &top_index);
3692 
3693 #ifdef AGGRESSIVE_VBR
3694       if (two_pass_first_group_inter(cpi)) {
3695         // Adjustment limits for min and max q
3696         qrange_adj = VPXMAX(1, (top_index - bottom_index) / 2);
3697 
3698         bottom_index =
3699             VPXMAX(bottom_index - qrange_adj / 2, cpi->oxcf.best_allowed_q);
3700         top_index =
3701             VPXMIN(cpi->oxcf.worst_allowed_q, top_index + qrange_adj / 2);
3702       }
3703 #endif
3704       // TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
3705       set_mv_search_params(cpi);
3706 
3707       // Reset the loop state for new frame size.
3708       overshoot_seen = 0;
3709       undershoot_seen = 0;
3710 
3711       // Reconfiguration for change in frame size has concluded.
3712       cpi->resize_pending = 0;
3713 
3714       q_low = bottom_index;
3715       q_high = top_index;
3716 
3717       loop_at_this_size = 0;
3718     }
3719 
3720     // Decide frame size bounds first time through.
3721     if (loop_count == 0) {
3722       vp9_rc_compute_frame_size_bounds(cpi, rc->this_frame_target,
3723                                        &frame_under_shoot_limit,
3724                                        &frame_over_shoot_limit);
3725     }
3726 
3727     cpi->Source =
3728         vp9_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source,
3729                               (cpi->oxcf.pass == 0), EIGHTTAP, 0);
3730 
3731     // Unfiltered raw source used in metrics calculation if the source
3732     // has been filtered.
3733     if (is_psnr_calc_enabled(cpi)) {
3734 #ifdef ENABLE_KF_DENOISE
3735       if (is_spatial_denoise_enabled(cpi)) {
3736         cpi->raw_source_frame = vp9_scale_if_required(
3737             cm, &cpi->raw_unscaled_source, &cpi->raw_scaled_source,
3738             (cpi->oxcf.pass == 0), EIGHTTAP, 0);
3739       } else {
3740         cpi->raw_source_frame = cpi->Source;
3741       }
3742 #else
3743       cpi->raw_source_frame = cpi->Source;
3744 #endif
3745     }
3746 
3747     if (cpi->unscaled_last_source != NULL)
3748       cpi->Last_Source = vp9_scale_if_required(
3749           cm, cpi->unscaled_last_source, &cpi->scaled_last_source,
3750           (cpi->oxcf.pass == 0), EIGHTTAP, 0);
3751 
3752     if (frame_is_intra_only(cm) == 0) {
3753       if (loop_count > 0) {
3754         release_scaled_references(cpi);
3755       }
3756       vp9_scale_references(cpi);
3757     }
3758 
3759     vp9_set_quantizer(cm, q);
3760 
3761     if (loop_count == 0) setup_frame(cpi);
3762 
3763     // Variance adaptive and in frame q adjustment experiments are mutually
3764     // exclusive.
3765     if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
3766       vp9_vaq_frame_setup(cpi);
3767     } else if (cpi->oxcf.aq_mode == EQUATOR360_AQ) {
3768       vp9_360aq_frame_setup(cpi);
3769     } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
3770       vp9_setup_in_frame_q_adj(cpi);
3771     } else if (cpi->oxcf.aq_mode == LOOKAHEAD_AQ) {
3772       vp9_alt_ref_aq_setup_map(cpi->alt_ref_aq, cpi);
3773     }
3774 
3775     vp9_encode_frame(cpi);
3776 
3777     // Update the skip mb flag probabilities based on the distribution
3778     // seen in the last encoder iteration.
3779     // update_base_skip_probs(cpi);
3780 
3781     vpx_clear_system_state();
3782 
3783     // Dummy pack of the bitstream using up to date stats to get an
3784     // accurate estimate of output frame size to determine if we need
3785     // to recode.
3786     if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF) {
3787       save_coding_context(cpi);
3788       if (!cpi->sf.use_nonrd_pick_mode) vp9_pack_bitstream(cpi, dest, size);
3789 
3790       rc->projected_frame_size = (int)(*size) << 3;
3791 
3792       if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
3793     }
3794 
3795     if (cpi->oxcf.rc_mode == VPX_Q) {
3796       loop = 0;
3797     } else {
3798       if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced &&
3799           (rc->projected_frame_size < rc->max_frame_bandwidth)) {
3800         int last_q = q;
3801         int64_t kf_err;
3802 
3803         int64_t high_err_target = cpi->ambient_err;
3804         int64_t low_err_target = cpi->ambient_err >> 1;
3805 
3806 #if CONFIG_VP9_HIGHBITDEPTH
3807         if (cm->use_highbitdepth) {
3808           kf_err = vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
3809         } else {
3810           kf_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
3811         }
3812 #else
3813         kf_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
3814 #endif  // CONFIG_VP9_HIGHBITDEPTH
3815 
3816         // Prevent possible divide by zero error below for perfect KF
3817         kf_err += !kf_err;
3818 
3819         // The key frame is not good enough or we can afford
3820         // to make it better without undue risk of popping.
3821         if ((kf_err > high_err_target &&
3822              rc->projected_frame_size <= frame_over_shoot_limit) ||
3823             (kf_err > low_err_target &&
3824              rc->projected_frame_size <= frame_under_shoot_limit)) {
3825           // Lower q_high
3826           q_high = q > q_low ? q - 1 : q_low;
3827 
3828           // Adjust Q
3829           q = (int)((q * high_err_target) / kf_err);
3830           q = VPXMIN(q, (q_high + q_low) >> 1);
3831         } else if (kf_err < low_err_target &&
3832                    rc->projected_frame_size >= frame_under_shoot_limit) {
3833           // The key frame is much better than the previous frame
3834           // Raise q_low
3835           q_low = q < q_high ? q + 1 : q_high;
3836 
3837           // Adjust Q
3838           q = (int)((q * low_err_target) / kf_err);
3839           q = VPXMIN(q, (q_high + q_low + 1) >> 1);
3840         }
3841 
3842         // Clamp Q to upper and lower limits:
3843         q = clamp(q, q_low, q_high);
3844 
3845         loop = q != last_q;
3846       } else if (recode_loop_test(cpi, frame_over_shoot_limit,
3847                                   frame_under_shoot_limit, q,
3848                                   VPXMAX(q_high, top_index), bottom_index)) {
3849         // Is the projected frame size out of range and are we allowed
3850         // to attempt to recode.
3851         int last_q = q;
3852         int retries = 0;
3853         int qstep;
3854 
3855         if (cpi->resize_pending == 1) {
3856           // Change in frame size so go back around the recode loop.
3857           cpi->rc.frame_size_selector =
3858               SCALE_STEP1 - cpi->rc.frame_size_selector;
3859           cpi->rc.next_frame_size_selector = cpi->rc.frame_size_selector;
3860 
3861 #if CONFIG_INTERNAL_STATS
3862           ++cpi->tot_recode_hits;
3863 #endif
3864           ++loop_count;
3865           loop = 1;
3866           continue;
3867         }
3868 
3869         // Frame size out of permitted range:
3870         // Update correction factor & compute new Q to try...
3871 
3872         // Frame is too large
3873         if (rc->projected_frame_size > rc->this_frame_target) {
3874           // Special case if the projected size is > the max allowed.
3875           if (rc->projected_frame_size >= rc->max_frame_bandwidth) {
3876             double q_val_high;
3877             q_val_high = vp9_convert_qindex_to_q(q_high, cm->bit_depth);
3878             q_val_high = q_val_high * ((double)rc->projected_frame_size /
3879                                        rc->max_frame_bandwidth);
3880             q_high = vp9_convert_q_to_qindex(q_val_high, cm->bit_depth);
3881             q_high = clamp(q_high, rc->best_quality, rc->worst_quality);
3882           }
3883 
3884           // Raise q_low to at least the current value.
3885           qstep =
3886               get_qstep_adj(rc->projected_frame_size, rc->this_frame_target);
3887           q_low = VPXMIN(q + qstep, q_high);
3888           // q_low = q < q_high ? q + 1 : q_high;
3889 
3890           if (undershoot_seen || loop_at_this_size > 1) {
3891             // Update the rate correction factor.
3892             vp9_rc_update_rate_correction_factors(cpi);
3893 
3894             q = (q_high + q_low + 1) / 2;
3895           } else {
3896             // Update the rate correction factor.
3897             vp9_rc_update_rate_correction_factors(cpi);
3898 
3899             q = vp9_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
3900                                   VPXMAX(q_high, top_index));
3901 
3902             while (q < q_low && retries < 10) {
3903               vp9_rc_update_rate_correction_factors(cpi);
3904               q = vp9_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
3905                                     VPXMAX(q_high, top_index));
3906               retries++;
3907             }
3908           }
3909 
3910           overshoot_seen = 1;
3911         } else {
3912           // Frame is too small
3913           qstep =
3914               get_qstep_adj(rc->this_frame_target, rc->projected_frame_size);
3915           q_high = VPXMAX(q - qstep, q_low);
3916           // q_high = q > q_low ? q - 1 : q_low;
3917 
3918           if (overshoot_seen || loop_at_this_size > 1) {
3919             vp9_rc_update_rate_correction_factors(cpi);
3920             q = (q_high + q_low) / 2;
3921           } else {
3922             vp9_rc_update_rate_correction_factors(cpi);
3923             q = vp9_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
3924                                   top_index);
3925             // Special case reset for qlow for constrained quality.
3926             // This should only trigger where there is very substantial
3927             // undershoot on a frame and the auto cq level is above
3928             // the value passed in by the user.
3929             if (cpi->oxcf.rc_mode == VPX_CQ && q < q_low) {
3930               q_low = q;
3931             }
3932 
3933             while (q > q_high && retries < 10) {
3934               vp9_rc_update_rate_correction_factors(cpi);
3935               q = vp9_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
3936                                     top_index);
3937               retries++;
3938             }
3939           }
3940 
3941           undershoot_seen = 1;
3942         }
3943 
3944         // Clamp Q to upper and lower limits:
3945         q = clamp(q, q_low, q_high);
3946 
3947         loop = (q != last_q);
3948       } else {
3949         loop = 0;
3950       }
3951     }
3952 
3953     // Special case for overlay frame.
3954     if (rc->is_src_frame_alt_ref &&
3955         rc->projected_frame_size < rc->max_frame_bandwidth)
3956       loop = 0;
3957 
3958     if (loop) {
3959       ++loop_count;
3960       ++loop_at_this_size;
3961 
3962 #if CONFIG_INTERNAL_STATS
3963       ++cpi->tot_recode_hits;
3964 #endif
3965     }
3966 
3967     if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF)
3968       if (loop || !enable_acl) restore_coding_context(cpi);
3969   } while (loop);
3970 
3971 #ifdef AGGRESSIVE_VBR
3972   if (two_pass_first_group_inter(cpi)) {
3973     cpi->twopass.active_worst_quality =
3974         VPXMIN(q + qrange_adj, cpi->oxcf.worst_allowed_q);
3975   }
3976 #endif
3977 
3978   if (enable_acl) {
3979     // Skip recoding if the model diff is below the threshold.
3980     const int thresh = compute_context_model_thresh(cpi);
3981     const int diff = compute_context_model_diff(cm);
3982     if (diff < thresh) {
3983       vpx_clear_system_state();
3984       restore_coding_context(cpi);
3985       return;
3986     }
3987 
3988     vp9_encode_frame(cpi);
3989     vpx_clear_system_state();
3990     restore_coding_context(cpi);
3991     vp9_pack_bitstream(cpi, dest, size);
3992 
3993     vp9_encode_frame(cpi);
3994     vpx_clear_system_state();
3995 
3996     restore_coding_context(cpi);
3997   }
3998 }
3999 
4000 static int get_ref_frame_flags(const VP9_COMP *cpi) {
4001   const int *const map = cpi->common.ref_frame_map;
4002   const int gold_is_last = map[cpi->gld_fb_idx] == map[cpi->lst_fb_idx];
4003   const int alt_is_last = map[cpi->alt_fb_idx] == map[cpi->lst_fb_idx];
4004   const int gold_is_alt = map[cpi->gld_fb_idx] == map[cpi->alt_fb_idx];
4005   int flags = VP9_ALT_FLAG | VP9_GOLD_FLAG | VP9_LAST_FLAG;
4006 
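  // Clear the flag for any reference that shares a buffer with another
  // reference, so the same frame is not searched more than once.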
4007   if (gold_is_last) flags &= ~VP9_GOLD_FLAG;
4008 
4009   if (cpi->rc.frames_till_gf_update_due == INT_MAX &&
4010       (cpi->svc.number_temporal_layers == 1 &&
4011        cpi->svc.number_spatial_layers == 1))
4012     flags &= ~VP9_GOLD_FLAG;
4013 
4014   if (alt_is_last) flags &= ~VP9_ALT_FLAG;
4015 
4016   if (gold_is_alt) flags &= ~VP9_ALT_FLAG;
4017 
4018   return flags;
4019 }
4020 
4021 static void set_ext_overrides(VP9_COMP *cpi) {
4022   // Override the defaults with the externally supplied values from the
4023   // vp9_update_reference() and vp9_update_entropy() calls.
4024   // Note: The overrides are valid only for the next frame passed
4025   // to the encode_frame_to_data_rate() function.
4026   if (cpi->ext_refresh_frame_context_pending) {
4027     cpi->common.refresh_frame_context = cpi->ext_refresh_frame_context;
4028     cpi->ext_refresh_frame_context_pending = 0;
4029   }
4030   if (cpi->ext_refresh_frame_flags_pending) {
4031     cpi->refresh_last_frame = cpi->ext_refresh_last_frame;
4032     cpi->refresh_golden_frame = cpi->ext_refresh_golden_frame;
4033     cpi->refresh_alt_ref_frame = cpi->ext_refresh_alt_ref_frame;
4034   }
4035 }
4036 
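// Scale the source in two stages when its size differs from the coded frame
// size: first into scaled_temp using filter_type2/phase_scaler2, then into
// scaled using filter_type/phase_scaler. Returns the buffer to encode from
// (scaled, or unscaled if no resizing is needed).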
4037 YV12_BUFFER_CONFIG *vp9_svc_twostage_scale(
4038     VP9_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled,
4039     YV12_BUFFER_CONFIG *scaled_temp, INTERP_FILTER filter_type,
4040     int phase_scaler, INTERP_FILTER filter_type2, int phase_scaler2) {
4041   if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
4042       cm->mi_rows * MI_SIZE != unscaled->y_height) {
4043 #if CONFIG_VP9_HIGHBITDEPTH
4044     if (cm->bit_depth == VPX_BITS_8) {
4045       vp9_scale_and_extend_frame(unscaled, scaled_temp, filter_type2,
4046                                  phase_scaler2);
4047       vp9_scale_and_extend_frame(scaled_temp, scaled, filter_type,
4048                                  phase_scaler);
4049     } else {
4050       scale_and_extend_frame(unscaled, scaled_temp, (int)cm->bit_depth,
4051                              filter_type2, phase_scaler2);
4052       scale_and_extend_frame(scaled_temp, scaled, (int)cm->bit_depth,
4053                              filter_type, phase_scaler);
4054     }
4055 #else
4056     vp9_scale_and_extend_frame(unscaled, scaled_temp, filter_type2,
4057                                phase_scaler2);
4058     vp9_scale_and_extend_frame(scaled_temp, scaled, filter_type, phase_scaler);
4059 #endif  // CONFIG_VP9_HIGHBITDEPTH
4060     return scaled;
4061   } else {
4062     return unscaled;
4063   }
4064 }
4065 
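// Scale the source into scaled when its size differs from the coded frame
// size. The normative scaler is used only when requested and the downscale
// factor is at most 2x in each dimension; otherwise the non-normative scaler
// is used. Returns the buffer to encode from.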
4066 YV12_BUFFER_CONFIG *vp9_scale_if_required(
4067     VP9_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled,
4068     int use_normative_scaler, INTERP_FILTER filter_type, int phase_scaler) {
4069   if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
4070       cm->mi_rows * MI_SIZE != unscaled->y_height) {
4071 #if CONFIG_VP9_HIGHBITDEPTH
4072     if (use_normative_scaler && unscaled->y_width <= (scaled->y_width << 1) &&
4073         unscaled->y_height <= (scaled->y_height << 1))
4074       if (cm->bit_depth == VPX_BITS_8)
4075         vp9_scale_and_extend_frame(unscaled, scaled, filter_type, phase_scaler);
4076       else
4077         scale_and_extend_frame(unscaled, scaled, (int)cm->bit_depth,
4078                                filter_type, phase_scaler);
4079     else
4080       scale_and_extend_frame_nonnormative(unscaled, scaled, (int)cm->bit_depth);
4081 #else
4082     if (use_normative_scaler && unscaled->y_width <= (scaled->y_width << 1) &&
4083         unscaled->y_height <= (scaled->y_height << 1))
4084       vp9_scale_and_extend_frame(unscaled, scaled, filter_type, phase_scaler);
4085     else
4086       scale_and_extend_frame_nonnormative(unscaled, scaled);
4087 #endif  // CONFIG_VP9_HIGHBITDEPTH
4088     return scaled;
4089   } else {
4090     return unscaled;
4091   }
4092 }
4093 
4094 static void set_arf_sign_bias(VP9_COMP *cpi) {
4095   VP9_COMMON *const cm = &cpi->common;
4096   int arf_sign_bias;
4097 
4098   if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
4099     const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
4100     arf_sign_bias = cpi->rc.source_alt_ref_active &&
4101                     (!cpi->refresh_alt_ref_frame ||
4102                      (gf_group->rf_level[gf_group->index] == GF_ARF_LOW));
4103   } else {
4104     arf_sign_bias =
4105         (cpi->rc.source_alt_ref_active && !cpi->refresh_alt_ref_frame);
4106   }
4107   cm->ref_frame_sign_bias[ALTREF_FRAME] = arf_sign_bias;
4108 }
4109 
4110 static int setup_interp_filter_search_mask(VP9_COMP *cpi) {
4111   INTERP_FILTER ifilter;
4112   int ref_total[MAX_REF_FRAMES] = { 0 };
4113   MV_REFERENCE_FRAME ref;
4114   int mask = 0;
4115   if (cpi->common.last_frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame)
4116     return mask;
4117   for (ref = LAST_FRAME; ref <= ALTREF_FRAME; ++ref)
4118     for (ifilter = EIGHTTAP; ifilter <= EIGHTTAP_SHARP; ++ifilter)
4119       ref_total[ref] += cpi->interp_filter_selected[ref][ifilter];
4120 
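  // Mask out a filter that was never selected for LAST and was selected
  // less than 2% of the time (the x50 test) for GOLDEN and ALTREF; masked
  // filters can then be skipped in the interpolation filter search.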
4121   for (ifilter = EIGHTTAP; ifilter <= EIGHTTAP_SHARP; ++ifilter) {
4122     if ((ref_total[LAST_FRAME] &&
4123          cpi->interp_filter_selected[LAST_FRAME][ifilter] == 0) &&
4124         (ref_total[GOLDEN_FRAME] == 0 ||
4125          cpi->interp_filter_selected[GOLDEN_FRAME][ifilter] * 50 <
4126              ref_total[GOLDEN_FRAME]) &&
4127         (ref_total[ALTREF_FRAME] == 0 ||
4128          cpi->interp_filter_selected[ALTREF_FRAME][ifilter] * 50 <
4129              ref_total[ALTREF_FRAME]))
4130       mask |= 1 << ifilter;
4131   }
4132   return mask;
4133 }
4134 
4135 #ifdef ENABLE_KF_DENOISE
4136 // Baseline kernel weights for denoise.
4137 static uint8_t dn_kernal_3[9] = { 1, 2, 1, 2, 4, 2, 1, 2, 1 };
4138 static uint8_t dn_kernal_5[25] = { 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 4,
4139                                    2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1 };
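// Viewed as 2D kernels: the 3x3 tap is { 1 2 1; 2 4 2; 1 2 1 } and the 5x5
// tap has a centre weight of 4, weights of 2 adjacent to the centre, and 1
// elsewhere.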
4140 
4141 static INLINE void add_denoise_point(int centre_val, int data_val, int thresh,
4142                                      uint8_t point_weight, int *sum_val,
4143                                      int *sum_weight) {
4144   if (abs(centre_val - data_val) <= thresh) {
4145     *sum_weight += point_weight;
4146     *sum_val += (int)data_val * (int)point_weight;
4147   }
4148 }
4149 
4150 static void spatial_denoise_point(uint8_t *src_ptr, const int stride,
4151                                   const int strength) {
4152   int sum_weight = 0;
4153   int sum_val = 0;
4154   int thresh = strength;
4155   int kernal_size = 5;
4156   int half_k_size = 2;
4157   int i, j;
4158   int max_diff = 0;
4159   uint8_t *tmp_ptr;
4160   uint8_t *kernal_ptr;
4161 
4162   // Find the maximum deviation from the source point in the locale.
4163   tmp_ptr = src_ptr - (stride * (half_k_size + 1)) - (half_k_size + 1);
4164   for (i = 0; i < kernal_size + 2; ++i) {
4165     for (j = 0; j < kernal_size + 2; ++j) {
4166       max_diff = VPXMAX(max_diff, abs((int)*src_ptr - (int)tmp_ptr[j]));
4167     }
4168     tmp_ptr += stride;
4169   }
4170 
4171   // Select the kernel size.
4172   if (max_diff > (strength + (strength >> 1))) {
4173     kernal_size = 3;
4174     half_k_size = 1;
4175     thresh = thresh >> 1;
4176   }
4177   kernal_ptr = (kernal_size == 3) ? dn_kernal_3 : dn_kernal_5;
4178 
4179   // Apply the kernel.
4180   tmp_ptr = src_ptr - (stride * half_k_size) - half_k_size;
4181   for (i = 0; i < kernal_size; ++i) {
4182     for (j = 0; j < kernal_size; ++j) {
4183       add_denoise_point((int)*src_ptr, (int)tmp_ptr[j], thresh, *kernal_ptr,
4184                         &sum_val, &sum_weight);
4185       ++kernal_ptr;
4186     }
4187     tmp_ptr += stride;
4188   }
4189 
4190   // Update the source value with the new filtered value
4191   *src_ptr = (uint8_t)((sum_val + (sum_weight >> 1)) / sum_weight);
4192 }
4193 
4194 #if CONFIG_VP9_HIGHBITDEPTH
4195 static void highbd_spatial_denoise_point(uint16_t *src_ptr, const int stride,
4196                                          const int strength) {
4197   int sum_weight = 0;
4198   int sum_val = 0;
4199   int thresh = strength;
4200   int kernal_size = 5;
4201   int half_k_size = 2;
4202   int i, j;
4203   int max_diff = 0;
4204   uint16_t *tmp_ptr;
4205   uint8_t *kernal_ptr;
4206 
4207   // Find the maximum deviation from the source point in the locale.
4208   tmp_ptr = src_ptr - (stride * (half_k_size + 1)) - (half_k_size + 1);
4209   for (i = 0; i < kernal_size + 2; ++i) {
4210     for (j = 0; j < kernal_size + 2; ++j) {
4211       max_diff = VPXMAX(max_diff, abs((int)*src_ptr - (int)tmp_ptr[j]));
4212     }
4213     tmp_ptr += stride;
4214   }
4215 
4216   // Select the kernel size.
4217   if (max_diff > (strength + (strength >> 1))) {
4218     kernal_size = 3;
4219     half_k_size = 1;
4220     thresh = thresh >> 1;
4221   }
4222   kernal_ptr = (kernal_size == 3) ? dn_kernal_3 : dn_kernal_5;
4223 
4224   // Apply the kernel.
4225   tmp_ptr = src_ptr - (stride * half_k_size) - half_k_size;
4226   for (i = 0; i < kernal_size; ++i) {
4227     for (j = 0; j < kernal_size; ++j) {
4228       add_denoise_point((int)*src_ptr, (int)tmp_ptr[j], thresh, *kernal_ptr,
4229                         &sum_val, &sum_weight);
4230       ++kernal_ptr;
4231     }
4232     tmp_ptr += stride;
4233   }
4234 
4235   // Update the source value with the new filtered value
4236   *src_ptr = (uint16_t)((sum_val + (sum_weight >> 1)) / sum_weight);
4237 }
4238 #endif  // CONFIG_VP9_HIGHBITDEPTH
4239 
4240 // Apply thresholded spatial noise suppression to a given buffer.
4241 static void spatial_denoise_buffer(VP9_COMP *cpi, uint8_t *buffer,
4242                                    const int stride, const int width,
4243                                    const int height, const int strength) {
4244   VP9_COMMON *const cm = &cpi->common;
4245   uint8_t *src_ptr = buffer;
4246   int row;
4247   int col;
4248 
4249   for (row = 0; row < height; ++row) {
4250     for (col = 0; col < width; ++col) {
4251 #if CONFIG_VP9_HIGHBITDEPTH
4252       if (cm->use_highbitdepth)
4253         highbd_spatial_denoise_point(CONVERT_TO_SHORTPTR(&src_ptr[col]), stride,
4254                                      strength);
4255       else
4256         spatial_denoise_point(&src_ptr[col], stride, strength);
4257 #else
4258       spatial_denoise_point(&src_ptr[col], stride, strength);
4259 #endif  // CONFIG_VP9_HIGHBITDEPTH
4260     }
4261     src_ptr += stride;
4262   }
4263 }
4264 
4265 // Apply thresholded spatial noise suppression to the source.
4266 static void spatial_denoise_frame(VP9_COMP *cpi) {
4267   YV12_BUFFER_CONFIG *src = cpi->Source;
4268   const VP9EncoderConfig *const oxcf = &cpi->oxcf;
4269   TWO_PASS *const twopass = &cpi->twopass;
4270   VP9_COMMON *const cm = &cpi->common;
4271 
4272   // Base the filter strength on the current active max Q.
4273   const int q = (int)(vp9_convert_qindex_to_q(twopass->active_worst_quality,
4274                                               cm->bit_depth));
4275   int strength =
4276       VPXMAX(oxcf->arnr_strength >> 2, VPXMIN(oxcf->arnr_strength, (q >> 4)));
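  // i.e. clamp (q >> 4) to the range [arnr_strength >> 2, arnr_strength].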
4277 
4278   // Denoise each of Y,U and V buffers.
4279   spatial_denoise_buffer(cpi, src->y_buffer, src->y_stride, src->y_width,
4280                          src->y_height, strength);
4281 
4282   strength += (strength >> 1);
4283   spatial_denoise_buffer(cpi, src->u_buffer, src->uv_stride, src->uv_width,
4284                          src->uv_height, strength << 1);
4285 
4286   spatial_denoise_buffer(cpi, src->v_buffer, src->uv_stride, src->uv_width,
4287                          src->uv_height, strength << 1);
4288 }
4289 #endif  // ENABLE_KF_DENOISE
4290 
4291 static void vp9_try_disable_lookahead_aq(VP9_COMP *cpi, size_t *size,
4292                                          uint8_t *dest) {
4293   if (cpi->common.seg.enabled)
4294     if (ALT_REF_AQ_PROTECT_GAIN) {
4295       size_t nsize = *size;
4296       int overhead;
4297 
4298       // TODO(yuryg): optimize this, as
4299       // we don't really need to repack
4300 
4301       save_coding_context(cpi);
4302       vp9_disable_segmentation(&cpi->common.seg);
4303       vp9_pack_bitstream(cpi, dest, &nsize);
4304       restore_coding_context(cpi);
4305 
4306       overhead = (int)*size - (int)nsize;
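      // overhead is the number of bytes added by segmentation signalling.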
4307 
4308       if (vp9_alt_ref_aq_disable_if(cpi->alt_ref_aq, overhead, (int)*size))
4309         vp9_encode_frame(cpi);
4310       else
4311         vp9_enable_segmentation(&cpi->common.seg);
4312     }
4313 }
4314 
4315 static void encode_frame_to_data_rate(VP9_COMP *cpi, size_t *size,
4316                                       uint8_t *dest,
4317                                       unsigned int *frame_flags) {
4318   VP9_COMMON *const cm = &cpi->common;
4319   const VP9EncoderConfig *const oxcf = &cpi->oxcf;
4320   struct segmentation *const seg = &cm->seg;
4321   TX_SIZE t;
4322 
4323   set_ext_overrides(cpi);
4324   vpx_clear_system_state();
4325 
4326 #ifdef ENABLE_KF_DENOISE
4327   // Spatial denoise of key frame.
4328   if (is_spatial_denoise_enabled(cpi)) spatial_denoise_frame(cpi);
4329 #endif
4330 
4331   // Set the arf sign bias for this frame.
4332   set_arf_sign_bias(cpi);
4333 
4334   // Set default state for segment based loop filter update flags.
4335   cm->lf.mode_ref_delta_update = 0;
4336 
4337   if (cpi->oxcf.pass == 2 && cpi->sf.adaptive_interp_filter_search)
4338     cpi->sf.interp_filter_search_mask = setup_interp_filter_search_mask(cpi);
4339 
4340   // Set various flags etc to special state if it is a key frame.
4341   if (frame_is_intra_only(cm)) {
4342     // Reset the loop filter deltas and segmentation map.
4343     vp9_reset_segment_features(&cm->seg);
4344 
4345     // If segmentation is enabled force a map update for key frames.
4346     if (seg->enabled) {
4347       seg->update_map = 1;
4348       seg->update_data = 1;
4349     }
4350 
4351     // The alternate reference frame cannot be active for a key frame.
4352     cpi->rc.source_alt_ref_active = 0;
4353 
4354     cm->error_resilient_mode = oxcf->error_resilient_mode;
4355     cm->frame_parallel_decoding_mode = oxcf->frame_parallel_decoding_mode;
4356 
4357     // By default, encoder assumes decoder can use prev_mi.
4358     if (cm->error_resilient_mode) {
4359       cm->frame_parallel_decoding_mode = 1;
4360       cm->reset_frame_context = 0;
4361       cm->refresh_frame_context = 0;
4362     } else if (cm->intra_only) {
4363       // Only reset the current context.
4364       cm->reset_frame_context = 2;
4365     }
4366   }
4367   if (is_two_pass_svc(cpi) && cm->error_resilient_mode == 0) {
4368     // Use context 0 for intra only empty frame, but the last frame context
4369     // for other empty frames.
4370     if (cpi->svc.encode_empty_frame_state == ENCODING) {
4371       if (cpi->svc.encode_intra_empty_frame != 0)
4372         cm->frame_context_idx = 0;
4373       else
4374         cm->frame_context_idx = FRAME_CONTEXTS - 1;
4375     } else {
4376       cm->frame_context_idx =
4377           cpi->svc.spatial_layer_id * cpi->svc.number_temporal_layers +
4378           cpi->svc.temporal_layer_id;
4379     }
4380 
4381     cm->frame_parallel_decoding_mode = oxcf->frame_parallel_decoding_mode;
4382 
4383     // The probs will be updated based on the frame type of its previous
4384     // frame if frame_parallel_decoding_mode is 0. The type may vary for
4385     // the frame after a key frame in base layer since we may drop enhancement
4386     // layers. So set frame_parallel_decoding_mode to 1 in this case.
4387     if (cm->frame_parallel_decoding_mode == 0) {
4388       if (cpi->svc.number_temporal_layers == 1) {
4389         if (cpi->svc.spatial_layer_id == 0 &&
4390             cpi->svc.layer_context[0].last_frame_type == KEY_FRAME)
4391           cm->frame_parallel_decoding_mode = 1;
4392       } else if (cpi->svc.spatial_layer_id == 0) {
4393         // Find the 2nd frame in temporal base layer and 1st frame in temporal
4394         // enhancement layers from the key frame.
4395         int i;
4396         for (i = 0; i < cpi->svc.number_temporal_layers; ++i) {
4397           if (cpi->svc.layer_context[0].frames_from_key_frame == 1 << i) {
4398             cm->frame_parallel_decoding_mode = 1;
4399             break;
4400           }
4401         }
4402       }
4403     }
4404   }
4405 
4406   // For 1 pass CBR, check if we are dropping this frame.
4407   // For spatial layers, for now only check for frame-dropping on the first
4408   // spatial layer, and if the decision is to drop, drop the whole super-frame.
4409   if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR &&
4410       cm->frame_type != KEY_FRAME) {
4411     if (vp9_rc_drop_frame(cpi) ||
4412         (is_one_pass_cbr_svc(cpi) && cpi->svc.rc_drop_superframe == 1)) {
4413       vp9_rc_postencode_update_drop_frame(cpi);
4414       ++cm->current_video_frame;
4415       cpi->ext_refresh_frame_flags_pending = 0;
4416       cpi->svc.rc_drop_superframe = 1;
4417       cpi->last_frame_dropped = 1;
4418       // TODO(marpan): Advancing the svc counters on dropped frames can break
4419       // the referencing scheme for the fixed svc patterns defined in
4420       // vp9_one_pass_cbr_svc_start_layer(). Look into fixing this issue, but
4421       // for now, don't advance the svc frame counters on dropped frame.
4422       // if (cpi->use_svc)
4423       //   vp9_inc_frame_in_layer(cpi);
4424 
4425       return;
4426     }
4427   }
4428 
4429   vpx_clear_system_state();
4430 
4431 #if CONFIG_INTERNAL_STATS
4432   memset(cpi->mode_chosen_counts, 0,
4433          MAX_MODES * sizeof(*cpi->mode_chosen_counts));
4434 #endif
4435 
4436   if (cpi->sf.recode_loop == DISALLOW_RECODE) {
4437     encode_without_recode_loop(cpi, size, dest);
4438   } else {
4439     encode_with_recode_loop(cpi, size, dest);
4440   }
4441 
4442   cpi->last_frame_dropped = 0;
4443 
4444   // Disable segmentation if it decreases the rate/distortion ratio.
4445   if (cpi->oxcf.aq_mode == LOOKAHEAD_AQ)
4446     vp9_try_disable_lookahead_aq(cpi, size, dest);
4447 
4448 #if CONFIG_VP9_TEMPORAL_DENOISING
4449 #ifdef OUTPUT_YUV_DENOISED
4450   if (oxcf->noise_sensitivity > 0 && denoise_svc(cpi)) {
4451     vp9_write_yuv_frame_420(&cpi->denoiser.running_avg_y[INTRA_FRAME],
4452                             yuv_denoised_file);
4453   }
4454 #endif
4455 #endif
4456 #ifdef OUTPUT_YUV_SKINMAP
4457   if (cpi->common.current_video_frame > 1) {
4458     vp9_compute_skin_map(cpi, yuv_skinmap_file);
4459   }
4460 #endif
4461 
4462   // Special case code to reduce pulsing when key frames are forced at a
4463   // fixed interval. Note the reconstruction error if it is the frame before
4464   // the forced key frame.
4465   if (cpi->rc.next_key_frame_forced && cpi->rc.frames_to_key == 1) {
4466 #if CONFIG_VP9_HIGHBITDEPTH
4467     if (cm->use_highbitdepth) {
4468       cpi->ambient_err =
4469           vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
4470     } else {
4471       cpi->ambient_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
4472     }
4473 #else
4474     cpi->ambient_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
4475 #endif  // CONFIG_VP9_HIGHBITDEPTH
4476   }
4477 
4478   // If the encoder forced a KEY_FRAME decision
4479   if (cm->frame_type == KEY_FRAME) cpi->refresh_last_frame = 1;
4480 
4481   cm->frame_to_show = get_frame_new_buffer(cm);
4482   cm->frame_to_show->color_space = cm->color_space;
4483   cm->frame_to_show->color_range = cm->color_range;
4484   cm->frame_to_show->render_width = cm->render_width;
4485   cm->frame_to_show->render_height = cm->render_height;
4486 
4487   // Pick the loop filter level for the frame.
4488   loopfilter_frame(cpi, cm);
4489 
4490   // build the bitstream
4491   vp9_pack_bitstream(cpi, dest, size);
4492 
4493   if (cm->seg.update_map) update_reference_segmentation_map(cpi);
4494 
4495   if (frame_is_intra_only(cm) == 0) {
4496     release_scaled_references(cpi);
4497   }
4498   vp9_update_reference_frames(cpi);
4499 
4500   for (t = TX_4X4; t <= TX_32X32; t++)
4501     full_to_model_counts(cpi->td.counts->coef[t],
4502                          cpi->td.rd_counts.coef_counts[t]);
4503 
4504   if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode)
4505     vp9_adapt_coef_probs(cm);
4506 
4507   if (!frame_is_intra_only(cm)) {
4508     if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
4509       vp9_adapt_mode_probs(cm);
4510       vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv);
4511     }
4512   }
4513 
4514   cpi->ext_refresh_frame_flags_pending = 0;
4515 
4516   if (cpi->refresh_golden_frame == 1)
4517     cpi->frame_flags |= FRAMEFLAGS_GOLDEN;
4518   else
4519     cpi->frame_flags &= ~FRAMEFLAGS_GOLDEN;
4520 
4521   if (cpi->refresh_alt_ref_frame == 1)
4522     cpi->frame_flags |= FRAMEFLAGS_ALTREF;
4523   else
4524     cpi->frame_flags &= ~FRAMEFLAGS_ALTREF;
4525 
4526   cpi->ref_frame_flags = get_ref_frame_flags(cpi);
4527 
4528   cm->last_frame_type = cm->frame_type;
4529 
4530   if (!(is_two_pass_svc(cpi) && cpi->svc.encode_empty_frame_state == ENCODING))
4531     vp9_rc_postencode_update(cpi, *size);
4532 
4533 #if 0
4534   output_frame_level_debug_stats(cpi);
4535 #endif
4536 
4537   if (cm->frame_type == KEY_FRAME) {
4538     // Tell the caller that the frame was coded as a key frame
4539     *frame_flags = cpi->frame_flags | FRAMEFLAGS_KEY;
4540   } else {
4541     *frame_flags = cpi->frame_flags & ~FRAMEFLAGS_KEY;
4542   }
4543 
4544   // Clear the one shot update flags for segmentation map and mode/ref loop
4545   // filter deltas.
4546   cm->seg.update_map = 0;
4547   cm->seg.update_data = 0;
4548   cm->lf.mode_ref_delta_update = 0;
4549 
4550   // keep track of the last coded dimensions
4551   cm->last_width = cm->width;
4552   cm->last_height = cm->height;
4553 
4554   // reset to normal state now that we are done.
4555   if (!cm->show_existing_frame) cm->last_show_frame = cm->show_frame;
4556 
4557   if (cm->show_frame) {
4558     vp9_swap_mi_and_prev_mi(cm);
4559     // Don't increment frame counters if this was an altref buffer
4560     // update, not a real frame.
4561     ++cm->current_video_frame;
4562     if (cpi->use_svc) vp9_inc_frame_in_layer(cpi);
4563   }
4564   cm->prev_frame = cm->cur_frame;
4565 
4566   if (cpi->use_svc)
4567     cpi->svc
4568         .layer_context[cpi->svc.spatial_layer_id *
4569                            cpi->svc.number_temporal_layers +
4570                        cpi->svc.temporal_layer_id]
4571         .last_frame_type = cm->frame_type;
4572 
4573   cpi->force_update_segmentation = 0;
4574 
4575   if (cpi->oxcf.aq_mode == LOOKAHEAD_AQ)
4576     vp9_alt_ref_aq_unset_all(cpi->alt_ref_aq, cpi);
4577 }
4578 
4579 static void SvcEncode(VP9_COMP *cpi, size_t *size, uint8_t *dest,
4580                       unsigned int *frame_flags) {
4581   vp9_rc_get_svc_params(cpi);
4582   encode_frame_to_data_rate(cpi, size, dest, frame_flags);
4583 }
4584 
4585 static void Pass0Encode(VP9_COMP *cpi, size_t *size, uint8_t *dest,
4586                         unsigned int *frame_flags) {
4587   if (cpi->oxcf.rc_mode == VPX_CBR) {
4588     vp9_rc_get_one_pass_cbr_params(cpi);
4589   } else {
4590     vp9_rc_get_one_pass_vbr_params(cpi);
4591   }
4592   encode_frame_to_data_rate(cpi, size, dest, frame_flags);
4593 }
4594 
4595 static void Pass2Encode(VP9_COMP *cpi, size_t *size, uint8_t *dest,
4596                         unsigned int *frame_flags) {
4597   cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
4598   encode_frame_to_data_rate(cpi, size, dest, frame_flags);
4599 
4600   if (!(is_two_pass_svc(cpi) && cpi->svc.encode_empty_frame_state == ENCODING))
4601     vp9_twopass_postencode_update(cpi);
4602 }
4603 
4604 static void init_ref_frame_bufs(VP9_COMMON *cm) {
4605   int i;
4606   BufferPool *const pool = cm->buffer_pool;
4607   cm->new_fb_idx = INVALID_IDX;
4608   for (i = 0; i < REF_FRAMES; ++i) {
4609     cm->ref_frame_map[i] = INVALID_IDX;
4610     pool->frame_bufs[i].ref_count = 0;
4611   }
4612 }
4613 
4614 static void check_initial_width(VP9_COMP *cpi,
4615 #if CONFIG_VP9_HIGHBITDEPTH
4616                                 int use_highbitdepth,
4617 #endif
4618                                 int subsampling_x, int subsampling_y) {
4619   VP9_COMMON *const cm = &cpi->common;
4620 
4621   if (!cpi->initial_width ||
4622 #if CONFIG_VP9_HIGHBITDEPTH
4623       cm->use_highbitdepth != use_highbitdepth ||
4624 #endif
4625       cm->subsampling_x != subsampling_x ||
4626       cm->subsampling_y != subsampling_y) {
4627     cm->subsampling_x = subsampling_x;
4628     cm->subsampling_y = subsampling_y;
4629 #if CONFIG_VP9_HIGHBITDEPTH
4630     cm->use_highbitdepth = use_highbitdepth;
4631 #endif
4632 
4633     alloc_raw_frame_buffers(cpi);
4634     init_ref_frame_bufs(cm);
4635     alloc_util_frame_buffers(cpi);
4636 
4637     init_motion_estimation(cpi);  // TODO(agrange) This can be removed.
4638 
4639     cpi->initial_width = cm->width;
4640     cpi->initial_height = cm->height;
4641     cpi->initial_mbs = cm->MBs;
4642   }
4643 }
4644 
4645 int vp9_receive_raw_frame(VP9_COMP *cpi, vpx_enc_frame_flags_t frame_flags,
4646                           YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
4647                           int64_t end_time) {
4648   VP9_COMMON *const cm = &cpi->common;
4649   struct vpx_usec_timer timer;
4650   int res = 0;
4651   const int subsampling_x = sd->subsampling_x;
4652   const int subsampling_y = sd->subsampling_y;
4653 #if CONFIG_VP9_HIGHBITDEPTH
4654   const int use_highbitdepth = (sd->flags & YV12_FLAG_HIGHBITDEPTH) != 0;
4655 #endif
4656 
4657 #if CONFIG_VP9_HIGHBITDEPTH
4658   check_initial_width(cpi, use_highbitdepth, subsampling_x, subsampling_y);
4659 #else
4660   check_initial_width(cpi, subsampling_x, subsampling_y);
4661 #endif  // CONFIG_VP9_HIGHBITDEPTH
4662 
4663 #if CONFIG_VP9_TEMPORAL_DENOISING
4664   setup_denoiser_buffer(cpi);
4665 #endif
4666   vpx_usec_timer_start(&timer);
4667 
4668   if (vp9_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
4669 #if CONFIG_VP9_HIGHBITDEPTH
4670                          use_highbitdepth,
4671 #endif  // CONFIG_VP9_HIGHBITDEPTH
4672                          frame_flags))
4673     res = -1;
4674   vpx_usec_timer_mark(&timer);
4675   cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
4676 
4677   if ((cm->profile == PROFILE_0 || cm->profile == PROFILE_2) &&
4678       (subsampling_x != 1 || subsampling_y != 1)) {
4679     vpx_internal_error(&cm->error, VPX_CODEC_INVALID_PARAM,
4680                        "Non-4:2:0 color format requires profile 1 or 3");
4681     res = -1;
4682   }
4683   if ((cm->profile == PROFILE_1 || cm->profile == PROFILE_3) &&
4684       (subsampling_x == 1 && subsampling_y == 1)) {
4685     vpx_internal_error(&cm->error, VPX_CODEC_INVALID_PARAM,
4686                        "4:2:0 color format requires profile 0 or 2");
4687     res = -1;
4688   }
4689 
4690   return res;
4691 }
4692 
4693 static int frame_is_reference(const VP9_COMP *cpi) {
4694   const VP9_COMMON *cm = &cpi->common;
4695 
4696   return cm->frame_type == KEY_FRAME || cpi->refresh_last_frame ||
4697          cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame ||
4698          cm->refresh_frame_context || cm->lf.mode_ref_delta_update ||
4699          cm->seg.update_map || cm->seg.update_data;
4700 }
4701 
4702 static void adjust_frame_rate(VP9_COMP *cpi,
4703                               const struct lookahead_entry *source) {
4704   int64_t this_duration;
4705   int step = 0;
4706 
4707   if (source->ts_start == cpi->first_time_stamp_ever) {
4708     this_duration = source->ts_end - source->ts_start;
4709     step = 1;
4710   } else {
4711     int64_t last_duration =
4712         cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
4713 
4714     this_duration = source->ts_end - cpi->last_end_time_stamp_seen;
4715 
4716     // do a step update if the duration changes by 10%
4717     if (last_duration)
4718       step = (int)((this_duration - last_duration) * 10 / last_duration);
4719   }
4720 
4721   if (this_duration) {
4722     if (step) {
4723       vp9_new_framerate(cpi, 10000000.0 / this_duration);
4724     } else {
4725       // Average this frame's rate into the last second's average
4726       // frame rate. If we haven't seen 1 second yet, then average
4727       // over the whole interval seen.
4728       const double interval = VPXMIN(
4729           (double)(source->ts_end - cpi->first_time_stamp_ever), 10000000.0);
4730       double avg_duration = 10000000.0 / cpi->framerate;
4731       avg_duration *= (interval - avg_duration + this_duration);
4732       avg_duration /= interval;
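      // Equivalent to avg_duration += (this_duration - avg_duration) *
      // (avg_duration / interval): a weighted step toward the new duration.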
4733 
4734       vp9_new_framerate(cpi, 10000000.0 / avg_duration);
4735     }
4736   }
4737   cpi->last_time_stamp_seen = source->ts_start;
4738   cpi->last_end_time_stamp_seen = source->ts_end;
4739 }
4740 
4741 // Returns 0 if this is not an alt ref, else the offset of the source frame
4742 // used as the arf midpoint.
4743 static int get_arf_src_index(VP9_COMP *cpi) {
4744   RATE_CONTROL *const rc = &cpi->rc;
4745   int arf_src_index = 0;
4746   if (is_altref_enabled(cpi)) {
4747     if (cpi->oxcf.pass == 2) {
4748       const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
4749       if (gf_group->update_type[gf_group->index] == ARF_UPDATE) {
4750         arf_src_index = gf_group->arf_src_offset[gf_group->index];
4751       }
4752     } else if (rc->source_alt_ref_pending) {
4753       arf_src_index = rc->frames_till_gf_update_due;
4754     }
4755   }
4756   return arf_src_index;
4757 }
4758 
4759 static void check_src_altref(VP9_COMP *cpi,
4760                              const struct lookahead_entry *source) {
4761   RATE_CONTROL *const rc = &cpi->rc;
4762 
4763   if (cpi->oxcf.pass == 2) {
4764     const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
4765     rc->is_src_frame_alt_ref =
4766         (gf_group->update_type[gf_group->index] == OVERLAY_UPDATE);
4767   } else {
4768     rc->is_src_frame_alt_ref =
4769         cpi->alt_ref_source && (source == cpi->alt_ref_source);
4770   }
4771 
4772   if (rc->is_src_frame_alt_ref) {
4773     // Current frame is an ARF overlay frame.
4774     cpi->alt_ref_source = NULL;
4775 
4776     // Don't refresh the last buffer for an ARF overlay frame. It will
4777     // become the GF so preserve last as an alternative prediction option.
4778     cpi->refresh_last_frame = 0;
4779   }
4780 }
4781 
4782 #if CONFIG_INTERNAL_STATS
4783 extern double vp9_get_blockiness(const uint8_t *img1, int img1_pitch,
4784                                  const uint8_t *img2, int img2_pitch, int width,
4785                                  int height);
4786 
4787 static void adjust_image_stat(double y, double u, double v, double all,
4788                               ImageStat *s) {
4789   s->stat[Y] += y;
4790   s->stat[U] += u;
4791   s->stat[V] += v;
4792   s->stat[ALL] += all;
4793   s->worst = VPXMIN(s->worst, all);
4794 }
4795 #endif  // CONFIG_INTERNAL_STATS
4796 
4797 // Adjust the maximum allowable frame size for the target level.
4798 static void level_rc_framerate(VP9_COMP *cpi, int arf_src_index) {
4799   RATE_CONTROL *const rc = &cpi->rc;
4800   LevelConstraint *const ls = &cpi->level_constraint;
4801   VP9_COMMON *const cm = &cpi->common;
4802   const double max_cpb_size = ls->max_cpb_size;
4803   vpx_clear_system_state();
4804   rc->max_frame_bandwidth = VPXMIN(rc->max_frame_bandwidth, ls->max_frame_size);
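  // The code below further caps the frame size to a fraction of the CPB:
  // 50% for intra-only frames, 40% for arf frames and 20% otherwise.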
4805   if (frame_is_intra_only(cm)) {
4806     rc->max_frame_bandwidth =
4807         VPXMIN(rc->max_frame_bandwidth, (int)(max_cpb_size * 0.5));
4808   } else if (arf_src_index > 0) {
4809     rc->max_frame_bandwidth =
4810         VPXMIN(rc->max_frame_bandwidth, (int)(max_cpb_size * 0.4));
4811   } else {
4812     rc->max_frame_bandwidth =
4813         VPXMIN(rc->max_frame_bandwidth, (int)(max_cpb_size * 0.2));
4814   }
4815 }
4816 
4817 static void update_level_info(VP9_COMP *cpi, size_t *size, int arf_src_index) {
4818   VP9_COMMON *const cm = &cpi->common;
4819   Vp9LevelInfo *const level_info = &cpi->level_info;
4820   Vp9LevelSpec *const level_spec = &level_info->level_spec;
4821   Vp9LevelStats *const level_stats = &level_info->level_stats;
4822   int i, idx;
4823   uint64_t luma_samples, dur_end;
4824   const uint32_t luma_pic_size = cm->width * cm->height;
4825   LevelConstraint *const level_constraint = &cpi->level_constraint;
4826   const int8_t level_index = level_constraint->level_index;
4827   double cpb_data_size;
4828 
4829   vpx_clear_system_state();
4830 
4831   // update level_stats
4832   level_stats->total_compressed_size += *size;
4833   if (cm->show_frame) {
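    // Count luma samples plus the two (possibly subsampled) chroma planes.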
4834     level_stats->total_uncompressed_size +=
4835         luma_pic_size +
4836         2 * (luma_pic_size >> (cm->subsampling_x + cm->subsampling_y));
4837     level_stats->time_encoded =
4838         (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
4839         (double)TICKS_PER_SEC;
4840   }
4841 
4842   if (arf_src_index > 0) {
4843     if (!level_stats->seen_first_altref) {
4844       level_stats->seen_first_altref = 1;
4845     } else if (level_stats->frames_since_last_altref <
4846                level_spec->min_altref_distance) {
4847       level_spec->min_altref_distance = level_stats->frames_since_last_altref;
4848     }
4849     level_stats->frames_since_last_altref = 0;
4850   } else {
4851     ++level_stats->frames_since_last_altref;
4852   }
4853 
4854   if (level_stats->frame_window_buffer.len < FRAME_WINDOW_SIZE - 1) {
4855     idx = (level_stats->frame_window_buffer.start +
4856            level_stats->frame_window_buffer.len++) %
4857           FRAME_WINDOW_SIZE;
4858   } else {
4859     idx = level_stats->frame_window_buffer.start;
4860     level_stats->frame_window_buffer.start = (idx + 1) % FRAME_WINDOW_SIZE;
4861   }
4862   level_stats->frame_window_buffer.buf[idx].ts = cpi->last_time_stamp_seen;
4863   level_stats->frame_window_buffer.buf[idx].size = (uint32_t)(*size);
4864   level_stats->frame_window_buffer.buf[idx].luma_samples = luma_pic_size;
4865 
4866   if (cm->frame_type == KEY_FRAME) {
4867     level_stats->ref_refresh_map = 0;
4868   } else {
4869     int count = 0;
4870     level_stats->ref_refresh_map |= vp9_get_refresh_mask(cpi);
4871     // Also need to consider the case where the encoder refers to a buffer
4872     // that has been implicitly refreshed after encoding a keyframe.
4873     if (!cm->intra_only) {
4874       level_stats->ref_refresh_map |= (1 << cpi->lst_fb_idx);
4875       level_stats->ref_refresh_map |= (1 << cpi->gld_fb_idx);
4876       level_stats->ref_refresh_map |= (1 << cpi->alt_fb_idx);
4877     }
4878     for (i = 0; i < REF_FRAMES; ++i) {
4879       count += (level_stats->ref_refresh_map >> i) & 1;
4880     }
4881     if (count > level_spec->max_ref_frame_buffers) {
4882       level_spec->max_ref_frame_buffers = count;
4883     }
4884   }
4885 
4886   // update average_bitrate
4887   level_spec->average_bitrate = (double)level_stats->total_compressed_size /
4888                                 125.0 / level_stats->time_encoded;
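  // 125 bytes per kilobit, so this is kilobits divided by seconds encoded,
  // i.e. kbps.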
4889 
4890   // update max_luma_sample_rate
4891   luma_samples = 0;
4892   for (i = 0; i < level_stats->frame_window_buffer.len; ++i) {
4893     idx = (level_stats->frame_window_buffer.start +
4894            level_stats->frame_window_buffer.len - 1 - i) %
4895           FRAME_WINDOW_SIZE;
4896     if (i == 0) {
4897       dur_end = level_stats->frame_window_buffer.buf[idx].ts;
4898     }
4899     if (dur_end - level_stats->frame_window_buffer.buf[idx].ts >=
4900         TICKS_PER_SEC) {
4901       break;
4902     }
4903     luma_samples += level_stats->frame_window_buffer.buf[idx].luma_samples;
4904   }
4905   if (luma_samples > level_spec->max_luma_sample_rate) {
4906     level_spec->max_luma_sample_rate = luma_samples;
4907   }
4908 
4909   // update max_cpb_size
4910   cpb_data_size = 0;
4911   for (i = 0; i < CPB_WINDOW_SIZE; ++i) {
4912     if (i >= level_stats->frame_window_buffer.len) break;
4913     idx = (level_stats->frame_window_buffer.start +
4914            level_stats->frame_window_buffer.len - 1 - i) %
4915           FRAME_WINDOW_SIZE;
4916     cpb_data_size += level_stats->frame_window_buffer.buf[idx].size;
4917   }
4918   cpb_data_size = cpb_data_size / 125.0;
4919   if (cpb_data_size > level_spec->max_cpb_size) {
4920     level_spec->max_cpb_size = cpb_data_size;
4921   }
4922 
4923   // update max_luma_picture_size
4924   if (luma_pic_size > level_spec->max_luma_picture_size) {
4925     level_spec->max_luma_picture_size = luma_pic_size;
4926   }
4927 
4928   // update compression_ratio
4929   level_spec->compression_ratio = (double)level_stats->total_uncompressed_size *
4930                                   cm->bit_depth /
4931                                   level_stats->total_compressed_size / 8.0;
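  // Raw bits (samples * bit_depth) divided by coded bits (bytes * 8).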
4932 
4933   // update max_col_tiles
4934   if (level_spec->max_col_tiles < (1 << cm->log2_tile_cols)) {
4935     level_spec->max_col_tiles = (1 << cm->log2_tile_cols);
4936   }
4937 
4938   if (level_index >= 0 && level_constraint->fail_flag == 0) {
4939     if (level_spec->max_luma_picture_size >
4940         vp9_level_defs[level_index].max_luma_picture_size) {
4941       level_constraint->fail_flag |= (1 << LUMA_PIC_SIZE_TOO_LARGE);
4942       vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
4943                          "Failed to encode to the target level %d. %s",
4944                          vp9_level_defs[level_index].level,
4945                          level_fail_messages[LUMA_PIC_SIZE_TOO_LARGE]);
4946     }
4947 
4948     if ((double)level_spec->max_luma_sample_rate >
4949         (double)vp9_level_defs[level_index].max_luma_sample_rate *
4950             (1 + SAMPLE_RATE_GRACE_P)) {
4951       level_constraint->fail_flag |= (1 << LUMA_SAMPLE_RATE_TOO_LARGE);
4952       vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
4953                          "Failed to encode to the target level %d. %s",
4954                          vp9_level_defs[level_index].level,
4955                          level_fail_messages[LUMA_SAMPLE_RATE_TOO_LARGE]);
4956     }
4957 
4958     if (level_spec->max_col_tiles > vp9_level_defs[level_index].max_col_tiles) {
4959       level_constraint->fail_flag |= (1 << TOO_MANY_COLUMN_TILE);
4960       vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
4961                          "Failed to encode to the target level %d. %s",
4962                          vp9_level_defs[level_index].level,
4963                          level_fail_messages[TOO_MANY_COLUMN_TILE]);
4964     }
4965 
4966     if (level_spec->min_altref_distance <
4967         vp9_level_defs[level_index].min_altref_distance) {
4968       level_constraint->fail_flag |= (1 << ALTREF_DIST_TOO_SMALL);
4969       vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
4970                          "Failed to encode to the target level %d. %s",
4971                          vp9_level_defs[level_index].level,
4972                          level_fail_messages[ALTREF_DIST_TOO_SMALL]);
4973     }
4974 
4975     if (level_spec->max_ref_frame_buffers >
4976         vp9_level_defs[level_index].max_ref_frame_buffers) {
4977       level_constraint->fail_flag |= (1 << TOO_MANY_REF_BUFFER);
4978       vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
4979                          "Failed to encode to the target level %d. %s",
4980                          vp9_level_defs[level_index].level,
4981                          level_fail_messages[TOO_MANY_REF_BUFFER]);
4982     }
4983 
4984     if (level_spec->max_cpb_size > vp9_level_defs[level_index].max_cpb_size) {
4985       level_constraint->fail_flag |= (1 << CPB_TOO_LARGE);
4986       vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
4987                          "Failed to encode to the target level %d. %s",
4988                          vp9_level_defs[level_index].level,
4989                          level_fail_messages[CPB_TOO_LARGE]);
4990     }
4991 
4992     // Set an upper bound for the next frame size. It will be used in
4993     // level_rc_framerate() before encoding the next frame.
4994     cpb_data_size = 0;
4995     for (i = 0; i < CPB_WINDOW_SIZE - 1; ++i) {
4996       if (i >= level_stats->frame_window_buffer.len) break;
4997       idx = (level_stats->frame_window_buffer.start +
4998              level_stats->frame_window_buffer.len - 1 - i) %
4999             FRAME_WINDOW_SIZE;
5000       cpb_data_size += level_stats->frame_window_buffer.buf[idx].size;
5001     }
5002     cpb_data_size = cpb_data_size / 125.0;
5003     level_constraint->max_frame_size =
5004         (int)((vp9_level_defs[level_index].max_cpb_size - cpb_data_size) *
5005               1000.0);
5006     if (level_stats->frame_window_buffer.len < CPB_WINDOW_SIZE - 1)
5007       level_constraint->max_frame_size >>= 1;
5008   }
5009 }
5010 
5011 int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
5012                             size_t *size, uint8_t *dest, int64_t *time_stamp,
5013                             int64_t *time_end, int flush) {
5014   const VP9EncoderConfig *const oxcf = &cpi->oxcf;
5015   VP9_COMMON *const cm = &cpi->common;
5016   BufferPool *const pool = cm->buffer_pool;
5017   RATE_CONTROL *const rc = &cpi->rc;
5018   struct vpx_usec_timer cmptimer;
5019   YV12_BUFFER_CONFIG *force_src_buffer = NULL;
5020   struct lookahead_entry *last_source = NULL;
5021   struct lookahead_entry *source = NULL;
5022   int arf_src_index;
5023   int i;
5024 
5025   if (is_two_pass_svc(cpi)) {
5026 #if CONFIG_SPATIAL_SVC
5027     vp9_svc_start_frame(cpi);
5028     // Use a small empty frame instead of a real frame
5029     if (cpi->svc.encode_empty_frame_state == ENCODING)
5030       source = &cpi->svc.empty_frame;
5031 #endif
5032     if (oxcf->pass == 2) vp9_restore_layer_context(cpi);
5033   } else if (is_one_pass_cbr_svc(cpi)) {
5034     vp9_one_pass_cbr_svc_start_layer(cpi);
5035   }
5036 
5037   vpx_usec_timer_start(&cmptimer);
5038 
5039   vp9_set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
5040 
5041   // Check whether multi-arf is enabled.
5042   // Note that at the moment multi_arf is only configured for 2 pass VBR and
5043   // will not work properly with svc.
5044   if ((oxcf->pass == 2) && !cpi->use_svc && (cpi->oxcf.enable_auto_arf > 1))
5045     cpi->multi_arf_allowed = 1;
5046   else
5047     cpi->multi_arf_allowed = 0;
5048 
5049   // Normal defaults
5050   cm->reset_frame_context = 0;
5051   cm->refresh_frame_context = 1;
5052   if (!is_one_pass_cbr_svc(cpi)) {
5053     cpi->refresh_last_frame = 1;
5054     cpi->refresh_golden_frame = 0;
5055     cpi->refresh_alt_ref_frame = 0;
5056   }
5057 
5058   // Should we encode an arf frame?
5059   arf_src_index = get_arf_src_index(cpi);
5060 
5061   // Skip alt frame if we encode the empty frame
5062   if (is_two_pass_svc(cpi) && source != NULL) arf_src_index = 0;
5063 
5064   if (arf_src_index) {
5065     for (i = 0; i <= arf_src_index; ++i) {
5066       struct lookahead_entry *e = vp9_lookahead_peek(cpi->lookahead, i);
5067       // Avoid creating an alt-ref if there's a forced keyframe pending.
5068       if (e == NULL) {
5069         break;
5070       } else if (e->flags == VPX_EFLAG_FORCE_KF) {
5071         arf_src_index = 0;
5072         flush = 1;
5073         break;
5074       }
5075     }
5076   }
5077 
5078   if (arf_src_index) {
5079     assert(arf_src_index <= rc->frames_to_key);
5080 
5081     if ((source = vp9_lookahead_peek(cpi->lookahead, arf_src_index)) != NULL) {
5082       cpi->alt_ref_source = source;
5083 
5084 #if CONFIG_SPATIAL_SVC
5085       if (is_two_pass_svc(cpi) && cpi->svc.spatial_layer_id > 0) {
5086         int i;
5087         // Reference a hidden frame from a lower layer
5088         for (i = cpi->svc.spatial_layer_id - 1; i >= 0; --i) {
5089           if (oxcf->ss_enable_auto_arf[i]) {
5090             cpi->gld_fb_idx = cpi->svc.layer_context[i].alt_ref_idx;
5091             break;
5092           }
5093         }
5094       }
5095       cpi->svc.layer_context[cpi->svc.spatial_layer_id].has_alt_frame = 1;
5096 #endif
5097 
5098       if ((oxcf->mode != REALTIME) && (oxcf->arnr_max_frames > 0) &&
5099           (oxcf->arnr_strength > 0)) {
5100         int bitrate = cpi->rc.avg_frame_bandwidth / 40;
5101         int not_low_bitrate = bitrate > ALT_REF_AQ_LOW_BITRATE_BOUNDARY;
5102 
5103         int not_last_frame = (cpi->lookahead->sz - arf_src_index > 1);
5104         not_last_frame |= ALT_REF_AQ_APPLY_TO_LAST_FRAME;
5105 
5106         // Produce the filtered ARF frame.
5107         vp9_temporal_filter(cpi, arf_src_index);
5108         vpx_extend_frame_borders(&cpi->alt_ref_buffer);
5109 
5110         // For small bitrates, segmentation overhead usually
5111         // eats all the bitrate gain from enabling delta quantizers.
5112         if (cpi->oxcf.alt_ref_aq != 0 && not_low_bitrate && not_last_frame)
5113           vp9_alt_ref_aq_setup_mode(cpi->alt_ref_aq, cpi);
5114 
5115         force_src_buffer = &cpi->alt_ref_buffer;
5116       }
5117 
5118       cm->show_frame = 0;
5119       cm->intra_only = 0;
5120       cpi->refresh_alt_ref_frame = 1;
5121       cpi->refresh_golden_frame = 0;
5122       cpi->refresh_last_frame = 0;
5123       rc->is_src_frame_alt_ref = 0;
5124       rc->source_alt_ref_pending = 0;
5125     } else {
5126       rc->source_alt_ref_pending = 0;
5127     }
5128   }
5129 
5130   if (!source) {
5131     // Get last frame source.
5132     if (cm->current_video_frame > 0) {
5133       if ((last_source = vp9_lookahead_peek(cpi->lookahead, -1)) == NULL)
5134         return -1;
5135     }
5136 
5137     // Read in the source frame.
5138     if (cpi->use_svc)
5139       source = vp9_svc_lookahead_pop(cpi, cpi->lookahead, flush);
5140     else
5141       source = vp9_lookahead_pop(cpi->lookahead, flush);
5142 
5143     if (source != NULL) {
5144       cm->show_frame = 1;
5145       cm->intra_only = 0;
5146       // If the flags indicate an intra frame, but the current picture is for
5147       // a non-zero spatial layer, it should not be an intra picture.
5148       // TODO(Won Kap): this needs to change if per-layer intra frame is
5149       // allowed.
5150       if ((source->flags & VPX_EFLAG_FORCE_KF) &&
5151           cpi->svc.spatial_layer_id > cpi->svc.first_spatial_layer_to_encode) {
5152         source->flags &= ~(unsigned int)(VPX_EFLAG_FORCE_KF);
5153       }
5154 
5155       // Check to see if the frame should be encoded as an arf overlay.
5156       check_src_altref(cpi, source);
5157     }
5158   }
5159 
5160   if (source) {
5161     cpi->un_scaled_source = cpi->Source =
5162         force_src_buffer ? force_src_buffer : &source->img;
5163 
5164 #ifdef ENABLE_KF_DENOISE
5165     // Copy of raw source for metrics calculation.
5166     if (is_psnr_calc_enabled(cpi))
5167       vp9_copy_and_extend_frame(cpi->Source, &cpi->raw_unscaled_source);
5168 #endif
5169 
5170     cpi->unscaled_last_source = last_source != NULL ? &last_source->img : NULL;
5171 
5172     *time_stamp = source->ts_start;
5173     *time_end = source->ts_end;
5174     *frame_flags = (source->flags & VPX_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0;
5175 
5176   } else {
5177     *size = 0;
5178     if (flush && oxcf->pass == 1 && !cpi->twopass.first_pass_done) {
5179       vp9_end_first_pass(cpi); /* get last stats packet */
5180       cpi->twopass.first_pass_done = 1;
5181     }
5182     return -1;
5183   }
5184 
5185   if (source->ts_start < cpi->first_time_stamp_ever) {
5186     cpi->first_time_stamp_ever = source->ts_start;
5187     cpi->last_end_time_stamp_seen = source->ts_start;
5188   }
5189 
5190   // Clear down mmx registers
5191   vpx_clear_system_state();
5192 
5193   // adjust frame rates based on timestamps given
5194   if (cm->show_frame) {
5195     adjust_frame_rate(cpi, source);
5196   }
5197 
5198   if (is_one_pass_cbr_svc(cpi)) {
5199     vp9_update_temporal_layer_framerate(cpi);
5200     vp9_restore_layer_context(cpi);
5201   }
5202 
5203   // Find a free buffer for the new frame, releasing the reference previously
5204   // held.
5205   if (cm->new_fb_idx != INVALID_IDX) {
5206     --pool->frame_bufs[cm->new_fb_idx].ref_count;
5207   }
5208   cm->new_fb_idx = get_free_fb(cm);
5209 
5210   if (cm->new_fb_idx == INVALID_IDX) return -1;
5211 
5212   cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
5213 
5214   if (!cpi->use_svc && cpi->multi_arf_allowed) {
5215     if (cm->frame_type == KEY_FRAME) {
5216       init_buffer_indices(cpi);
5217     } else if (oxcf->pass == 2) {
5218       const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
5219       cpi->alt_fb_idx = gf_group->arf_ref_idx[gf_group->index];
5220     }
5221   }
5222 
5223   // Start with a 0 size frame.
5224   *size = 0;
5225 
5226   cpi->frame_flags = *frame_flags;
5227 
5228   if ((oxcf->pass == 2) &&
5229       (!cpi->use_svc || (is_two_pass_svc(cpi) &&
5230                          cpi->svc.encode_empty_frame_state != ENCODING))) {
5231     vp9_rc_get_second_pass_params(cpi);
5232   } else if (oxcf->pass == 1) {
5233     set_frame_size(cpi);
5234   }
5235 
5236   if (oxcf->pass != 1 && cpi->level_constraint.level_index >= 0 &&
5237       cpi->level_constraint.fail_flag == 0)
5238     level_rc_framerate(cpi, arf_src_index);
5239 
5240   if (cpi->oxcf.pass != 0 || cpi->use_svc || frame_is_intra_only(cm) == 1) {
5241     for (i = 0; i < MAX_REF_FRAMES; ++i) cpi->scaled_ref_idx[i] = INVALID_IDX;
5242   }
5243 
5244   cpi->td.mb.fp_src_pred = 0;
5245   if (oxcf->pass == 1 && (!cpi->use_svc || is_two_pass_svc(cpi))) {
5246     const int lossless = is_lossless_requested(oxcf);
5247 #if CONFIG_VP9_HIGHBITDEPTH
5248     if (cpi->oxcf.use_highbitdepth)
5249       cpi->td.mb.fwd_txm4x4 =
5250           lossless ? vp9_highbd_fwht4x4 : vpx_highbd_fdct4x4;
5251     else
5252       cpi->td.mb.fwd_txm4x4 = lossless ? vp9_fwht4x4 : vpx_fdct4x4;
5253     cpi->td.mb.highbd_itxm_add =
5254         lossless ? vp9_highbd_iwht4x4_add : vp9_highbd_idct4x4_add;
5255 #else
5256     cpi->td.mb.fwd_txm4x4 = lossless ? vp9_fwht4x4 : vpx_fdct4x4;
5257 #endif  // CONFIG_VP9_HIGHBITDEPTH
5258     cpi->td.mb.itxm_add = lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
5259     vp9_first_pass(cpi, source);
5260   } else if (oxcf->pass == 2 && (!cpi->use_svc || is_two_pass_svc(cpi))) {
5261     Pass2Encode(cpi, size, dest, frame_flags);
5262   } else if (cpi->use_svc) {
5263     SvcEncode(cpi, size, dest, frame_flags);
5264   } else {
5265     // One pass encode
5266     Pass0Encode(cpi, size, dest, frame_flags);
5267   }
5268 
5269   if (cm->refresh_frame_context)
5270     cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
5271 
5272   // No frame encoded, or frame was dropped. Release scaled references.
5273   if ((*size == 0) && (frame_is_intra_only(cm) == 0)) {
5274     release_scaled_references(cpi);
5275   }
5276 
5277   if (*size > 0) {
5278     cpi->droppable = !frame_is_reference(cpi);
5279   }
5280 
5281   // Save layer specific state.
5282   if (is_one_pass_cbr_svc(cpi) || ((cpi->svc.number_temporal_layers > 1 ||
5283                                     cpi->svc.number_spatial_layers > 1) &&
5284                                    oxcf->pass == 2)) {
5285     vp9_save_layer_context(cpi);
5286   }
5287 
5288   vpx_usec_timer_mark(&cmptimer);
5289   cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
5290 
5291   // Should we calculate metrics for the frame?
5292   if (is_psnr_calc_enabled(cpi)) generate_psnr_packet(cpi);
5293 
5294   if (cpi->keep_level_stats && oxcf->pass != 1)
5295     update_level_info(cpi, size, arf_src_index);
5296 
5297 #if CONFIG_INTERNAL_STATS
5298 
5299   if (oxcf->pass != 1) {
5300     double samples = 0.0;
5301     cpi->bytes += (int)(*size);
5302 
5303     if (cm->show_frame) {
5304       uint32_t bit_depth = 8;
5305       uint32_t in_bit_depth = 8;
5306       cpi->count++;
5307 #if CONFIG_VP9_HIGHBITDEPTH
5308       if (cm->use_highbitdepth) {
5309         in_bit_depth = cpi->oxcf.input_bit_depth;
5310         bit_depth = cm->bit_depth;
5311       }
5312 #endif
5313 
5314       if (cpi->b_calculate_psnr) {
5315         YV12_BUFFER_CONFIG *orig = cpi->raw_source_frame;
5316         YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
5317         YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
5318         PSNR_STATS psnr;
5319 #if CONFIG_VP9_HIGHBITDEPTH
5320         vpx_calc_highbd_psnr(orig, recon, &psnr, cpi->td.mb.e_mbd.bd,
5321                              in_bit_depth);
5322 #else
5323         vpx_calc_psnr(orig, recon, &psnr);
5324 #endif  // CONFIG_VP9_HIGHBITDEPTH
5325 
5326         adjust_image_stat(psnr.psnr[1], psnr.psnr[2], psnr.psnr[3],
5327                           psnr.psnr[0], &cpi->psnr);
5328         cpi->total_sq_error += psnr.sse[0];
5329         cpi->total_samples += psnr.samples[0];
5330         samples = psnr.samples[0];
5331 
5332         {
5333           PSNR_STATS psnr2;
5334           double frame_ssim2 = 0, weight = 0;
5335 #if CONFIG_VP9_POSTPROC
5336           if (vpx_alloc_frame_buffer(
5337                   pp, recon->y_crop_width, recon->y_crop_height,
5338                   cm->subsampling_x, cm->subsampling_y,
5339 #if CONFIG_VP9_HIGHBITDEPTH
5340                   cm->use_highbitdepth,
5341 #endif
5342                   VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment) < 0) {
5343             vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
5344                                "Failed to allocate post processing buffer");
5345           }
5346           {
5347             vp9_ppflags_t ppflags;
5348             ppflags.post_proc_flag = VP9D_DEBLOCK;
5349             ppflags.deblocking_level = 0;  // not used in vp9_post_proc_frame()
5350             ppflags.noise_level = 0;       // not used in vp9_post_proc_frame()
5351             vp9_post_proc_frame(cm, pp, &ppflags);
5352           }
5353 #endif
5354           vpx_clear_system_state();
5355 
5356 #if CONFIG_VP9_HIGHBITDEPTH
5357           vpx_calc_highbd_psnr(orig, pp, &psnr2, cpi->td.mb.e_mbd.bd,
5358                                cpi->oxcf.input_bit_depth);
5359 #else
5360           vpx_calc_psnr(orig, pp, &psnr2);
5361 #endif  // CONFIG_VP9_HIGHBITDEPTH
5362 
5363           cpi->totalp_sq_error += psnr2.sse[0];
5364           cpi->totalp_samples += psnr2.samples[0];
5365           adjust_image_stat(psnr2.psnr[1], psnr2.psnr[2], psnr2.psnr[3],
5366                             psnr2.psnr[0], &cpi->psnrp);
5367 
5368 #if CONFIG_VP9_HIGHBITDEPTH
5369           if (cm->use_highbitdepth) {
5370             frame_ssim2 = vpx_highbd_calc_ssim(orig, recon, &weight, bit_depth,
5371                                                in_bit_depth);
5372           } else {
5373             frame_ssim2 = vpx_calc_ssim(orig, recon, &weight);
5374           }
5375 #else
5376           frame_ssim2 = vpx_calc_ssim(orig, recon, &weight);
5377 #endif  // CONFIG_VP9_HIGHBITDEPTH
5378 
5379           cpi->worst_ssim = VPXMIN(cpi->worst_ssim, frame_ssim2);
5380           cpi->summed_quality += frame_ssim2 * weight;
5381           cpi->summed_weights += weight;
5382 
5383 #if CONFIG_VP9_HIGHBITDEPTH
5384           if (cm->use_highbitdepth) {
5385             frame_ssim2 = vpx_highbd_calc_ssim(orig, pp, &weight, bit_depth,
5386                                                in_bit_depth);
5387           } else {
5388             frame_ssim2 = vpx_calc_ssim(orig, pp, &weight);
5389           }
5390 #else
5391           frame_ssim2 = vpx_calc_ssim(orig, pp, &weight);
5392 #endif  // CONFIG_VP9_HIGHBITDEPTH
5393 
5394           cpi->summedp_quality += frame_ssim2 * weight;
5395           cpi->summedp_weights += weight;
          // Disabled debug dump of per-frame quality figures.
#if 0
          {
            FILE *f = fopen("q_used.stt", "a");
            fprintf(f, "%5d : Y%7.3f:U%7.3f:V%7.3f:F%7.3f:S%7.3f\n",
                    cpi->common.current_video_frame, y2, u2, v2,
                    frame_psnr2, frame_ssim2);
            fclose(f);
          }
#endif
        }
      }
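      // Per-frame PSNR/SSIM accumulation ends here. The weighted SSIM sums
      // are assumed to be reduced to a sequence-level average (roughly
      // summed_quality / summed_weights) when the encoder reports its final
      // internal stats.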
      if (cpi->b_calculate_blockiness) {
#if CONFIG_VP9_HIGHBITDEPTH
        if (!cm->use_highbitdepth)
#endif
        {
          double frame_blockiness = vp9_get_blockiness(
              cpi->Source->y_buffer, cpi->Source->y_stride,
              cm->frame_to_show->y_buffer, cm->frame_to_show->y_stride,
              cpi->Source->y_width, cpi->Source->y_height);
          cpi->worst_blockiness =
              VPXMAX(cpi->worst_blockiness, frame_blockiness);
          cpi->total_blockiness += frame_blockiness;
        }
      }

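      // Frame-to-frame consistency: vpx_get_ssim_metrics() yields this frame's
      // inconsistency for the source/reconstruction pair (using the running
      // cpi->ssim_vars state), while the consistency figure reported below is
      // derived from the total gathered so far, i.e. before this frame's
      // value is added.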
      if (cpi->b_calculate_consistency) {
#if CONFIG_VP9_HIGHBITDEPTH
        if (!cm->use_highbitdepth)
#endif
        {
          double this_inconsistency = vpx_get_ssim_metrics(
              cpi->Source->y_buffer, cpi->Source->y_stride,
              cm->frame_to_show->y_buffer, cm->frame_to_show->y_stride,
              cpi->Source->y_width, cpi->Source->y_height, cpi->ssim_vars,
              &cpi->metrics, 1);

          const double peak = (double)((1 << cpi->oxcf.input_bit_depth) - 1);
          double consistency =
              vpx_sse_to_psnr(samples, peak, (double)cpi->total_inconsistency);
          if (consistency > 0.0)
            cpi->worst_consistency =
                VPXMIN(cpi->worst_consistency, consistency);
          cpi->total_inconsistency += this_inconsistency;
        }
      }

      {
        double y, u, v, frame_all;
        frame_all = vpx_calc_fastssim(cpi->Source, cm->frame_to_show, &y, &u,
                                      &v, bit_depth, in_bit_depth);
        adjust_image_stat(y, u, v, frame_all, &cpi->fastssim);
      }
      {
        double y, u, v, frame_all;
        frame_all = vpx_psnrhvs(cpi->Source, cm->frame_to_show, &y, &u, &v,
                                bit_depth, in_bit_depth);
        adjust_image_stat(y, u, v, frame_all, &cpi->psnrhvs);
      }
    }
  }

#endif

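  // SVC bookkeeping: once a visible frame has been produced, advance the
  // spatial-layer round-robin (wrapping back to layer 0 after the top layer).
  // Two-pass SVC additionally re-arms its "encode an empty frame" state.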
  if (is_two_pass_svc(cpi)) {
    if (cpi->svc.encode_empty_frame_state == ENCODING) {
      cpi->svc.encode_empty_frame_state = ENCODED;
      cpi->svc.encode_intra_empty_frame = 0;
    }

    if (cm->show_frame) {
      ++cpi->svc.spatial_layer_to_encode;
      if (cpi->svc.spatial_layer_to_encode >= cpi->svc.number_spatial_layers)
        cpi->svc.spatial_layer_to_encode = 0;

      // May need the empty frame after a visible frame.
      cpi->svc.encode_empty_frame_state = NEED_TO_ENCODE;
    }
  } else if (is_one_pass_cbr_svc(cpi)) {
    if (cm->show_frame) {
      ++cpi->svc.spatial_layer_to_encode;
      if (cpi->svc.spatial_layer_to_encode >= cpi->svc.number_spatial_layers)
        cpi->svc.spatial_layer_to_encode = 0;
    }
  }

  vpx_clear_system_state();
  return 0;
}

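// Returns the frame to be shown for preview. With CONFIG_VP9_POSTPROC the
// frame is run through vp9_post_proc_frame() into *dest; otherwise *dest is a
// shallow copy of cm->frame_to_show. Returns 0 on success, -1 when no
// displayable frame is available.
//
// Illustrative call sequence (sketch only; applications normally reach this
// through the vpx_codec_get_preview_frame() wrapper rather than directly):
//
//   YV12_BUFFER_CONFIG preview;
//   vp9_ppflags_t ppflags = { 0 };
//   if (vp9_get_preview_raw_frame(cpi, &preview, &ppflags) == 0) {
//     // preview.y_buffer / preview.y_stride now describe the visible frame.
//   }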
int vp9_get_preview_raw_frame(VP9_COMP *cpi, YV12_BUFFER_CONFIG *dest,
                              vp9_ppflags_t *flags) {
  VP9_COMMON *cm = &cpi->common;
#if !CONFIG_VP9_POSTPROC
  (void)flags;
#endif

  if (!cm->show_frame) {
    return -1;
  } else {
    int ret;
#if CONFIG_VP9_POSTPROC
    ret = vp9_post_proc_frame(cm, dest, flags);
#else
    if (cm->frame_to_show) {
      *dest = *cm->frame_to_show;
      dest->y_width = cm->width;
      dest->y_height = cm->height;
      dest->uv_width = cm->width >> cm->subsampling_x;
      dest->uv_height = cm->height >> cm->subsampling_y;
      ret = 0;
    } else {
      ret = -1;
    }
#endif  // !CONFIG_VP9_POSTPROC
    vpx_clear_system_state();
    return ret;
  }
}

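// Sets the internal (coded) frame size as a fraction of the configured size.
// Scale2Ratio() maps the VPX_SCALING mode to a ratio numerator/denominator and
// the division below rounds up. For example, assuming ONETWO maps to 1/2, a
// 1920-wide configuration yields cm->width = (2 - 1 + 1920 * 1) / 2 = 960.
// Returns -1 if either scaling mode is out of range, 0 otherwise.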
int vp9_set_internal_size(VP9_COMP *cpi, VPX_SCALING horiz_mode,
                          VPX_SCALING vert_mode) {
  VP9_COMMON *cm = &cpi->common;
  int hr = 0, hs = 0, vr = 0, vs = 0;

  if (horiz_mode > ONETWO || vert_mode > ONETWO) return -1;

  Scale2Ratio(horiz_mode, &hr, &hs);
  Scale2Ratio(vert_mode, &vr, &vs);

  // Always round up to the next whole number.
  cm->width = (hs - 1 + cpi->oxcf.width * hr) / hs;
  cm->height = (vs - 1 + cpi->oxcf.height * vr) / vs;
  if (cm->current_video_frame) {
    assert(cm->width <= cpi->initial_width);
    assert(cm->height <= cpi->initial_height);
  }

  update_frame_size(cpi);

  return 0;
}

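// Overrides the coded frame size directly. A zero width or height leaves that
// dimension unchanged, and requests larger than the initially allocated size
// are clamped (with a warning) so existing buffers stay valid. Returns 0.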
int vp9_set_size_literal(VP9_COMP *cpi, unsigned int width,
                         unsigned int height) {
  VP9_COMMON *cm = &cpi->common;
#if CONFIG_VP9_HIGHBITDEPTH
  check_initial_width(cpi, cm->use_highbitdepth, 1, 1);
#else
  check_initial_width(cpi, 1, 1);
#endif  // CONFIG_VP9_HIGHBITDEPTH

#if CONFIG_VP9_TEMPORAL_DENOISING
  setup_denoiser_buffer(cpi);
#endif

  if (width) {
    cm->width = width;
    if (cm->width > cpi->initial_width) {
      cm->width = cpi->initial_width;
      printf("Warning: Desired width too large, changed to %d\n", cm->width);
    }
  }

  if (height) {
    cm->height = height;
    if (cm->height > cpi->initial_height) {
      cm->height = cpi->initial_height;
      printf("Warning: Desired height too large, changed to %d\n", cm->height);
    }
  }
  assert(cm->width <= cpi->initial_width);
  assert(cm->height <= cpi->initial_height);

  update_frame_size(cpi);

  return 0;
}

void vp9_set_svc(VP9_COMP *cpi, int use_svc) { cpi->use_svc = use_svc; }

int vp9_get_quantizer(VP9_COMP *cpi) { return cpi->common.base_qindex; }

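// Translates VP8-style per-frame flags into VP9 reference and update masks.
// Both masks start at 7 (LAST | GOLDEN | ALTREF) and each NO_REF_* / NO_UPD_*
// flag clears the corresponding bit. Illustrative sketch, assuming the
// VP9_LAST_FLAG / VP9_GOLD_FLAG / VP9_ALT_FLAG bits are 1 / 2 / 4:
//
//   vp9_apply_encoding_flags(cpi, VP8_EFLAG_NO_REF_GF);
//   // ends up calling vp9_use_as_reference(cpi, 7 ^ 2), i.e. LAST | ALTREF.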
void vp9_apply_encoding_flags(VP9_COMP *cpi, vpx_enc_frame_flags_t flags) {
  if (flags &
      (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF)) {
    int ref = 7;

    if (flags & VP8_EFLAG_NO_REF_LAST) ref ^= VP9_LAST_FLAG;

    if (flags & VP8_EFLAG_NO_REF_GF) ref ^= VP9_GOLD_FLAG;

    if (flags & VP8_EFLAG_NO_REF_ARF) ref ^= VP9_ALT_FLAG;

    vp9_use_as_reference(cpi, ref);
  }

  if (flags &
      (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
       VP8_EFLAG_FORCE_GF | VP8_EFLAG_FORCE_ARF)) {
    int upd = 7;

    if (flags & VP8_EFLAG_NO_UPD_LAST) upd ^= VP9_LAST_FLAG;

    if (flags & VP8_EFLAG_NO_UPD_GF) upd ^= VP9_GOLD_FLAG;

    if (flags & VP8_EFLAG_NO_UPD_ARF) upd ^= VP9_ALT_FLAG;

    vp9_update_reference(cpi, upd);
  }

  if (flags & VP8_EFLAG_NO_UPD_ENTROPY) {
    vp9_update_entropy(cpi, 0);
  }
}

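// Decides whether row-based multi-threading is used for this encode:
//  - GOOD/BEST first pass and GOOD pass 0/2, at speeds below 5 (non-SVC only);
//  - REALTIME at speed 5 and above, where the non-RD pick-mode path is used.
// In every case row_mt must have been requested in the encoder config.
// row_mt_bit_exact is set only when more than one thread may actually be used.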
void vp9_set_row_mt(VP9_COMP *cpi) {
  // Enable row-based multi-threading for supported encoding modes.
  cpi->row_mt = 0;
  if (((cpi->oxcf.mode == GOOD || cpi->oxcf.mode == BEST) &&
       cpi->oxcf.speed < 5 && cpi->oxcf.pass == 1) &&
      cpi->oxcf.row_mt && !cpi->use_svc)
    cpi->row_mt = 1;

  if (cpi->oxcf.mode == GOOD && cpi->oxcf.speed < 5 &&
      (cpi->oxcf.pass == 0 || cpi->oxcf.pass == 2) && cpi->oxcf.row_mt &&
      !cpi->use_svc)
    cpi->row_mt = 1;

  // In real-time mode, enable row-based multi-threading for all speed levels
  // where the non-RD path is used.
  if (cpi->oxcf.mode == REALTIME && cpi->oxcf.speed >= 5 && cpi->oxcf.row_mt) {
    cpi->row_mt = 1;
  }

  if (cpi->row_mt && cpi->oxcf.max_threads > 1)
    cpi->row_mt_bit_exact = 1;
  else
    cpi->row_mt_bit_exact = 0;
}