/*
 * Copyright (c) 2019, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <stdint.h>

#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"

#include "aom/aom_codec.h"

#include "av1/common/onyxc_int.h"
#include "av1/common/reconintra.h"

#include "av1/encoder/encoder.h"
#include "av1/encoder/reconinter_enc.h"

typedef struct GF_PICTURE {
  YV12_BUFFER_CONFIG *frame;
  int ref_frame[7];
} GF_PICTURE;

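// Computes the quantization error of one transform block: forward-quantizes
// and dequantizes `coeff` with the frame's 32x32 FP quantizer, then reports
// the reconstruction error and SSE, scaled (via `shift`) so results are
// comparable across transform sizes and clamped to at least 1 so later
// divisions stay well defined.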
static void get_quantize_error(MACROBLOCK *x, int plane, tran_low_t *coeff,
                               tran_low_t *qcoeff, tran_low_t *dqcoeff,
                               TX_SIZE tx_size, int64_t *recon_error,
                               int64_t *sse) {
  const struct macroblock_plane *const p = &x->plane[plane];
  const SCAN_ORDER *const scan_order = &av1_default_scan_orders[tx_size];
  uint16_t eob;
  int pix_num = 1 << num_pels_log2_lookup[txsize_to_bsize[tx_size]];
  const int shift = tx_size == TX_32X32 ? 0 : 2;

  av1_quantize_fp_32x32(coeff, pix_num, p->zbin_QTX, p->round_fp_QTX,
                        p->quant_fp_QTX, p->quant_shift_QTX, qcoeff, dqcoeff,
                        p->dequant_QTX, &eob, scan_order->scan,
                        scan_order->iscan);

  *recon_error = av1_block_error(coeff, dqcoeff, pix_num, sse) >> shift;
  *recon_error = AOMMAX(*recon_error, 1);

  *sse = (*sse) >> shift;
  *sse = AOMMAX(*sse, 1);
}

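// Forward transform using the Hadamard (WHT) butterflies as a cheap stand-in
// for the DCT when estimating coefficient cost; only the square transform
// sizes used by the TPL model are supported.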
static void wht_fwd_txfm(int16_t *src_diff, int bw, tran_low_t *coeff,
                         TX_SIZE tx_size) {
  switch (tx_size) {
    case TX_8X8: aom_hadamard_8x8(src_diff, bw, coeff); break;
    case TX_16X16: aom_hadamard_16x16(src_diff, bw, coeff); break;
    case TX_32X32: aom_hadamard_32x32(src_diff, bw, coeff); break;
    default: assert(0);
  }
}

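// Runs a full-pel NSTEP motion search from a zero reference MV, followed by
// subpel refinement. The best MV is left in x->best_mv; the subpel error is
// returned.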
static uint32_t motion_compensated_prediction(AV1_COMP *cpi, ThreadData *td,
                                              uint8_t *cur_frame_buf,
                                              uint8_t *ref_frame_buf,
                                              int stride, BLOCK_SIZE bsize,
                                              int mi_row, int mi_col) {
  AV1_COMMON *cm = &cpi->common;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
  const SEARCH_METHODS search_method = NSTEP;
  int step_param;
  int sadpb = x->sadperbit16;
  uint32_t bestsme = UINT_MAX;
  int distortion;
  uint32_t sse;
  int cost_list[5];
  const MvLimits tmp_mv_limits = x->mv_limits;

  MV best_ref_mv1 = { 0, 0 };
  MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */

  best_ref_mv1_full.col = best_ref_mv1.col >> 3;
  best_ref_mv1_full.row = best_ref_mv1.row >> 3;

  // Setup frame pointers
  x->plane[0].src.buf = cur_frame_buf;
  x->plane[0].src.stride = stride;
  xd->plane[0].pre[0].buf = ref_frame_buf;
  xd->plane[0].pre[0].stride = stride;

  step_param = mv_sf->reduce_first_step_size;
  step_param = AOMMIN(step_param, MAX_MVSEARCH_STEPS - 2);

  av1_set_mv_search_range(&x->mv_limits, &best_ref_mv1);

  av1_full_pixel_search(cpi, x, bsize, &best_ref_mv1_full, step_param,
                        search_method, 0, sadpb, cond_cost_list(cpi, cost_list),
                        &best_ref_mv1, INT_MAX, 0, (MI_SIZE * mi_col),
                        (MI_SIZE * mi_row), 0, &cpi->ss_cfg[SS_CFG_SRC]);

  /* restore UMV window */
  x->mv_limits = tmp_mv_limits;

  const int pw = block_size_wide[bsize];
  const int ph = block_size_high[bsize];
  bestsme = cpi->find_fractional_mv_step(
      x, cm, mi_row, mi_col, &best_ref_mv1, cpi->common.allow_high_precision_mv,
      x->errorperbit, &cpi->fn_ptr[bsize], 0, mv_sf->subpel_iters_per_step,
      cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, NULL,
      0, 0, pw, ph, 1, 1);

  return bestsme;
}

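// Estimates the rate-distortion behavior of one block: searches all intra
// prediction modes for the best SATD-based intra cost, then motion searches
// each available reference frame for the best inter cost, and fills
// `tpl_stats` with the winning costs, reference index and motion vector.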
static void mode_estimation(AV1_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
                            struct scale_factors *sf, GF_PICTURE *gf_picture,
                            int frame_idx, int16_t *src_diff, tran_low_t *coeff,
                            tran_low_t *qcoeff, tran_low_t *dqcoeff, int mi_row,
                            int mi_col, BLOCK_SIZE bsize, TX_SIZE tx_size,
                            YV12_BUFFER_CONFIG *ref_frame[], uint8_t *predictor,
                            TplDepStats *tpl_stats) {
  AV1_COMMON *cm = &cpi->common;
  ThreadData *td = &cpi->td;

  const int bw = 4 << mi_size_wide_log2[bsize];
  const int bh = 4 << mi_size_high_log2[bsize];
  const int pix_num = bw * bh;
  int best_rf_idx = -1;
  int_mv best_mv;
  int64_t best_inter_cost = INT64_MAX;
  int64_t inter_cost;
  int rf_idx;
  const InterpFilters kernel =
      av1_make_interp_filters(EIGHTTAP_REGULAR, EIGHTTAP_REGULAR);

  int64_t best_intra_cost = INT64_MAX;
  int64_t intra_cost;
  PREDICTION_MODE mode;
  int mb_y_offset = mi_row * MI_SIZE * xd->cur_buf->y_stride + mi_col * MI_SIZE;
  MB_MODE_INFO mi_above, mi_left;

  memset(tpl_stats, 0, sizeof(*tpl_stats));

  xd->mb_to_top_edge = -((mi_row * MI_SIZE) * 8);
  xd->mb_to_bottom_edge = ((cm->mi_rows - 1 - mi_row) * MI_SIZE) * 8;
  xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8);
  xd->mb_to_right_edge = ((cm->mi_cols - 1 - mi_col) * MI_SIZE) * 8;
  xd->above_mbmi = (mi_row > 0) ? &mi_above : NULL;
  xd->left_mbmi = (mi_col > 0) ? &mi_left : NULL;

  // Intra prediction search
  for (mode = DC_PRED; mode <= PAETH_PRED; ++mode) {
    uint8_t *src, *dst;
    int src_stride, dst_stride;

    src = xd->cur_buf->y_buffer + mb_y_offset;
    src_stride = xd->cur_buf->y_stride;

    dst = &predictor[0];
    dst_stride = bw;

    xd->mi[0]->sb_type = bsize;
    xd->mi[0]->ref_frame[0] = INTRA_FRAME;

    av1_predict_intra_block(
        cm, xd, block_size_wide[bsize], block_size_high[bsize], tx_size, mode,
        0, 0, FILTER_INTRA_MODES, src, src_stride, dst, dst_stride, 0, 0, 0);

    if (is_cur_buf_hbd(xd)) {
      aom_highbd_subtract_block(bh, bw, src_diff, bw, src, src_stride, dst,
                                dst_stride, xd->bd);
    } else {
      aom_subtract_block(bh, bw, src_diff, bw, src, src_stride, dst,
                         dst_stride);
    }

    wht_fwd_txfm(src_diff, bw, coeff, tx_size);

    intra_cost = aom_satd(coeff, pix_num);

    if (intra_cost < best_intra_cost) best_intra_cost = intra_cost;
  }

  // Motion compensated prediction
  best_mv.as_int = 0;

  (void)mb_y_offset;
  // Motion estimation column boundary
  x->mv_limits.col_min = -((mi_col * MI_SIZE) + (17 - 2 * AOM_INTERP_EXTEND));
  x->mv_limits.col_max =
      ((cm->mi_cols - 1 - mi_col) * MI_SIZE) + (17 - 2 * AOM_INTERP_EXTEND);

  for (rf_idx = 0; rf_idx < 7; ++rf_idx) {
    if (ref_frame[rf_idx] == NULL) continue;

    motion_compensated_prediction(cpi, td, xd->cur_buf->y_buffer + mb_y_offset,
                                  ref_frame[rf_idx]->y_buffer + mb_y_offset,
                                  xd->cur_buf->y_stride, bsize, mi_row, mi_col);

    // TODO(jingning): High bit-depth is not yet supported in the next three
    // steps.
    ConvolveParams conv_params = get_conv_params(0, 0, xd->bd);
    WarpTypesAllowed warp_types;
    memset(&warp_types, 0, sizeof(WarpTypesAllowed));

    av1_build_inter_predictor(
        ref_frame[rf_idx]->y_buffer + mb_y_offset, ref_frame[rf_idx]->y_stride,
        &predictor[0], bw, &x->best_mv.as_mv, sf, bw, bh, &conv_params, kernel,
        &warp_types, mi_col * MI_SIZE, mi_row * MI_SIZE, 0, 0, MV_PRECISION_Q3,
        mi_col * MI_SIZE, mi_row * MI_SIZE, xd, 0);
    if (is_cur_buf_hbd(xd)) {
      aom_highbd_subtract_block(
          bh, bw, src_diff, bw, xd->cur_buf->y_buffer + mb_y_offset,
          xd->cur_buf->y_stride, &predictor[0], bw, xd->bd);
    } else {
      aom_subtract_block(bh, bw, src_diff, bw,
                         xd->cur_buf->y_buffer + mb_y_offset,
                         xd->cur_buf->y_stride, &predictor[0], bw);
    }
    wht_fwd_txfm(src_diff, bw, coeff, tx_size);

    inter_cost = aom_satd(coeff, pix_num);
    if (inter_cost < best_inter_cost) {
      int64_t recon_error, sse;

      best_rf_idx = rf_idx;
      best_inter_cost = inter_cost;
      best_mv.as_int = x->best_mv.as_int;
      get_quantize_error(x, 0, coeff, qcoeff, dqcoeff, tx_size, &recon_error,
                         &sse);
    }
  }
  best_intra_cost = AOMMAX(best_intra_cost, 1);
  best_inter_cost = AOMMIN(best_intra_cost, best_inter_cost);
  tpl_stats->inter_cost = best_inter_cost << TPL_DEP_COST_SCALE_LOG2;
  tpl_stats->intra_cost = best_intra_cost << TPL_DEP_COST_SCALE_LOG2;
  tpl_stats->mc_dep_cost = tpl_stats->intra_cost + tpl_stats->mc_flow;

  tpl_stats->ref_frame_index = gf_picture[frame_idx].ref_frame[best_rf_idx];
  tpl_stats->mv.as_int = best_mv.as_int;
}

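// Integer division that rounds toward negative infinity. Plain C division
// truncates toward zero, so e.g. -1 / 16 == 0, while round_floor(-1, 16)
// returns -1; negative reference positions must map to the grid block above
// or to the left.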
static int round_floor(int ref_pos, int bsize_pix) {
  int round;
  if (ref_pos < 0)
    round = -(1 + (-ref_pos - 1) / bsize_pix);
  else
    round = ref_pos / bsize_pix;

  return round;
}

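// A motion-compensated block generally straddles up to four blocks of the
// reference frame's block grid. `block` selects one of them (0 = top-left,
// 1 = top-right, 2 = bottom-left, 3 = bottom-right); the return value is the
// overlap between that grid block and the predictor, in pixels.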
static int get_overlap_area(int grid_pos_row, int grid_pos_col, int ref_pos_row,
                            int ref_pos_col, int block, BLOCK_SIZE bsize) {
  int width = 0, height = 0;
  int bw = 4 << mi_size_wide_log2[bsize];
  int bh = 4 << mi_size_high_log2[bsize];

  switch (block) {
    case 0:
      width = grid_pos_col + bw - ref_pos_col;
      height = grid_pos_row + bh - ref_pos_row;
      break;
    case 1:
      width = ref_pos_col + bw - grid_pos_col;
      height = grid_pos_row + bh - ref_pos_row;
      break;
    case 2:
      width = grid_pos_col + bw - ref_pos_col;
      height = ref_pos_row + bh - grid_pos_row;
      break;
    case 3:
      width = ref_pos_col + bw - grid_pos_col;
      height = ref_pos_row + bh - grid_pos_row;
      break;
    default: assert(0);
  }

  return width * height;
}

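// Back-propagates one block's dependency cost to the reference frame it
// predicts from. The propagated quantity is
//   mc_flow = mc_dep_cost * (1 - inter_cost / intra_cost),
// i.e. the fraction of this block's cost explained by the reference, and it
// is distributed over the (up to four) overlapped reference blocks in
// proportion to their overlap area.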
static void tpl_model_update_b(TplDepFrame *tpl_frame, TplDepStats *tpl_stats,
                               int mi_row, int mi_col, const BLOCK_SIZE bsize) {
  TplDepFrame *ref_tpl_frame = &tpl_frame[tpl_stats->ref_frame_index];
  TplDepStats *ref_stats = ref_tpl_frame->tpl_stats_ptr;
  MV mv = tpl_stats->mv.as_mv;
  int mv_row = mv.row >> 3;
  int mv_col = mv.col >> 3;

  int ref_pos_row = mi_row * MI_SIZE + mv_row;
  int ref_pos_col = mi_col * MI_SIZE + mv_col;

  const int bw = 4 << mi_size_wide_log2[bsize];
  const int bh = 4 << mi_size_high_log2[bsize];
  const int mi_height = mi_size_high[bsize];
  const int mi_width = mi_size_wide[bsize];
  const int pix_num = bw * bh;

  // Top-left pixel position of the base grid block.
  int grid_pos_row_base = round_floor(ref_pos_row, bh) * bh;
  int grid_pos_col_base = round_floor(ref_pos_col, bw) * bw;
  int block;

  for (block = 0; block < 4; ++block) {
    int grid_pos_row = grid_pos_row_base + bh * (block >> 1);
    int grid_pos_col = grid_pos_col_base + bw * (block & 0x01);

    if (grid_pos_row >= 0 && grid_pos_row < ref_tpl_frame->mi_rows * MI_SIZE &&
        grid_pos_col >= 0 && grid_pos_col < ref_tpl_frame->mi_cols * MI_SIZE) {
      int overlap_area = get_overlap_area(
          grid_pos_row, grid_pos_col, ref_pos_row, ref_pos_col, block, bsize);
      int ref_mi_row = round_floor(grid_pos_row, bh) * mi_height;
      int ref_mi_col = round_floor(grid_pos_col, bw) * mi_width;

      int64_t mc_flow = tpl_stats->mc_dep_cost -
                        (tpl_stats->mc_dep_cost * tpl_stats->inter_cost) /
                            tpl_stats->intra_cost;

      int idx, idy;

      for (idy = 0; idy < mi_height; ++idy) {
        for (idx = 0; idx < mi_width; ++idx) {
          TplDepStats *des_stats =
              &ref_stats[(ref_mi_row + idy) * ref_tpl_frame->stride +
                         (ref_mi_col + idx)];

          des_stats->mc_flow += (mc_flow * overlap_area) / pix_num;
          assert(overlap_area >= 0);
        }
      }
    }
  }
}

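// Propagates the stats of every 4x4 mi unit inside the block through
// tpl_model_update_b(), so propagation always happens at the finest
// granularity regardless of the block size used for estimation.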
static void tpl_model_update(TplDepFrame *tpl_frame, TplDepStats *tpl_stats,
                             int mi_row, int mi_col, const BLOCK_SIZE bsize) {
  int idx, idy;
  const int mi_height = mi_size_high[bsize];
  const int mi_width = mi_size_wide[bsize];

  for (idy = 0; idy < mi_height; ++idy) {
    for (idx = 0; idx < mi_width; ++idx) {
      TplDepStats *tpl_ptr =
          &tpl_stats[(mi_row + idy) * tpl_frame->stride + (mi_col + idx)];
      tpl_model_update_b(tpl_frame, tpl_ptr, mi_row + idy, mi_col + idx,
                         BLOCK_4X4);
    }
  }
}

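// Stores one block's estimation results into the frame's stats grid,
// replicating them to every 4x4 mi unit the block covers. Costs are averaged
// per unit and clamped to at least 1, keeping later per-unit propagation
// consistent with the block totals.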
static void tpl_model_store(TplDepStats *tpl_stats, int mi_row, int mi_col,
                            BLOCK_SIZE bsize, int stride,
                            const TplDepStats *src_stats) {
  const int mi_height = mi_size_high[bsize];
  const int mi_width = mi_size_wide[bsize];
  int idx, idy;

  int64_t intra_cost = src_stats->intra_cost / (mi_height * mi_width);
  int64_t inter_cost = src_stats->inter_cost / (mi_height * mi_width);

  TplDepStats *tpl_ptr;

  intra_cost = AOMMAX(1, intra_cost);
  inter_cost = AOMMAX(1, inter_cost);

  for (idy = 0; idy < mi_height; ++idy) {
    tpl_ptr = &tpl_stats[(mi_row + idy) * stride + mi_col];
    for (idx = 0; idx < mi_width; ++idx) {
      tpl_ptr->intra_cost = intra_cost;
      tpl_ptr->inter_cost = inter_cost;
      tpl_ptr->mc_dep_cost = tpl_ptr->intra_cost + tpl_ptr->mc_flow;
      tpl_ptr->ref_frame_index = src_stats->ref_frame_index;
      tpl_ptr->mv.as_int = src_stats->mv.as_int;
      ++tpl_ptr;
    }
  }
}

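// Processes one frame of the GF group: walks the frame in 32x32 blocks,
// runs mode_estimation() on each, stores the per-block stats, and then
// propagates the dependency costs into the frame's references.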
static void mc_flow_dispenser(AV1_COMP *cpi, GF_PICTURE *gf_picture,
                              int frame_idx) {
  TplDepFrame *tpl_frame = &cpi->tpl_stats[frame_idx];
  YV12_BUFFER_CONFIG *this_frame = gf_picture[frame_idx].frame;
  YV12_BUFFER_CONFIG *ref_frame[7] = {
    NULL, NULL, NULL, NULL, NULL, NULL, NULL
  };

  AV1_COMMON *cm = &cpi->common;
  struct scale_factors sf;
  int rdmult, idx;
  ThreadData *td = &cpi->td;
  MACROBLOCK *x = &td->mb;
  MACROBLOCKD *xd = &x->e_mbd;
  int mi_row, mi_col;

  DECLARE_ALIGNED(32, uint16_t, predictor16[32 * 32 * 3]);
  DECLARE_ALIGNED(32, uint8_t, predictor8[32 * 32 * 3]);
  uint8_t *predictor;
  DECLARE_ALIGNED(32, int16_t, src_diff[32 * 32]);
  DECLARE_ALIGNED(32, tran_low_t, coeff[32 * 32]);
  DECLARE_ALIGNED(32, tran_low_t, qcoeff[32 * 32]);
  DECLARE_ALIGNED(32, tran_low_t, dqcoeff[32 * 32]);

  const BLOCK_SIZE bsize = BLOCK_32X32;
  const TX_SIZE tx_size = max_txsize_lookup[bsize];
  const int mi_height = mi_size_high[bsize];
  const int mi_width = mi_size_wide[bsize];

  // Setup scaling factor
  av1_setup_scale_factors_for_frame(
      &sf, this_frame->y_crop_width, this_frame->y_crop_height,
      this_frame->y_crop_width, this_frame->y_crop_height);

  if (is_cur_buf_hbd(xd))
    predictor = CONVERT_TO_BYTEPTR(predictor16);
  else
    predictor = predictor8;

  // Prepare reference frame pointers. If a reference frame slot is
  // unavailable, the pointer is left as NULL.
  for (idx = 0; idx < 7; ++idx) {
    int rf_idx = gf_picture[frame_idx].ref_frame[idx];
    if (rf_idx != -1) ref_frame[idx] = gf_picture[rf_idx].frame;
  }

  xd->mi = cm->mi_grid_visible;
  xd->mi[0] = cm->mi;
  xd->cur_buf = this_frame;

  // Get rd multiplier set up.
  rdmult = (int)av1_compute_rd_mult(cpi, tpl_frame->base_qindex);
  if (rdmult < 1) rdmult = 1;
  set_error_per_bit(x, rdmult);
  av1_initialize_me_consts(cpi, x, tpl_frame->base_qindex);

  tpl_frame->is_valid = 1;

  cm->base_qindex = tpl_frame->base_qindex;
  av1_frame_init_quantizer(cpi);

  for (mi_row = 0; mi_row < cm->mi_rows; mi_row += mi_height) {
    // Motion estimation row boundary
    x->mv_limits.row_min = -((mi_row * MI_SIZE) + (17 - 2 * AOM_INTERP_EXTEND));
    x->mv_limits.row_max =
        (cm->mi_rows - 1 - mi_row) * MI_SIZE + (17 - 2 * AOM_INTERP_EXTEND);
    for (mi_col = 0; mi_col < cm->mi_cols; mi_col += mi_width) {
      TplDepStats tpl_stats;
      mode_estimation(cpi, x, xd, &sf, gf_picture, frame_idx, src_diff, coeff,
                      qcoeff, dqcoeff, mi_row, mi_col, bsize, tx_size,
                      ref_frame, predictor, &tpl_stats);

      // Motion flow dependency dispenser.
      tpl_model_store(tpl_frame->tpl_stats_ptr, mi_row, mi_col, bsize,
                      tpl_frame->stride, &tpl_stats);

      tpl_model_update(cpi->tpl_stats, tpl_frame->tpl_stats_ptr, mi_row, mi_col,
                       bsize);
    }
  }
}

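// Builds the ordered picture list for the GF group: slot 0 is the golden
// reference frame, slot 1 the ARF (taken from frame_input->source), followed
// by the P frames pulled from the lookahead buffer, plus up to two frames
// beyond the group so trailing dependencies are still accounted for. Also
// grabs spare frame buffers for the reconstructions the TPL pass produces.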
static void init_gop_frames(AV1_COMP *cpi, GF_PICTURE *gf_picture,
                            const GF_GROUP *gf_group, int *tpl_group_frames,
                            const EncodeFrameInput *const frame_input) {
  AV1_COMMON *cm = &cpi->common;
  const SequenceHeader *const seq_params = &cm->seq_params;
  int frame_idx = 0;
  int i;
  int gld_index = -1;
  int alt_index = -1;
  int lst_index = -1;
  int extend_frame_count = 0;
  int pframe_qindex = cpi->tpl_stats[2].base_qindex;

  RefCntBuffer *frame_bufs = cm->buffer_pool->frame_bufs;
  int recon_frame_index[INTER_REFS_PER_FRAME + 1] = { -1, -1, -1, -1,
                                                      -1, -1, -1, -1 };

  // TODO(jingning): To be used later for gf frame type parsing.
  (void)gf_group;

  for (i = 0; i < FRAME_BUFFERS && frame_idx < INTER_REFS_PER_FRAME + 1; ++i) {
    if (frame_bufs[i].ref_count == 0) {
      alloc_frame_mvs(cm, &frame_bufs[i]);
      if (aom_realloc_frame_buffer(
              &frame_bufs[i].buf, cm->width, cm->height,
              seq_params->subsampling_x, seq_params->subsampling_y,
              seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
              cm->byte_alignment, NULL, NULL, NULL))
        aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                           "Failed to allocate frame buffer");

      recon_frame_index[frame_idx] = i;
      ++frame_idx;
    }
  }

  for (i = 0; i < INTER_REFS_PER_FRAME + 1; ++i) {
    assert(recon_frame_index[i] >= 0);
    cpi->tpl_recon_frames[i] = &frame_bufs[recon_frame_index[i]].buf;
  }

  *tpl_group_frames = 0;

  // Initialize Golden reference frame.
  gf_picture[0].frame = NULL;
  RefCntBuffer *ref_buf = get_ref_frame_buf(cm, GOLDEN_FRAME);
  if (ref_buf) gf_picture[0].frame = &ref_buf->buf;
  for (i = 0; i < 7; ++i) gf_picture[0].ref_frame[i] = -1;
  gld_index = 0;
  ++*tpl_group_frames;

  // Initialize ARF frame
  gf_picture[1].frame = frame_input->source;
  gf_picture[1].ref_frame[0] = gld_index;
  gf_picture[1].ref_frame[1] = lst_index;
  gf_picture[1].ref_frame[2] = alt_index;
  // TODO(yuec): Need to figure out the full AV1 reference model.
  for (i = 3; i < 7; ++i) gf_picture[1].ref_frame[i] = -1;
  alt_index = 1;
  ++*tpl_group_frames;

  // Initialize P frames
  for (frame_idx = 2; frame_idx < MAX_LAG_BUFFERS; ++frame_idx) {
    struct lookahead_entry *buf =
        av1_lookahead_peek(cpi->lookahead, frame_idx - 2);

    if (buf == NULL) break;

    gf_picture[frame_idx].frame = &buf->img;
    gf_picture[frame_idx].ref_frame[0] = gld_index;
    gf_picture[frame_idx].ref_frame[1] = lst_index;
    gf_picture[frame_idx].ref_frame[2] = alt_index;
    for (i = 3; i < 7; ++i) gf_picture[frame_idx].ref_frame[i] = -1;

    ++*tpl_group_frames;
    lst_index = frame_idx;

    if (frame_idx == cpi->rc.baseline_gf_interval + 1) break;
  }

  gld_index = frame_idx;
  lst_index = AOMMAX(0, frame_idx - 1);
  alt_index = -1;
  ++frame_idx;

  // Extend two frames outside the current gf group.
  for (; frame_idx < MAX_LAG_BUFFERS && extend_frame_count < 2; ++frame_idx) {
    struct lookahead_entry *buf =
        av1_lookahead_peek(cpi->lookahead, frame_idx - 2);

    if (buf == NULL) break;

    cpi->tpl_stats[frame_idx].base_qindex = pframe_qindex;

    gf_picture[frame_idx].frame = &buf->img;
    gf_picture[frame_idx].ref_frame[0] = gld_index;
    gf_picture[frame_idx].ref_frame[1] = lst_index;
    gf_picture[frame_idx].ref_frame[2] = alt_index;
    for (i = 3; i < 7; ++i) gf_picture[frame_idx].ref_frame[i] = -1;
    lst_index = frame_idx;
    ++*tpl_group_frames;
    ++extend_frame_count;
  }
}

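// Resets the per-frame TPL stats buffers and marks every frame invalid
// before a new group is analyzed.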
static void init_tpl_stats(AV1_COMP *cpi) {
  int frame_idx;
  for (frame_idx = 0; frame_idx < MAX_LAG_BUFFERS; ++frame_idx) {
    TplDepFrame *tpl_frame = &cpi->tpl_stats[frame_idx];
    memset(tpl_frame->tpl_stats_ptr, 0,
           tpl_frame->height * tpl_frame->width *
               sizeof(*tpl_frame->tpl_stats_ptr));
    tpl_frame->is_valid = 0;
  }
}

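// Entry point of the TPL model. Builds the GF group picture list, clears the
// stats, then runs the dispenser over the group in reverse order (last frame
// first) so each frame's dependency costs are fully accumulated before they
// are propagated into its references.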
void av1_tpl_setup_stats(AV1_COMP *cpi,
                         const EncodeFrameInput *const frame_input) {
  GF_PICTURE gf_picture[MAX_LAG_BUFFERS];
  const GF_GROUP *gf_group = &cpi->twopass.gf_group;
  int tpl_group_frames = 0;
  int frame_idx;

  init_gop_frames(cpi, gf_picture, gf_group, &tpl_group_frames, frame_input);

  init_tpl_stats(cpi);

  // Backward propagation from tpl_group_frames to 1.
  for (frame_idx = tpl_group_frames - 1; frame_idx > 0; --frame_idx)
    mc_flow_dispenser(cpi, gf_picture, frame_idx);
}