/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <math.h>

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"

#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/system_state.h"

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_scan.h"
#include "vp9/common/vp9_seg_common.h"

#if !CONFIG_REALTIME_ONLY
#include "vp9/encoder/vp9_aq_variance.h"
#endif
#include "vp9/encoder/vp9_cost.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_quantize.h"
#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_rdopt.h"

#define LAST_FRAME_MODE_MASK \
  ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
#define GOLDEN_FRAME_MODE_MASK \
  ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
#define ALT_REF_MODE_MASK \
  ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | (1 << INTRA_FRAME))

#define SECOND_REF_FRAME_MASK ((1 << ALTREF_FRAME) | 0x01)
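// A reading of the masks above (an editorial note, not from the source):
// each *_MODE_MASK is a bitmask over MV_REFERENCE_FRAME values that flags
// every reference except the one it is named after, so it can be used to
// skip all modes that do not use that reference. In SECOND_REF_FRAME_MASK,
// bit 0 presumably stands for the "no second reference" case alongside
// ALTREF_FRAME, the only legal second reference in VP9 compound prediction.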

#define MIN_EARLY_TERM_INDEX 3
#define NEW_MV_DISCOUNT_FACTOR 8

typedef struct {
  PREDICTION_MODE mode;
  MV_REFERENCE_FRAME ref_frame[2];
} MODE_DEFINITION;

typedef struct {
  MV_REFERENCE_FRAME ref_frame[2];
} REF_DEFINITION;

struct rdcost_block_args {
  const VP9_COMP *cpi;
  MACROBLOCK *x;
  ENTROPY_CONTEXT t_above[16];
  ENTROPY_CONTEXT t_left[16];
  int this_rate;
  int64_t this_dist;
  int64_t this_sse;
  int64_t this_rd;
  int64_t best_rd;
  int exit_early;
  int use_fast_coef_costing;
  const scan_order *so;
  uint8_t skippable;
  struct buf_2d *this_recon;
};

#define LAST_NEW_MV_INDEX 6

#if !CONFIG_REALTIME_ONLY
static const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
  { NEARESTMV, { LAST_FRAME, NONE } },
  { NEARESTMV, { ALTREF_FRAME, NONE } },
  { NEARESTMV, { GOLDEN_FRAME, NONE } },

  { DC_PRED, { INTRA_FRAME, NONE } },

  { NEWMV, { LAST_FRAME, NONE } },
  { NEWMV, { ALTREF_FRAME, NONE } },
  { NEWMV, { GOLDEN_FRAME, NONE } },

  { NEARMV, { LAST_FRAME, NONE } },
  { NEARMV, { ALTREF_FRAME, NONE } },
  { NEARMV, { GOLDEN_FRAME, NONE } },

  { ZEROMV, { LAST_FRAME, NONE } },
  { ZEROMV, { GOLDEN_FRAME, NONE } },
  { ZEROMV, { ALTREF_FRAME, NONE } },

  { NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
  { NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } },

  { TM_PRED, { INTRA_FRAME, NONE } },

  { NEARMV, { LAST_FRAME, ALTREF_FRAME } },
  { NEWMV, { LAST_FRAME, ALTREF_FRAME } },
  { NEARMV, { GOLDEN_FRAME, ALTREF_FRAME } },
  { NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } },

  { ZEROMV, { LAST_FRAME, ALTREF_FRAME } },
  { ZEROMV, { GOLDEN_FRAME, ALTREF_FRAME } },

  { H_PRED, { INTRA_FRAME, NONE } },
  { V_PRED, { INTRA_FRAME, NONE } },
  { D135_PRED, { INTRA_FRAME, NONE } },
  { D207_PRED, { INTRA_FRAME, NONE } },
  { D153_PRED, { INTRA_FRAME, NONE } },
  { D63_PRED, { INTRA_FRAME, NONE } },
  { D117_PRED, { INTRA_FRAME, NONE } },
  { D45_PRED, { INTRA_FRAME, NONE } },
};

static const REF_DEFINITION vp9_ref_order[MAX_REFS] = {
  { { LAST_FRAME, NONE } },           { { GOLDEN_FRAME, NONE } },
  { { ALTREF_FRAME, NONE } },         { { LAST_FRAME, ALTREF_FRAME } },
  { { GOLDEN_FRAME, ALTREF_FRAME } }, { { INTRA_FRAME, NONE } },
};
#endif  // !CONFIG_REALTIME_ONLY

static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int m, int n,
                           int min_plane, int max_plane) {
  int i;

  for (i = min_plane; i < max_plane; ++i) {
    struct macroblock_plane *const p = &x->plane[i];
    struct macroblockd_plane *const pd = &x->e_mbd.plane[i];

    p->coeff = ctx->coeff_pbuf[i][m];
    p->qcoeff = ctx->qcoeff_pbuf[i][m];
    pd->dqcoeff = ctx->dqcoeff_pbuf[i][m];
    p->eobs = ctx->eobs_pbuf[i][m];

    ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n];
    ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n];
    ctx->dqcoeff_pbuf[i][m] = ctx->dqcoeff_pbuf[i][n];
    ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n];

    ctx->coeff_pbuf[i][n] = p->coeff;
    ctx->qcoeff_pbuf[i][n] = p->qcoeff;
    ctx->dqcoeff_pbuf[i][n] = pd->dqcoeff;
    ctx->eobs_pbuf[i][n] = p->eobs;
  }
}

#if !CONFIG_REALTIME_ONLY
static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
                            MACROBLOCKD *xd, int *out_rate_sum,
                            int64_t *out_dist_sum, int *skip_txfm_sb,
                            int64_t *skip_sse_sb) {
  // Note: our transform coefficients are 8 times those of an orthogonal
  // transform, so the quantizer step is also scaled by 8. To get the
  // effective quantizer we need to divide by 8 before calling the modeling
  // function.
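  // A sketch of that scaling (editorial note): for 8-bit input,
  // dequant_shift below is 3, so qstep = pd->dequant[1] >> 3, i.e. the
  // stored dequantizer step divided by 8 as described above. For high bit
  // depth, xd->bd - 5 additionally compensates for the (bd - 8) upscaling of
  // the transform coefficients.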
  int i;
  int64_t rate_sum = 0;
  int64_t dist_sum = 0;
  const int ref = xd->mi[0]->ref_frame[0];
  unsigned int sse;
  unsigned int var = 0;
  int64_t total_sse = 0;
  int skip_flag = 1;
  const int shift = 6;
  int64_t dist;
  const int dequant_shift =
#if CONFIG_VP9_HIGHBITDEPTH
      (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd - 5 :
#endif  // CONFIG_VP9_HIGHBITDEPTH
                                                    3;
  unsigned int qstep_vec[MAX_MB_PLANE];
  unsigned int nlog2_vec[MAX_MB_PLANE];
  unsigned int sum_sse_vec[MAX_MB_PLANE];
  int any_zero_sum_sse = 0;

  x->pred_sse[ref] = 0;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    struct macroblock_plane *const p = &x->plane[i];
    struct macroblockd_plane *const pd = &xd->plane[i];
    const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
    const TX_SIZE max_tx_size = max_txsize_lookup[bs];
    const BLOCK_SIZE unit_size = txsize_to_bsize[max_tx_size];
    const int64_t dc_thr = p->quant_thred[0] >> shift;
    const int64_t ac_thr = p->quant_thred[1] >> shift;
    unsigned int sum_sse = 0;
    // The low thresholds are used to check whether the prediction errors are
    // low enough that we can skip the mode search.
    const int64_t low_dc_thr = VPXMIN(50, dc_thr >> 2);
    const int64_t low_ac_thr = VPXMIN(80, ac_thr >> 2);
    int bw = 1 << (b_width_log2_lookup[bs] - b_width_log2_lookup[unit_size]);
    int bh = 1 << (b_height_log2_lookup[bs] - b_width_log2_lookup[unit_size]);
    int idx, idy;
    int lw = b_width_log2_lookup[unit_size] + 2;
    int lh = b_height_log2_lookup[unit_size] + 2;

    for (idy = 0; idy < bh; ++idy) {
      for (idx = 0; idx < bw; ++idx) {
        uint8_t *src = p->src.buf + (idy * p->src.stride << lh) + (idx << lw);
        uint8_t *dst = pd->dst.buf + (idy * pd->dst.stride << lh) + (idx << lw);
        int block_idx = (idy << 1) + idx;
        int low_err_skip = 0;

        var = cpi->fn_ptr[unit_size].vf(src, p->src.stride, dst, pd->dst.stride,
                                        &sse);
        x->bsse[(i << 2) + block_idx] = sse;
        sum_sse += sse;

        x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_NONE;
        if (!x->select_tx_size) {
          // Check if all ac coefficients can be quantized to zero.
          if (var < ac_thr || var == 0) {
            x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_ONLY;

            // Check if dc coefficient can be quantized to zero.
            if (sse - var < dc_thr || sse == var) {
              x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_DC;

              if (!sse || (var < low_ac_thr && sse - var < low_dc_thr))
                low_err_skip = 1;
            }
          }
        }

        if (skip_flag && !low_err_skip) skip_flag = 0;

        if (i == 0) x->pred_sse[ref] += sse;
      }
    }

    total_sse += sum_sse;
    sum_sse_vec[i] = sum_sse;
    any_zero_sum_sse = any_zero_sum_sse || (sum_sse == 0);
    qstep_vec[i] = pd->dequant[1] >> dequant_shift;
    nlog2_vec[i] = num_pels_log2_lookup[bs];
  }
  // Fast approximation of the modeling function.
  if (cpi->sf.simple_model_rd_from_var) {
    for (i = 0; i < MAX_MB_PLANE; ++i) {
      int64_t rate;
      const int64_t square_error = sum_sse_vec[i];
      int quantizer = qstep_vec[i];

      if (quantizer < 120)
        rate = (square_error * (280 - quantizer)) >> (16 - VP9_PROB_COST_SHIFT);
      else
        rate = 0;
      dist = (square_error * quantizer) >> 8;
      rate_sum += rate;
      dist_sum += dist;
    }
  } else {
    if (any_zero_sum_sse) {
      for (i = 0; i < MAX_MB_PLANE; ++i) {
        int rate;
        vp9_model_rd_from_var_lapndz(sum_sse_vec[i], nlog2_vec[i], qstep_vec[i],
                                     &rate, &dist);
        rate_sum += rate;
        dist_sum += dist;
      }
    } else {
      vp9_model_rd_from_var_lapndz_vec(sum_sse_vec, nlog2_vec, qstep_vec,
                                       &rate_sum, &dist_sum);
    }
  }

  *skip_txfm_sb = skip_flag;
  *skip_sse_sb = total_sse << VP9_DIST_SCALE_LOG2;
  *out_rate_sum = (int)rate_sum;
  *out_dist_sum = dist_sum << VP9_DIST_SCALE_LOG2;
}
#endif  // !CONFIG_REALTIME_ONLY

#if CONFIG_VP9_HIGHBITDEPTH
int64_t vp9_highbd_block_error_c(const tran_low_t *coeff,
                                 const tran_low_t *dqcoeff, intptr_t block_size,
                                 int64_t *ssz, int bd) {
  int i;
  int64_t error = 0, sqcoeff = 0;
  int shift = 2 * (bd - 8);
  int rounding = shift > 0 ? 1 << (shift - 1) : 0;

  for (i = 0; i < block_size; i++) {
    const int64_t diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
    sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i];
  }
  assert(error >= 0 && sqcoeff >= 0);
  error = (error + rounding) >> shift;
  sqcoeff = (sqcoeff + rounding) >> shift;

  *ssz = sqcoeff;
  return error;
}
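// A quick sanity check of the normalization above (illustrative numbers, not
// from the source): for bd == 10, shift == 4 and rounding == 8, so the
// accumulated error and coefficient energy are rounded and scaled back down
// by 2^4, putting them on the same scale as the 8-bit path.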

static int64_t vp9_highbd_block_error_dispatch(const tran_low_t *coeff,
                                               const tran_low_t *dqcoeff,
                                               intptr_t block_size,
                                               int64_t *ssz, int bd) {
  if (bd == 8) {
    return vp9_block_error(coeff, dqcoeff, block_size, ssz);
  } else {
    return vp9_highbd_block_error(coeff, dqcoeff, block_size, ssz, bd);
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

int64_t vp9_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
                          intptr_t block_size, int64_t *ssz) {
  int i;
  int64_t error = 0, sqcoeff = 0;

  for (i = 0; i < block_size; i++) {
    const int diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
    sqcoeff += coeff[i] * coeff[i];
  }

  *ssz = sqcoeff;
  return error;
}

int64_t vp9_block_error_fp_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
                             int block_size) {
  int i;
  int64_t error = 0;

  for (i = 0; i < block_size; i++) {
    const int diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
  }

  return error;
}
/* The trailing '0' is a terminator which is used inside cost_coeffs() to
 * decide whether to include the cost of a trailing EOB node or not (i.e. we
 * can skip this if the last coefficient in this transform block, e.g. the
 * 16th coefficient in a 4x4 block or the 64th coefficient in an 8x8 block,
 * were non-zero). */
static const int16_t band_counts[TX_SIZES][8] = {
  { 1, 2, 3, 4, 3, 16 - 13, 0 },
  { 1, 2, 3, 4, 11, 64 - 21, 0 },
  { 1, 2, 3, 4, 11, 256 - 21, 0 },
  { 1, 2, 3, 4, 11, 1024 - 21, 0 },
};
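// Reading the table (an illustrative check, not from the source): each row
// lists how many coefficients fall into each entropy-coding band for one
// transform size. For TX_4X4 the bands hold 1 + 2 + 3 + 4 + 3 + (16 - 13)
// = 16 coefficients, i.e. the whole 4x4 block; the larger sizes likewise
// sum to 64, 256 and 1024.
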
static int cost_coeffs(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
                       int pt, const int16_t *scan, const int16_t *nb,
                       int use_fast_coef_costing) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi = xd->mi[0];
  const struct macroblock_plane *p = &x->plane[plane];
  const PLANE_TYPE type = get_plane_type(plane);
  const int16_t *band_count = &band_counts[tx_size][1];
  const int eob = p->eobs[block];
  const tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
      x->token_costs[tx_size][type][is_inter_block(mi)];
  uint8_t token_cache[32 * 32];
  int cost;
#if CONFIG_VP9_HIGHBITDEPTH
  const uint16_t *cat6_high_cost = vp9_get_high_cost_table(xd->bd);
#else
  const uint16_t *cat6_high_cost = vp9_get_high_cost_table(8);
#endif

  // Check for consistency of tx_size with mode info
  assert(type == PLANE_TYPE_Y
             ? mi->tx_size == tx_size
             : get_uv_tx_size(mi, &xd->plane[plane]) == tx_size);

  if (eob == 0) {
    // single eob token
    cost = token_costs[0][0][pt][EOB_TOKEN];
  } else {
    if (use_fast_coef_costing) {
      int band_left = *band_count++;
      int c;

      // dc token
      int v = qcoeff[0];
      int16_t prev_t;
      cost = vp9_get_token_cost(v, &prev_t, cat6_high_cost);
      cost += (*token_costs)[0][pt][prev_t];

      token_cache[0] = vp9_pt_energy_class[prev_t];
      ++token_costs;

      // ac tokens
      for (c = 1; c < eob; c++) {
        const int rc = scan[c];
        int16_t t;

        v = qcoeff[rc];
        cost += vp9_get_token_cost(v, &t, cat6_high_cost);
        cost += (*token_costs)[!prev_t][!prev_t][t];
        prev_t = t;
        if (!--band_left) {
          band_left = *band_count++;
          ++token_costs;
        }
      }

      // eob token
      if (band_left) cost += (*token_costs)[0][!prev_t][EOB_TOKEN];

    } else {  // !use_fast_coef_costing
      int band_left = *band_count++;
      int c;

      // dc token
      int v = qcoeff[0];
      int16_t tok;
      unsigned int(*tok_cost_ptr)[COEFF_CONTEXTS][ENTROPY_TOKENS];
      cost = vp9_get_token_cost(v, &tok, cat6_high_cost);
      cost += (*token_costs)[0][pt][tok];

      token_cache[0] = vp9_pt_energy_class[tok];
      ++token_costs;

      tok_cost_ptr = &((*token_costs)[!tok]);

      // ac tokens
      for (c = 1; c < eob; c++) {
        const int rc = scan[c];

        v = qcoeff[rc];
        cost += vp9_get_token_cost(v, &tok, cat6_high_cost);
        pt = get_coef_context(nb, token_cache, c);
        cost += (*tok_cost_ptr)[pt][tok];
        token_cache[rc] = vp9_pt_energy_class[tok];
        if (!--band_left) {
          band_left = *band_count++;
          ++token_costs;
        }
        tok_cost_ptr = &((*token_costs)[!tok]);
      }

      // eob token
      if (band_left) {
        pt = get_coef_context(nb, token_cache, c);
        cost += (*token_costs)[0][pt][EOB_TOKEN];
      }
    }
  }

  return cost;
}

static INLINE int num_4x4_to_edge(int plane_4x4_dim, int mb_to_edge_dim,
                                  int subsampling_dim, int blk_dim) {
  return plane_4x4_dim + (mb_to_edge_dim >> (5 + subsampling_dim)) - blk_dim;
}
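// A note on the shift above (our reading of the units, so treat it as an
// assumption): mb_to_edge_dim is kept in eighth-pel units, so >> 3 converts
// to pixels and a further >> 2 converts to 4x4-block units; together with
// the chroma subsampling shift that gives (5 + subsampling_dim). The result
// is how many 4x4 blocks, starting at blk_dim, remain visible inside the
// frame.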

// Copy all visible 4x4s in the transform block.
static void copy_block_visible(const MACROBLOCKD *xd,
                               const struct macroblockd_plane *const pd,
                               const uint8_t *src, const int src_stride,
                               uint8_t *dst, const int dst_stride, int blk_row,
                               int blk_col, const BLOCK_SIZE plane_bsize,
                               const BLOCK_SIZE tx_bsize) {
  const int plane_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
  const int plane_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
  const int tx_4x4_w = num_4x4_blocks_wide_lookup[tx_bsize];
  const int tx_4x4_h = num_4x4_blocks_high_lookup[tx_bsize];
  int b4x4s_to_right_edge = num_4x4_to_edge(plane_4x4_w, xd->mb_to_right_edge,
                                            pd->subsampling_x, blk_col);
  int b4x4s_to_bottom_edge = num_4x4_to_edge(plane_4x4_h, xd->mb_to_bottom_edge,
                                             pd->subsampling_y, blk_row);
  const int is_highbd = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH;
  if (tx_bsize == BLOCK_4X4 ||
      (b4x4s_to_right_edge >= tx_4x4_w && b4x4s_to_bottom_edge >= tx_4x4_h)) {
    const int w = tx_4x4_w << 2;
    const int h = tx_4x4_h << 2;
#if CONFIG_VP9_HIGHBITDEPTH
    if (is_highbd) {
      vpx_highbd_convolve_copy(CONVERT_TO_SHORTPTR(src), src_stride,
                               CONVERT_TO_SHORTPTR(dst), dst_stride, NULL, 0, 0,
                               0, 0, w, h, xd->bd);
    } else {
#endif
      vpx_convolve_copy(src, src_stride, dst, dst_stride, NULL, 0, 0, 0, 0, w,
                        h);
#if CONFIG_VP9_HIGHBITDEPTH
    }
#endif
  } else {
    int r, c;
    int max_r = VPXMIN(b4x4s_to_bottom_edge, tx_4x4_h);
    int max_c = VPXMIN(b4x4s_to_right_edge, tx_4x4_w);
    // We are in the unrestricted motion vector (UMV) border, so only part of
    // the transform block is visible.
    for (r = 0; r < max_r; ++r) {
      // Skip the sub-blocks that lie wholly within the UMV border.
      for (c = 0; c < max_c; ++c) {
        const uint8_t *src_ptr = src + r * src_stride * 4 + c * 4;
        uint8_t *dst_ptr = dst + r * dst_stride * 4 + c * 4;
#if CONFIG_VP9_HIGHBITDEPTH
        if (is_highbd) {
          vpx_highbd_convolve_copy(CONVERT_TO_SHORTPTR(src_ptr), src_stride,
                                   CONVERT_TO_SHORTPTR(dst_ptr), dst_stride,
                                   NULL, 0, 0, 0, 0, 4, 4, xd->bd);
        } else {
#endif
          vpx_convolve_copy(src_ptr, src_stride, dst_ptr, dst_stride, NULL, 0,
                            0, 0, 0, 4, 4);
#if CONFIG_VP9_HIGHBITDEPTH
        }
#endif
      }
    }
  }
  (void)is_highbd;
}

// Compute the pixel domain sum square error on all visible 4x4s in the
// transform block.
static unsigned pixel_sse(const VP9_COMP *const cpi, const MACROBLOCKD *xd,
                          const struct macroblockd_plane *const pd,
                          const uint8_t *src, const int src_stride,
                          const uint8_t *dst, const int dst_stride, int blk_row,
                          int blk_col, const BLOCK_SIZE plane_bsize,
                          const BLOCK_SIZE tx_bsize) {
  unsigned int sse = 0;
  const int plane_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
  const int plane_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
  const int tx_4x4_w = num_4x4_blocks_wide_lookup[tx_bsize];
  const int tx_4x4_h = num_4x4_blocks_high_lookup[tx_bsize];
  int b4x4s_to_right_edge = num_4x4_to_edge(plane_4x4_w, xd->mb_to_right_edge,
                                            pd->subsampling_x, blk_col);
  int b4x4s_to_bottom_edge = num_4x4_to_edge(plane_4x4_h, xd->mb_to_bottom_edge,
                                             pd->subsampling_y, blk_row);
  if (tx_bsize == BLOCK_4X4 ||
      (b4x4s_to_right_edge >= tx_4x4_w && b4x4s_to_bottom_edge >= tx_4x4_h)) {
    cpi->fn_ptr[tx_bsize].vf(src, src_stride, dst, dst_stride, &sse);
  } else {
    const vpx_variance_fn_t vf_4x4 = cpi->fn_ptr[BLOCK_4X4].vf;
    int r, c;
    unsigned this_sse = 0;
    int max_r = VPXMIN(b4x4s_to_bottom_edge, tx_4x4_h);
    int max_c = VPXMIN(b4x4s_to_right_edge, tx_4x4_w);
    sse = 0;
    // We are in the unrestricted motion vector (UMV) border, so only part of
    // the transform block is visible.
    for (r = 0; r < max_r; ++r) {
      // Skip the sub-blocks that lie wholly within the UMV border.
      for (c = 0; c < max_c; ++c) {
        vf_4x4(src + r * src_stride * 4 + c * 4, src_stride,
               dst + r * dst_stride * 4 + c * 4, dst_stride, &this_sse);
        sse += this_sse;
      }
    }
  }
  return sse;
}

// Compute the sum of squares on all visible 4x4s in the transform block.
static int64_t sum_squares_visible(const MACROBLOCKD *xd,
                                   const struct macroblockd_plane *const pd,
                                   const int16_t *diff, const int diff_stride,
                                   int blk_row, int blk_col,
                                   const BLOCK_SIZE plane_bsize,
                                   const BLOCK_SIZE tx_bsize) {
  int64_t sse;
  const int plane_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
  const int plane_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
  const int tx_4x4_w = num_4x4_blocks_wide_lookup[tx_bsize];
  const int tx_4x4_h = num_4x4_blocks_high_lookup[tx_bsize];
  int b4x4s_to_right_edge = num_4x4_to_edge(plane_4x4_w, xd->mb_to_right_edge,
                                            pd->subsampling_x, blk_col);
  int b4x4s_to_bottom_edge = num_4x4_to_edge(plane_4x4_h, xd->mb_to_bottom_edge,
                                             pd->subsampling_y, blk_row);
  if (tx_bsize == BLOCK_4X4 ||
      (b4x4s_to_right_edge >= tx_4x4_w && b4x4s_to_bottom_edge >= tx_4x4_h)) {
    assert(tx_4x4_w == tx_4x4_h);
    sse = (int64_t)vpx_sum_squares_2d_i16(diff, diff_stride, tx_4x4_w << 2);
  } else {
    int r, c;
    int max_r = VPXMIN(b4x4s_to_bottom_edge, tx_4x4_h);
    int max_c = VPXMIN(b4x4s_to_right_edge, tx_4x4_w);
    sse = 0;
    // We are in the unrestricted motion vector (UMV) border, so only part of
    // the transform block is visible.
    for (r = 0; r < max_r; ++r) {
      // Skip the sub-blocks that lie wholly within the UMV border.
      for (c = 0; c < max_c; ++c) {
        sse += (int64_t)vpx_sum_squares_2d_i16(
            diff + r * diff_stride * 4 + c * 4, diff_stride, 4);
      }
    }
  }
  return sse;
}

static void dist_block(const VP9_COMP *cpi, MACROBLOCK *x, int plane,
                       BLOCK_SIZE plane_bsize, int block, int blk_row,
                       int blk_col, TX_SIZE tx_size, int64_t *out_dist,
                       int64_t *out_sse, struct buf_2d *out_recon) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const int eob = p->eobs[block];

  if (!out_recon && x->block_tx_domain && eob) {
    const int ss_txfrm_size = tx_size << 1;
    int64_t this_sse;
    const int shift = tx_size == TX_32X32 ? 0 : 2;
    const tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
    const tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
#if CONFIG_VP9_HIGHBITDEPTH
    const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
    *out_dist = vp9_highbd_block_error_dispatch(
                    coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse, bd) >>
                shift;
#else
    *out_dist =
        vp9_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >>
        shift;
#endif  // CONFIG_VP9_HIGHBITDEPTH
    *out_sse = this_sse >> shift;

    if (x->skip_encode && !is_inter_block(xd->mi[0])) {
      // TODO(jingning): tune the model to better capture the distortion.
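      // Our reading of the heuristic below (an assumption, not documented in
      // the source): pd->dequant[1] is the AC dequantizer step q, so p is
      // proportional to q^2 times the transform block size, a proxy for the
      // quantization-error energy of the skipped encode; it is added in full
      // to the SSE and at 1/16 weight to the distortion.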
      const int64_t p =
          (pd->dequant[1] * pd->dequant[1] * (1 << ss_txfrm_size)) >>
#if CONFIG_VP9_HIGHBITDEPTH
          (shift + 2 + (bd - 8) * 2);
#else
          (shift + 2);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      *out_dist += (p >> 4);
      *out_sse += p;
    }
  } else {
    const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
    const int bs = 4 * num_4x4_blocks_wide_lookup[tx_bsize];
    const int src_stride = p->src.stride;
    const int dst_stride = pd->dst.stride;
    const int src_idx = 4 * (blk_row * src_stride + blk_col);
    const int dst_idx = 4 * (blk_row * dst_stride + blk_col);
    const uint8_t *src = &p->src.buf[src_idx];
    const uint8_t *dst = &pd->dst.buf[dst_idx];
    uint8_t *out_recon_ptr = 0;

    const tran_low_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
    unsigned int tmp;

    tmp = pixel_sse(cpi, xd, pd, src, src_stride, dst, dst_stride, blk_row,
                    blk_col, plane_bsize, tx_bsize);
    *out_sse = (int64_t)tmp * 16;
    if (out_recon) {
      const int out_recon_idx = 4 * (blk_row * out_recon->stride + blk_col);
      out_recon_ptr = &out_recon->buf[out_recon_idx];
      copy_block_visible(xd, pd, dst, dst_stride, out_recon_ptr,
                         out_recon->stride, blk_row, blk_col, plane_bsize,
                         tx_bsize);
    }

    if (eob) {
#if CONFIG_VP9_HIGHBITDEPTH
      DECLARE_ALIGNED(16, uint16_t, recon16[1024]);
      uint8_t *recon = (uint8_t *)recon16;
#else
      DECLARE_ALIGNED(16, uint8_t, recon[1024]);
#endif  // CONFIG_VP9_HIGHBITDEPTH

#if CONFIG_VP9_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        vpx_highbd_convolve_copy(CONVERT_TO_SHORTPTR(dst), dst_stride, recon16,
                                 32, NULL, 0, 0, 0, 0, bs, bs, xd->bd);
        if (xd->lossless) {
          vp9_highbd_iwht4x4_add(dqcoeff, recon16, 32, eob, xd->bd);
        } else {
          switch (tx_size) {
            case TX_4X4:
              vp9_highbd_idct4x4_add(dqcoeff, recon16, 32, eob, xd->bd);
              break;
            case TX_8X8:
              vp9_highbd_idct8x8_add(dqcoeff, recon16, 32, eob, xd->bd);
              break;
            case TX_16X16:
              vp9_highbd_idct16x16_add(dqcoeff, recon16, 32, eob, xd->bd);
              break;
            default:
              assert(tx_size == TX_32X32);
              vp9_highbd_idct32x32_add(dqcoeff, recon16, 32, eob, xd->bd);
              break;
          }
        }
        recon = CONVERT_TO_BYTEPTR(recon16);
      } else {
#endif  // CONFIG_VP9_HIGHBITDEPTH
        vpx_convolve_copy(dst, dst_stride, recon, 32, NULL, 0, 0, 0, 0, bs, bs);
        switch (tx_size) {
          case TX_32X32: vp9_idct32x32_add(dqcoeff, recon, 32, eob); break;
          case TX_16X16: vp9_idct16x16_add(dqcoeff, recon, 32, eob); break;
          case TX_8X8: vp9_idct8x8_add(dqcoeff, recon, 32, eob); break;
          default:
            assert(tx_size == TX_4X4);
            // This is like vp9_short_idct4x4 but has a special case around
            // eob<=1, which is significant (not just an optimization) for
            // the lossless case.
            x->inv_txfm_add(dqcoeff, recon, 32, eob);
            break;
        }
#if CONFIG_VP9_HIGHBITDEPTH
      }
#endif  // CONFIG_VP9_HIGHBITDEPTH

      tmp = pixel_sse(cpi, xd, pd, src, src_stride, recon, 32, blk_row, blk_col,
                      plane_bsize, tx_bsize);
      if (out_recon) {
        copy_block_visible(xd, pd, recon, 32, out_recon_ptr, out_recon->stride,
                           blk_row, blk_col, plane_bsize, tx_bsize);
      }
    }

    *out_dist = (int64_t)tmp * 16;
  }
}

static int rate_block(int plane, int block, TX_SIZE tx_size, int coeff_ctx,
                      struct rdcost_block_args *args) {
  return cost_coeffs(args->x, plane, block, tx_size, coeff_ctx, args->so->scan,
                     args->so->neighbors, args->use_fast_coef_costing);
}

static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
                          BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
  struct rdcost_block_args *args = arg;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  int64_t rd1, rd2, rd;
  int rate;
  int64_t dist;
  int64_t sse;
  const int coeff_ctx =
      combine_entropy_contexts(args->t_left[blk_row], args->t_above[blk_col]);
  struct buf_2d *recon = args->this_recon;
  const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const int dst_stride = pd->dst.stride;
  const uint8_t *dst = &pd->dst.buf[4 * (blk_row * dst_stride + blk_col)];

  if (args->exit_early) return;

  if (!is_inter_block(mi)) {
#if CONFIG_MISMATCH_DEBUG
    struct encode_b_args intra_arg = {
      x, x->block_qcoeff_opt, args->t_above, args->t_left, &mi->skip, 0, 0, 0
    };
#else
    struct encode_b_args intra_arg = { x, x->block_qcoeff_opt, args->t_above,
                                       args->t_left, &mi->skip };
#endif
    vp9_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize, tx_size,
                           &intra_arg);
    if (recon) {
      uint8_t *rec_ptr = &recon->buf[4 * (blk_row * recon->stride + blk_col)];
      copy_block_visible(xd, pd, dst, dst_stride, rec_ptr, recon->stride,
                         blk_row, blk_col, plane_bsize, tx_bsize);
    }
    if (x->block_tx_domain) {
      dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col,
                 tx_size, &dist, &sse, /*recon =*/0);
    } else {
      const struct macroblock_plane *const p = &x->plane[plane];
      const int src_stride = p->src.stride;
      const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
      const uint8_t *src = &p->src.buf[4 * (blk_row * src_stride + blk_col)];
      const int16_t *diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
      unsigned int tmp;
      sse = sum_squares_visible(xd, pd, diff, diff_stride, blk_row, blk_col,
                                plane_bsize, tx_bsize);
#if CONFIG_VP9_HIGHBITDEPTH
      if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && (xd->bd > 8))
        sse = ROUND64_POWER_OF_TWO(sse, (xd->bd - 8) * 2);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      sse = sse * 16;
      tmp = pixel_sse(args->cpi, xd, pd, src, src_stride, dst, dst_stride,
                      blk_row, blk_col, plane_bsize, tx_bsize);
      dist = (int64_t)tmp * 16;
    }
  } else {
    int skip_txfm_flag = SKIP_TXFM_NONE;
    if (max_txsize_lookup[plane_bsize] == tx_size)
      skip_txfm_flag = x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))];

    if (skip_txfm_flag == SKIP_TXFM_NONE ||
        (recon && skip_txfm_flag == SKIP_TXFM_AC_ONLY)) {
      // full forward transform and quantization
      vp9_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
      if (x->block_qcoeff_opt)
        vp9_optimize_b(x, plane, block, tx_size, coeff_ctx);
      dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col,
                 tx_size, &dist, &sse, recon);
    } else if (skip_txfm_flag == SKIP_TXFM_AC_ONLY) {
      // compute DC coefficient
      tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block);
      tran_low_t *const dqcoeff = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block);
      vp9_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
                         tx_size);
      sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
      dist = sse;
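      // The DC-only correction below (our paraphrase of the logic): the
      // block's SSE was cached before the transform; keeping just the DC
      // coefficient removes (coeff[0]^2 - (coeff[0] - dqcoeff[0])^2) of that
      // error. The extra >> 2 for sub-32x32 transforms compensates for their
      // different output scaling, matching `shift` in dist_block().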
      if (x->plane[plane].eobs[block]) {
        const int64_t orig_sse = (int64_t)coeff[0] * coeff[0];
        const int64_t resd_sse = coeff[0] - dqcoeff[0];
        int64_t dc_correct = orig_sse - resd_sse * resd_sse;
#if CONFIG_VP9_HIGHBITDEPTH
        dc_correct >>= ((xd->bd - 8) * 2);
#endif
        if (tx_size != TX_32X32) dc_correct >>= 2;

        dist = VPXMAX(0, sse - dc_correct);
      }
    } else {
      // SKIP_TXFM_AC_DC
      // Skip the forward transform; since the skip is handled here, the
      // quantization stage does not need to do it.
      x->plane[plane].eobs[block] = 0;
      sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
      dist = sse;
      if (recon) {
        uint8_t *rec_ptr = &recon->buf[4 * (blk_row * recon->stride + blk_col)];
        copy_block_visible(xd, pd, dst, dst_stride, rec_ptr, recon->stride,
                           blk_row, blk_col, plane_bsize, tx_bsize);
      }
    }
  }

  rd = RDCOST(x->rdmult, x->rddiv, 0, dist);
  if (args->this_rd + rd > args->best_rd) {
    args->exit_early = 1;
    return;
  }
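  // A note on the check above: RDCOST folds rate and distortion into the
  // usual Lagrangian J = D + lambda * R, with x->rdmult / x->rddiv holding
  // lambda's fixed-point scaling (see the RDCOST macro in vp9_rd.h). Rate 0
  // therefore gives a lower bound on this block's cost, so exceeding best_rd
  // with it alone lets us skip the more expensive coefficient costing below.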

  rate = rate_block(plane, block, tx_size, coeff_ctx, args);
  args->t_above[blk_col] = (x->plane[plane].eobs[block] > 0) ? 1 : 0;
  args->t_left[blk_row] = (x->plane[plane].eobs[block] > 0) ? 1 : 0;
  rd1 = RDCOST(x->rdmult, x->rddiv, rate, dist);
  rd2 = RDCOST(x->rdmult, x->rddiv, 0, sse);

  // TODO(jingning): temporarily enabled only for luma component
  rd = VPXMIN(rd1, rd2);
  if (plane == 0) {
    x->zcoeff_blk[tx_size][block] =
        !x->plane[plane].eobs[block] ||
        (x->sharpness == 0 && rd1 > rd2 && !xd->lossless);
    x->sum_y_eobs[tx_size] += x->plane[plane].eobs[block];
  }

  args->this_rate += rate;
  args->this_dist += dist;
  args->this_sse += sse;
  args->this_rd += rd;

  if (args->this_rd > args->best_rd) {
    args->exit_early = 1;
    return;
  }

  args->skippable &= !x->plane[plane].eobs[block];
}

static void txfm_rd_in_plane(const VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                             int64_t *distortion, int *skippable, int64_t *sse,
                             int64_t ref_best_rd, int plane, BLOCK_SIZE bsize,
                             TX_SIZE tx_size, int use_fast_coef_costing,
                             struct buf_2d *recon) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  struct rdcost_block_args args;
  vp9_zero(args);
  args.cpi = cpi;
  args.x = x;
  args.best_rd = ref_best_rd;
  args.use_fast_coef_costing = use_fast_coef_costing;
  args.skippable = 1;
  args.this_recon = recon;

  if (plane == 0) xd->mi[0]->tx_size = tx_size;

  vp9_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);

  args.so = get_scan(xd, tx_size, get_plane_type(plane), 0);

  vp9_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
                                         &args);
  if (args.exit_early) {
    *rate = INT_MAX;
    *distortion = INT64_MAX;
    *sse = INT64_MAX;
    *skippable = 0;
  } else {
    *distortion = args.this_dist;
    *rate = args.this_rate;
    *sse = args.this_sse;
    *skippable = args.skippable;
  }
}

static void choose_largest_tx_size(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                                   int64_t *distortion, int *skip, int64_t *sse,
                                   int64_t ref_best_rd, BLOCK_SIZE bs,
                                   struct buf_2d *recon) {
  const TX_SIZE max_tx_size = max_txsize_lookup[bs];
  VP9_COMMON *const cm = &cpi->common;
  const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];

  mi->tx_size = VPXMIN(max_tx_size, largest_tx_size);

  txfm_rd_in_plane(cpi, x, rate, distortion, skip, sse, ref_best_rd, 0, bs,
                   mi->tx_size, cpi->sf.use_fast_coef_costing, recon);
}

static void choose_tx_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                                   int64_t *distortion, int *skip,
                                   int64_t *psse, int64_t ref_best_rd,
                                   BLOCK_SIZE bs, struct buf_2d *recon) {
  const TX_SIZE max_tx_size = max_txsize_lookup[bs];
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  vpx_prob skip_prob = vp9_get_skip_prob(cm, xd);
  int r[TX_SIZES][2], s[TX_SIZES];
  int64_t d[TX_SIZES], sse[TX_SIZES];
  int64_t rd[TX_SIZES][2] = { { INT64_MAX, INT64_MAX },
                              { INT64_MAX, INT64_MAX },
                              { INT64_MAX, INT64_MAX },
                              { INT64_MAX, INT64_MAX } };
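  // Our reading of the [2] columns in r[][] and rd[][]: index 0 holds the
  // cost without the tx_size signaling bits and index 1 the cost with them;
  // which column feeds the final *rate depends on whether the frame uses
  // TX_MODE_SELECT (see the r[mi->tx_size][...] lookup at the end).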
  int n;
  int s0, s1;
  int64_t best_rd = ref_best_rd;
  TX_SIZE best_tx = max_tx_size;
  int start_tx, end_tx;
  const int tx_size_ctx = get_tx_size_context(xd);
#if CONFIG_VP9_HIGHBITDEPTH
  DECLARE_ALIGNED(16, uint16_t, recon_buf16[TX_SIZES][64 * 64]);
  uint8_t *recon_buf[TX_SIZES];
  for (n = 0; n < TX_SIZES; ++n) {
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      recon_buf[n] = CONVERT_TO_BYTEPTR(recon_buf16[n]);
    } else {
      recon_buf[n] = (uint8_t *)recon_buf16[n];
    }
  }
#else
  DECLARE_ALIGNED(16, uint8_t, recon_buf[TX_SIZES][64 * 64]);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  assert(skip_prob > 0);
  s0 = vp9_cost_bit(skip_prob, 0);
  s1 = vp9_cost_bit(skip_prob, 1);

  if (cm->tx_mode == TX_MODE_SELECT) {
    start_tx = max_tx_size;
    end_tx = VPXMAX(start_tx - cpi->sf.tx_size_search_depth, 0);
    if (bs > BLOCK_32X32) end_tx = VPXMIN(end_tx + 1, start_tx);
  } else {
    TX_SIZE chosen_tx_size =
        VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[cm->tx_mode]);
    start_tx = chosen_tx_size;
    end_tx = chosen_tx_size;
  }

  for (n = start_tx; n >= end_tx; n--) {
    const int r_tx_size = cpi->tx_size_cost[max_tx_size - 1][tx_size_ctx][n];
    if (recon) {
      struct buf_2d this_recon;
      this_recon.buf = recon_buf[n];
      this_recon.stride = recon->stride;
      txfm_rd_in_plane(cpi, x, &r[n][0], &d[n], &s[n], &sse[n], best_rd, 0, bs,
                       n, cpi->sf.use_fast_coef_costing, &this_recon);
    } else {
      txfm_rd_in_plane(cpi, x, &r[n][0], &d[n], &s[n], &sse[n], best_rd, 0, bs,
                       n, cpi->sf.use_fast_coef_costing, 0);
    }
    r[n][1] = r[n][0];
    if (r[n][0] < INT_MAX) {
      r[n][1] += r_tx_size;
    }
    if (d[n] == INT64_MAX || r[n][0] == INT_MAX) {
      rd[n][0] = rd[n][1] = INT64_MAX;
    } else if (s[n]) {
      if (is_inter_block(mi)) {
        rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, sse[n]);
        r[n][1] -= r_tx_size;
      } else {
        rd[n][0] = RDCOST(x->rdmult, x->rddiv, s1, sse[n]);
        rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1 + r_tx_size, sse[n]);
      }
    } else {
      rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]);
      rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]);
    }

    if (is_inter_block(mi) && !xd->lossless && !s[n] && sse[n] != INT64_MAX) {
      rd[n][0] = VPXMIN(rd[n][0], RDCOST(x->rdmult, x->rddiv, s1, sse[n]));
      rd[n][1] = VPXMIN(rd[n][1], RDCOST(x->rdmult, x->rddiv, s1, sse[n]));
    }

    // Early termination in transform size search.
    if (cpi->sf.tx_size_search_breakout &&
        (rd[n][1] == INT64_MAX ||
         (n < (int)max_tx_size && rd[n][1] > rd[n + 1][1]) || s[n] == 1))
      break;

    if (rd[n][1] < best_rd) {
      best_tx = n;
      best_rd = rd[n][1];
    }
  }
  mi->tx_size = best_tx;

  *distortion = d[mi->tx_size];
  *rate = r[mi->tx_size][cm->tx_mode == TX_MODE_SELECT];
  *skip = s[mi->tx_size];
  *psse = sse[mi->tx_size];
  if (recon) {
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      memcpy(CONVERT_TO_SHORTPTR(recon->buf),
             CONVERT_TO_SHORTPTR(recon_buf[mi->tx_size]),
             64 * 64 * sizeof(uint16_t));
    } else {
#endif
      memcpy(recon->buf, recon_buf[mi->tx_size], 64 * 64);
#if CONFIG_VP9_HIGHBITDEPTH
    }
#endif
  }
}

static void super_block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                            int64_t *distortion, int *skip, int64_t *psse,
                            BLOCK_SIZE bs, int64_t ref_best_rd,
                            struct buf_2d *recon) {
  MACROBLOCKD *xd = &x->e_mbd;
  int64_t sse;
  int64_t *ret_sse = psse ? psse : &sse;

  assert(bs == xd->mi[0]->sb_type);

  if (cpi->sf.tx_size_search_method == USE_LARGESTALL || xd->lossless) {
    choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
                           bs, recon);
  } else {
    choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
                           bs, recon);
  }
}

static int conditional_skipintra(PREDICTION_MODE mode,
                                 PREDICTION_MODE best_intra_mode) {
  if (mode == D117_PRED && best_intra_mode != V_PRED &&
      best_intra_mode != D135_PRED)
    return 1;
  if (mode == D63_PRED && best_intra_mode != V_PRED &&
      best_intra_mode != D45_PRED)
    return 1;
  if (mode == D207_PRED && best_intra_mode != H_PRED &&
      best_intra_mode != D45_PRED)
    return 1;
  if (mode == D153_PRED && best_intra_mode != H_PRED &&
      best_intra_mode != D135_PRED)
    return 1;
  return 0;
}
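// In other words, an oblique intra mode is evaluated only when the current
// best mode is one of its angular neighbors. As a concrete case (reading the
// mode names as prediction angles): D117_PRED (117 degrees) lies between
// V_PRED (90 degrees) and D135_PRED (135 degrees), so it is skipped unless
// one of those two is the best mode found so far.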

static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int row,
                                     int col, PREDICTION_MODE *best_mode,
                                     const int *bmode_costs, ENTROPY_CONTEXT *a,
                                     ENTROPY_CONTEXT *l, int *bestrate,
                                     int *bestratey, int64_t *bestdistortion,
                                     BLOCK_SIZE bsize, int64_t rd_thresh) {
  PREDICTION_MODE mode;
  MACROBLOCKD *const xd = &x->e_mbd;
  int64_t best_rd = rd_thresh;
  struct macroblock_plane *p = &x->plane[0];
  struct macroblockd_plane *pd = &xd->plane[0];
  const int src_stride = p->src.stride;
  const int dst_stride = pd->dst.stride;
  const uint8_t *src_init = &p->src.buf[row * 4 * src_stride + col * 4];
  uint8_t *dst_init = &pd->dst.buf[row * 4 * dst_stride + col * 4];
  ENTROPY_CONTEXT ta[2], tempa[2];
  ENTROPY_CONTEXT tl[2], templ[2];
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int idx, idy;
  uint8_t best_dst[8 * 8];
#if CONFIG_VP9_HIGHBITDEPTH
  uint16_t best_dst16[8 * 8];
#endif
  memcpy(ta, a, num_4x4_blocks_wide * sizeof(a[0]));
  memcpy(tl, l, num_4x4_blocks_high * sizeof(l[0]));

  xd->mi[0]->tx_size = TX_4X4;

#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
      int64_t this_rd;
      int ratey = 0;
      int64_t distortion = 0;
      int rate = bmode_costs[mode];

      if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;

      // Only do the oblique modes if the best so far is
      // one of the neighboring directional modes
      if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
        if (conditional_skipintra(mode, *best_mode)) continue;
      }

      memcpy(tempa, ta, num_4x4_blocks_wide * sizeof(ta[0]));
      memcpy(templ, tl, num_4x4_blocks_high * sizeof(tl[0]));

      for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
        for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
          const int block = (row + idy) * 2 + (col + idx);
          const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
          uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
          uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);
          int16_t *const src_diff =
              vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
          tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
          xd->mi[0]->bmi[block].as_mode = mode;
          vp9_predict_intra_block(xd, 1, TX_4X4, mode,
                                  x->skip_encode ? src : dst,
                                  x->skip_encode ? src_stride : dst_stride, dst,
                                  dst_stride, col + idx, row + idy, 0);
          vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, dst,
                                    dst_stride, xd->bd);
          if (xd->lossless) {
            const scan_order *so = &vp9_default_scan_orders[TX_4X4];
            const int coeff_ctx =
                combine_entropy_contexts(tempa[idx], templ[idy]);
            vp9_highbd_fwht4x4(src_diff, coeff, 8);
            vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
            ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
                                 so->neighbors, cpi->sf.use_fast_coef_costing);
            tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0 ? 1 : 0);
            if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
              goto next_highbd;
            vp9_highbd_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block), dst16,
                                   dst_stride, p->eobs[block], xd->bd);
          } else {
            int64_t unused;
            const TX_TYPE tx_type = get_tx_type_4x4(PLANE_TYPE_Y, xd, block);
            const scan_order *so = &vp9_scan_orders[TX_4X4][tx_type];
            const int coeff_ctx =
                combine_entropy_contexts(tempa[idx], templ[idy]);
            if (tx_type == DCT_DCT)
              vpx_highbd_fdct4x4(src_diff, coeff, 8);
            else
              vp9_highbd_fht4x4(src_diff, coeff, 8, tx_type);
            vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
            ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
                                 so->neighbors, cpi->sf.use_fast_coef_costing);
            distortion += vp9_highbd_block_error_dispatch(
                              coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16,
                              &unused, xd->bd) >>
                          2;
            tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0 ? 1 : 0);
            if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
              goto next_highbd;
            vp9_highbd_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block),
                                  dst16, dst_stride, p->eobs[block], xd->bd);
          }
        }
      }

      rate += ratey;
      this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

      if (this_rd < best_rd) {
        *bestrate = rate;
        *bestratey = ratey;
        *bestdistortion = distortion;
        best_rd = this_rd;
        *best_mode = mode;
        memcpy(a, tempa, num_4x4_blocks_wide * sizeof(tempa[0]));
        memcpy(l, templ, num_4x4_blocks_high * sizeof(templ[0]));
        for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
          memcpy(best_dst16 + idy * 8,
                 CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
                 num_4x4_blocks_wide * 4 * sizeof(uint16_t));
        }
      }
    next_highbd : {}
    }
    if (best_rd >= rd_thresh || x->skip_encode) return best_rd;

    for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
      memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
             best_dst16 + idy * 8, num_4x4_blocks_wide * 4 * sizeof(uint16_t));
    }

    return best_rd;
  }
#endif  // CONFIG_VP9_HIGHBITDEPTH

  for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
    int64_t this_rd;
    int ratey = 0;
    int64_t distortion = 0;
    int rate = bmode_costs[mode];

    if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;

    // Only do the oblique modes if the best so far is
    // one of the neighboring directional modes
    if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
      if (conditional_skipintra(mode, *best_mode)) continue;
    }

    memcpy(tempa, ta, num_4x4_blocks_wide * sizeof(ta[0]));
    memcpy(templ, tl, num_4x4_blocks_high * sizeof(tl[0]));

    for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
      for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
        const int block = (row + idy) * 2 + (col + idx);
        const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
        uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
        int16_t *const src_diff =
            vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
        tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
        xd->mi[0]->bmi[block].as_mode = mode;
        vp9_predict_intra_block(xd, 1, TX_4X4, mode, x->skip_encode ? src : dst,
                                x->skip_encode ? src_stride : dst_stride, dst,
                                dst_stride, col + idx, row + idy, 0);
        vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);

        if (xd->lossless) {
          const scan_order *so = &vp9_default_scan_orders[TX_4X4];
          const int coeff_ctx =
              combine_entropy_contexts(tempa[idx], templ[idy]);
          vp9_fwht4x4(src_diff, coeff, 8);
          vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
          ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
                               so->neighbors, cpi->sf.use_fast_coef_costing);
          tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0) ? 1 : 0;
          if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
            goto next;
          vp9_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block), dst, dst_stride,
                          p->eobs[block]);
        } else {
          int64_t unused;
          const TX_TYPE tx_type = get_tx_type_4x4(PLANE_TYPE_Y, xd, block);
          const scan_order *so = &vp9_scan_orders[TX_4X4][tx_type];
          const int coeff_ctx =
              combine_entropy_contexts(tempa[idx], templ[idy]);
          vp9_fht4x4(src_diff, coeff, 8, tx_type);
          vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
          ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
                               so->neighbors, cpi->sf.use_fast_coef_costing);
          tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0) ? 1 : 0;
          distortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
                                        16, &unused) >>
                        2;
          if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
            goto next;
          vp9_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block), dst,
                         dst_stride, p->eobs[block]);
        }
      }
    }

    rate += ratey;
    this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

    if (this_rd < best_rd) {
      *bestrate = rate;
      *bestratey = ratey;
      *bestdistortion = distortion;
      best_rd = this_rd;
      *best_mode = mode;
      memcpy(a, tempa, num_4x4_blocks_wide * sizeof(tempa[0]));
      memcpy(l, templ, num_4x4_blocks_high * sizeof(templ[0]));
      for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
        memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
               num_4x4_blocks_wide * 4);
    }
  next : {}
  }

  if (best_rd >= rd_thresh || x->skip_encode) return best_rd;

  for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
    memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
           num_4x4_blocks_wide * 4);

  return best_rd;
}

static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP *cpi, MACROBLOCK *mb,
                                            int *rate, int *rate_y,
                                            int64_t *distortion,
                                            int64_t best_rd) {
  int i, j;
  const MACROBLOCKD *const xd = &mb->e_mbd;
  MODE_INFO *const mic = xd->mi[0];
  const MODE_INFO *above_mi = xd->above_mi;
  const MODE_INFO *left_mi = xd->left_mi;
  const BLOCK_SIZE bsize = xd->mi[0]->sb_type;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int idx, idy;
  int cost = 0;
  int64_t total_distortion = 0;
  int tot_rate_y = 0;
  int64_t total_rd = 0;
  const int *bmode_costs = cpi->mbmode_cost;

  // Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block.
  for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
    for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
      PREDICTION_MODE best_mode = DC_PRED;
      int r = INT_MAX, ry = INT_MAX;
      int64_t d = INT64_MAX, this_rd = INT64_MAX;
      i = idy * 2 + idx;
      if (cpi->common.frame_type == KEY_FRAME) {
        const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, i);
        const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, i);

        bmode_costs = cpi->y_mode_costs[A][L];
      }

      this_rd = rd_pick_intra4x4block(
          cpi, mb, idy, idx, &best_mode, bmode_costs,
          xd->plane[0].above_context + idx, xd->plane[0].left_context + idy, &r,
          &ry, &d, bsize, best_rd - total_rd);

      if (this_rd >= best_rd - total_rd) return INT64_MAX;

      total_rd += this_rd;
      cost += r;
      total_distortion += d;
      tot_rate_y += ry;

      mic->bmi[i].as_mode = best_mode;
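      // For 4x8 and 8x4 sub-blocks the chosen mode is replicated into every
      // 4x4 unit the sub-block covers (bmi[] is indexed in raster order over
      // the 2x2 grid of 4x4 units), so later context derivation sees a
      // consistent mode.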
1357 for (j = 1; j < num_4x4_blocks_high; ++j)
1358 mic->bmi[i + j * 2].as_mode = best_mode;
1359 for (j = 1; j < num_4x4_blocks_wide; ++j)
1360 mic->bmi[i + j].as_mode = best_mode;
1361
1362 if (total_rd >= best_rd) return INT64_MAX;
1363 }
1364 }
1365
1366 *rate = cost;
1367 *rate_y = tot_rate_y;
1368 *distortion = total_distortion;
1369 mic->mode = mic->bmi[3].as_mode;
1370
1371 return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion);
1372 }
1373
1374 // This function is used only for intra_only frames
rd_pick_intra_sby_mode(VP9_COMP * cpi,MACROBLOCK * x,int * rate,int * rate_tokenonly,int64_t * distortion,int * skippable,BLOCK_SIZE bsize,int64_t best_rd)1375 static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
1376 int *rate_tokenonly, int64_t *distortion,
1377 int *skippable, BLOCK_SIZE bsize,
1378 int64_t best_rd) {
1379 PREDICTION_MODE mode;
1380 PREDICTION_MODE mode_selected = DC_PRED;
1381 MACROBLOCKD *const xd = &x->e_mbd;
1382 MODE_INFO *const mic = xd->mi[0];
1383 int this_rate, this_rate_tokenonly, s;
1384 int64_t this_distortion, this_rd;
1385 TX_SIZE best_tx = TX_4X4;
1386 int *bmode_costs;
1387 const MODE_INFO *above_mi = xd->above_mi;
1388 const MODE_INFO *left_mi = xd->left_mi;
1389 const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
1390 const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
1391 bmode_costs = cpi->y_mode_costs[A][L];
1392
1393 memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
1394 /* Y Search for intra prediction mode */
1395 for (mode = DC_PRED; mode <= TM_PRED; mode++) {
1396 if (cpi->sf.use_nonrd_pick_mode) {
1397 // These speed features are turned on in hybrid non-RD and RD mode
1398 // for key frame coding in the context of real-time setting.
1399 if (conditional_skipintra(mode, mode_selected)) continue;
1400 if (*skippable) break;
1401 }
1402
1403 mic->mode = mode;
1404
1405 super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
1406 bsize, best_rd, /*recon = */ 0);
1407
1408 if (this_rate_tokenonly == INT_MAX) continue;
1409
1410 this_rate = this_rate_tokenonly + bmode_costs[mode];
1411 this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
1412
1413 if (this_rd < best_rd) {
1414 mode_selected = mode;
1415 best_rd = this_rd;
1416 best_tx = mic->tx_size;
1417 *rate = this_rate;
1418 *rate_tokenonly = this_rate_tokenonly;
1419 *distortion = this_distortion;
1420 *skippable = s;
1421 }
1422 }
1423
1424 mic->mode = mode_selected;
1425 mic->tx_size = best_tx;
1426
1427 return best_rd;
1428 }
1429
1430 // Return value 0: early termination triggered, no valid rd cost available;
1431 // 1: rd cost values are valid.
super_block_uvrd(const VP9_COMP * cpi,MACROBLOCK * x,int * rate,int64_t * distortion,int * skippable,int64_t * sse,BLOCK_SIZE bsize,int64_t ref_best_rd)1432 static int super_block_uvrd(const VP9_COMP *cpi, MACROBLOCK *x, int *rate,
1433 int64_t *distortion, int *skippable, int64_t *sse,
1434 BLOCK_SIZE bsize, int64_t ref_best_rd) {
1435 MACROBLOCKD *const xd = &x->e_mbd;
1436 MODE_INFO *const mi = xd->mi[0];
1437 const TX_SIZE uv_tx_size = get_uv_tx_size(mi, &xd->plane[1]);
1438 int plane;
1439 int pnrate = 0, pnskip = 1;
1440 int64_t pndist = 0, pnsse = 0;
1441 int is_cost_valid = 1;
1442
1443 if (ref_best_rd < 0) is_cost_valid = 0;
1444
1445 if (is_inter_block(mi) && is_cost_valid) {
1446 int plane;
1447 for (plane = 1; plane < MAX_MB_PLANE; ++plane)
1448 vp9_subtract_plane(x, bsize, plane);
1449 }
1450
1451 *rate = 0;
1452 *distortion = 0;
1453 *sse = 0;
1454 *skippable = 1;
1455
  for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
    txfm_rd_in_plane(cpi, x, &pnrate, &pndist, &pnskip, &pnsse, ref_best_rd,
                     plane, bsize, uv_tx_size, cpi->sf.use_fast_coef_costing,
                     /*recon = */ 0);
    if (pnrate == INT_MAX) {
      is_cost_valid = 0;
      break;
    }
    *rate += pnrate;
    *distortion += pndist;
    *sse += pnsse;
    *skippable &= pnskip;
  }

  if (!is_cost_valid) {
    // reset cost value
    *rate = INT_MAX;
    *distortion = INT64_MAX;
    *sse = INT64_MAX;
    *skippable = 0;
  }

  return is_cost_valid;
}

static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
                                       PICK_MODE_CONTEXT *ctx, int *rate,
                                       int *rate_tokenonly, int64_t *distortion,
                                       int *skippable, BLOCK_SIZE bsize,
                                       TX_SIZE max_tx_size) {
  MACROBLOCKD *xd = &x->e_mbd;
  PREDICTION_MODE mode;
  PREDICTION_MODE mode_selected = DC_PRED;
  int64_t best_rd = INT64_MAX, this_rd;
  int this_rate_tokenonly, this_rate, s;
  int64_t this_distortion, this_sse;

  memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
  for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
    if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode))) continue;
#if CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
    if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) &&
        (xd->above_mi == NULL || xd->left_mi == NULL) && need_top_left[mode])
      continue;
#endif  // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH

    xd->mi[0]->uv_mode = mode;

    if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
                          &this_sse, bsize, best_rd))
      continue;
    this_rate =
        this_rate_tokenonly +
        cpi->intra_uv_mode_cost[cpi->common.frame_type][xd->mi[0]->mode][mode];
    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);

    if (this_rd < best_rd) {
      mode_selected = mode;
      best_rd = this_rd;
      *rate = this_rate;
      *rate_tokenonly = this_rate_tokenonly;
      *distortion = this_distortion;
      *skippable = s;
      if (!x->select_tx_size) swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);
    }
  }

  xd->mi[0]->uv_mode = mode_selected;
  return best_rd;
}

#if !CONFIG_REALTIME_ONLY
static int64_t rd_sbuv_dcpred(const VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                              int *rate_tokenonly, int64_t *distortion,
                              int *skippable, BLOCK_SIZE bsize) {
  const VP9_COMMON *cm = &cpi->common;
  int64_t unused;

  x->e_mbd.mi[0]->uv_mode = DC_PRED;
  memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
  super_block_uvrd(cpi, x, rate_tokenonly, distortion, skippable, &unused,
                   bsize, INT64_MAX);
  *rate =
      *rate_tokenonly +
      cpi->intra_uv_mode_cost[cm->frame_type][x->e_mbd.mi[0]->mode][DC_PRED];
  return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}

static void choose_intra_uv_mode(VP9_COMP *cpi, MACROBLOCK *const x,
                                 PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
                                 TX_SIZE max_tx_size, int *rate_uv,
                                 int *rate_uv_tokenonly, int64_t *dist_uv,
                                 int *skip_uv, PREDICTION_MODE *mode_uv) {
  // Use an estimated rd for uv_intra based on DC_PRED if the
  // appropriate speed flag is set.
  if (cpi->sf.use_uv_intra_rd_estimate) {
    rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
                   bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
    // Else do a proper rd search for each possible transform size that may
    // be considered in the main rd loop.
  } else {
    rd_pick_intra_sbuv_mode(cpi, x, ctx, rate_uv, rate_uv_tokenonly, dist_uv,
                            skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
                            max_tx_size);
  }
  *mode_uv = x->e_mbd.mi[0]->uv_mode;
}

static int cost_mv_ref(const VP9_COMP *cpi, PREDICTION_MODE mode,
                       int mode_context) {
  assert(is_inter_mode(mode));
  return cpi->inter_mode_cost[mode_context][INTER_OFFSET(mode)];
}

static int set_and_cost_bmi_mvs(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
                                int i, PREDICTION_MODE mode, int_mv this_mv[2],
                                int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
                                int_mv seg_mvs[MAX_REF_FRAMES],
                                int_mv *best_ref_mv[2], const int *mvjcost,
                                int *mvcost[2]) {
  MODE_INFO *const mi = xd->mi[0];
  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
  int thismvcost = 0;
  int idx, idy;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mi->sb_type];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mi->sb_type];
  const int is_compound = has_second_ref(mi);

  switch (mode) {
    case NEWMV:
      this_mv[0].as_int = seg_mvs[mi->ref_frame[0]].as_int;
      thismvcost += vp9_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
                                    mvjcost, mvcost, MV_COST_WEIGHT_SUB);
      if (is_compound) {
        this_mv[1].as_int = seg_mvs[mi->ref_frame[1]].as_int;
        thismvcost += vp9_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
                                      mvjcost, mvcost, MV_COST_WEIGHT_SUB);
      }
      break;
    case NEARMV:
    case NEARESTMV:
      this_mv[0].as_int = frame_mv[mode][mi->ref_frame[0]].as_int;
      if (is_compound)
        this_mv[1].as_int = frame_mv[mode][mi->ref_frame[1]].as_int;
      break;
    default:
      assert(mode == ZEROMV);
      this_mv[0].as_int = 0;
      if (is_compound) this_mv[1].as_int = 0;
      break;
  }

  mi->bmi[i].as_mv[0].as_int = this_mv[0].as_int;
  if (is_compound) mi->bmi[i].as_mv[1].as_int = this_mv[1].as_int;

  mi->bmi[i].as_mode = mode;

  for (idy = 0; idy < num_4x4_blocks_high; ++idy)
    for (idx = 0; idx < num_4x4_blocks_wide; ++idx)
      memmove(&mi->bmi[i + idy * 2 + idx], &mi->bmi[i], sizeof(mi->bmi[i]));

  return cost_mv_ref(cpi, mode, mbmi_ext->mode_context[mi->ref_frame[0]]) +
         thismvcost;
}

static int64_t encode_inter_mb_segment(VP9_COMP *cpi, MACROBLOCK *x,
                                       int64_t best_yrd, int i, int *labelyrate,
                                       int64_t *distortion, int64_t *sse,
                                       ENTROPY_CONTEXT *ta, ENTROPY_CONTEXT *tl,
                                       int mi_row, int mi_col) {
  int k;
  MACROBLOCKD *xd = &x->e_mbd;
  struct macroblockd_plane *const pd = &xd->plane[0];
  struct macroblock_plane *const p = &x->plane[0];
  MODE_INFO *const mi = xd->mi[0];
  const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->sb_type, pd);
  const int width = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  const int height = 4 * num_4x4_blocks_high_lookup[plane_bsize];
  int idx, idy;

  const uint8_t *const src =
      &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
  uint8_t *const dst =
      &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
  int64_t thisdistortion = 0, thissse = 0;
  int thisrate = 0, ref;
  const scan_order *so = &vp9_default_scan_orders[TX_4X4];
  const int is_compound = has_second_ref(mi);
  const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter];

  for (ref = 0; ref < 1 + is_compound; ++ref) {
    const int bw = b_width_log2_lookup[BLOCK_8X8];
    const int h = 4 * (i >> bw);
    const int w = 4 * (i & ((1 << bw) - 1));
    const struct scale_factors *sf = &xd->block_refs[ref]->sf;
    int y_stride = pd->pre[ref].stride;
    uint8_t *pre = pd->pre[ref].buf + (h * pd->pre[ref].stride + w);

    if (vp9_is_scaled(sf)) {
      const int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
      const int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));

      y_stride = xd->block_refs[ref]->buf->y_stride;
      pre = xd->block_refs[ref]->buf->y_buffer;
      pre += scaled_buffer_offset(x_start + w, y_start + h, y_stride, sf);
    }
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      vp9_highbd_build_inter_predictor(
          CONVERT_TO_SHORTPTR(pre), y_stride, CONVERT_TO_SHORTPTR(dst),
          pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv,
          &xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3,
          mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2),
          xd->bd);
    } else {
      vp9_build_inter_predictor(
          pre, y_stride, dst, pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv,
          &xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3,
          mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2));
    }
#else
    vp9_build_inter_predictor(
        pre, y_stride, dst, pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv,
        &xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3,
        mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2));
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }

#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    vpx_highbd_subtract_block(
        height, width, vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
        8, src, p->src.stride, dst, pd->dst.stride, xd->bd);
  } else {
    vpx_subtract_block(height, width,
                       vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
                       8, src, p->src.stride, dst, pd->dst.stride);
  }
#else
  vpx_subtract_block(height, width,
                     vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
                     8, src, p->src.stride, dst, pd->dst.stride);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  k = i;
  for (idy = 0; idy < height / 4; ++idy) {
    for (idx = 0; idx < width / 4; ++idx) {
#if CONFIG_VP9_HIGHBITDEPTH
      const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
#endif
      int64_t ssz, rd, rd1, rd2;
      tran_low_t *coeff;
      int coeff_ctx;
      k += (idy * 2 + idx);
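      // Combine the above/left entropy contexts of this 4x4 block within the
      // 8x8: k & 1 gives its column, k >> 1 its row.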
      coeff_ctx = combine_entropy_contexts(ta[k & 1], tl[k >> 1]);
      coeff = BLOCK_OFFSET(p->coeff, k);
      x->fwd_txfm4x4(vp9_raster_block_offset_int16(BLOCK_8X8, k, p->src_diff),
                     coeff, 8);
      vp9_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan);
#if CONFIG_VP9_HIGHBITDEPTH
      thisdistortion += vp9_highbd_block_error_dispatch(
          coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz, bd);
#else
      thisdistortion +=
          vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      thissse += ssz;
      thisrate += cost_coeffs(x, 0, k, TX_4X4, coeff_ctx, so->scan,
                              so->neighbors, cpi->sf.use_fast_coef_costing);
      ta[k & 1] = tl[k >> 1] = (x->plane[0].eobs[k] > 0) ? 1 : 0;
      rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2);
      rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2);
      rd = VPXMIN(rd1, rd2);
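      // Early termination: if even the cheaper of coding the residual (rd1)
      // and skipping it (rd2) already exceeds the best rd so far, give up on
      // this segmentation.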
      if (rd >= best_yrd) return INT64_MAX;
    }
  }

  *distortion = thisdistortion >> 2;
  *labelyrate = thisrate;
  *sse = thissse >> 2;

  return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
}
#endif  // !CONFIG_REALTIME_ONLY

typedef struct {
  int eobs;
  int brate;
  int byrate;
  int64_t bdist;
  int64_t bsse;
  int64_t brdcost;
  int_mv mvs[2];
  ENTROPY_CONTEXT ta[2];
  ENTROPY_CONTEXT tl[2];
} SEG_RDSTAT;

typedef struct {
  int_mv *ref_mv[2];
  int_mv mvp;

  int64_t segment_rd;
  int r;
  int64_t d;
  int64_t sse;
  int segment_yrate;
  PREDICTION_MODE modes[4];
  SEG_RDSTAT rdstat[4][INTER_MODES];
  int mvthresh;
} BEST_SEG_INFO;

#if !CONFIG_REALTIME_ONLY
static INLINE int mv_check_bounds(const MvLimits *mv_limits, const MV *mv) {
  return (mv->row >> 3) < mv_limits->row_min ||
         (mv->row >> 3) > mv_limits->row_max ||
         (mv->col >> 3) < mv_limits->col_min ||
         (mv->col >> 3) > mv_limits->col_max;
}

static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
  MODE_INFO *const mi = x->e_mbd.mi[0];
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &x->e_mbd.plane[0];

  p->src.buf =
      &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
  assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
  pd->pre[0].buf =
      &pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
  if (has_second_ref(mi))
    pd->pre[1].buf =
        &pd->pre[1]
             .buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
}

static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
                                  struct buf_2d orig_pre[2]) {
  MODE_INFO *mi = x->e_mbd.mi[0];
  x->plane[0].src = orig_src;
  x->e_mbd.plane[0].pre[0] = orig_pre[0];
  if (has_second_ref(mi)) x->e_mbd.plane[0].pre[1] = orig_pre[1];
}

static INLINE int mv_has_subpel(const MV *mv) {
  return (mv->row & 0x0F) || (mv->col & 0x0F);
}

// Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way to encode zero motion.
// TODO(aconverse): Find out if this is still productive, then clean up or
// remove.
static int check_best_zero_mv(const VP9_COMP *cpi,
                              const uint8_t mode_context[MAX_REF_FRAMES],
                              int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
                              int this_mode,
                              const MV_REFERENCE_FRAME ref_frames[2]) {
  if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
      frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
      (ref_frames[1] == NONE ||
       frame_mv[this_mode][ref_frames[1]].as_int == 0)) {
    int rfc = mode_context[ref_frames[0]];
    int c1 = cost_mv_ref(cpi, NEARMV, rfc);
    int c2 = cost_mv_ref(cpi, NEARESTMV, rfc);
    int c3 = cost_mv_ref(cpi, ZEROMV, rfc);

    if (this_mode == NEARMV) {
      if (c1 > c3) return 0;
    } else if (this_mode == NEARESTMV) {
      if (c2 > c3) return 0;
    } else {
      assert(this_mode == ZEROMV);
      if (ref_frames[1] == NONE) {
        if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0) ||
            (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0))
          return 0;
      } else {
        if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0 &&
             frame_mv[NEARESTMV][ref_frames[1]].as_int == 0) ||
            (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0 &&
             frame_mv[NEARMV][ref_frames[1]].as_int == 0))
          return 0;
      }
    }
  }
  return 1;
}

static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
                                int_mv *frame_mv, int mi_row, int mi_col,
                                int_mv single_newmv[MAX_REF_FRAMES],
                                int *rate_mv) {
  const VP9_COMMON *const cm = &cpi->common;
  const int pw = 4 * num_4x4_blocks_wide_lookup[bsize];
  const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
  MACROBLOCKD *xd = &x->e_mbd;
  MODE_INFO *mi = xd->mi[0];
  const int refs[2] = { mi->ref_frame[0],
                        mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1] };
  int_mv ref_mv[2];
  int ite, ref;
  const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter];
  struct scale_factors sf;

  // Do joint motion search in compound mode to get more accurate mv.
  struct buf_2d backup_yv12[2][MAX_MB_PLANE];
  uint32_t last_besterr[2] = { UINT_MAX, UINT_MAX };
  const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
    vp9_get_scaled_ref_frame(cpi, mi->ref_frame[0]),
    vp9_get_scaled_ref_frame(cpi, mi->ref_frame[1])
  };

  // Prediction buffer from second frame.
#if CONFIG_VP9_HIGHBITDEPTH
  DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[64 * 64]);
  uint8_t *second_pred;
#else
  DECLARE_ALIGNED(16, uint8_t, second_pred[64 * 64]);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  for (ref = 0; ref < 2; ++ref) {
    ref_mv[ref] = x->mbmi_ext->ref_mvs[refs[ref]][0];

    if (scaled_ref_frame[ref]) {
      int i;
      // Swap out the reference frame for a version that's been scaled to
      // match the resolution of the current frame, allowing the existing
      // motion search code to be used without additional modifications.
      for (i = 0; i < MAX_MB_PLANE; i++)
        backup_yv12[ref][i] = xd->plane[i].pre[ref];
      vp9_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
                           NULL);
    }

    frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int;
  }

  // Since we have scaled the reference frames to match the size of the current
  // frame we must use a unit scaling factor during mode selection.
#if CONFIG_VP9_HIGHBITDEPTH
  vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
                                    cm->height, cm->use_highbitdepth);
#else
  vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
                                    cm->height);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  // Allow joint search multiple times iteratively for each reference frame
  // and break out of the search loop if it couldn't find a better mv.
  for (ite = 0; ite < 4; ite++) {
    struct buf_2d ref_yv12[2];
    uint32_t bestsme = UINT_MAX;
    int sadpb = x->sadperbit16;
    MV tmp_mv;
    int search_range = 3;

    const MvLimits tmp_mv_limits = x->mv_limits;
    int id = ite % 2;  // Even iterations search in the first reference frame,
                       // odd iterations search in the second. The predictor
                       // found for the 'other' reference frame is factored in.

    // Initialized here because of compiler problem in Visual Studio.
    ref_yv12[0] = xd->plane[0].pre[0];
    ref_yv12[1] = xd->plane[0].pre[1];

    // Get the prediction block from the 'other' reference frame.
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
      vp9_highbd_build_inter_predictor(
          CONVERT_TO_SHORTPTR(ref_yv12[!id].buf), ref_yv12[!id].stride,
          second_pred_alloc_16, pw, &frame_mv[refs[!id]].as_mv, &sf, pw, ph, 0,
          kernel, MV_PRECISION_Q3, mi_col * MI_SIZE, mi_row * MI_SIZE, xd->bd);
    } else {
      second_pred = (uint8_t *)second_pred_alloc_16;
      vp9_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
                                second_pred, pw, &frame_mv[refs[!id]].as_mv,
                                &sf, pw, ph, 0, kernel, MV_PRECISION_Q3,
                                mi_col * MI_SIZE, mi_row * MI_SIZE);
    }
#else
    vp9_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
                              second_pred, pw, &frame_mv[refs[!id]].as_mv, &sf,
                              pw, ph, 0, kernel, MV_PRECISION_Q3,
                              mi_col * MI_SIZE, mi_row * MI_SIZE);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    // Do compound motion search on the current reference frame.
    if (id) xd->plane[0].pre[0] = ref_yv12[id];
    vp9_set_mv_search_range(&x->mv_limits, &ref_mv[id].as_mv);

    // Use the mv result from the single mode as mv predictor.
    tmp_mv = frame_mv[refs[id]].as_mv;

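    // Convert the predictor from 1/8-pel units to full-pel units for the
    // full-pixel refining search below.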
    tmp_mv.col >>= 3;
    tmp_mv.row >>= 3;

    // Small-range full-pixel motion search.
    bestsme = vp9_refining_search_8p_c(x, &tmp_mv, sadpb, search_range,
                                       &cpi->fn_ptr[bsize], &ref_mv[id].as_mv,
                                       second_pred);
    if (bestsme < UINT_MAX)
      bestsme = vp9_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv,
                                      second_pred, &cpi->fn_ptr[bsize], 1);

    x->mv_limits = tmp_mv_limits;

    if (bestsme < UINT_MAX) {
      uint32_t dis; /* TODO: use dis in distortion calculation later. */
      uint32_t sse;
      bestsme = cpi->find_fractional_mv_step(
          x, &tmp_mv, &ref_mv[id].as_mv, cpi->common.allow_high_precision_mv,
          x->errorperbit, &cpi->fn_ptr[bsize], 0,
          cpi->sf.mv.subpel_search_level, NULL, x->nmvjointcost, x->mvcost,
          &dis, &sse, second_pred, pw, ph, cpi->sf.use_accurate_subpel_search);
    }

    // Restore the pointer to the first (possibly scaled) prediction buffer.
    if (id) xd->plane[0].pre[0] = ref_yv12[0];

    if (bestsme < last_besterr[id]) {
      frame_mv[refs[id]].as_mv = tmp_mv;
      last_besterr[id] = bestsme;
    } else {
      break;
    }
  }

  *rate_mv = 0;

  for (ref = 0; ref < 2; ++ref) {
    if (scaled_ref_frame[ref]) {
      // Restore the prediction frame pointers to their unscaled versions.
      int i;
      for (i = 0; i < MAX_MB_PLANE; i++)
        xd->plane[i].pre[ref] = backup_yv12[ref][i];
    }

    *rate_mv += vp9_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
                                &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
                                x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
  }
}

static int64_t rd_pick_best_sub8x8_mode(
    VP9_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
    int_mv *second_best_ref_mv, int64_t best_rd, int *returntotrate,
    int *returnyrate, int64_t *returndistortion, int *skippable, int64_t *psse,
    int mvthresh, int_mv seg_mvs[4][MAX_REF_FRAMES], BEST_SEG_INFO *bsi_buf,
    int filter_idx, int mi_row, int mi_col) {
  int i;
  BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
  MACROBLOCKD *xd = &x->e_mbd;
  MODE_INFO *mi = xd->mi[0];
  int mode_idx;
  int k, br = 0, idx, idy;
  int64_t bd = 0, block_sse = 0;
  PREDICTION_MODE this_mode;
  VP9_COMMON *cm = &cpi->common;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const int label_count = 4;
  int64_t this_segment_rd = 0;
  int label_mv_thresh;
  int segmentyrate = 0;
  const BLOCK_SIZE bsize = mi->sb_type;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  const int pw = num_4x4_blocks_wide << 2;
  const int ph = num_4x4_blocks_high << 2;
  ENTROPY_CONTEXT t_above[2], t_left[2];
  int subpelmv = 1, have_ref = 0;
  SPEED_FEATURES *const sf = &cpi->sf;
  const int has_second_rf = has_second_ref(mi);
  const int inter_mode_mask = sf->inter_mode_mask[bsize];
  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;

  vp9_zero(*bsi);

  bsi->segment_rd = best_rd;
  bsi->ref_mv[0] = best_ref_mv;
  bsi->ref_mv[1] = second_best_ref_mv;
  bsi->mvp.as_int = best_ref_mv->as_int;
  bsi->mvthresh = mvthresh;

  for (i = 0; i < 4; i++) bsi->modes[i] = ZEROMV;

  memcpy(t_above, pd->above_context, sizeof(t_above));
  memcpy(t_left, pd->left_context, sizeof(t_left));

  // A multiplier of 64 makes this threshold really big, effectively making it
  // so that we very rarely check mvs on segments; setting it to 1 makes the mv
  // threshold roughly equal to what it is for macroblocks.
  label_mv_thresh = 1 * bsi->mvthresh / label_count;

  // Segmentation method overheads
  for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
    for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
      // TODO(jingning,rbultje): rewrite the rate-distortion optimization
      // loop for 4x4/4x8/8x4 block coding. to be replaced with new rd loop
      int_mv mode_mv[MB_MODE_COUNT][2];
      int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
      PREDICTION_MODE mode_selected = ZEROMV;
      int64_t best_rd = INT64_MAX;
      const int i = idy * 2 + idx;
      int ref;

      for (ref = 0; ref < 1 + has_second_rf; ++ref) {
        const MV_REFERENCE_FRAME frame = mi->ref_frame[ref];
        frame_mv[ZEROMV][frame].as_int = 0;
        vp9_append_sub8x8_mvs_for_idx(
            cm, xd, i, ref, mi_row, mi_col, &frame_mv[NEARESTMV][frame],
            &frame_mv[NEARMV][frame], mbmi_ext->mode_context);
      }

      // search for the best motion vector on this segment
      for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
        const struct buf_2d orig_src = x->plane[0].src;
        struct buf_2d orig_pre[2];

        mode_idx = INTER_OFFSET(this_mode);
        bsi->rdstat[i][mode_idx].brdcost = INT64_MAX;
        if (!(inter_mode_mask & (1 << this_mode))) continue;

        if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
                                this_mode, mi->ref_frame))
          continue;

        memcpy(orig_pre, pd->pre, sizeof(orig_pre));
        memcpy(bsi->rdstat[i][mode_idx].ta, t_above,
               sizeof(bsi->rdstat[i][mode_idx].ta));
        memcpy(bsi->rdstat[i][mode_idx].tl, t_left,
               sizeof(bsi->rdstat[i][mode_idx].tl));

        // motion search for newmv (single predictor case only)
        if (!has_second_rf && this_mode == NEWMV &&
            seg_mvs[i][mi->ref_frame[0]].as_int == INVALID_MV) {
          MV *const new_mv = &mode_mv[NEWMV][0].as_mv;
          int step_param = 0;
          uint32_t bestsme = UINT_MAX;
          int sadpb = x->sadperbit4;
          MV mvp_full;
          int max_mv;
          int cost_list[5];
          const MvLimits tmp_mv_limits = x->mv_limits;

          /* Is the best so far sufficiently good that we can't justify doing
           * a new motion search? */
          if (best_rd < label_mv_thresh) break;

          if (cpi->oxcf.mode != BEST) {
            // use previous block's result as next block's MV predictor.
            if (i > 0) {
              bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int;
              if (i == 2) bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
            }
          }
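          // For the first sub-block, bound the search by the frame-level max
          // MV magnitude; for later sub-blocks, derive the bound from the
          // predictor chosen above (converted to full-pel units).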
          if (i == 0)
            max_mv = x->max_mv_context[mi->ref_frame[0]];
          else
            max_mv =
                VPXMAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3;

          if (sf->mv.auto_mv_step_size && cm->show_frame) {
            // Take wtd average of the step_params based on the last frame's
            // max mv magnitude and the best ref mvs of the current block for
            // the given reference.
            step_param =
                (vp9_init_search_range(max_mv) + cpi->mv_step_param) / 2;
          } else {
            step_param = cpi->mv_step_param;
          }

          mvp_full.row = bsi->mvp.as_mv.row >> 3;
          mvp_full.col = bsi->mvp.as_mv.col >> 3;

          if (sf->adaptive_motion_search) {
            if (x->pred_mv[mi->ref_frame[0]].row != INT16_MAX &&
                x->pred_mv[mi->ref_frame[0]].col != INT16_MAX) {
              mvp_full.row = x->pred_mv[mi->ref_frame[0]].row >> 3;
              mvp_full.col = x->pred_mv[mi->ref_frame[0]].col >> 3;
            }
            step_param = VPXMAX(step_param, 8);
          }

          // adjust src pointer for this block
          mi_buf_shift(x, i);

          vp9_set_mv_search_range(&x->mv_limits, &bsi->ref_mv[0]->as_mv);

          bestsme = vp9_full_pixel_search(
              cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method,
              sadpb,
              sf->mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
              &bsi->ref_mv[0]->as_mv, new_mv, INT_MAX, 1);

          x->mv_limits = tmp_mv_limits;

          if (bestsme < UINT_MAX) {
            uint32_t distortion;
            cpi->find_fractional_mv_step(
                x, new_mv, &bsi->ref_mv[0]->as_mv, cm->allow_high_precision_mv,
                x->errorperbit, &cpi->fn_ptr[bsize], sf->mv.subpel_force_stop,
                sf->mv.subpel_search_level, cond_cost_list(cpi, cost_list),
                x->nmvjointcost, x->mvcost, &distortion,
                &x->pred_sse[mi->ref_frame[0]], NULL, pw, ph,
                cpi->sf.use_accurate_subpel_search);

            // save motion search result for use in compound prediction
            seg_mvs[i][mi->ref_frame[0]].as_mv = *new_mv;
          }

          x->pred_mv[mi->ref_frame[0]] = *new_mv;

          // restore src pointers
          mi_buf_restore(x, orig_src, orig_pre);
        }

        if (has_second_rf) {
          if (seg_mvs[i][mi->ref_frame[1]].as_int == INVALID_MV ||
              seg_mvs[i][mi->ref_frame[0]].as_int == INVALID_MV)
            continue;
        }

        if (has_second_rf && this_mode == NEWMV &&
            mi->interp_filter == EIGHTTAP) {
          // adjust src pointers
          mi_buf_shift(x, i);
          if (sf->comp_inter_joint_search_thresh <= bsize) {
            int rate_mv;
            joint_motion_search(cpi, x, bsize, frame_mv[this_mode], mi_row,
                                mi_col, seg_mvs[i], &rate_mv);
            seg_mvs[i][mi->ref_frame[0]].as_int =
                frame_mv[this_mode][mi->ref_frame[0]].as_int;
            seg_mvs[i][mi->ref_frame[1]].as_int =
                frame_mv[this_mode][mi->ref_frame[1]].as_int;
          }
          // restore src pointers
          mi_buf_restore(x, orig_src, orig_pre);
        }

        bsi->rdstat[i][mode_idx].brate = set_and_cost_bmi_mvs(
            cpi, x, xd, i, this_mode, mode_mv[this_mode], frame_mv, seg_mvs[i],
            bsi->ref_mv, x->nmvjointcost, x->mvcost);

        for (ref = 0; ref < 1 + has_second_rf; ++ref) {
          bsi->rdstat[i][mode_idx].mvs[ref].as_int =
              mode_mv[this_mode][ref].as_int;
          if (num_4x4_blocks_wide > 1)
            bsi->rdstat[i + 1][mode_idx].mvs[ref].as_int =
                mode_mv[this_mode][ref].as_int;
          if (num_4x4_blocks_high > 1)
            bsi->rdstat[i + 2][mode_idx].mvs[ref].as_int =
                mode_mv[this_mode][ref].as_int;
        }

        // Trap vectors that reach beyond the UMV borders
        if (mv_check_bounds(&x->mv_limits, &mode_mv[this_mode][0].as_mv) ||
            (has_second_rf &&
             mv_check_bounds(&x->mv_limits, &mode_mv[this_mode][1].as_mv)))
          continue;

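        // For the second and later filter passes, try to reuse the rd stats
        // computed for this sub-block and mode under an earlier filter: valid
        // only when the MVs match and are full-pel, since only then is the
        // prediction independent of the interpolation filter.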
        if (filter_idx > 0) {
          BEST_SEG_INFO *ref_bsi = bsi_buf;
          subpelmv = 0;
          have_ref = 1;

          for (ref = 0; ref < 1 + has_second_rf; ++ref) {
            subpelmv |= mv_has_subpel(&mode_mv[this_mode][ref].as_mv);
            have_ref &= mode_mv[this_mode][ref].as_int ==
                        ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
          }

          if (filter_idx > 1 && !subpelmv && !have_ref) {
            ref_bsi = bsi_buf + 1;
            have_ref = 1;
            for (ref = 0; ref < 1 + has_second_rf; ++ref)
              have_ref &= mode_mv[this_mode][ref].as_int ==
                          ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
          }

          if (!subpelmv && have_ref &&
              ref_bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
            memcpy(&bsi->rdstat[i][mode_idx], &ref_bsi->rdstat[i][mode_idx],
                   sizeof(SEG_RDSTAT));
            if (num_4x4_blocks_wide > 1)
              bsi->rdstat[i + 1][mode_idx].eobs =
                  ref_bsi->rdstat[i + 1][mode_idx].eobs;
            if (num_4x4_blocks_high > 1)
              bsi->rdstat[i + 2][mode_idx].eobs =
                  ref_bsi->rdstat[i + 2][mode_idx].eobs;

            if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
              mode_selected = this_mode;
              best_rd = bsi->rdstat[i][mode_idx].brdcost;
            }
            continue;
          }
        }

        bsi->rdstat[i][mode_idx].brdcost = encode_inter_mb_segment(
            cpi, x, bsi->segment_rd - this_segment_rd, i,
            &bsi->rdstat[i][mode_idx].byrate, &bsi->rdstat[i][mode_idx].bdist,
            &bsi->rdstat[i][mode_idx].bsse, bsi->rdstat[i][mode_idx].ta,
            bsi->rdstat[i][mode_idx].tl, mi_row, mi_col);
        if (bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
          bsi->rdstat[i][mode_idx].brdcost +=
              RDCOST(x->rdmult, x->rddiv, bsi->rdstat[i][mode_idx].brate, 0);
          bsi->rdstat[i][mode_idx].brate += bsi->rdstat[i][mode_idx].byrate;
          bsi->rdstat[i][mode_idx].eobs = p->eobs[i];
          if (num_4x4_blocks_wide > 1)
            bsi->rdstat[i + 1][mode_idx].eobs = p->eobs[i + 1];
          if (num_4x4_blocks_high > 1)
            bsi->rdstat[i + 2][mode_idx].eobs = p->eobs[i + 2];
        }

        if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
          mode_selected = this_mode;
          best_rd = bsi->rdstat[i][mode_idx].brdcost;
        }
      } /*for each 4x4 mode*/

      if (best_rd == INT64_MAX) {
        int iy, midx;
        for (iy = i + 1; iy < 4; ++iy)
          for (midx = 0; midx < INTER_MODES; ++midx)
            bsi->rdstat[iy][midx].brdcost = INT64_MAX;
        bsi->segment_rd = INT64_MAX;
        return INT64_MAX;
      }

      mode_idx = INTER_OFFSET(mode_selected);
      memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above));
      memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left));

      set_and_cost_bmi_mvs(cpi, x, xd, i, mode_selected, mode_mv[mode_selected],
                           frame_mv, seg_mvs[i], bsi->ref_mv, x->nmvjointcost,
                           x->mvcost);

      br += bsi->rdstat[i][mode_idx].brate;
      bd += bsi->rdstat[i][mode_idx].bdist;
      block_sse += bsi->rdstat[i][mode_idx].bsse;
      segmentyrate += bsi->rdstat[i][mode_idx].byrate;
      this_segment_rd += bsi->rdstat[i][mode_idx].brdcost;

      if (this_segment_rd > bsi->segment_rd) {
        int iy, midx;
        for (iy = i + 1; iy < 4; ++iy)
          for (midx = 0; midx < INTER_MODES; ++midx)
            bsi->rdstat[iy][midx].brdcost = INT64_MAX;
        bsi->segment_rd = INT64_MAX;
        return INT64_MAX;
      }
    }
  } /* for each label */

  bsi->r = br;
  bsi->d = bd;
  bsi->segment_yrate = segmentyrate;
  bsi->segment_rd = this_segment_rd;
  bsi->sse = block_sse;

  // update the coding decisions
  for (k = 0; k < 4; ++k) bsi->modes[k] = mi->bmi[k].as_mode;

  if (bsi->segment_rd > best_rd) return INT64_MAX;
  /* set it to the best */
  for (i = 0; i < 4; i++) {
    mode_idx = INTER_OFFSET(bsi->modes[i]);
    mi->bmi[i].as_mv[0].as_int = bsi->rdstat[i][mode_idx].mvs[0].as_int;
    if (has_second_ref(mi))
      mi->bmi[i].as_mv[1].as_int = bsi->rdstat[i][mode_idx].mvs[1].as_int;
    x->plane[0].eobs[i] = bsi->rdstat[i][mode_idx].eobs;
    mi->bmi[i].as_mode = bsi->modes[i];
  }

  /*
   * used to set mbmi->mv.as_int
   */
  *returntotrate = bsi->r;
  *returndistortion = bsi->d;
  *returnyrate = bsi->segment_yrate;
  *skippable = vp9_is_skippable_in_plane(x, BLOCK_8X8, 0);
  *psse = bsi->sse;
  mi->mode = bsi->modes[3];

  return bsi->segment_rd;
}

static void estimate_ref_frame_costs(const VP9_COMMON *cm,
                                     const MACROBLOCKD *xd, int segment_id,
                                     unsigned int *ref_costs_single,
                                     unsigned int *ref_costs_comp,
                                     vpx_prob *comp_mode_p) {
  int seg_ref_active =
      segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
  if (seg_ref_active) {
    memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
    memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
    *comp_mode_p = 128;
  } else {
    vpx_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd);
    vpx_prob comp_inter_p = 128;

    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      comp_inter_p = vp9_get_reference_mode_prob(cm, xd);
      *comp_mode_p = comp_inter_p;
    } else {
      *comp_mode_p = 128;
    }

    ref_costs_single[INTRA_FRAME] = vp9_cost_bit(intra_inter_p, 0);

    if (cm->reference_mode != COMPOUND_REFERENCE) {
      vpx_prob ref_single_p1 = vp9_get_pred_prob_single_ref_p1(cm, xd);
      vpx_prob ref_single_p2 = vp9_get_pred_prob_single_ref_p2(cm, xd);
      unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1);

      if (cm->reference_mode == REFERENCE_MODE_SELECT)
        base_cost += vp9_cost_bit(comp_inter_p, 0);

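      // A single reference is signalled with up to two bits: ref_single_p1
      // separates LAST from {GOLDEN, ALTREF}, and ref_single_p2 then
      // separates GOLDEN from ALTREF.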
      ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] =
          ref_costs_single[ALTREF_FRAME] = base_cost;
      ref_costs_single[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0);
      ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p1, 1);
      ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p1, 1);
      ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p2, 0);
      ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p2, 1);
    } else {
      ref_costs_single[LAST_FRAME] = 512;
      ref_costs_single[GOLDEN_FRAME] = 512;
      ref_costs_single[ALTREF_FRAME] = 512;
    }
    if (cm->reference_mode != SINGLE_REFERENCE) {
      vpx_prob ref_comp_p = vp9_get_pred_prob_comp_ref_p(cm, xd);
      unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1);

      if (cm->reference_mode == REFERENCE_MODE_SELECT)
        base_cost += vp9_cost_bit(comp_inter_p, 1);

      ref_costs_comp[LAST_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 0);
      ref_costs_comp[GOLDEN_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 1);
    } else {
      ref_costs_comp[LAST_FRAME] = 512;
      ref_costs_comp[GOLDEN_FRAME] = 512;
    }
  }
}

static void store_coding_context(
    MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int mode_index,
    int64_t comp_pred_diff[REFERENCE_MODES],
    int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS], int skippable) {
  MACROBLOCKD *const xd = &x->e_mbd;

  // Take a snapshot of the coding context so it can be
  // restored if we decide to encode this way
  ctx->skip = x->skip;
  ctx->skippable = skippable;
  ctx->best_mode_index = mode_index;
  ctx->mic = *xd->mi[0];
  ctx->mbmi_ext = *x->mbmi_ext;
  ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
  ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
  ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];

  memcpy(ctx->best_filter_diff, best_filter_diff,
         sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS);
}

static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
                               MV_REFERENCE_FRAME ref_frame,
                               BLOCK_SIZE block_size, int mi_row, int mi_col,
                               int_mv frame_nearest_mv[MAX_REF_FRAMES],
                               int_mv frame_near_mv[MAX_REF_FRAMES],
                               struct buf_2d yv12_mb[4][MAX_MB_PLANE]) {
  const VP9_COMMON *cm = &cpi->common;
  const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
  const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;

  assert(yv12 != NULL);

  // TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
  // use the UV scaling factors.
  vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);

  // Gets an initial list of candidate vectors from neighbours and orders them
  vp9_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col,
                   mbmi_ext->mode_context);

  // Candidate refinement carried out at encoder and decoder
  vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
                        &frame_nearest_mv[ref_frame],
                        &frame_near_mv[ref_frame]);

  // Further refinement that is encode side only to test the top few candidates
  // in full and choose the best as the centre point for subsequent searches.
  // The current implementation doesn't support scaling.
  if (!vp9_is_scaled(sf) && block_size >= BLOCK_8X8)
    vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
                block_size);
}

#if CONFIG_NON_GREEDY_MV
static int ref_frame_to_gf_rf_idx(int ref_frame) {
  if (ref_frame == GOLDEN_FRAME) {
    return 0;
  }
  if (ref_frame == LAST_FRAME) {
    return 1;
  }
  if (ref_frame == ALTREF_FRAME) {
    return 2;
  }
  assert(0);
  return -1;
}
#endif

static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
                                 int mi_row, int mi_col, int_mv *tmp_mv,
                                 int *rate_mv) {
  MACROBLOCKD *xd = &x->e_mbd;
  const VP9_COMMON *cm = &cpi->common;
  MODE_INFO *mi = xd->mi[0];
  struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
  int step_param;
  MV mvp_full;
  int ref = mi->ref_frame[0];
  MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
  const MvLimits tmp_mv_limits = x->mv_limits;
  int cost_list[5];
  const int best_predmv_idx = x->mv_best_ref_index[ref];
  const YV12_BUFFER_CONFIG *scaled_ref_frame =
      vp9_get_scaled_ref_frame(cpi, ref);
  const int pw = num_4x4_blocks_wide_lookup[bsize] << 2;
  const int ph = num_4x4_blocks_high_lookup[bsize] << 2;
  MV pred_mv[3];

  int bestsme = INT_MAX;
#if CONFIG_NON_GREEDY_MV
  int gf_group_idx = cpi->twopass.gf_group.index;
  int gf_rf_idx = ref_frame_to_gf_rf_idx(ref);
  BLOCK_SIZE square_bsize = get_square_block_size(bsize);
  int_mv nb_full_mvs[NB_MVS_NUM] = { 0 };
  MotionField *motion_field = vp9_motion_field_info_get_motion_field(
      &cpi->motion_field_info, gf_group_idx, gf_rf_idx, square_bsize);
  const int nb_full_mv_num =
      vp9_prepare_nb_full_mvs(motion_field, mi_row, mi_col, nb_full_mvs);
  const int lambda = (pw * ph) / 4;
  assert(pw * ph == lambda << 2);
#else   // CONFIG_NON_GREEDY_MV
  int sadpb = x->sadperbit16;
#endif  // CONFIG_NON_GREEDY_MV

  pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
  pred_mv[1] = x->mbmi_ext->ref_mvs[ref][1].as_mv;
  pred_mv[2] = x->pred_mv[ref];

  if (scaled_ref_frame) {
    int i;
    // Swap out the reference frame for a version that's been scaled to
    // match the resolution of the current frame, allowing the existing
    // motion search code to be used without additional modifications.
    for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];

    vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
  }

  // Work out the size of the first step in the mv step search.
  // 0 here is maximum length first step. 1 is VPXMAX >> 1 etc.
  if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
    // Take wtd average of the step_params based on the last frame's
    // max mv magnitude and that based on the best ref mvs of the current
    // block for the given reference.
    step_param =
        (vp9_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
        2;
  } else {
    step_param = cpi->mv_step_param;
  }

  if (cpi->sf.adaptive_motion_search && bsize < BLOCK_64X64) {
    const int boffset =
        2 * (b_width_log2_lookup[BLOCK_64X64] -
             VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
    step_param = VPXMAX(step_param, boffset);
  }

  if (cpi->sf.adaptive_motion_search) {
    int bwl = b_width_log2_lookup[bsize];
    int bhl = b_height_log2_lookup[bsize];
    int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4);

    if (tlevel < 5) step_param += 2;

    // prev_mv_sad is not setup for dynamically scaled frames.
    if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) {
      int i;
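      // If some other reference yields a much smaller (8x) prediction SAD,
      // this reference is unlikely to be chosen: invalidate its predictor and
      // skip the search.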
      for (i = LAST_FRAME; i <= ALTREF_FRAME && cm->show_frame; ++i) {
        if ((x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[i]) {
          x->pred_mv[ref].row = INT16_MAX;
          x->pred_mv[ref].col = INT16_MAX;
          tmp_mv->as_int = INVALID_MV;

          if (scaled_ref_frame) {
            int i;
            for (i = 0; i < MAX_MB_PLANE; ++i)
              xd->plane[i].pre[0] = backup_yv12[i];
          }
          return;
        }
      }
    }
  }

  // Note: MV limits are modified here. Always restore the original values
  // after full-pixel motion search.
  vp9_set_mv_search_range(&x->mv_limits, &ref_mv);

  mvp_full = pred_mv[best_predmv_idx];
  mvp_full.col >>= 3;
  mvp_full.row >>= 3;

#if CONFIG_NON_GREEDY_MV
  bestsme = vp9_full_pixel_diamond_new(cpi, x, bsize, &mvp_full, step_param,
                                       lambda, 1, nb_full_mvs, nb_full_mv_num,
                                       &tmp_mv->as_mv);
#else   // CONFIG_NON_GREEDY_MV
  bestsme = vp9_full_pixel_search(
      cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method, sadpb,
      cond_cost_list(cpi, cost_list), &ref_mv, &tmp_mv->as_mv, INT_MAX, 1);
#endif  // CONFIG_NON_GREEDY_MV

  if (cpi->sf.enhanced_full_pixel_motion_search) {
    int i;
    for (i = 0; i < 3; ++i) {
      int this_me;
      MV this_mv;
      int diff_row;
      int diff_col;
      int step;

      if (pred_mv[i].row == INT16_MAX || pred_mv[i].col == INT16_MAX) continue;
      if (i == best_predmv_idx) continue;

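      // Size the extra search from the spread between this predictor and the
      // previously tried one: the larger the disagreement, the wider the
      // search around the new starting point.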
      diff_row = ((int)pred_mv[i].row -
                  pred_mv[i > 0 ? (i - 1) : best_predmv_idx].row) >>
                 3;
      diff_col = ((int)pred_mv[i].col -
                  pred_mv[i > 0 ? (i - 1) : best_predmv_idx].col) >>
                 3;
      if (diff_row == 0 && diff_col == 0) continue;
      if (diff_row < 0) diff_row = -diff_row;
      if (diff_col < 0) diff_col = -diff_col;
      step = get_msb((diff_row + diff_col + 1) >> 1);
      if (step <= 0) continue;

      mvp_full = pred_mv[i];
      mvp_full.col >>= 3;
      mvp_full.row >>= 3;
#if CONFIG_NON_GREEDY_MV
      this_me = vp9_full_pixel_diamond_new(
          cpi, x, bsize, &mvp_full,
          VPXMAX(step_param, MAX_MVSEARCH_STEPS - step), lambda, 1, nb_full_mvs,
          nb_full_mv_num, &this_mv);
#else   // CONFIG_NON_GREEDY_MV
      this_me = vp9_full_pixel_search(
          cpi, x, bsize, &mvp_full,
          VPXMAX(step_param, MAX_MVSEARCH_STEPS - step),
          cpi->sf.mv.search_method, sadpb, cond_cost_list(cpi, cost_list),
          &ref_mv, &this_mv, INT_MAX, 1);
#endif  // CONFIG_NON_GREEDY_MV
      if (this_me < bestsme) {
        tmp_mv->as_mv = this_mv;
        bestsme = this_me;
      }
    }
  }

  x->mv_limits = tmp_mv_limits;

  if (bestsme < INT_MAX) {
    uint32_t dis; /* TODO: use dis in distortion calculation later. */
    cpi->find_fractional_mv_step(
        x, &tmp_mv->as_mv, &ref_mv, cm->allow_high_precision_mv, x->errorperbit,
        &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
        cpi->sf.mv.subpel_search_level, cond_cost_list(cpi, cost_list),
        x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, pw, ph,
        cpi->sf.use_accurate_subpel_search);
  }
  *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
                             x->mvcost, MV_COST_WEIGHT);

  x->pred_mv[ref] = tmp_mv->as_mv;

  if (scaled_ref_frame) {
    int i;
    for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
  }
}

static INLINE void restore_dst_buf(MACROBLOCKD *xd,
                                   uint8_t *orig_dst[MAX_MB_PLANE],
                                   int orig_dst_stride[MAX_MB_PLANE]) {
  int i;
  for (i = 0; i < MAX_MB_PLANE; i++) {
    xd->plane[i].dst.buf = orig_dst[i];
    xd->plane[i].dst.stride = orig_dst_stride[i];
  }
}

// In some situations we want to discount the apparent cost of a new motion
// vector. Where there is a subtle motion field and especially where there is
// low spatial complexity, it can be hard to cover the cost of a new motion
// vector in a single block, even if that motion vector reduces distortion.
// However, once established, that vector may be usable through the nearest and
// near mv modes to reduce distortion in subsequent blocks and also improve
// visual quality.
static int discount_newmv_test(VP9_COMP *cpi, int this_mode, int_mv this_mv,
                               int_mv (*mode_mv)[MAX_REF_FRAMES], int ref_frame,
                               int mi_row, int mi_col, BLOCK_SIZE bsize) {
#if CONFIG_NON_GREEDY_MV
  (void)mode_mv;
  (void)this_mv;
  if (this_mode == NEWMV && bsize >= BLOCK_8X8 && cpi->tpl_ready) {
    const int gf_group_idx = cpi->twopass.gf_group.index;
    const int gf_rf_idx = ref_frame_to_gf_rf_idx(ref_frame);
    const TplDepFrame tpl_frame = cpi->tpl_stats[gf_group_idx];
    const MotionField *motion_field = vp9_motion_field_info_get_motion_field(
        &cpi->motion_field_info, gf_group_idx, gf_rf_idx, cpi->tpl_bsize);
    const int tpl_block_mi_h = num_8x8_blocks_high_lookup[cpi->tpl_bsize];
    const int tpl_block_mi_w = num_8x8_blocks_wide_lookup[cpi->tpl_bsize];
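    // Snap to the top-left corner of the TPL block that contains this block.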
    const int tpl_mi_row = mi_row - (mi_row % tpl_block_mi_h);
    const int tpl_mi_col = mi_col - (mi_col % tpl_block_mi_w);
    const int mv_mode =
        tpl_frame
            .mv_mode_arr[gf_rf_idx][tpl_mi_row * tpl_frame.stride + tpl_mi_col];
    if (mv_mode == NEW_MV_MODE) {
      int_mv tpl_new_mv =
          vp9_motion_field_mi_get_mv(motion_field, tpl_mi_row, tpl_mi_col);
      int row_diff = abs(tpl_new_mv.as_mv.row - this_mv.as_mv.row);
      int col_diff = abs(tpl_new_mv.as_mv.col - this_mv.as_mv.col);
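      // Apply the discount only when the searched MV agrees with the TPL
      // motion field to within 8 units in 1/8 pel, i.e. one full pel.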
      if (VPXMAX(row_diff, col_diff) <= 8) {
        return 1;
      } else {
        return 0;
      }
    } else {
      return 0;
    }
  } else {
    return 0;
  }
#else
  (void)mi_row;
  (void)mi_col;
  (void)bsize;
  return (!cpi->rc.is_src_frame_alt_ref && (this_mode == NEWMV) &&
          (this_mv.as_int != 0) &&
          ((mode_mv[NEARESTMV][ref_frame].as_int == 0) ||
           (mode_mv[NEARESTMV][ref_frame].as_int == INVALID_MV)) &&
          ((mode_mv[NEARMV][ref_frame].as_int == 0) ||
           (mode_mv[NEARMV][ref_frame].as_int == INVALID_MV)));
#endif
}

static int64_t handle_inter_mode(
    VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
    int64_t *distortion, int *skippable, int *rate_y, int *rate_uv,
    struct buf_2d *recon, int *disable_skip, int_mv (*mode_mv)[MAX_REF_FRAMES],
    int mi_row, int mi_col, int_mv single_newmv[MAX_REF_FRAMES],
    INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
    int (*single_skippable)[MAX_REF_FRAMES], int64_t *psse,
    const int64_t ref_best_rd, int64_t *mask_filter, int64_t filter_cache[]) {
  VP9_COMMON *cm = &cpi->common;
  MACROBLOCKD *xd = &x->e_mbd;
  MODE_INFO *mi = xd->mi[0];
  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
  const int is_comp_pred = has_second_ref(mi);
  const int this_mode = mi->mode;
  int_mv *frame_mv = mode_mv[this_mode];
  int i;
  int refs[2] = { mi->ref_frame[0],
                  (mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1]) };
  int_mv cur_mv[2];
#if CONFIG_VP9_HIGHBITDEPTH
  DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]);
  uint8_t *tmp_buf;
#else
  DECLARE_ALIGNED(16, uint8_t, tmp_buf[MAX_MB_PLANE * 64 * 64]);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  int pred_exists = 0;
  int intpel_mv;
  int64_t rd, tmp_rd, best_rd = INT64_MAX;
  int best_needs_copy = 0;
  uint8_t *orig_dst[MAX_MB_PLANE];
  int orig_dst_stride[MAX_MB_PLANE];
  int rs = 0;
  INTERP_FILTER best_filter = SWITCHABLE;
  uint8_t skip_txfm[MAX_MB_PLANE << 2] = { 0 };
  int64_t bsse[MAX_MB_PLANE << 2] = { 0 };

  int bsl = mi_width_log2_lookup[bsize];
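  // Chessboard pattern: when the cb_pred_filter_search speed feature is on,
  // alternate between predicting the filter from neighbours and doing the
  // full filter search, across blocks and frames.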
  int pred_filter_search =
      cpi->sf.cb_pred_filter_search
          ? (((mi_row + mi_col) >> bsl) +
             get_chessboard_index(cm->current_video_frame)) &
                0x1
          : 0;

  int skip_txfm_sb = 0;
  int64_t skip_sse_sb = INT64_MAX;
  int64_t distortion_y = 0, distortion_uv = 0;

#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    tmp_buf = CONVERT_TO_BYTEPTR(tmp_buf16);
  } else {
    tmp_buf = (uint8_t *)tmp_buf16;
  }
#endif  // CONFIG_VP9_HIGHBITDEPTH

  if (pred_filter_search) {
    INTERP_FILTER af = SWITCHABLE, lf = SWITCHABLE;
    if (xd->above_mi && is_inter_block(xd->above_mi))
      af = xd->above_mi->interp_filter;
    if (xd->left_mi && is_inter_block(xd->left_mi))
      lf = xd->left_mi->interp_filter;

    if ((this_mode != NEWMV) || (af == lf)) best_filter = af;
  }

  if (is_comp_pred) {
    if (frame_mv[refs[0]].as_int == INVALID_MV ||
        frame_mv[refs[1]].as_int == INVALID_MV)
      return INT64_MAX;

    if (cpi->sf.adaptive_mode_search) {
      if (single_filter[this_mode][refs[0]] ==
          single_filter[this_mode][refs[1]])
        best_filter = single_filter[this_mode][refs[0]];
    }
  }

  if (this_mode == NEWMV) {
    int rate_mv;
    if (is_comp_pred) {
      // Initialize mv using single prediction mode result.
      frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
      frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;

      if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
        joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col,
                            single_newmv, &rate_mv);
      } else {
        rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]].as_mv,
                                  &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
                                  x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
        rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]].as_mv,
                                   &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
                                   x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
      }
      *rate2 += rate_mv;
    } else {
      int_mv tmp_mv;
      single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv);
      if (tmp_mv.as_int == INVALID_MV) return INT64_MAX;

      frame_mv[refs[0]].as_int = xd->mi[0]->bmi[0].as_mv[0].as_int =
          tmp_mv.as_int;
      single_newmv[refs[0]].as_int = tmp_mv.as_int;

      // Estimate the rate implications of a new mv but discount this
      // under certain circumstances where we want to help initiate a weak
      // motion field, where the distortion gain for a single block may not
      // be enough to overcome the cost of a new mv.
      if (discount_newmv_test(cpi, this_mode, tmp_mv, mode_mv, refs[0], mi_row,
                              mi_col, bsize)) {
        *rate2 += VPXMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
      } else {
        *rate2 += rate_mv;
      }
    }
  }

  for (i = 0; i < is_comp_pred + 1; ++i) {
    cur_mv[i] = frame_mv[refs[i]];
    // Clip "next_nearest" so that it does not extend too far out of the image.
    if (this_mode != NEWMV) clamp_mv2(&cur_mv[i].as_mv, xd);

    if (mv_check_bounds(&x->mv_limits, &cur_mv[i].as_mv)) return INT64_MAX;
    mi->mv[i].as_int = cur_mv[i].as_int;
  }

  // Do the first prediction into the destination buffer. Do the next
  // prediction into a temporary buffer. Then keep track of which one
  // of these currently holds the best predictor, and use the other
  // one for future predictions. In the end, copy from tmp_buf to
  // dst if necessary.
  for (i = 0; i < MAX_MB_PLANE; i++) {
    orig_dst[i] = xd->plane[i].dst.buf;
    orig_dst_stride[i] = xd->plane[i].dst.stride;
  }

  // We don't include the cost of the second reference here, because there
  // are only two options: Last/ARF or Golden/ARF; the second one is always
  // known, which is ARF.
  //
  // Under some circumstances we discount the cost of new mv mode to encourage
  // initiation of a motion field.
  if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], mode_mv, refs[0],
                          mi_row, mi_col, bsize)) {
    *rate2 +=
        VPXMIN(cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]),
               cost_mv_ref(cpi, NEARESTMV, mbmi_ext->mode_context[refs[0]]));
  } else {
    *rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]);
  }

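  // If the mode/MV rate alone already exceeds the best rd seen so far, skip
  // the full rd measurement (NEARESTMV is exempted, presumably to keep at
  // least one inter mode fully evaluated).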
  if (RDCOST(x->rdmult, x->rddiv, *rate2, 0) > ref_best_rd &&
      mi->mode != NEARESTMV)
    return INT64_MAX;

  pred_exists = 0;
  // Are all MVs integer pel for Y and UV
  intpel_mv = !mv_has_subpel(&mi->mv[0].as_mv);
  if (is_comp_pred) intpel_mv &= !mv_has_subpel(&mi->mv[1].as_mv);

  // Search for best switchable filter by checking the variance of
  // pred error irrespective of whether the filter will be used
  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;

  if (cm->interp_filter != BILINEAR) {
    if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) {
      best_filter = EIGHTTAP;
    } else if (best_filter == SWITCHABLE) {
      int newbest;
      int tmp_rate_sum = 0;
      int64_t tmp_dist_sum = 0;

      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        int j;
        int64_t rs_rd;
        int tmp_skip_sb = 0;
        int64_t tmp_skip_sse = INT64_MAX;

        mi->interp_filter = i;
        rs = vp9_get_switchable_rate(cpi, xd);
        rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);

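        // With integer-pel MVs the prediction is identical for every filter,
        // so the rate/distortion measured for the first filter can simply be
        // reused here.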
2909 if (i > 0 && intpel_mv) {
2910 rd = RDCOST(x->rdmult, x->rddiv, tmp_rate_sum, tmp_dist_sum);
2911 filter_cache[i] = rd;
2912 filter_cache[SWITCHABLE_FILTERS] =
2913 VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
2914 if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
2915 *mask_filter = VPXMAX(*mask_filter, rd);
2916 } else {
2917 int rate_sum = 0;
2918 int64_t dist_sum = 0;
2919 if (i > 0 && cpi->sf.adaptive_interp_filter_search &&
2920 (cpi->sf.interp_filter_search_mask & (1 << i))) {
2921 rate_sum = INT_MAX;
2922 dist_sum = INT64_MAX;
2923 continue;
2924 }
2925
2926 if ((cm->interp_filter == SWITCHABLE && (!i || best_needs_copy)) ||
2927 (cm->interp_filter != SWITCHABLE &&
2928 (cm->interp_filter == mi->interp_filter ||
2929 (i == 0 && intpel_mv)))) {
2930 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2931 } else {
2932 for (j = 0; j < MAX_MB_PLANE; j++) {
2933 xd->plane[j].dst.buf = tmp_buf + j * 64 * 64;
2934 xd->plane[j].dst.stride = 64;
2935 }
2936 }
2937 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
2938 model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, &tmp_skip_sb,
2939 &tmp_skip_sse);
2940
2941 rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum);
2942 filter_cache[i] = rd;
2943 filter_cache[SWITCHABLE_FILTERS] =
2944 VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
2945 if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
2946 *mask_filter = VPXMAX(*mask_filter, rd);
2947
2948 if (i == 0 && intpel_mv) {
2949 tmp_rate_sum = rate_sum;
2950 tmp_dist_sum = dist_sum;
2951 }
2952 }
2953
2954 if (i == 0 && cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
2955 if (rd / 2 > ref_best_rd) {
2956 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2957 return INT64_MAX;
2958 }
2959 }
2960 newbest = i == 0 || rd < best_rd;
2961
2962 if (newbest) {
2963 best_rd = rd;
2964 best_filter = mi->interp_filter;
2965 if (cm->interp_filter == SWITCHABLE && i && !intpel_mv)
2966 best_needs_copy = !best_needs_copy;
2967 }
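// The prediction ping-pongs between the destination buffer and tmp_buf, so
// when a new best arrives at i > 0 (and subpel MVs make the filter outputs
// differ), flipping best_needs_copy records which of the two buffers
// currently holds the best predictor.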
2968
2969 if ((cm->interp_filter == SWITCHABLE && newbest) ||
2970 (cm->interp_filter != SWITCHABLE &&
2971 cm->interp_filter == mi->interp_filter)) {
2972 pred_exists = 1;
2973 tmp_rd = best_rd;
2974
2975 skip_txfm_sb = tmp_skip_sb;
2976 skip_sse_sb = tmp_skip_sse;
2977 memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
2978 memcpy(bsse, x->bsse, sizeof(bsse));
2979 }
2980 }
2981 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2982 }
2983 }
2984 // Set the appropriate filter
2985 mi->interp_filter =
2986 cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter;
2987 rs = cm->interp_filter == SWITCHABLE ? vp9_get_switchable_rate(cpi, xd) : 0;
2988
2989 if (pred_exists) {
2990 if (best_needs_copy) {
2991 // Again temporarily set the buffers to local memory to prevent a memcpy.
2992 for (i = 0; i < MAX_MB_PLANE; i++) {
2993 xd->plane[i].dst.buf = tmp_buf + i * 64 * 64;
2994 xd->plane[i].dst.stride = 64;
2995 }
2996 }
2997 rd = tmp_rd + RDCOST(x->rdmult, x->rddiv, rs, 0);
2998 } else {
2999 int tmp_rate;
3000 int64_t tmp_dist;
3001 // Handles the special case when a filter that is not in the
3002 // switchable list (e.g. bilinear) is indicated at the frame level, or
3003 // skip condition holds.
3004 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
3005 model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist, &skip_txfm_sb,
3006 &skip_sse_sb);
3007 rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
3008 memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
3009 memcpy(bsse, x->bsse, sizeof(bsse));
3010 }
3011
3012 if (!is_comp_pred) single_filter[this_mode][refs[0]] = mi->interp_filter;
3013
3014 if (cpi->sf.adaptive_mode_search)
3015 if (is_comp_pred)
3016 if (single_skippable[this_mode][refs[0]] &&
3017 single_skippable[this_mode][refs[1]])
3018 memset(skip_txfm, SKIP_TXFM_AC_DC, sizeof(skip_txfm));
3019
3020 if (cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
3021 // If the current pred_error modeled rd is substantially more than the
3022 // best so far, do not bother doing full rd.
3023 if (rd / 2 > ref_best_rd) {
3024 restore_dst_buf(xd, orig_dst, orig_dst_stride);
3025 return INT64_MAX;
3026 }
3027 }
3028
3029 if (cm->interp_filter == SWITCHABLE) *rate2 += rs;
3030
3031 memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
3032 memcpy(x->bsse, bsse, sizeof(bsse));
3033
3034 if (!skip_txfm_sb || xd->lossless) {
3035 int skippable_y, skippable_uv;
3036 int64_t sseuv = INT64_MAX;
3037 int64_t rdcosty = INT64_MAX;
3038
3039 // Y cost and distortion
3040 vp9_subtract_plane(x, bsize, 0);
3041 super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse, bsize,
3042 ref_best_rd, recon);
3043
3044 if (*rate_y == INT_MAX) {
3045 *rate2 = INT_MAX;
3046 *distortion = INT64_MAX;
3047 restore_dst_buf(xd, orig_dst, orig_dst_stride);
3048 return INT64_MAX;
3049 }
3050
3051 *rate2 += *rate_y;
3052 *distortion += distortion_y;
3053
3054 rdcosty = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
3055 rdcosty = VPXMIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse));
3056
3057 if (!super_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv,
3058 &sseuv, bsize, ref_best_rd - rdcosty)) {
3059 *rate2 = INT_MAX;
3060 *distortion = INT64_MAX;
3061 restore_dst_buf(xd, orig_dst, orig_dst_stride);
3062 return INT64_MAX;
3063 }
3064
3065 *psse += sseuv;
3066 *rate2 += *rate_uv;
3067 *distortion += distortion_uv;
3068 *skippable = skippable_y && skippable_uv;
3069 } else {
3070 x->skip = 1;
3071 *disable_skip = 1;
3072
3073 // The cost of the skip bit needs to be added.
3074 *rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
3075
3076 *distortion = skip_sse_sb;
3077 }
3078
3079 if (!is_comp_pred) single_skippable[this_mode][refs[0]] = *skippable;
3080
3081 restore_dst_buf(xd, orig_dst, orig_dst_stride);
3082 return 0; // The rate-distortion cost will be re-calculated by caller.
3083 }
3084 #endif // !CONFIG_REALTIME_ONLY
3085
3086 void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
3087 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
3088 int64_t best_rd) {
3089 VP9_COMMON *const cm = &cpi->common;
3090 MACROBLOCKD *const xd = &x->e_mbd;
3091 struct macroblockd_plane *const pd = xd->plane;
3092 int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
3093 int y_skip = 0, uv_skip = 0;
3094 int64_t dist_y = 0, dist_uv = 0;
3095 TX_SIZE max_uv_tx_size;
3096 x->skip_encode = 0;
3097 ctx->skip = 0;
3098 xd->mi[0]->ref_frame[0] = INTRA_FRAME;
3099 xd->mi[0]->ref_frame[1] = NONE;
3100 // Initialize interp_filter here so we do not have to check for inter block
3101 // modes in get_pred_context_switchable_interp()
3102 xd->mi[0]->interp_filter = SWITCHABLE_FILTERS;
3103
3104 if (bsize >= BLOCK_8X8) {
3105 if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly, &dist_y,
3106 &y_skip, bsize, best_rd) >= best_rd) {
3107 rd_cost->rate = INT_MAX;
3108 return;
3109 }
3110 } else {
3111 y_skip = 0;
3112 if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate_y, &rate_y_tokenonly,
3113 &dist_y, best_rd) >= best_rd) {
3114 rd_cost->rate = INT_MAX;
3115 return;
3116 }
3117 }
3118 max_uv_tx_size = uv_txsize_lookup[bsize][xd->mi[0]->tx_size]
3119 [pd[1].subsampling_x][pd[1].subsampling_y];
3120 rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly, &dist_uv,
3121 &uv_skip, VPXMAX(BLOCK_8X8, bsize), max_uv_tx_size);
3122
3123 if (y_skip && uv_skip) {
3124 rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
3125 vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
3126 rd_cost->dist = dist_y + dist_uv;
3127 } else {
3128 rd_cost->rate =
3129 rate_y + rate_uv + vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
3130 rd_cost->dist = dist_y + dist_uv;
3131 }
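// Note on the accounting above: when both luma and chroma are fully
// skippable, the token-only rates are backed out and the cost of signaling
// skip = 1 is added instead; otherwise the block pays for skip = 0.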
3132
3133 ctx->mic = *xd->mi[0];
3134 ctx->mbmi_ext = *x->mbmi_ext;
3135 rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
3136 }
3137
3138 #if !CONFIG_REALTIME_ONLY
3139 // This function is designed to apply a bias or adjustment to an rd value based
3140 // on the relative variance of the source and reconstruction.
3141 #define LOW_VAR_THRESH 250
3142 #define VAR_MULT 250
3143 static unsigned int max_var_adjust[VP9E_CONTENT_INVALID] = { 16, 16, 250 };
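// Indexed by vp9e_tune_content: given the enum order (default, screen,
// film), the adjustment is capped at 16 for default and screen content and
// at 250 for film content, where preserving grain-like texture matters
// most.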
3144
3145 static void rd_variance_adjustment(VP9_COMP *cpi, MACROBLOCK *x,
3146 BLOCK_SIZE bsize, int64_t *this_rd,
3147 struct buf_2d *recon,
3148 MV_REFERENCE_FRAME ref_frame,
3149 MV_REFERENCE_FRAME second_ref_frame,
3150 PREDICTION_MODE this_mode) {
3151 MACROBLOCKD *const xd = &x->e_mbd;
3152 unsigned int rec_variance;
3153 unsigned int src_variance;
3154 unsigned int src_rec_min;
3155 unsigned int var_diff = 0;
3156 unsigned int var_factor = 0;
3157 unsigned int adj_max;
3158 unsigned int low_var_thresh = LOW_VAR_THRESH;
3159 const int bw = num_8x8_blocks_wide_lookup[bsize];
3160 const int bh = num_8x8_blocks_high_lookup[bsize];
3161 vp9e_tune_content content_type = cpi->oxcf.content;
3162
3163 if (*this_rd == INT64_MAX) return;
3164
3165 #if CONFIG_VP9_HIGHBITDEPTH
3166 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
3167 rec_variance = vp9_high_get_sby_variance(cpi, recon, bsize, xd->bd);
3168 src_variance =
3169 vp9_high_get_sby_variance(cpi, &x->plane[0].src, bsize, xd->bd);
3170 } else {
3171 rec_variance = vp9_get_sby_variance(cpi, recon, bsize);
3172 src_variance = vp9_get_sby_variance(cpi, &x->plane[0].src, bsize);
3173 }
3174 #else
3175 rec_variance = vp9_get_sby_variance(cpi, recon, bsize);
3176 src_variance = vp9_get_sby_variance(cpi, &x->plane[0].src, bsize);
3177 #endif // CONFIG_VP9_HIGHBITDEPTH
3178
3179 // Scale based on area in 8x8 blocks
3180 rec_variance /= (bw * bh);
3181 src_variance /= (bw * bh);
3182
3183 if (content_type == VP9E_CONTENT_FILM) {
3184 if (cpi->oxcf.pass == 2) {
3185 // Adjust low variance threshold based on estimated group noise energy.
3186 double noise_factor =
3187 (double)cpi->twopass.gf_group.group_noise_energy / SECTION_NOISE_DEF;
3188 low_var_thresh = (unsigned int)(low_var_thresh * noise_factor);
3189
3190 if (ref_frame == INTRA_FRAME) {
3191 low_var_thresh *= 2;
3192 if (this_mode == DC_PRED) low_var_thresh *= 5;
3193 } else if (second_ref_frame > INTRA_FRAME) {
3194 low_var_thresh *= 2;
3195 }
3196 }
3197 } else {
3198 low_var_thresh = LOW_VAR_THRESH / 2;
3199 }
3200
3201 // Take the lower of the source (raw per pixel value) and recon variance.
3202 // Note that if the source per-pixel value is 0 then the recon value here
3203 // will not be per pixel (see above) and so will likely be much larger.
3204 src_rec_min = VPXMIN(src_variance, rec_variance);
3205
3206 if (src_rec_min > low_var_thresh) return;
3207
3208 // We care more when the reconstruction has lower variance so give this case
3209 // a stronger weighting.
3210 var_diff = (src_variance > rec_variance) ? (src_variance - rec_variance) * 2
3211 : (rec_variance - src_variance) / 2;
3212
3213 adj_max = max_var_adjust[content_type];
3214
3215 var_factor =
3216 (unsigned int)((int64_t)VAR_MULT * var_diff) / VPXMAX(1, src_variance);
3217 var_factor = VPXMIN(adj_max, var_factor);
3218
3219 if ((content_type == VP9E_CONTENT_FILM) &&
3220 ((ref_frame == INTRA_FRAME) || (second_ref_frame > INTRA_FRAME))) {
3221 var_factor *= 2;
3222 }
3223
3224 *this_rd += (*this_rd * var_factor) / 100;
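// Worked example (hypothetical numbers): src_variance = 100 and
// rec_variance = 40 give var_diff = (100 - 40) * 2 = 120, so
// var_factor = (250 * 120) / 100 = 300, clamped to adj_max (250 for film
// content); the rd value is then increased by var_factor percent,
// here 250%.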
3225
3226 (void)xd;
3227 }
3228 #endif // !CONFIG_REALTIME_ONLY
3229
3230 // Do we have an internal image edge (e.g. formatting bars).
3231 int vp9_internal_image_edge(VP9_COMP *cpi) {
3232 return (cpi->oxcf.pass == 2) &&
3233 ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
3234 (cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
3235 }
3236
3237 // Checks to see if a super block is on a horizontal image edge.
3238 // In most cases this is the "real" edge unless there are formatting
3239 // bars embedded in the stream.
3240 int vp9_active_h_edge(VP9_COMP *cpi, int mi_row, int mi_step) {
3241 int top_edge = 0;
3242 int bottom_edge = cpi->common.mi_rows;
3243 int is_active_h_edge = 0;
3244
3245 // For two pass account for any formatting bars detected.
3246 if (cpi->oxcf.pass == 2) {
3247 TWO_PASS *twopass = &cpi->twopass;
3248
3249 // The inactive region is specified in MBs, not mi units.
3250 // The image edge is in the following MB row.
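// (mi units are 8x8 pixels while MBs are 16x16, hence the factor of 2.)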
3251 top_edge += (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
3252
3253 bottom_edge -= (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
3254 bottom_edge = VPXMAX(top_edge, bottom_edge);
3255 }
3256
3257 if (((top_edge >= mi_row) && (top_edge < (mi_row + mi_step))) ||
3258 ((bottom_edge >= mi_row) && (bottom_edge < (mi_row + mi_step)))) {
3259 is_active_h_edge = 1;
3260 }
3261 return is_active_h_edge;
3262 }
3263
3264 // Checks to see if a super block is on a vertical image edge.
3265 // In most cases this is the "real" edge unless there are formatting
3266 // bars embedded in the stream.
3267 int vp9_active_v_edge(VP9_COMP *cpi, int mi_col, int mi_step) {
3268 int left_edge = 0;
3269 int right_edge = cpi->common.mi_cols;
3270 int is_active_v_edge = 0;
3271
3272 // For two pass account for any formatting bars detected.
3273 if (cpi->oxcf.pass == 2) {
3274 TWO_PASS *twopass = &cpi->twopass;
3275
3276 // The inactive region is specified in MBs, not mi units.
3277 // The image edge is in the following MB column.
3278 left_edge += (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
3279
3280 right_edge -= (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
3281 right_edge = VPXMAX(left_edge, right_edge);
3282 }
3283
3284 if (((left_edge >= mi_col) && (left_edge < (mi_col + mi_step))) ||
3285 ((right_edge >= mi_col) && (right_edge < (mi_col + mi_step)))) {
3286 is_active_v_edge = 1;
3287 }
3288 return is_active_v_edge;
3289 }
3290
3291 // Checks to see if a super block is at the edge of the active image.
3292 // In most cases this is the "real" edge unless there are formatting
3293 // bars embedded in the stream.
3294 int vp9_active_edge_sb(VP9_COMP *cpi, int mi_row, int mi_col) {
3295 return vp9_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) ||
3296 vp9_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE);
3297 }
3298
3299 #if !CONFIG_REALTIME_ONLY
3300 void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, TileDataEnc *tile_data,
3301 MACROBLOCK *x, int mi_row, int mi_col,
3302 RD_COST *rd_cost, BLOCK_SIZE bsize,
3303 PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far) {
3304 VP9_COMMON *const cm = &cpi->common;
3305 TileInfo *const tile_info = &tile_data->tile_info;
3306 RD_OPT *const rd_opt = &cpi->rd;
3307 SPEED_FEATURES *const sf = &cpi->sf;
3308 MACROBLOCKD *const xd = &x->e_mbd;
3309 MODE_INFO *const mi = xd->mi[0];
3310 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
3311 const struct segmentation *const seg = &cm->seg;
3312 PREDICTION_MODE this_mode;
3313 MV_REFERENCE_FRAME ref_frame, second_ref_frame;
3314 unsigned char segment_id = mi->segment_id;
3315 int comp_pred, i, k;
3316 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
3317 struct buf_2d yv12_mb[4][MAX_MB_PLANE];
3318 int_mv single_newmv[MAX_REF_FRAMES] = { { 0 } };
3319 INTERP_FILTER single_inter_filter[MB_MODE_COUNT][MAX_REF_FRAMES];
3320 int single_skippable[MB_MODE_COUNT][MAX_REF_FRAMES];
3321 static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
3322 VP9_ALT_FLAG };
3323 int64_t best_rd = best_rd_so_far;
3324 int64_t best_pred_diff[REFERENCE_MODES];
3325 int64_t best_pred_rd[REFERENCE_MODES];
3326 int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
3327 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
3328 MODE_INFO best_mbmode;
3329 int best_mode_skippable = 0;
3330 int midx, best_mode_index = -1;
3331 unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
3332 vpx_prob comp_mode_p;
3333 int64_t best_intra_rd = INT64_MAX;
3334 unsigned int best_pred_sse = UINT_MAX;
3335 PREDICTION_MODE best_intra_mode = DC_PRED;
3336 int rate_uv_intra[TX_SIZES], rate_uv_tokenonly[TX_SIZES];
3337 int64_t dist_uv[TX_SIZES];
3338 int skip_uv[TX_SIZES];
3339 PREDICTION_MODE mode_uv[TX_SIZES];
3340 const int intra_cost_penalty =
3341 vp9_get_intra_cost_penalty(cpi, bsize, cm->base_qindex, cm->y_dc_delta_q);
3342 int best_skip2 = 0;
3343 uint8_t ref_frame_skip_mask[2] = { 0, 1 };
3344 uint16_t mode_skip_mask[MAX_REF_FRAMES] = { 0 };
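// Bit i of ref_frame_skip_mask[0] masks out modes whose first reference is
// frame i; ref_frame_skip_mask[1] does the same for the second reference.
// mode_skip_mask[ref] masks out individual prediction modes per reference.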
3345 int mode_skip_start = sf->mode_skip_start + 1;
3346 const int *const rd_threshes = rd_opt->threshes[segment_id][bsize];
3347 const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
3348 int64_t mode_threshold[MAX_MODES];
3349 int8_t *tile_mode_map = tile_data->mode_map[bsize];
3350 int8_t mode_map[MAX_MODES]; // Maintain mode_map information locally to
3351 // avoid the locking involved in reads from
3352 // tile_mode_map.
3353 const int mode_search_skip_flags = sf->mode_search_skip_flags;
3354 const int is_rect_partition =
3355 num_4x4_blocks_wide_lookup[bsize] != num_4x4_blocks_high_lookup[bsize];
3356 int64_t mask_filter = 0;
3357 int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
3358
3359 struct buf_2d *recon;
3360 struct buf_2d recon_buf;
3361 #if CONFIG_VP9_HIGHBITDEPTH
3362 DECLARE_ALIGNED(16, uint16_t, recon16[64 * 64]);
3363 recon_buf.buf = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH
3364 ? CONVERT_TO_BYTEPTR(recon16)
3365 : (uint8_t *)recon16;
3366 #else
3367 DECLARE_ALIGNED(16, uint8_t, recon8[64 * 64]);
3368 recon_buf.buf = recon8;
3369 #endif // CONFIG_VP9_HIGHBITDEPTH
3370 recon_buf.stride = 64;
3371 recon = cpi->oxcf.content == VP9E_CONTENT_FILM ? &recon_buf : 0;
3372
3373 vp9_zero(best_mbmode);
3374
3375 x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
3376
3377 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
3378
3379 estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
3380 &comp_mode_p);
3381
3382 for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
3383 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
3384 best_filter_rd[i] = INT64_MAX;
3385 for (i = 0; i < TX_SIZES; i++) rate_uv_intra[i] = INT_MAX;
3386 for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
3387 for (i = 0; i < MB_MODE_COUNT; ++i) {
3388 for (k = 0; k < MAX_REF_FRAMES; ++k) {
3389 single_inter_filter[i][k] = SWITCHABLE;
3390 single_skippable[i][k] = 0;
3391 }
3392 }
3393
3394 rd_cost->rate = INT_MAX;
3395
3396 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
3397 x->pred_mv_sad[ref_frame] = INT_MAX;
3398 if ((cpi->ref_frame_flags & flag_list[ref_frame]) &&
3399 !(is_rect_partition && (ctx->skip_ref_frame_mask & (1 << ref_frame)))) {
3400 assert(get_ref_frame_buffer(cpi, ref_frame) != NULL);
3401 setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
3402 frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
3403 }
3404 frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
3405 frame_mv[ZEROMV][ref_frame].as_int = 0;
3406 }
3407
3408 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
3409 if (!(cpi->ref_frame_flags & flag_list[ref_frame])) {
3410 // Skip checking missing references in both single and compound reference
3411 // modes. Note that a mode will be skipped if both reference frames
3412 // are masked out.
3413 ref_frame_skip_mask[0] |= (1 << ref_frame);
3414 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3415 } else if (sf->reference_masking) {
3416 for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
3417 // Skip fixed mv modes for poor references
3418 if ((x->pred_mv_sad[ref_frame] >> 2) > x->pred_mv_sad[i]) {
3419 mode_skip_mask[ref_frame] |= INTER_NEAREST_NEAR_ZERO;
3420 break;
3421 }
3422 }
3423 }
3424 // If the segment reference frame feature is enabled, then mask out the
3425 // current ref frame when it is not the one allowed for this segment.
3426 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
3427 get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
3428 ref_frame_skip_mask[0] |= (1 << ref_frame);
3429 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3430 }
3431 }
3432
3433 // Disable this drop out case if the ref frame
3434 // segment level feature is enabled for this segment. This is to
3435 // prevent the possibility that we end up unable to pick any mode.
3436 if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
3437 // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
3438 // unless ARNR filtering is enabled in which case we want
3439 // an unfiltered alternative. We allow near/nearest as well
3440 // because they may result in zero-zero MVs but be cheaper.
3441 if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
3442 ref_frame_skip_mask[0] = (1 << LAST_FRAME) | (1 << GOLDEN_FRAME);
3443 ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
3444 mode_skip_mask[ALTREF_FRAME] = ~INTER_NEAREST_NEAR_ZERO;
3445 if (frame_mv[NEARMV][ALTREF_FRAME].as_int != 0)
3446 mode_skip_mask[ALTREF_FRAME] |= (1 << NEARMV);
3447 if (frame_mv[NEARESTMV][ALTREF_FRAME].as_int != 0)
3448 mode_skip_mask[ALTREF_FRAME] |= (1 << NEARESTMV);
3449 }
3450 }
3451
3452 if (cpi->rc.is_src_frame_alt_ref) {
3453 if (sf->alt_ref_search_fp) {
3454 mode_skip_mask[ALTREF_FRAME] = 0;
3455 ref_frame_skip_mask[0] = ~(1 << ALTREF_FRAME) & 0xff;
3456 ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
3457 }
3458 }
3459
3460 if (sf->alt_ref_search_fp)
3461 if (!cm->show_frame && x->pred_mv_sad[GOLDEN_FRAME] < INT_MAX)
3462 if (x->pred_mv_sad[ALTREF_FRAME] > (x->pred_mv_sad[GOLDEN_FRAME] << 1))
3463 mode_skip_mask[ALTREF_FRAME] |= INTER_ALL;
3464
3465 if (sf->adaptive_mode_search) {
3466 if (cm->show_frame && !cpi->rc.is_src_frame_alt_ref &&
3467 cpi->rc.frames_since_golden >= 3)
3468 if (x->pred_mv_sad[GOLDEN_FRAME] > (x->pred_mv_sad[LAST_FRAME] << 1))
3469 mode_skip_mask[GOLDEN_FRAME] |= INTER_ALL;
3470 }
3471
3472 if (bsize > sf->max_intra_bsize) {
3473 ref_frame_skip_mask[0] |= (1 << INTRA_FRAME);
3474 ref_frame_skip_mask[1] |= (1 << INTRA_FRAME);
3475 }
3476
3477 mode_skip_mask[INTRA_FRAME] |=
3478 ~(sf->intra_y_mode_mask[max_txsize_lookup[bsize]]);
3479
3480 for (i = 0; i <= LAST_NEW_MV_INDEX; ++i) mode_threshold[i] = 0;
3481
3482 for (i = LAST_NEW_MV_INDEX + 1; i < MAX_MODES; ++i)
3483 mode_threshold[i] = ((int64_t)rd_threshes[i] * rd_thresh_freq_fact[i]) >> 5;
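// Example (hypothetical numbers): rd_threshes[i] = 2000 with
// rd_thresh_freq_fact[i] = 32 gives mode_threshold[i] =
// (2000 * 32) >> 5 = 2000, i.e. the base threshold; modes that are rarely
// picked accumulate a larger freq_fact and so face a higher bar.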
3484
3485 midx = sf->schedule_mode_search ? mode_skip_start : 0;
3486
3487 while (midx > 4) {
3488 uint8_t end_pos = 0;
3489 for (i = 5; i < midx; ++i) {
3490 if (mode_threshold[tile_mode_map[i - 1]] >
3491 mode_threshold[tile_mode_map[i]]) {
3492 uint8_t tmp = tile_mode_map[i];
3493 tile_mode_map[i] = tile_mode_map[i - 1];
3494 tile_mode_map[i - 1] = tmp;
3495 end_pos = i;
3496 }
3497 }
3498 midx = end_pos;
3499 }
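// The loop above bubble-sorts the tail of tile_mode_map (from index 4
// upward) into ascending threshold order, so modes more likely to win are
// searched first; it terminates once a pass makes no swaps (end_pos stays
// 0).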
3500
3501 memcpy(mode_map, tile_mode_map, sizeof(mode_map));
3502
3503 for (midx = 0; midx < MAX_MODES; ++midx) {
3504 int mode_index = mode_map[midx];
3505 int mode_excluded = 0;
3506 int64_t this_rd = INT64_MAX;
3507 int disable_skip = 0;
3508 int compmode_cost = 0;
3509 int rate2 = 0, rate_y = 0, rate_uv = 0;
3510 int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
3511 int skippable = 0;
3512 int this_skip2 = 0;
3513 int64_t total_sse = INT64_MAX;
3514 int early_term = 0;
3515
3516 this_mode = vp9_mode_order[mode_index].mode;
3517 ref_frame = vp9_mode_order[mode_index].ref_frame[0];
3518 second_ref_frame = vp9_mode_order[mode_index].ref_frame[1];
3519
3520 vp9_zero(x->sum_y_eobs);
3521
3522 if (is_rect_partition) {
3523 if (ctx->skip_ref_frame_mask & (1 << ref_frame)) continue;
3524 if (second_ref_frame > 0 &&
3525 (ctx->skip_ref_frame_mask & (1 << second_ref_frame)))
3526 continue;
3527 }
3528
3529 // Look at the reference frame of the best mode so far and set the
3530 // skip mask to look at a subset of the remaining modes.
3531 if (midx == mode_skip_start && best_mode_index >= 0) {
3532 switch (best_mbmode.ref_frame[0]) {
3533 case INTRA_FRAME: break;
3534 case LAST_FRAME: ref_frame_skip_mask[0] |= LAST_FRAME_MODE_MASK; break;
3535 case GOLDEN_FRAME:
3536 ref_frame_skip_mask[0] |= GOLDEN_FRAME_MODE_MASK;
3537 break;
3538 case ALTREF_FRAME: ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK; break;
3539 case NONE:
3540 case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
3541 }
3542 }
3543
3544 if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
3545 (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
3546 continue;
3547
3548 if (mode_skip_mask[ref_frame] & (1 << this_mode)) continue;
3549
3550 // Test best rd so far against threshold for trying this mode.
3551 if (best_mode_skippable && sf->schedule_mode_search)
3552 mode_threshold[mode_index] <<= 1;
3553
3554 if (best_rd < mode_threshold[mode_index]) continue;
3555
3556 // This is only used in the motion vector unit test.
3557 if (cpi->oxcf.motion_vector_unit_test && ref_frame == INTRA_FRAME) continue;
3558
3559 if (sf->motion_field_mode_search) {
3560 const int mi_width = VPXMIN(num_8x8_blocks_wide_lookup[bsize],
3561 tile_info->mi_col_end - mi_col);
3562 const int mi_height = VPXMIN(num_8x8_blocks_high_lookup[bsize],
3563 tile_info->mi_row_end - mi_row);
3564 const int bsl = mi_width_log2_lookup[bsize];
3565 int cb_partition_search_ctrl =
3566 (((mi_row + mi_col) >> bsl) +
3567 get_chessboard_index(cm->current_video_frame)) &
3568 0x1;
3569 MODE_INFO *ref_mi;
3570 int const_motion = 1;
3571 int skip_ref_frame = !cb_partition_search_ctrl;
3572 MV_REFERENCE_FRAME rf = NONE;
3573 int_mv ref_mv;
3574 ref_mv.as_int = INVALID_MV;
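// Scan the row above and the column to the left (enabled on alternating
// blocks via the chessboard index): const_motion tracks whether every
// visible neighbor reuses the same mv and reference as this candidate,
// while skip_ref_frame tracks whether all neighbors agree on a single
// reference frame rf.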
3575
3576 if ((mi_row - 1) >= tile_info->mi_row_start) {
3577 ref_mv = xd->mi[-xd->mi_stride]->mv[0];
3578 rf = xd->mi[-xd->mi_stride]->ref_frame[0];
3579 for (i = 0; i < mi_width; ++i) {
3580 ref_mi = xd->mi[-xd->mi_stride + i];
3581 const_motion &= (ref_mv.as_int == ref_mi->mv[0].as_int) &&
3582 (ref_frame == ref_mi->ref_frame[0]);
3583 skip_ref_frame &= (rf == ref_mi->ref_frame[0]);
3584 }
3585 }
3586
3587 if ((mi_col - 1) >= tile_info->mi_col_start) {
3588 if (ref_mv.as_int == INVALID_MV) ref_mv = xd->mi[-1]->mv[0];
3589 if (rf == NONE) rf = xd->mi[-1]->ref_frame[0];
3590 for (i = 0; i < mi_height; ++i) {
3591 ref_mi = xd->mi[i * xd->mi_stride - 1];
3592 const_motion &= (ref_mv.as_int == ref_mi->mv[0].as_int) &&
3593 (ref_frame == ref_mi->ref_frame[0]);
3594 skip_ref_frame &= (rf == ref_mi->ref_frame[0]);
3595 }
3596 }
3597
3598 if (skip_ref_frame && this_mode != NEARESTMV && this_mode != NEWMV)
3599 if (rf > INTRA_FRAME)
3600 if (ref_frame != rf) continue;
3601
3602 if (const_motion)
3603 if (this_mode == NEARMV || this_mode == ZEROMV) continue;
3604 }
3605
3606 comp_pred = second_ref_frame > INTRA_FRAME;
3607 if (comp_pred) {
3608 if (!cpi->allow_comp_inter_inter) continue;
3609
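// Compound prediction requires the two references to point in opposite
// temporal directions, i.e. their sign biases must differ.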
3610 if (cm->ref_frame_sign_bias[ref_frame] ==
3611 cm->ref_frame_sign_bias[second_ref_frame])
3612 continue;
3613
3614 // Skip compound inter modes if ARF is not available.
3615 if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
3616
3617 // Do not allow compound prediction if the segment level reference frame
3618 // feature is in use as in this case there can only be one reference.
3619 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
3620
3621 if ((mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
3622 best_mode_index >= 0 && best_mbmode.ref_frame[0] == INTRA_FRAME)
3623 continue;
3624
3625 mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
3626 } else {
3627 if (ref_frame != INTRA_FRAME)
3628 mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;
3629 }
3630
3631 if (ref_frame == INTRA_FRAME) {
3632 if (sf->adaptive_mode_search)
3633 if ((x->source_variance << num_pels_log2_lookup[bsize]) > best_pred_sse)
3634 continue;
3635
3636 if (this_mode != DC_PRED) {
3637 // Disable intra modes other than DC_PRED for blocks with low variance.
3638 // The threshold for intra skipping is based on source variance.
3639 // TODO(debargha): Specialize the threshold for super block sizes
3640 const unsigned int skip_intra_var_thresh =
3641 (cpi->oxcf.content == VP9E_CONTENT_FILM) ? 0 : 64;
3642 if ((mode_search_skip_flags & FLAG_SKIP_INTRA_LOWVAR) &&
3643 x->source_variance < skip_intra_var_thresh)
3644 continue;
3645 // Only search the oblique modes if the best so far is
3646 // one of the neighboring directional modes
3647 if ((mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) &&
3648 (this_mode >= D45_PRED && this_mode <= TM_PRED)) {
3649 if (best_mode_index >= 0 && best_mbmode.ref_frame[0] > INTRA_FRAME)
3650 continue;
3651 }
3652 if (mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
3653 if (conditional_skipintra(this_mode, best_intra_mode)) continue;
3654 }
3655 }
3656 } else {
3657 const MV_REFERENCE_FRAME ref_frames[2] = { ref_frame, second_ref_frame };
3658 if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv, this_mode,
3659 ref_frames))
3660 continue;
3661 }
3662
3663 mi->mode = this_mode;
3664 mi->uv_mode = DC_PRED;
3665 mi->ref_frame[0] = ref_frame;
3666 mi->ref_frame[1] = second_ref_frame;
3667 // Evaluate all sub-pel filters irrespective of whether we can use
3668 // them for this frame.
3669 mi->interp_filter =
3670 cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
3671 mi->mv[0].as_int = mi->mv[1].as_int = 0;
3672
3673 x->skip = 0;
3674 set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
3675
3676 // Select prediction reference frames.
3677 for (i = 0; i < MAX_MB_PLANE; i++) {
3678 xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
3679 if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
3680 }
3681
3682 if (ref_frame == INTRA_FRAME) {
3683 TX_SIZE uv_tx;
3684 struct macroblockd_plane *const pd = &xd->plane[1];
3685 memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
3686 super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, NULL, bsize,
3687 best_rd, recon);
3688 if (rate_y == INT_MAX) continue;
3689
3690 uv_tx = uv_txsize_lookup[bsize][mi->tx_size][pd->subsampling_x]
3691 [pd->subsampling_y];
3692 if (rate_uv_intra[uv_tx] == INT_MAX) {
3693 choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, &rate_uv_intra[uv_tx],
3694 &rate_uv_tokenonly[uv_tx], &dist_uv[uv_tx],
3695 &skip_uv[uv_tx], &mode_uv[uv_tx]);
3696 }
3697
3698 rate_uv = rate_uv_tokenonly[uv_tx];
3699 distortion_uv = dist_uv[uv_tx];
3700 skippable = skippable && skip_uv[uv_tx];
3701 mi->uv_mode = mode_uv[uv_tx];
3702
3703 rate2 = rate_y + cpi->mbmode_cost[mi->mode] + rate_uv_intra[uv_tx];
3704 if (this_mode != DC_PRED && this_mode != TM_PRED)
3705 rate2 += intra_cost_penalty;
3706 distortion2 = distortion_y + distortion_uv;
3707 } else {
3708 this_rd = handle_inter_mode(
3709 cpi, x, bsize, &rate2, &distortion2, &skippable, &rate_y, &rate_uv,
3710 recon, &disable_skip, frame_mv, mi_row, mi_col, single_newmv,
3711 single_inter_filter, single_skippable, &total_sse, best_rd,
3712 &mask_filter, filter_cache);
3713 if (this_rd == INT64_MAX) continue;
3714
3715 compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred);
3716
3717 if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
3718 }
3719
3720 // Estimate the reference frame signaling cost and add it
3721 // to the rolling cost variable.
3722 if (comp_pred) {
3723 rate2 += ref_costs_comp[ref_frame];
3724 } else {
3725 rate2 += ref_costs_single[ref_frame];
3726 }
3727
3728 if (!disable_skip) {
3729 const vpx_prob skip_prob = vp9_get_skip_prob(cm, xd);
3730 const int skip_cost0 = vp9_cost_bit(skip_prob, 0);
3731 const int skip_cost1 = vp9_cost_bit(skip_prob, 1);
3732
3733 if (skippable) {
3734 // Back out the coefficient coding costs
3735 rate2 -= (rate_y + rate_uv);
3736
3737 // Cost the skip mb case
3738 rate2 += skip_cost1;
3739 } else if (ref_frame != INTRA_FRAME && !xd->lossless &&
3740 !cpi->oxcf.sharpness) {
3741 if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv + skip_cost0,
3742 distortion2) <
3743 RDCOST(x->rdmult, x->rddiv, skip_cost1, total_sse)) {
3744 // Add in the cost of the no skip flag.
3745 rate2 += skip_cost0;
3746 } else {
3747 // FIXME(rbultje) make this work for splitmv also
3748 assert(total_sse >= 0);
3749
3750 rate2 += skip_cost1;
3751 distortion2 = total_sse;
3752 rate2 -= (rate_y + rate_uv);
3753 this_skip2 = 1;
3754 }
3755 } else {
3756 // Add in the cost of the no skip flag.
3757 rate2 += skip_cost0;
3758 }
3759
3760 // Calculate the final RD estimate for this mode.
3761 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
3762 }
3763
3764 if (recon) {
3765 // In film mode, bias against DC pred and other intra modes if there is
3766 // a significant difference between the variance of the sub blocks in
3767 // the source. Also apply some bias against compound modes, which also
3768 // tend to blur fine texture such as film grain over time.
3769 //
3770 // The sub block test here acts in the case where one or more sub
3771 // blocks have relatively high variance but others relatively low
3772 // variance. Here the high variance sub blocks may push the
3773 // total variance for the current block size over the thresholds
3774 // used in rd_variance_adjustment() below.
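// Example of the energy bias below (hypothetical numbers): with
// max_energy - min_energy = 2, an intra DC_PRED candidate has its rd cost
// tripled (this_rd += this_rd * 2), other intra modes see a 50% increase,
// and compound modes a 25% increase.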
3775 if (cpi->oxcf.content == VP9E_CONTENT_FILM) {
3776 if (bsize >= BLOCK_16X16) {
3777 int min_energy, max_energy;
3778 vp9_get_sub_block_energy(cpi, x, mi_row, mi_col, bsize, &min_energy,
3779 &max_energy);
3780 if (max_energy > min_energy) {
3781 if (ref_frame == INTRA_FRAME) {
3782 if (this_mode == DC_PRED)
3783 this_rd += (this_rd * (max_energy - min_energy));
3784 else
3785 this_rd += (this_rd * (max_energy - min_energy)) / 4;
3786 } else if (second_ref_frame > INTRA_FRAME) {
3787 this_rd += this_rd / 4;
3788 }
3789 }
3790 }
3791 }
3792 // Apply an adjustment to the rd value based on the similarity of the
3793 // source variance and reconstructed variance.
3794 rd_variance_adjustment(cpi, x, bsize, &this_rd, recon, ref_frame,
3795 second_ref_frame, this_mode);
3796 }
3797
3798 if (ref_frame == INTRA_FRAME) {
3799 // Keep record of best intra rd
3800 if (this_rd < best_intra_rd) {
3801 best_intra_rd = this_rd;
3802 best_intra_mode = mi->mode;
3803 }
3804 }
3805
3806 if (!disable_skip && ref_frame == INTRA_FRAME) {
3807 for (i = 0; i < REFERENCE_MODES; ++i)
3808 best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
3809 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
3810 best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd);
3811 }
3812
3813 // Did this mode help, i.e. is it the new best mode so far?
3814 if (this_rd < best_rd || x->skip) {
3815 int max_plane = MAX_MB_PLANE;
3816 if (!mode_excluded) {
3817 // Note index of best mode so far
3818 best_mode_index = mode_index;
3819
3820 if (ref_frame == INTRA_FRAME) {
3821 /* required for left and above block mv */
3822 mi->mv[0].as_int = 0;
3823 max_plane = 1;
3824 // Initialize interp_filter here so we do not have to check for
3825 // inter block modes in get_pred_context_switchable_interp()
3826 mi->interp_filter = SWITCHABLE_FILTERS;
3827 } else {
3828 best_pred_sse = x->pred_sse[ref_frame];
3829 }
3830
3831 rd_cost->rate = rate2;
3832 rd_cost->dist = distortion2;
3833 rd_cost->rdcost = this_rd;
3834 best_rd = this_rd;
3835 best_mbmode = *mi;
3836 best_skip2 = this_skip2;
3837 best_mode_skippable = skippable;
3838
3839 if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
3840 memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mi->tx_size],
3841 sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
3842 ctx->sum_y_eobs = x->sum_y_eobs[mi->tx_size];
3843
3844 // TODO(debargha): enhance this test with a better distortion prediction
3845 // based on qp, activity mask and history
3846 if ((mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
3847 (mode_index > MIN_EARLY_TERM_INDEX)) {
3848 int qstep = xd->plane[0].dequant[1];
3849 // TODO(debargha): Enhance this by specializing for each mode_index
3850 int scale = 4;
3851 #if CONFIG_VP9_HIGHBITDEPTH
3852 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
3853 qstep >>= (xd->bd - 8);
3854 }
3855 #endif // CONFIG_VP9_HIGHBITDEPTH
3856 if (x->source_variance < UINT_MAX) {
3857 const int var_adjust = (x->source_variance < 16);
3858 scale -= var_adjust;
3859 }
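// Terminate early only if the modeled distortion is well below the square
// of one quantizer step: at that point the residual is already near the
// quantization floor and further mode search is unlikely to help.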
3860 if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
3861 early_term = 1;
3862 }
3863 }
3864 }
3865 }
3866
3867 /* keep record of best compound/single-only prediction */
3868 if (!disable_skip && ref_frame != INTRA_FRAME) {
3869 int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;
3870
3871 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
3872 single_rate = rate2 - compmode_cost;
3873 hybrid_rate = rate2;
3874 } else {
3875 single_rate = rate2;
3876 hybrid_rate = rate2 + compmode_cost;
3877 }
3878
3879 single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
3880 hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
3881
3882 if (!comp_pred) {
3883 if (single_rd < best_pred_rd[SINGLE_REFERENCE])
3884 best_pred_rd[SINGLE_REFERENCE] = single_rd;
3885 } else {
3886 if (single_rd < best_pred_rd[COMPOUND_REFERENCE])
3887 best_pred_rd[COMPOUND_REFERENCE] = single_rd;
3888 }
3889 if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
3890 best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
3891
3892 /* keep record of best filter type */
3893 if (!mode_excluded && cm->interp_filter != BILINEAR) {
3894 int64_t ref =
3895 filter_cache[cm->interp_filter == SWITCHABLE ? SWITCHABLE_FILTERS
3896 : cm->interp_filter];
3897
3898 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
3899 int64_t adj_rd;
3900 if (ref == INT64_MAX)
3901 adj_rd = 0;
3902 else if (filter_cache[i] == INT64_MAX)
3903 // When early termination is triggered, the encoder does not have
3904 // access to the rate-distortion cost. It only knows that the cost
3905 // should be above the maximum valid value. Hence it takes the known
3906 // maximum plus an arbitrary constant as the rate-distortion cost.
3907 adj_rd = mask_filter - ref + 10;
3908 else
3909 adj_rd = filter_cache[i] - ref;
3910
3911 adj_rd += this_rd;
3912 best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
3913 }
3914 }
3915 }
3916
3917 if (early_term) break;
3918
3919 if (x->skip && !comp_pred) break;
3920 }
3921
3922 // The inter modes' rate costs are not calculated precisely in some cases.
3923 // Therefore, sometimes, NEWMV is chosen instead of NEARESTMV, NEARMV, and
3924 // ZEROMV. Here, checks are added for those cases, and the mode decisions
3925 // are corrected.
3926 if (best_mbmode.mode == NEWMV) {
3927 const MV_REFERENCE_FRAME refs[2] = { best_mbmode.ref_frame[0],
3928 best_mbmode.ref_frame[1] };
3929 int comp_pred_mode = refs[1] > INTRA_FRAME;
3930
3931 if (frame_mv[NEARESTMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
3932 ((comp_pred_mode &&
3933 frame_mv[NEARESTMV][refs[1]].as_int == best_mbmode.mv[1].as_int) ||
3934 !comp_pred_mode))
3935 best_mbmode.mode = NEARESTMV;
3936 else if (frame_mv[NEARMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
3937 ((comp_pred_mode &&
3938 frame_mv[NEARMV][refs[1]].as_int == best_mbmode.mv[1].as_int) ||
3939 !comp_pred_mode))
3940 best_mbmode.mode = NEARMV;
3941 else if (best_mbmode.mv[0].as_int == 0 &&
3942 ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) ||
3943 !comp_pred_mode))
3944 best_mbmode.mode = ZEROMV;
3945 }
3946
3947 if (best_mode_index < 0 || best_rd >= best_rd_so_far) {
3948 // If adaptive interp filter is enabled, then the current leaf node of 8x8
3949 // data is needed for sub8x8. Hence preserve the context.
3950 #if CONFIG_CONSISTENT_RECODE
3951 if (bsize == BLOCK_8X8) ctx->mic = *xd->mi[0];
3952 #else
3953 if (cpi->row_mt && bsize == BLOCK_8X8) ctx->mic = *xd->mi[0];
3954 #endif
3955 rd_cost->rate = INT_MAX;
3956 rd_cost->rdcost = INT64_MAX;
3957 return;
3958 }
3959
3960 // If we used an estimate for the uv intra rd in the loop above...
3961 if (sf->use_uv_intra_rd_estimate) {
3962 // Do Intra UV best rd mode selection if best mode choice above was intra.
3963 if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
3964 TX_SIZE uv_tx_size;
3965 *mi = best_mbmode;
3966 uv_tx_size = get_uv_tx_size(mi, &xd->plane[1]);
3967 rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra[uv_tx_size],
3968 &rate_uv_tokenonly[uv_tx_size],
3969 &dist_uv[uv_tx_size], &skip_uv[uv_tx_size],
3970 bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
3971 uv_tx_size);
3972 }
3973 }
3974
3975 assert((cm->interp_filter == SWITCHABLE) ||
3976 (cm->interp_filter == best_mbmode.interp_filter) ||
3977 !is_inter_block(&best_mbmode));
3978
3979 if (!cpi->rc.is_src_frame_alt_ref)
3980 vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
3981 sf->adaptive_rd_thresh, bsize, best_mode_index);
3982
3983 // macroblock modes
3984 *mi = best_mbmode;
3985 x->skip |= best_skip2;
3986
3987 for (i = 0; i < REFERENCE_MODES; ++i) {
3988 if (best_pred_rd[i] == INT64_MAX)
3989 best_pred_diff[i] = INT_MIN;
3990 else
3991 best_pred_diff[i] = best_rd - best_pred_rd[i];
3992 }
3993
3994 if (!x->skip) {
3995 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
3996 if (best_filter_rd[i] == INT64_MAX)
3997 best_filter_diff[i] = 0;
3998 else
3999 best_filter_diff[i] = best_rd - best_filter_rd[i];
4000 }
4001 if (cm->interp_filter == SWITCHABLE)
4002 assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
4003 } else {
4004 vp9_zero(best_filter_diff);
4005 }
4006
4007 // TODO(yunqingwang): Moving this line in front of the above best_filter_diff
4008 // updating code causes PSNR loss. Need to figure out the conflict.
4009 x->skip |= best_mode_skippable;
4010
4011 if (!x->skip && !x->select_tx_size) {
4012 int has_high_freq_coeff = 0;
4013 int plane;
4014 int max_plane = is_inter_block(xd->mi[0]) ? MAX_MB_PLANE : 1;
4015 for (plane = 0; plane < max_plane; ++plane) {
4016 x->plane[plane].eobs = ctx->eobs_pbuf[plane][1];
4017 has_high_freq_coeff |= vp9_has_high_freq_in_plane(x, bsize, plane);
4018 }
4019
4020 for (plane = max_plane; plane < MAX_MB_PLANE; ++plane) {
4021 x->plane[plane].eobs = ctx->eobs_pbuf[plane][2];
4022 has_high_freq_coeff |= vp9_has_high_freq_in_plane(x, bsize, plane);
4023 }
4024
4025 best_mode_skippable |= !has_high_freq_coeff;
4026 }
4027
4028 assert(best_mode_index >= 0);
4029
4030 store_coding_context(x, ctx, best_mode_index, best_pred_diff,
4031 best_filter_diff, best_mode_skippable);
4032 }
4033
4034 void vp9_rd_pick_inter_mode_sb_seg_skip(VP9_COMP *cpi, TileDataEnc *tile_data,
4035 MACROBLOCK *x, RD_COST *rd_cost,
4036 BLOCK_SIZE bsize,
4037 PICK_MODE_CONTEXT *ctx,
4038 int64_t best_rd_so_far) {
4039 VP9_COMMON *const cm = &cpi->common;
4040 MACROBLOCKD *const xd = &x->e_mbd;
4041 MODE_INFO *const mi = xd->mi[0];
4042 unsigned char segment_id = mi->segment_id;
4043 const int comp_pred = 0;
4044 int i;
4045 int64_t best_pred_diff[REFERENCE_MODES];
4046 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
4047 unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
4048 vpx_prob comp_mode_p;
4049 INTERP_FILTER best_filter = SWITCHABLE;
4050 int64_t this_rd = INT64_MAX;
4051 int rate2 = 0;
4052 const int64_t distortion2 = 0;
4053
4054 x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
4055
4056 estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
4057 &comp_mode_p);
4058
4059 for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
4060 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i) x->pred_mv_sad[i] = INT_MAX;
4061
4062 rd_cost->rate = INT_MAX;
4063
4064 assert(segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP));
4065
4066 mi->mode = ZEROMV;
4067 mi->uv_mode = DC_PRED;
4068 mi->ref_frame[0] = LAST_FRAME;
4069 mi->ref_frame[1] = NONE;
4070 mi->mv[0].as_int = 0;
4071 x->skip = 1;
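// The segment has SEG_LVL_SKIP active (see the assert above), so the block
// is forced to a skipped ZEROMV/LAST_FRAME prediction; only the
// interpolation filter and the reference/mode signaling costs remain to be
// chosen below.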
4072
4073 ctx->sum_y_eobs = 0;
4074
4075 if (cm->interp_filter != BILINEAR) {
4076 best_filter = EIGHTTAP;
4077 if (cm->interp_filter == SWITCHABLE &&
4078 x->source_variance >= cpi->sf.disable_filter_search_var_thresh) {
4079 int rs;
4080 int best_rs = INT_MAX;
4081 for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
4082 mi->interp_filter = i;
4083 rs = vp9_get_switchable_rate(cpi, xd);
4084 if (rs < best_rs) {
4085 best_rs = rs;
4086 best_filter = mi->interp_filter;
4087 }
4088 }
4089 }
4090 }
4091 // Set the appropriate filter
4092 if (cm->interp_filter == SWITCHABLE) {
4093 mi->interp_filter = best_filter;
4094 rate2 += vp9_get_switchable_rate(cpi, xd);
4095 } else {
4096 mi->interp_filter = cm->interp_filter;
4097 }
4098
4099 if (cm->reference_mode == REFERENCE_MODE_SELECT)
4100 rate2 += vp9_cost_bit(comp_mode_p, comp_pred);
4101
4102 // Estimate the reference frame signaling cost and add it
4103 // to the rolling cost variable.
4104 rate2 += ref_costs_single[LAST_FRAME];
4105 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
4106
4107 rd_cost->rate = rate2;
4108 rd_cost->dist = distortion2;
4109 rd_cost->rdcost = this_rd;
4110
4111 if (this_rd >= best_rd_so_far) {
4112 rd_cost->rate = INT_MAX;
4113 rd_cost->rdcost = INT64_MAX;
4114 return;
4115 }
4116
4117 assert((cm->interp_filter == SWITCHABLE) ||
4118 (cm->interp_filter == mi->interp_filter));
4119
4120 vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
4121 cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
4122
4123 vp9_zero(best_pred_diff);
4124 vp9_zero(best_filter_diff);
4125
4126 if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
4127 store_coding_context(x, ctx, THR_ZEROMV, best_pred_diff, best_filter_diff, 0);
4128 }
4129
4130 void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, TileDataEnc *tile_data,
4131 MACROBLOCK *x, int mi_row, int mi_col,
4132 RD_COST *rd_cost, BLOCK_SIZE bsize,
4133 PICK_MODE_CONTEXT *ctx,
4134 int64_t best_rd_so_far) {
4135 VP9_COMMON *const cm = &cpi->common;
4136 RD_OPT *const rd_opt = &cpi->rd;
4137 SPEED_FEATURES *const sf = &cpi->sf;
4138 MACROBLOCKD *const xd = &x->e_mbd;
4139 MODE_INFO *const mi = xd->mi[0];
4140 const struct segmentation *const seg = &cm->seg;
4141 MV_REFERENCE_FRAME ref_frame, second_ref_frame;
4142 unsigned char segment_id = mi->segment_id;
4143 int comp_pred, i;
4144 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
4145 struct buf_2d yv12_mb[4][MAX_MB_PLANE];
4146 static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
4147 VP9_ALT_FLAG };
4148 int64_t best_rd = best_rd_so_far;
4149 int64_t best_yrd = best_rd_so_far; // FIXME(rbultje) more precise
4150 int64_t best_pred_diff[REFERENCE_MODES];
4151 int64_t best_pred_rd[REFERENCE_MODES];
4152 int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
4153 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
4154 MODE_INFO best_mbmode;
4155 int ref_index, best_ref_index = 0;
4156 unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
4157 vpx_prob comp_mode_p;
4158 INTERP_FILTER tmp_best_filter = SWITCHABLE;
4159 int rate_uv_intra, rate_uv_tokenonly;
4160 int64_t dist_uv;
4161 int skip_uv;
4162 PREDICTION_MODE mode_uv = DC_PRED;
4163 const int intra_cost_penalty =
4164 vp9_get_intra_cost_penalty(cpi, bsize, cm->base_qindex, cm->y_dc_delta_q);
4165 int_mv seg_mvs[4][MAX_REF_FRAMES];
4166 b_mode_info best_bmodes[4];
4167 int best_skip2 = 0;
4168 int ref_frame_skip_mask[2] = { 0 };
4169 int64_t mask_filter = 0;
4170 int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
4171 int internal_active_edge =
4172 vp9_active_edge_sb(cpi, mi_row, mi_col) && vp9_internal_image_edge(cpi);
4173 const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
4174
4175 x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
4176 memset(x->zcoeff_blk[TX_4X4], 0, 4);
4177 vp9_zero(best_mbmode);
4178
4179 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
4180
4181 for (i = 0; i < 4; i++) {
4182 int j;
4183 for (j = 0; j < MAX_REF_FRAMES; j++) seg_mvs[i][j].as_int = INVALID_MV;
4184 }
4185
4186 estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
4187 &comp_mode_p);
4188
4189 for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
4190 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
4191 best_filter_rd[i] = INT64_MAX;
4192 rate_uv_intra = INT_MAX;
4193
4194 rd_cost->rate = INT_MAX;
4195
4196 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
4197 if (cpi->ref_frame_flags & flag_list[ref_frame]) {
4198 setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
4199 frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
4200 } else {
4201 ref_frame_skip_mask[0] |= (1 << ref_frame);
4202 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
4203 }
4204 frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
4205 frame_mv[ZEROMV][ref_frame].as_int = 0;
4206 }
4207
4208 for (ref_index = 0; ref_index < MAX_REFS; ++ref_index) {
4209 int mode_excluded = 0;
4210 int64_t this_rd = INT64_MAX;
4211 int disable_skip = 0;
4212 int compmode_cost = 0;
4213 int rate2 = 0, rate_y = 0, rate_uv = 0;
4214 int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
4215 int skippable = 0;
4216 int i;
4217 int this_skip2 = 0;
4218 int64_t total_sse = INT_MAX;
4219 int early_term = 0;
4220 struct buf_2d backup_yv12[2][MAX_MB_PLANE];
4221
4222 ref_frame = vp9_ref_order[ref_index].ref_frame[0];
4223 second_ref_frame = vp9_ref_order[ref_index].ref_frame[1];
4224
4225 vp9_zero(x->sum_y_eobs);
4226
4227 #if CONFIG_BETTER_HW_COMPATIBILITY
4228 // Forbid 8X4 and 4X8 partitions if any reference frame is scaled.
4229 if (bsize == BLOCK_8X4 || bsize == BLOCK_4X8) {
4230 int ref_scaled = ref_frame > INTRA_FRAME &&
4231 vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf);
4232 if (second_ref_frame > INTRA_FRAME)
4233 ref_scaled += vp9_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf);
4234 if (ref_scaled) continue;
4235 }
4236 #endif
4237 // Look at the reference frame of the best mode so far and set the
4238 // skip mask to look at a subset of the remaining modes.
4239 if (ref_index > 2 && sf->mode_skip_start < MAX_MODES) {
4240 if (ref_index == 3) {
4241 switch (best_mbmode.ref_frame[0]) {
4242 case INTRA_FRAME: break;
4243 case LAST_FRAME:
4244 ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME);
4245 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
4246 break;
4247 case GOLDEN_FRAME:
4248 ref_frame_skip_mask[0] |= (1 << LAST_FRAME) | (1 << ALTREF_FRAME);
4249 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
4250 break;
4251 case ALTREF_FRAME:
4252 ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << LAST_FRAME);
4253 break;
4254 case NONE:
4255 case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
4256 }
4257 }
4258 }
4259
4260 if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
4261 (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
4262 continue;
4263
4264 // Test best rd so far against threshold for trying this mode.
4265 if (!internal_active_edge &&
4266 rd_less_than_thresh(best_rd,
4267 rd_opt->threshes[segment_id][bsize][ref_index],
4268 &rd_thresh_freq_fact[ref_index]))
4269 continue;
4270
4271 // This is only used in the motion vector unit test.
4272 if (cpi->oxcf.motion_vector_unit_test && ref_frame == INTRA_FRAME) continue;
4273
4274 comp_pred = second_ref_frame > INTRA_FRAME;
4275 if (comp_pred) {
4276 if (!cpi->allow_comp_inter_inter) continue;
4277
4278 if (cm->ref_frame_sign_bias[ref_frame] ==
4279 cm->ref_frame_sign_bias[second_ref_frame])
4280 continue;
4281
4282 if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
4283 // Do not allow compound prediction if the segment level reference frame
4284 // feature is in use as in this case there can only be one reference.
4285 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
4286
4287 if ((sf->mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
4288 best_mbmode.ref_frame[0] == INTRA_FRAME)
4289 continue;
4290 }
4291
4292 if (comp_pred)
4293 mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
4294 else if (ref_frame != INTRA_FRAME)
4295 mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;
4296
4297 // If the segment reference frame feature is enabled, then skip the
4298 // current ref frame if it is not the one allowed for this segment.
4299 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
4300 get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
4301 continue;
4302 // Disable this drop out case if the ref frame
4303 // segment level feature is enabled for this segment. This is to
4304 // prevent the possibility that we end up unable to pick any mode.
4305 } else if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
4306 // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
4307 // unless ARNR filtering is enabled in which case we want
4308 // an unfiltered alternative. We allow near/nearest as well
4309 // because they may result in zero-zero MVs but be cheaper.
4310 if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
4311 continue;
4312 }
4313
4314 mi->tx_size = TX_4X4;
4315 mi->uv_mode = DC_PRED;
4316 mi->ref_frame[0] = ref_frame;
4317 mi->ref_frame[1] = second_ref_frame;
4318 // Evaluate all sub-pel filters irrespective of whether we can use
4319 // them for this frame.
4320 mi->interp_filter =
4321 cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
4322 x->skip = 0;
4323 set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
4324
4325 // Select prediction reference frames.
4326 for (i = 0; i < MAX_MB_PLANE; i++) {
4327 xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
4328 if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
4329 }
4330
4331 if (ref_frame == INTRA_FRAME) {
4332 int rate;
4333 if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y, &distortion_y,
4334 best_rd) >= best_rd)
4335 continue;
4336 rate2 += rate;
4337 rate2 += intra_cost_penalty;
4338 distortion2 += distortion_y;
4339
4340 if (rate_uv_intra == INT_MAX) {
4341 choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4, &rate_uv_intra,
4342 &rate_uv_tokenonly, &dist_uv, &skip_uv, &mode_uv);
4343 }
4344 rate2 += rate_uv_intra;
4345 rate_uv = rate_uv_tokenonly;
4346 distortion2 += dist_uv;
4347 distortion_uv = dist_uv;
4348 mi->uv_mode = mode_uv;
4349 } else {
4350 int rate;
4351 int64_t distortion;
4352 int64_t this_rd_thresh;
4353 int64_t tmp_rd, tmp_best_rd = INT64_MAX, tmp_best_rdu = INT64_MAX;
4354 int tmp_best_rate = INT_MAX, tmp_best_ratey = INT_MAX;
4355 int64_t tmp_best_distortion = INT_MAX, tmp_best_sse, uv_sse;
4356 int tmp_best_skippable = 0;
4357 int switchable_filter_index;
4358 int_mv *second_ref =
4359 comp_pred ? &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL;
4360 b_mode_info tmp_best_bmodes[16];
4361 MODE_INFO tmp_best_mbmode;
4362 BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
4363 int pred_exists = 0;
4364 int uv_skippable;
4365
4366 YV12_BUFFER_CONFIG *scaled_ref_frame[2] = { NULL, NULL };
4367 int ref;
4368
4369 for (ref = 0; ref < 2; ++ref) {
4370 scaled_ref_frame[ref] =
4371 mi->ref_frame[ref] > INTRA_FRAME
4372 ? vp9_get_scaled_ref_frame(cpi, mi->ref_frame[ref])
4373 : NULL;
4374
4375 if (scaled_ref_frame[ref]) {
4376 int i;
4377 // Swap out the reference frame for a version that's been scaled to
4378 // match the resolution of the current frame, allowing the existing
4379 // motion search code to be used without additional modifications.
4380 for (i = 0; i < MAX_MB_PLANE; i++)
4381 backup_yv12[ref][i] = xd->plane[i].pre[ref];
4382 vp9_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
4383 NULL);
4384 }
4385 }
4386
4387 this_rd_thresh = (ref_frame == LAST_FRAME)
4388 ? rd_opt->threshes[segment_id][bsize][THR_LAST]
4389 : rd_opt->threshes[segment_id][bsize][THR_ALTR];
4390 this_rd_thresh = (ref_frame == GOLDEN_FRAME)
4391 ? rd_opt->threshes[segment_id][bsize][THR_GOLD]
4392 : this_rd_thresh;
4393 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
4394 filter_cache[i] = INT64_MAX;
4395
      if (cm->interp_filter != BILINEAR) {
        tmp_best_filter = EIGHTTAP;
        if (x->source_variance < sf->disable_filter_search_var_thresh) {
          tmp_best_filter = EIGHTTAP;
        } else if (sf->adaptive_pred_interp_filter == 1 &&
                   ctx->pred_interp_filter < SWITCHABLE) {
          tmp_best_filter = ctx->pred_interp_filter;
        } else if (sf->adaptive_pred_interp_filter == 2) {
          tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE
                                ? ctx->pred_interp_filter
                                : 0;
        } else {
          for (switchable_filter_index = 0;
               switchable_filter_index < SWITCHABLE_FILTERS;
               ++switchable_filter_index) {
            int newbest, rs;
            int64_t rs_rd;
            MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
            mi->interp_filter = switchable_filter_index;
            tmp_rd = rd_pick_best_sub8x8_mode(
                cpi, x, &mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
                &rate, &rate_y, &distortion, &skippable, &total_sse,
                (int)this_rd_thresh, seg_mvs, bsi, switchable_filter_index,
                mi_row, mi_col);

            if (tmp_rd == INT64_MAX) continue;
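            // Cache this filter's rd cost; the extra SWITCHABLE_FILTERS slot
            // tracks the best cost with the filter signaling rate included,
            // feeding the filter-diff bookkeeping at the end of the loop.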
            rs = vp9_get_switchable_rate(cpi, xd);
            rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
            filter_cache[switchable_filter_index] = tmp_rd;
            filter_cache[SWITCHABLE_FILTERS] =
                VPXMIN(filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd);
            if (cm->interp_filter == SWITCHABLE) tmp_rd += rs_rd;

            mask_filter = VPXMAX(mask_filter, tmp_rd);

            newbest = (tmp_rd < tmp_best_rd);
            if (newbest) {
              tmp_best_filter = mi->interp_filter;
              tmp_best_rd = tmp_rd;
            }
            if ((newbest && cm->interp_filter == SWITCHABLE) ||
                (mi->interp_filter == cm->interp_filter &&
                 cm->interp_filter != SWITCHABLE)) {
              tmp_best_rdu = tmp_rd;
              tmp_best_rate = rate;
              tmp_best_ratey = rate_y;
              tmp_best_distortion = distortion;
              tmp_best_sse = total_sse;
              tmp_best_skippable = skippable;
              tmp_best_mbmode = *mi;
              for (i = 0; i < 4; i++) {
                tmp_best_bmodes[i] = xd->mi[0]->bmi[i];
                x->zcoeff_blk[TX_4X4][i] = !x->plane[0].eobs[i];
                x->sum_y_eobs[TX_4X4] += x->plane[0].eobs[i];
              }
              pred_exists = 1;
              if (switchable_filter_index == 0 && sf->use_rd_breakout &&
                  best_rd < INT64_MAX) {
                if (tmp_best_rdu / 2 > best_rd) {
                  // Skip searching the other filters if the first is already
                  // substantially larger than the best so far.
                  tmp_best_filter = mi->interp_filter;
                  tmp_best_rdu = INT64_MAX;
                  break;
                }
              }
            }
          }  // switchable_filter_index loop
        }
      }

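      // tmp_best_rdu == INT64_MAX with a prediction already formed means the
      // rd breakout above fired, so this reference combination is abandoned.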
      if (tmp_best_rdu == INT64_MAX && pred_exists) continue;

      mi->interp_filter = (cm->interp_filter == SWITCHABLE ? tmp_best_filter
                                                           : cm->interp_filter);
      if (!pred_exists) {
        // Handles the special case when a filter that is not in the
        // switchable list (bilinear) is indicated at the frame level.
        tmp_rd = rd_pick_best_sub8x8_mode(
            cpi, x, &x->mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
            &rate, &rate_y, &distortion, &skippable, &total_sse,
            (int)this_rd_thresh, seg_mvs, bsi, 0, mi_row, mi_col);
        if (tmp_rd == INT64_MAX) continue;
      } else {
        total_sse = tmp_best_sse;
        rate = tmp_best_rate;
        rate_y = tmp_best_ratey;
        distortion = tmp_best_distortion;
        skippable = tmp_best_skippable;
        *mi = tmp_best_mbmode;
        for (i = 0; i < 4; i++) xd->mi[0]->bmi[i] = tmp_best_bmodes[i];
      }

      rate2 += rate;
      distortion2 += distortion;

      if (cm->interp_filter == SWITCHABLE)
        rate2 += vp9_get_switchable_rate(cpi, xd);

      if (!mode_excluded)
        mode_excluded = comp_pred ? cm->reference_mode == SINGLE_REFERENCE
                                  : cm->reference_mode == COMPOUND_REFERENCE;

      compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred);

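      // Remaining rd budget for the UV planes: the best rd so far minus the
      // cheaper of coding the Y residual or skipping it and paying the full
      // sse as distortion.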
      tmp_best_rdu =
          best_rd - VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
                           RDCOST(x->rdmult, x->rddiv, 0, total_sse));

      if (tmp_best_rdu > 0) {
        // If even the 'Y' rd value of split is higher than the best so far,
        // don't bother looking at UV.
        vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
        memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
        if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
                              &uv_sse, BLOCK_8X8, tmp_best_rdu)) {
          for (ref = 0; ref < 2; ++ref) {
            if (scaled_ref_frame[ref]) {
              int i;
              for (i = 0; i < MAX_MB_PLANE; ++i)
                xd->plane[i].pre[ref] = backup_yv12[ref][i];
            }
          }
          continue;
        }

        rate2 += rate_uv;
        distortion2 += distortion_uv;
        skippable = skippable && uv_skippable;
        total_sse += uv_sse;
      }

      for (ref = 0; ref < 2; ++ref) {
        if (scaled_ref_frame[ref]) {
          // Restore the prediction frame pointers to their unscaled versions.
          int i;
          for (i = 0; i < MAX_MB_PLANE; ++i)
            xd->plane[i].pre[ref] = backup_yv12[ref][i];
        }
      }
    }

    if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;

    // Estimate the reference frame signaling cost and add it
    // to the rolling cost variable.
    if (second_ref_frame > INTRA_FRAME) {
      rate2 += ref_costs_comp[ref_frame];
    } else {
      rate2 += ref_costs_single[ref_frame];
    }

    if (!disable_skip) {
      const vpx_prob skip_prob = vp9_get_skip_prob(cm, xd);
      const int skip_cost0 = vp9_cost_bit(skip_prob, 0);
      const int skip_cost1 = vp9_cost_bit(skip_prob, 1);

      // Skip is never coded at the segment level for sub8x8 blocks and is
      // instead always coded in the bitstream at the mode info level.
      if (ref_frame != INTRA_FRAME && !xd->lossless) {
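        // Compare coding the coefficients plus a skip=0 flag against a
        // skip=1 flag with the full source sse taken as distortion. RDCOST
        // folds the two terms together as (roughly) lambda * rate +
        // distortion, with x->rdmult / x->rddiv supplying the fixed-point
        // lambda scaling; the exact rounding lives in vp9/encoder/vp9_rd.h.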
        if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv + skip_cost0,
                   distortion2) <
            RDCOST(x->rdmult, x->rddiv, skip_cost1, total_sse)) {
          // Add in the cost of the no skip flag.
          rate2 += skip_cost0;
        } else {
          // FIXME(rbultje) make this work for splitmv also
          rate2 += skip_cost1;
          distortion2 = total_sse;
          assert(total_sse >= 0);
          rate2 -= (rate_y + rate_uv);
          rate_y = 0;
          rate_uv = 0;
          this_skip2 = 1;
        }
      } else {
        // Add in the cost of the no skip flag.
        rate2 += skip_cost0;
      }

      // Calculate the final RD estimate for this mode.
      this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
    }

    if (!disable_skip && ref_frame == INTRA_FRAME) {
      for (i = 0; i < REFERENCE_MODES; ++i)
        best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
        best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd);
    }

    // Did this mode help, i.e. is it the new best mode so far?
    if (this_rd < best_rd || x->skip) {
      if (!mode_excluded) {
        int max_plane = MAX_MB_PLANE;
        // Note index of best mode so far
        best_ref_index = ref_index;

        if (ref_frame == INTRA_FRAME) {
          /* required for left and above block mv */
          mi->mv[0].as_int = 0;
          max_plane = 1;
          // Initialize interp_filter here so we do not have to check for
          // inter block modes in get_pred_context_switchable_interp()
          mi->interp_filter = SWITCHABLE_FILTERS;
        }

        rd_cost->rate = rate2;
        rd_cost->dist = distortion2;
        rd_cost->rdcost = this_rd;
        best_rd = this_rd;
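        // Strip the UV contribution back out so later sub8x8 searches are
        // bounded by a luma-only budget.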
        best_yrd =
            best_rd - RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
        best_mbmode = *mi;
        best_skip2 = this_skip2;
        if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
        memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
               sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
        ctx->sum_y_eobs = x->sum_y_eobs[TX_4X4];

        for (i = 0; i < 4; i++) best_bmodes[i] = xd->mi[0]->bmi[i];

        // TODO(debargha): enhance this test with a better distortion
        // prediction based on qp, activity mask and history
        if ((sf->mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
            (ref_index > MIN_EARLY_TERM_INDEX)) {
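          // Early-termination heuristic: if the distortion, scaled up by a
          // small variance-dependent factor, is still below the squared AC
          // quantizer step, later modes are unlikely to improve on this one.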
          int qstep = xd->plane[0].dequant[1];
          // TODO(debargha): Enhance this by specializing for each mode_index
          int scale = 4;
#if CONFIG_VP9_HIGHBITDEPTH
          if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
            qstep >>= (xd->bd - 8);
          }
#endif  // CONFIG_VP9_HIGHBITDEPTH
          if (x->source_variance < UINT_MAX) {
            const int var_adjust = (x->source_variance < 16);
            scale -= var_adjust;
          }
          if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
            early_term = 1;
          }
        }
      }
    }

    /* keep record of best compound/single-only prediction */
    if (!disable_skip && ref_frame != INTRA_FRAME) {
      int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;

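      // Measure this mode under each reference-mode signaling assumption:
      // single/compound coding drops the compound-mode bit, while
      // REFERENCE_MODE_SELECT has to pay for it.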
      if (cm->reference_mode == REFERENCE_MODE_SELECT) {
        single_rate = rate2 - compmode_cost;
        hybrid_rate = rate2;
      } else {
        single_rate = rate2;
        hybrid_rate = rate2 + compmode_cost;
      }

      single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
      hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);

      if (!comp_pred && single_rd < best_pred_rd[SINGLE_REFERENCE])
        best_pred_rd[SINGLE_REFERENCE] = single_rd;
      else if (comp_pred && single_rd < best_pred_rd[COMPOUND_REFERENCE])
        best_pred_rd[COMPOUND_REFERENCE] = single_rd;

      if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
        best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
    }

    /* keep record of best filter type */
    if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME &&
        cm->interp_filter != BILINEAR) {
      int64_t ref =
          filter_cache[cm->interp_filter == SWITCHABLE ? SWITCHABLE_FILTERS
                                                       : cm->interp_filter];
      int64_t adj_rd;
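      // Express each filter's cached rd as a diff against the filter
      // actually used, then fold in this mode's rd so best_filter_rd
      // accumulates totals that are comparable across modes.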
      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
        if (ref == INT64_MAX)
          adj_rd = 0;
        else if (filter_cache[i] == INT64_MAX)
          // When early termination is triggered, the encoder does not have
          // access to the rate-distortion cost. It only knows that the cost
          // should be above the maximum valid value. Hence it takes the known
          // maximum plus an arbitrary constant as the rate-distortion cost.
          adj_rd = mask_filter - ref + 10;
        else
          adj_rd = filter_cache[i] - ref;

        adj_rd += this_rd;
        best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
      }
    }

    if (early_term) break;

    if (x->skip && !comp_pred) break;
  }

  if (best_rd >= best_rd_so_far) {
    rd_cost->rate = INT_MAX;
    rd_cost->rdcost = INT64_MAX;
    return;
  }

  // If we used an estimate for the uv intra rd in the loop above...
  if (sf->use_uv_intra_rd_estimate) {
    // Do Intra UV best rd mode selection if best mode choice above was intra.
    if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
      *mi = best_mbmode;
      rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra, &rate_uv_tokenonly,
                              &dist_uv, &skip_uv, BLOCK_8X8, TX_4X4);
    }
  }

  if (best_rd == INT64_MAX) {
    rd_cost->rate = INT_MAX;
    rd_cost->dist = INT64_MAX;
    rd_cost->rdcost = INT64_MAX;
    return;
  }

  assert((cm->interp_filter == SWITCHABLE) ||
         (cm->interp_filter == best_mbmode.interp_filter) ||
         !is_inter_block(&best_mbmode));

  vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
                            sf->adaptive_rd_thresh, bsize, best_ref_index);

  // macroblock modes
  *mi = best_mbmode;
  x->skip |= best_skip2;
  if (!is_inter_block(&best_mbmode)) {
    for (i = 0; i < 4; i++) xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
  } else {
    for (i = 0; i < 4; ++i)
      memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i], sizeof(b_mode_info));

    mi->mv[0].as_int = xd->mi[0]->bmi[3].as_mv[0].as_int;
    mi->mv[1].as_int = xd->mi[0]->bmi[3].as_mv[1].as_int;
  }

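  // The rd diffs computed below feed the coding context stored at the end;
  // INT_MIN marks a reference mode that was never measured.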
  for (i = 0; i < REFERENCE_MODES; ++i) {
    if (best_pred_rd[i] == INT64_MAX)
      best_pred_diff[i] = INT_MIN;
    else
      best_pred_diff[i] = best_rd - best_pred_rd[i];
  }

  if (!x->skip) {
    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
      if (best_filter_rd[i] == INT64_MAX)
        best_filter_diff[i] = 0;
      else
        best_filter_diff[i] = best_rd - best_filter_rd[i];
    }
    if (cm->interp_filter == SWITCHABLE)
      assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
  } else {
    vp9_zero(best_filter_diff);
  }

  store_coding_context(x, ctx, best_ref_index, best_pred_diff,
                       best_filter_diff, 0);
}
#endif  // !CONFIG_REALTIME_ONLY
