1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include <assert.h>
12 #include <math.h>
13
14 #include "./vp9_rtcd.h"
15 #include "./vpx_dsp_rtcd.h"
16
17 #include "vpx_dsp/vpx_dsp_common.h"
18 #include "vpx_mem/vpx_mem.h"
19 #include "vpx_ports/mem.h"
20 #include "vpx_ports/system_state.h"
21
22 #include "vp9/common/vp9_common.h"
23 #include "vp9/common/vp9_entropy.h"
24 #include "vp9/common/vp9_entropymode.h"
25 #include "vp9/common/vp9_idct.h"
26 #include "vp9/common/vp9_mvref_common.h"
27 #include "vp9/common/vp9_pred_common.h"
28 #include "vp9/common/vp9_quant_common.h"
29 #include "vp9/common/vp9_reconinter.h"
30 #include "vp9/common/vp9_reconintra.h"
31 #include "vp9/common/vp9_scan.h"
32 #include "vp9/common/vp9_seg_common.h"
33
34 #include "vp9/encoder/vp9_cost.h"
35 #include "vp9/encoder/vp9_encodemb.h"
36 #include "vp9/encoder/vp9_encodemv.h"
37 #include "vp9/encoder/vp9_encoder.h"
38 #include "vp9/encoder/vp9_mcomp.h"
39 #include "vp9/encoder/vp9_quantize.h"
40 #include "vp9/encoder/vp9_ratectrl.h"
41 #include "vp9/encoder/vp9_rd.h"
42 #include "vp9/encoder/vp9_rdopt.h"
43 #include "vp9/encoder/vp9_aq_variance.h"
44
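// Reference-frame pruning masks used by the mode search: a set bit marks a
// reference frame whose modes are skipped once the search has committed to
// the reference named in the macro.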
45 #define LAST_FRAME_MODE_MASK \
46 ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
47 #define GOLDEN_FRAME_MODE_MASK \
48 ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
49 #define ALT_REF_MODE_MASK \
50 ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | (1 << INTRA_FRAME))
51
52 #define SECOND_REF_FRAME_MASK ((1 << ALTREF_FRAME) | 0x01)
53
54 #define MIN_EARLY_TERM_INDEX 3
55 #define NEW_MV_DISCOUNT_FACTOR 8
56
57 typedef struct {
58 PREDICTION_MODE mode;
59 MV_REFERENCE_FRAME ref_frame[2];
60 } MODE_DEFINITION;
61
62 typedef struct { MV_REFERENCE_FRAME ref_frame[2]; } REF_DEFINITION;
63
64 struct rdcost_block_args {
65 const VP9_COMP *cpi;
66 MACROBLOCK *x;
67 ENTROPY_CONTEXT t_above[16];
68 ENTROPY_CONTEXT t_left[16];
69 int this_rate;
70 int64_t this_dist;
71 int64_t this_sse;
72 int64_t this_rd;
73 int64_t best_rd;
74 int exit_early;
75 int use_fast_coef_costing;
76 const scan_order *so;
77 uint8_t skippable;
78 };
79
80 #define LAST_NEW_MV_INDEX 6
81 static const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
82 { NEARESTMV, { LAST_FRAME, NONE } },
83 { NEARESTMV, { ALTREF_FRAME, NONE } },
84 { NEARESTMV, { GOLDEN_FRAME, NONE } },
85
86 { DC_PRED, { INTRA_FRAME, NONE } },
87
88 { NEWMV, { LAST_FRAME, NONE } },
89 { NEWMV, { ALTREF_FRAME, NONE } },
90 { NEWMV, { GOLDEN_FRAME, NONE } },
91
92 { NEARMV, { LAST_FRAME, NONE } },
93 { NEARMV, { ALTREF_FRAME, NONE } },
94 { NEARMV, { GOLDEN_FRAME, NONE } },
95
96 { ZEROMV, { LAST_FRAME, NONE } },
97 { ZEROMV, { GOLDEN_FRAME, NONE } },
98 { ZEROMV, { ALTREF_FRAME, NONE } },
99
100 { NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
101 { NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } },
102
103 { TM_PRED, { INTRA_FRAME, NONE } },
104
105 { NEARMV, { LAST_FRAME, ALTREF_FRAME } },
106 { NEWMV, { LAST_FRAME, ALTREF_FRAME } },
107 { NEARMV, { GOLDEN_FRAME, ALTREF_FRAME } },
108 { NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } },
109
110 { ZEROMV, { LAST_FRAME, ALTREF_FRAME } },
111 { ZEROMV, { GOLDEN_FRAME, ALTREF_FRAME } },
112
113 { H_PRED, { INTRA_FRAME, NONE } },
114 { V_PRED, { INTRA_FRAME, NONE } },
115 { D135_PRED, { INTRA_FRAME, NONE } },
116 { D207_PRED, { INTRA_FRAME, NONE } },
117 { D153_PRED, { INTRA_FRAME, NONE } },
118 { D63_PRED, { INTRA_FRAME, NONE } },
119 { D117_PRED, { INTRA_FRAME, NONE } },
120 { D45_PRED, { INTRA_FRAME, NONE } },
121 };
122
123 static const REF_DEFINITION vp9_ref_order[MAX_REFS] = {
124 { { LAST_FRAME, NONE } }, { { GOLDEN_FRAME, NONE } },
125 { { ALTREF_FRAME, NONE } }, { { LAST_FRAME, ALTREF_FRAME } },
126 { { GOLDEN_FRAME, ALTREF_FRAME } }, { { INTRA_FRAME, NONE } },
127 };
128
129 static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int m, int n,
130 int min_plane, int max_plane) {
131 int i;
132
133 for (i = min_plane; i < max_plane; ++i) {
134 struct macroblock_plane *const p = &x->plane[i];
135 struct macroblockd_plane *const pd = &x->e_mbd.plane[i];
136
137 p->coeff = ctx->coeff_pbuf[i][m];
138 p->qcoeff = ctx->qcoeff_pbuf[i][m];
139 pd->dqcoeff = ctx->dqcoeff_pbuf[i][m];
140 p->eobs = ctx->eobs_pbuf[i][m];
141
142 ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n];
143 ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n];
144 ctx->dqcoeff_pbuf[i][m] = ctx->dqcoeff_pbuf[i][n];
145 ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n];
146
147 ctx->coeff_pbuf[i][n] = p->coeff;
148 ctx->qcoeff_pbuf[i][n] = p->qcoeff;
149 ctx->dqcoeff_pbuf[i][n] = pd->dqcoeff;
150 ctx->eobs_pbuf[i][n] = p->eobs;
151 }
152 }
153
154 static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
155 MACROBLOCKD *xd, int *out_rate_sum,
156 int64_t *out_dist_sum, int *skip_txfm_sb,
157 int64_t *skip_sse_sb) {
158 // Note our transform coeffs are 8 times an orthogonal transform.
159 // Hence quantizer step is also 8 times. To get effective quantizer
160 // we need to divide by 8 before sending to modeling function.
161 int i;
162 int64_t rate_sum = 0;
163 int64_t dist_sum = 0;
164 const int ref = xd->mi[0]->ref_frame[0];
165 unsigned int sse;
166 unsigned int var = 0;
167 int64_t total_sse = 0;
168 int skip_flag = 1;
169 const int shift = 6;
170 int64_t dist;
171 const int dequant_shift =
172 #if CONFIG_VP9_HIGHBITDEPTH
173 (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd - 5 :
174 #endif // CONFIG_VP9_HIGHBITDEPTH
175 3;
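// pd->dequant[] carries 3 fractional bits (plus 2/4 extra bits at 10/12-bit
// depth), so shifting by dequant_shift recovers the effective spatial-domain
// quantizer step referred to in the note above.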
176 unsigned int qstep_vec[MAX_MB_PLANE];
177 unsigned int nlog2_vec[MAX_MB_PLANE];
178 unsigned int sum_sse_vec[MAX_MB_PLANE];
179 int any_zero_sum_sse = 0;
180
181 x->pred_sse[ref] = 0;
182
183 for (i = 0; i < MAX_MB_PLANE; ++i) {
184 struct macroblock_plane *const p = &x->plane[i];
185 struct macroblockd_plane *const pd = &xd->plane[i];
186 const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
187 const TX_SIZE max_tx_size = max_txsize_lookup[bs];
188 const BLOCK_SIZE unit_size = txsize_to_bsize[max_tx_size];
189 const int64_t dc_thr = p->quant_thred[0] >> shift;
190 const int64_t ac_thr = p->quant_thred[1] >> shift;
191 unsigned int sum_sse = 0;
192 // The low thresholds are used to measure if the prediction errors are
193 // low enough so that we can skip the mode search.
194 const int64_t low_dc_thr = VPXMIN(50, dc_thr >> 2);
195 const int64_t low_ac_thr = VPXMIN(80, ac_thr >> 2);
196 int bw = 1 << (b_width_log2_lookup[bs] - b_width_log2_lookup[unit_size]);
197 int bh = 1 << (b_height_log2_lookup[bs] - b_width_log2_lookup[unit_size]);
198 int idx, idy;
199 int lw = b_width_log2_lookup[unit_size] + 2;
200 int lh = b_height_log2_lookup[unit_size] + 2;
201
202 for (idy = 0; idy < bh; ++idy) {
203 for (idx = 0; idx < bw; ++idx) {
204 uint8_t *src = p->src.buf + (idy * p->src.stride << lh) + (idx << lw);
205 uint8_t *dst = pd->dst.buf + (idy * pd->dst.stride << lh) + (idx << lh);
206 int block_idx = (idy << 1) + idx;
207 int low_err_skip = 0;
208
209 var = cpi->fn_ptr[unit_size].vf(src, p->src.stride, dst, pd->dst.stride,
210 &sse);
211 x->bsse[(i << 2) + block_idx] = sse;
212 sum_sse += sse;
213
214 x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_NONE;
215 if (!x->select_tx_size) {
216 // Check if all ac coefficients can be quantized to zero.
217 if (var < ac_thr || var == 0) {
218 x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_ONLY;
219
220 // Check if dc coefficient can be quantized to zero.
221 if (sse - var < dc_thr || sse == var) {
222 x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_DC;
223
224 if (!sse || (var < low_ac_thr && sse - var < low_dc_thr))
225 low_err_skip = 1;
226 }
227 }
228 }
229
230 if (skip_flag && !low_err_skip) skip_flag = 0;
231
232 if (i == 0) x->pred_sse[ref] += sse;
233 }
234 }
235
236 total_sse += sum_sse;
237 sum_sse_vec[i] = sum_sse;
238 any_zero_sum_sse = any_zero_sum_sse || (sum_sse == 0);
239 qstep_vec[i] = pd->dequant[1] >> dequant_shift;
240 nlog2_vec[i] = num_pels_log2_lookup[bs];
241 }
242
243 // Fast approximation of the modelling function.
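// Rate is modelled as decaying linearly with the quantizer step (zero once
// the step reaches 120) and distortion as square_error * qstep / 256.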
244 if (cpi->sf.simple_model_rd_from_var) {
245 for (i = 0; i < MAX_MB_PLANE; ++i) {
246 int64_t rate;
247 const int64_t square_error = sum_sse_vec[i];
248 int quantizer = qstep_vec[i];
249
250 if (quantizer < 120)
251 rate = (square_error * (280 - quantizer)) >> (16 - VP9_PROB_COST_SHIFT);
252 else
253 rate = 0;
254 dist = (square_error * quantizer) >> 8;
255 rate_sum += rate;
256 dist_sum += dist;
257 }
258 } else {
259 if (any_zero_sum_sse) {
260 for (i = 0; i < MAX_MB_PLANE; ++i) {
261 int rate;
262 vp9_model_rd_from_var_lapndz(sum_sse_vec[i], nlog2_vec[i], qstep_vec[i],
263 &rate, &dist);
264 rate_sum += rate;
265 dist_sum += dist;
266 }
267 } else {
268 vp9_model_rd_from_var_lapndz_vec(sum_sse_vec, nlog2_vec, qstep_vec,
269 &rate_sum, &dist_sum);
270 }
271 }
272
273 *skip_txfm_sb = skip_flag;
274 *skip_sse_sb = total_sse << 4;
275 *out_rate_sum = (int)rate_sum;
276 *out_dist_sum = dist_sum << 4;
277 }
278
279 #if CONFIG_VP9_HIGHBITDEPTH
280 int64_t vp9_highbd_block_error_c(const tran_low_t *coeff,
281 const tran_low_t *dqcoeff, intptr_t block_size,
282 int64_t *ssz, int bd) {
283 int i;
284 int64_t error = 0, sqcoeff = 0;
285 int shift = 2 * (bd - 8);
286 int rounding = shift > 0 ? 1 << (shift - 1) : 0;
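// At bit depths above 8 the coefficients carry (bd - 8) extra bits, so the
// accumulated squared values are scaled back by 2 * (bd - 8) bits with
// rounding before being returned.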
287
288 for (i = 0; i < block_size; i++) {
289 const int64_t diff = coeff[i] - dqcoeff[i];
290 error += diff * diff;
291 sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i];
292 }
293 assert(error >= 0 && sqcoeff >= 0);
294 error = (error + rounding) >> shift;
295 sqcoeff = (sqcoeff + rounding) >> shift;
296
297 *ssz = sqcoeff;
298 return error;
299 }
300
301 static int64_t vp9_highbd_block_error_dispatch(const tran_low_t *coeff,
302 const tran_low_t *dqcoeff,
303 intptr_t block_size,
304 int64_t *ssz, int bd) {
305 if (bd == 8) {
306 return vp9_block_error(coeff, dqcoeff, block_size, ssz);
307 } else {
308 return vp9_highbd_block_error(coeff, dqcoeff, block_size, ssz, bd);
309 }
310 }
311 #endif // CONFIG_VP9_HIGHBITDEPTH
312
313 int64_t vp9_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
314 intptr_t block_size, int64_t *ssz) {
315 int i;
316 int64_t error = 0, sqcoeff = 0;
317
318 for (i = 0; i < block_size; i++) {
319 const int diff = coeff[i] - dqcoeff[i];
320 error += diff * diff;
321 sqcoeff += coeff[i] * coeff[i];
322 }
323
324 *ssz = sqcoeff;
325 return error;
326 }
327
328 int64_t vp9_block_error_fp_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
329 int block_size) {
330 int i;
331 int64_t error = 0;
332
333 for (i = 0; i < block_size; i++) {
334 const int diff = coeff[i] - dqcoeff[i];
335 error += diff * diff;
336 }
337
338 return error;
339 }
340
341 /* The trailing '0' is a terminator which is used inside cost_coeffs() to
342 * decide whether to include cost of a trailing EOB node or not (i.e. we
343 * can skip this if the last coefficient in this transform block, e.g. the
344 * 16th coefficient in a 4x4 block or the 64th coefficient in an 8x8 block,
345 * were non-zero). */
346 static const int16_t band_counts[TX_SIZES][8] = {
347 { 1, 2, 3, 4, 3, 16 - 13, 0 },
348 { 1, 2, 3, 4, 11, 64 - 21, 0 },
349 { 1, 2, 3, 4, 11, 256 - 21, 0 },
350 { 1, 2, 3, 4, 11, 1024 - 21, 0 },
351 };
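// One row per transform size (4x4 .. 32x32); the entries of each row sum to
// the number of coefficients in the block (16, 64, 256, 1024).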
352 static int cost_coeffs(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
353 int pt, const int16_t *scan, const int16_t *nb,
354 int use_fast_coef_costing) {
355 MACROBLOCKD *const xd = &x->e_mbd;
356 MODE_INFO *mi = xd->mi[0];
357 const struct macroblock_plane *p = &x->plane[plane];
358 const PLANE_TYPE type = get_plane_type(plane);
359 const int16_t *band_count = &band_counts[tx_size][1];
360 const int eob = p->eobs[block];
361 const tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
362 unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
363 x->token_costs[tx_size][type][is_inter_block(mi)];
364 uint8_t token_cache[32 * 32];
365 int cost;
366 #if CONFIG_VP9_HIGHBITDEPTH
367 const uint16_t *cat6_high_cost = vp9_get_high_cost_table(xd->bd);
368 #else
369 const uint16_t *cat6_high_cost = vp9_get_high_cost_table(8);
370 #endif
371
372 // Check for consistency of tx_size with mode info
373 assert(type == PLANE_TYPE_Y
374 ? mi->tx_size == tx_size
375 : get_uv_tx_size(mi, &xd->plane[plane]) == tx_size);
376
377 if (eob == 0) {
378 // single eob token
379 cost = token_costs[0][0][pt][EOB_TOKEN];
380 } else {
381 if (use_fast_coef_costing) {
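// Fast path: approximate each coefficient's context from the previous token
// alone instead of calling get_coef_context() for every position.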
382 int band_left = *band_count++;
383 int c;
384
385 // dc token
386 int v = qcoeff[0];
387 int16_t prev_t;
388 cost = vp9_get_token_cost(v, &prev_t, cat6_high_cost);
389 cost += (*token_costs)[0][pt][prev_t];
390
391 token_cache[0] = vp9_pt_energy_class[prev_t];
392 ++token_costs;
393
394 // ac tokens
395 for (c = 1; c < eob; c++) {
396 const int rc = scan[c];
397 int16_t t;
398
399 v = qcoeff[rc];
400 cost += vp9_get_token_cost(v, &t, cat6_high_cost);
401 cost += (*token_costs)[!prev_t][!prev_t][t];
402 prev_t = t;
403 if (!--band_left) {
404 band_left = *band_count++;
405 ++token_costs;
406 }
407 }
408
409 // eob token
410 if (band_left) cost += (*token_costs)[0][!prev_t][EOB_TOKEN];
411
412 } else { // !use_fast_coef_costing
413 int band_left = *band_count++;
414 int c;
415
416 // dc token
417 int v = qcoeff[0];
418 int16_t tok;
419 unsigned int(*tok_cost_ptr)[COEFF_CONTEXTS][ENTROPY_TOKENS];
420 cost = vp9_get_token_cost(v, &tok, cat6_high_cost);
421 cost += (*token_costs)[0][pt][tok];
422
423 token_cache[0] = vp9_pt_energy_class[tok];
424 ++token_costs;
425
426 tok_cost_ptr = &((*token_costs)[!tok]);
427
428 // ac tokens
429 for (c = 1; c < eob; c++) {
430 const int rc = scan[c];
431
432 v = qcoeff[rc];
433 cost += vp9_get_token_cost(v, &tok, cat6_high_cost);
434 pt = get_coef_context(nb, token_cache, c);
435 cost += (*tok_cost_ptr)[pt][tok];
436 token_cache[rc] = vp9_pt_energy_class[tok];
437 if (!--band_left) {
438 band_left = *band_count++;
439 ++token_costs;
440 }
441 tok_cost_ptr = &((*token_costs)[!tok]);
442 }
443
444 // eob token
445 if (band_left) {
446 pt = get_coef_context(nb, token_cache, c);
447 cost += (*token_costs)[0][pt][EOB_TOKEN];
448 }
449 }
450 }
451
452 return cost;
453 }
454
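// Number of 4x4 blocks between blk_dim and the plane's right/bottom edge.
// mb_to_edge_dim is in 1/8-pel units, so >> (5 + subsampling) converts it to
// 4x4-block units in this plane.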
455 static INLINE int num_4x4_to_edge(int plane_4x4_dim, int mb_to_edge_dim,
456 int subsampling_dim, int blk_dim) {
457 return plane_4x4_dim + (mb_to_edge_dim >> (5 + subsampling_dim)) - blk_dim;
458 }
459
460 // Compute the pixel domain sum square error on all visible 4x4s in the
461 // transform block.
462 static unsigned pixel_sse(const VP9_COMP *const cpi, const MACROBLOCKD *xd,
463 const struct macroblockd_plane *const pd,
464 const uint8_t *src, const int src_stride,
465 const uint8_t *dst, const int dst_stride, int blk_row,
466 int blk_col, const BLOCK_SIZE plane_bsize,
467 const BLOCK_SIZE tx_bsize) {
468 unsigned int sse = 0;
469 const int plane_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
470 const int plane_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
471 const int tx_4x4_w = num_4x4_blocks_wide_lookup[tx_bsize];
472 const int tx_4x4_h = num_4x4_blocks_high_lookup[tx_bsize];
473 int b4x4s_to_right_edge = num_4x4_to_edge(plane_4x4_w, xd->mb_to_right_edge,
474 pd->subsampling_x, blk_col);
475 int b4x4s_to_bottom_edge = num_4x4_to_edge(plane_4x4_h, xd->mb_to_bottom_edge,
476 pd->subsampling_y, blk_row);
477 if (tx_bsize == BLOCK_4X4 ||
478 (b4x4s_to_right_edge >= tx_4x4_w && b4x4s_to_bottom_edge >= tx_4x4_h)) {
479 cpi->fn_ptr[tx_bsize].vf(src, src_stride, dst, dst_stride, &sse);
480 } else {
481 const vpx_variance_fn_t vf_4x4 = cpi->fn_ptr[BLOCK_4X4].vf;
482 int r, c;
483 unsigned this_sse = 0;
484 int max_r = VPXMIN(b4x4s_to_bottom_edge, tx_4x4_h);
485 int max_c = VPXMIN(b4x4s_to_right_edge, tx_4x4_w);
486 sse = 0;
487 // if we are in the unrestricted motion border.
488 for (r = 0; r < max_r; ++r) {
489 // Skip visiting the sub blocks that are wholly within the UMV.
490 for (c = 0; c < max_c; ++c) {
491 vf_4x4(src + r * src_stride * 4 + c * 4, src_stride,
492 dst + r * dst_stride * 4 + c * 4, dst_stride, &this_sse);
493 sse += this_sse;
494 }
495 }
496 }
497 return sse;
498 }
499
500 // Compute the residual sum of squares over all visible 4x4s in the transform block.
501 static int64_t sum_squares_visible(const MACROBLOCKD *xd,
502 const struct macroblockd_plane *const pd,
503 const int16_t *diff, const int diff_stride,
504 int blk_row, int blk_col,
505 const BLOCK_SIZE plane_bsize,
506 const BLOCK_SIZE tx_bsize) {
507 int64_t sse;
508 const int plane_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
509 const int plane_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
510 const int tx_4x4_w = num_4x4_blocks_wide_lookup[tx_bsize];
511 const int tx_4x4_h = num_4x4_blocks_high_lookup[tx_bsize];
512 int b4x4s_to_right_edge = num_4x4_to_edge(plane_4x4_w, xd->mb_to_right_edge,
513 pd->subsampling_x, blk_col);
514 int b4x4s_to_bottom_edge = num_4x4_to_edge(plane_4x4_h, xd->mb_to_bottom_edge,
515 pd->subsampling_y, blk_row);
516 if (tx_bsize == BLOCK_4X4 ||
517 (b4x4s_to_right_edge >= tx_4x4_w && b4x4s_to_bottom_edge >= tx_4x4_h)) {
518 assert(tx_4x4_w == tx_4x4_h);
519 sse = (int64_t)vpx_sum_squares_2d_i16(diff, diff_stride, tx_4x4_w << 2);
520 } else {
521 int r, c;
522 int max_r = VPXMIN(b4x4s_to_bottom_edge, tx_4x4_h);
523 int max_c = VPXMIN(b4x4s_to_right_edge, tx_4x4_w);
524 sse = 0;
525 // if we are in the unrestricted motion border.
526 for (r = 0; r < max_r; ++r) {
527 // Skip visiting the sub blocks that are wholly within the UMV.
528 for (c = 0; c < max_c; ++c) {
529 sse += (int64_t)vpx_sum_squares_2d_i16(
530 diff + r * diff_stride * 4 + c * 4, diff_stride, 4);
531 }
532 }
533 }
534 return sse;
535 }
536
537 static void dist_block(const VP9_COMP *cpi, MACROBLOCK *x, int plane,
538 BLOCK_SIZE plane_bsize, int block, int blk_row,
539 int blk_col, TX_SIZE tx_size, int64_t *out_dist,
540 int64_t *out_sse) {
541 MACROBLOCKD *const xd = &x->e_mbd;
542 const struct macroblock_plane *const p = &x->plane[plane];
543 const struct macroblockd_plane *const pd = &xd->plane[plane];
544
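// Distortion is measured either in the transform domain (coefficient error,
// cheaper) or in the pixel domain (reconstruct with the inverse transform and
// compare against the source), depending on x->block_tx_domain.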
545 if (x->block_tx_domain) {
546 const int ss_txfrm_size = tx_size << 1;
547 int64_t this_sse;
548 const int shift = tx_size == TX_32X32 ? 0 : 2;
549 const tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
550 const tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
551 #if CONFIG_VP9_HIGHBITDEPTH
552 const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
553 *out_dist = vp9_highbd_block_error_dispatch(
554 coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse, bd) >>
555 shift;
556 #else
557 *out_dist =
558 vp9_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >>
559 shift;
560 #endif // CONFIG_VP9_HIGHBITDEPTH
561 *out_sse = this_sse >> shift;
562
563 if (x->skip_encode && !is_inter_block(xd->mi[0])) {
564 // TODO(jingning): tune the model to better capture the distortion.
565 const int64_t p =
566 (pd->dequant[1] * pd->dequant[1] * (1 << ss_txfrm_size)) >>
567 #if CONFIG_VP9_HIGHBITDEPTH
568 (shift + 2 + (bd - 8) * 2);
569 #else
570 (shift + 2);
571 #endif // CONFIG_VP9_HIGHBITDEPTH
572 *out_dist += (p >> 4);
573 *out_sse += p;
574 }
575 } else {
576 const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
577 const int bs = 4 * num_4x4_blocks_wide_lookup[tx_bsize];
578 const int src_stride = p->src.stride;
579 const int dst_stride = pd->dst.stride;
580 const int src_idx = 4 * (blk_row * src_stride + blk_col);
581 const int dst_idx = 4 * (blk_row * dst_stride + blk_col);
582 const uint8_t *src = &p->src.buf[src_idx];
583 const uint8_t *dst = &pd->dst.buf[dst_idx];
584 const tran_low_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
585 const uint16_t *eob = &p->eobs[block];
586 unsigned int tmp;
587
588 tmp = pixel_sse(cpi, xd, pd, src, src_stride, dst, dst_stride, blk_row,
589 blk_col, plane_bsize, tx_bsize);
590 *out_sse = (int64_t)tmp * 16;
591
592 if (*eob) {
593 #if CONFIG_VP9_HIGHBITDEPTH
594 DECLARE_ALIGNED(16, uint16_t, recon16[1024]);
595 uint8_t *recon = (uint8_t *)recon16;
596 #else
597 DECLARE_ALIGNED(16, uint8_t, recon[1024]);
598 #endif // CONFIG_VP9_HIGHBITDEPTH
599
600 #if CONFIG_VP9_HIGHBITDEPTH
601 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
602 vpx_highbd_convolve_copy(CONVERT_TO_SHORTPTR(dst), dst_stride, recon16,
603 32, NULL, 0, NULL, 0, bs, bs, xd->bd);
604 if (xd->lossless) {
605 vp9_highbd_iwht4x4_add(dqcoeff, recon16, 32, *eob, xd->bd);
606 } else {
607 switch (tx_size) {
608 case TX_4X4:
609 vp9_highbd_idct4x4_add(dqcoeff, recon16, 32, *eob, xd->bd);
610 break;
611 case TX_8X8:
612 vp9_highbd_idct8x8_add(dqcoeff, recon16, 32, *eob, xd->bd);
613 break;
614 case TX_16X16:
615 vp9_highbd_idct16x16_add(dqcoeff, recon16, 32, *eob, xd->bd);
616 break;
617 case TX_32X32:
618 vp9_highbd_idct32x32_add(dqcoeff, recon16, 32, *eob, xd->bd);
619 break;
620 default: assert(0 && "Invalid transform size");
621 }
622 }
623 recon = CONVERT_TO_BYTEPTR(recon16);
624 } else {
625 #endif // CONFIG_VP9_HIGHBITDEPTH
626 vpx_convolve_copy(dst, dst_stride, recon, 32, NULL, 0, NULL, 0, bs, bs);
627 switch (tx_size) {
628 case TX_32X32: vp9_idct32x32_add(dqcoeff, recon, 32, *eob); break;
629 case TX_16X16: vp9_idct16x16_add(dqcoeff, recon, 32, *eob); break;
630 case TX_8X8: vp9_idct8x8_add(dqcoeff, recon, 32, *eob); break;
631 case TX_4X4:
632 // this is like vp9_short_idct4x4 but has a special case around
633 // eob<=1, which is significant (not just an optimization) for
634 // the lossless case.
635 x->itxm_add(dqcoeff, recon, 32, *eob);
636 break;
637 default: assert(0 && "Invalid transform size"); break;
638 }
639 #if CONFIG_VP9_HIGHBITDEPTH
640 }
641 #endif // CONFIG_VP9_HIGHBITDEPTH
642
643 tmp = pixel_sse(cpi, xd, pd, src, src_stride, recon, 32, blk_row, blk_col,
644 plane_bsize, tx_bsize);
645 }
646
647 *out_dist = (int64_t)tmp * 16;
648 }
649 }
650
651 static int rate_block(int plane, int block, TX_SIZE tx_size, int coeff_ctx,
652 struct rdcost_block_args *args) {
653 return cost_coeffs(args->x, plane, block, tx_size, coeff_ctx, args->so->scan,
654 args->so->neighbors, args->use_fast_coef_costing);
655 }
656
657 static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
658 BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
659 struct rdcost_block_args *args = arg;
660 MACROBLOCK *const x = args->x;
661 MACROBLOCKD *const xd = &x->e_mbd;
662 MODE_INFO *const mi = xd->mi[0];
663 int64_t rd1, rd2, rd;
664 int rate;
665 int64_t dist;
666 int64_t sse;
667 const int coeff_ctx =
668 combine_entropy_contexts(args->t_left[blk_row], args->t_above[blk_col]);
669
670 if (args->exit_early) return;
671
672 if (!is_inter_block(mi)) {
673 struct encode_b_args intra_arg = { x, x->block_qcoeff_opt, args->t_above,
674 args->t_left, &mi->skip };
675 vp9_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize, tx_size,
676 &intra_arg);
677 if (x->block_tx_domain) {
678 dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col,
679 tx_size, &dist, &sse);
680 } else {
681 const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
682 const struct macroblock_plane *const p = &x->plane[plane];
683 const struct macroblockd_plane *const pd = &xd->plane[plane];
684 const int src_stride = p->src.stride;
685 const int dst_stride = pd->dst.stride;
686 const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
687 const uint8_t *src = &p->src.buf[4 * (blk_row * src_stride + blk_col)];
688 const uint8_t *dst = &pd->dst.buf[4 * (blk_row * dst_stride + blk_col)];
689 const int16_t *diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
690 unsigned int tmp;
691 sse = sum_squares_visible(xd, pd, diff, diff_stride, blk_row, blk_col,
692 plane_bsize, tx_bsize);
693 #if CONFIG_VP9_HIGHBITDEPTH
694 if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && (xd->bd > 8))
695 sse = ROUND64_POWER_OF_TWO(sse, (xd->bd - 8) * 2);
696 #endif // CONFIG_VP9_HIGHBITDEPTH
697 sse = sse * 16;
698 tmp = pixel_sse(args->cpi, xd, pd, src, src_stride, dst, dst_stride,
699 blk_row, blk_col, plane_bsize, tx_bsize);
700 dist = (int64_t)tmp * 16;
701 }
702 } else if (max_txsize_lookup[plane_bsize] == tx_size) {
703 if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
704 SKIP_TXFM_NONE) {
705 // full forward transform and quantization
706 vp9_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
707 if (x->block_qcoeff_opt)
708 vp9_optimize_b(x, plane, block, tx_size, coeff_ctx);
709 dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col,
710 tx_size, &dist, &sse);
711 } else if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
712 SKIP_TXFM_AC_ONLY) {
713 // compute DC coefficient
714 tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block);
715 tran_low_t *const dqcoeff = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block);
716 vp9_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
717 tx_size);
718 sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
719 dist = sse;
720 if (x->plane[plane].eobs[block]) {
721 const int64_t orig_sse = (int64_t)coeff[0] * coeff[0];
722 const int64_t resd_sse = coeff[0] - dqcoeff[0];
723 int64_t dc_correct = orig_sse - resd_sse * resd_sse;
724 #if CONFIG_VP9_HIGHBITDEPTH
725 dc_correct >>= ((xd->bd - 8) * 2);
726 #endif
727 if (tx_size != TX_32X32) dc_correct >>= 2;
728
729 dist = VPXMAX(0, sse - dc_correct);
730 }
731 } else {
732 // SKIP_TXFM_AC_DC
733 // skip forward transform
734 x->plane[plane].eobs[block] = 0;
735 sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
736 dist = sse;
737 }
738 } else {
739 // full forward transform and quantization
740 vp9_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
741 if (x->block_qcoeff_opt)
742 vp9_optimize_b(x, plane, block, tx_size, coeff_ctx);
743 dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col,
744 tx_size, &dist, &sse);
745 }
746
747 rd = RDCOST(x->rdmult, x->rddiv, 0, dist);
748 if (args->this_rd + rd > args->best_rd) {
749 args->exit_early = 1;
750 return;
751 }
752
753 rate = rate_block(plane, block, tx_size, coeff_ctx, args);
754 args->t_above[blk_col] = (x->plane[plane].eobs[block] > 0) ? 1 : 0;
755 args->t_left[blk_row] = (x->plane[plane].eobs[block] > 0) ? 1 : 0;
756 rd1 = RDCOST(x->rdmult, x->rddiv, rate, dist);
757 rd2 = RDCOST(x->rdmult, x->rddiv, 0, sse);
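// rd1: cost of coding the quantized coefficients; rd2: cost of skipping the
// block entirely and taking the full SSE as distortion.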
758
759 // TODO(jingning): temporarily enabled only for luma component
760 rd = VPXMIN(rd1, rd2);
761 if (plane == 0) {
762 x->zcoeff_blk[tx_size][block] =
763 !x->plane[plane].eobs[block] || (rd1 > rd2 && !xd->lossless);
764 x->sum_y_eobs[tx_size] += x->plane[plane].eobs[block];
765 }
766
767 args->this_rate += rate;
768 args->this_dist += dist;
769 args->this_sse += sse;
770 args->this_rd += rd;
771
772 if (args->this_rd > args->best_rd) {
773 args->exit_early = 1;
774 return;
775 }
776
777 args->skippable &= !x->plane[plane].eobs[block];
778 }
779
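// Rate-distortion estimate for one plane at a fixed transform size: visit
// every transform block via block_rd_txfm(), accumulating rate, distortion
// and SSE, and bail out early once ref_best_rd is exceeded.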
780 static void txfm_rd_in_plane(const VP9_COMP *cpi, MACROBLOCK *x, int *rate,
781 int64_t *distortion, int *skippable, int64_t *sse,
782 int64_t ref_best_rd, int plane, BLOCK_SIZE bsize,
783 TX_SIZE tx_size, int use_fast_coef_casting) {
784 MACROBLOCKD *const xd = &x->e_mbd;
785 const struct macroblockd_plane *const pd = &xd->plane[plane];
786 struct rdcost_block_args args;
787 vp9_zero(args);
788 args.cpi = cpi;
789 args.x = x;
790 args.best_rd = ref_best_rd;
791 args.use_fast_coef_costing = use_fast_coef_casting;
792 args.skippable = 1;
793
794 if (plane == 0) xd->mi[0]->tx_size = tx_size;
795
796 vp9_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
797
798 args.so = get_scan(xd, tx_size, get_plane_type(plane), 0);
799
800 vp9_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
801 &args);
802 if (args.exit_early) {
803 *rate = INT_MAX;
804 *distortion = INT64_MAX;
805 *sse = INT64_MAX;
806 *skippable = 0;
807 } else {
808 *distortion = args.this_dist;
809 *rate = args.this_rate;
810 *sse = args.this_sse;
811 *skippable = args.skippable;
812 }
813 }
814
815 static void choose_largest_tx_size(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
816 int64_t *distortion, int *skip, int64_t *sse,
817 int64_t ref_best_rd, BLOCK_SIZE bs) {
818 const TX_SIZE max_tx_size = max_txsize_lookup[bs];
819 VP9_COMMON *const cm = &cpi->common;
820 const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
821 MACROBLOCKD *const xd = &x->e_mbd;
822 MODE_INFO *const mi = xd->mi[0];
823
824 mi->tx_size = VPXMIN(max_tx_size, largest_tx_size);
825
826 txfm_rd_in_plane(cpi, x, rate, distortion, skip, sse, ref_best_rd, 0, bs,
827 mi->tx_size, cpi->sf.use_fast_coef_costing);
828 }
829
830 static void choose_tx_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
831 int64_t *distortion, int *skip,
832 int64_t *psse, int64_t ref_best_rd,
833 BLOCK_SIZE bs) {
834 const TX_SIZE max_tx_size = max_txsize_lookup[bs];
835 VP9_COMMON *const cm = &cpi->common;
836 MACROBLOCKD *const xd = &x->e_mbd;
837 MODE_INFO *const mi = xd->mi[0];
838 vpx_prob skip_prob = vp9_get_skip_prob(cm, xd);
839 int r[TX_SIZES][2], s[TX_SIZES];
840 int64_t d[TX_SIZES], sse[TX_SIZES];
841 int64_t rd[TX_SIZES][2] = { { INT64_MAX, INT64_MAX },
842 { INT64_MAX, INT64_MAX },
843 { INT64_MAX, INT64_MAX },
844 { INT64_MAX, INT64_MAX } };
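// rd[n][0] excludes the cost of signalling the transform size, rd[n][1]
// includes it; the latter is what the TX_MODE_SELECT search minimizes.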
845 int n, m;
846 int s0, s1;
847 int64_t best_rd = INT64_MAX;
848 TX_SIZE best_tx = max_tx_size;
849 int start_tx, end_tx;
850
851 const vpx_prob *tx_probs = get_tx_probs2(max_tx_size, xd, &cm->fc->tx_probs);
852 assert(skip_prob > 0);
853 s0 = vp9_cost_bit(skip_prob, 0);
854 s1 = vp9_cost_bit(skip_prob, 1);
855
856 if (cm->tx_mode == TX_MODE_SELECT) {
857 start_tx = max_tx_size;
858 end_tx = 0;
859 } else {
860 TX_SIZE chosen_tx_size =
861 VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[cm->tx_mode]);
862 start_tx = chosen_tx_size;
863 end_tx = chosen_tx_size;
864 }
865
866 for (n = start_tx; n >= end_tx; n--) {
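// r_tx_size: cost of signalling transform size n with the frame's tx_size
// probability tree.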
867 int r_tx_size = 0;
868 for (m = 0; m <= n - (n == (int)max_tx_size); m++) {
869 if (m == n)
870 r_tx_size += vp9_cost_zero(tx_probs[m]);
871 else
872 r_tx_size += vp9_cost_one(tx_probs[m]);
873 }
874 txfm_rd_in_plane(cpi, x, &r[n][0], &d[n], &s[n], &sse[n], ref_best_rd, 0,
875 bs, n, cpi->sf.use_fast_coef_costing);
876 r[n][1] = r[n][0];
877 if (r[n][0] < INT_MAX) {
878 r[n][1] += r_tx_size;
879 }
880 if (d[n] == INT64_MAX || r[n][0] == INT_MAX) {
881 rd[n][0] = rd[n][1] = INT64_MAX;
882 } else if (s[n]) {
883 if (is_inter_block(mi)) {
884 rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, sse[n]);
885 r[n][1] -= r_tx_size;
886 } else {
887 rd[n][0] = RDCOST(x->rdmult, x->rddiv, s1, sse[n]);
888 rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1 + r_tx_size, sse[n]);
889 }
890 } else {
891 rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]);
892 rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]);
893 }
894
895 if (is_inter_block(mi) && !xd->lossless && !s[n] && sse[n] != INT64_MAX) {
896 rd[n][0] = VPXMIN(rd[n][0], RDCOST(x->rdmult, x->rddiv, s1, sse[n]));
897 rd[n][1] = VPXMIN(rd[n][1], RDCOST(x->rdmult, x->rddiv, s1, sse[n]));
898 }
899
900 // Early termination in transform size search.
901 if (cpi->sf.tx_size_search_breakout &&
902 (rd[n][1] == INT64_MAX ||
903 (n < (int)max_tx_size && rd[n][1] > rd[n + 1][1]) || s[n] == 1))
904 break;
905
906 if (rd[n][1] < best_rd) {
907 best_tx = n;
908 best_rd = rd[n][1];
909 }
910 }
911 mi->tx_size = best_tx;
912
913 *distortion = d[mi->tx_size];
914 *rate = r[mi->tx_size][cm->tx_mode == TX_MODE_SELECT];
915 *skip = s[mi->tx_size];
916 *psse = sse[mi->tx_size];
917 }
918
919 static void super_block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
920 int64_t *distortion, int *skip, int64_t *psse,
921 BLOCK_SIZE bs, int64_t ref_best_rd) {
922 MACROBLOCKD *xd = &x->e_mbd;
923 int64_t sse;
924 int64_t *ret_sse = psse ? psse : &sse;
925
926 assert(bs == xd->mi[0]->sb_type);
927
928 if (cpi->sf.tx_size_search_method == USE_LARGESTALL || xd->lossless) {
929 choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
930 bs);
931 } else {
932 choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
933 bs);
934 }
935 }
936
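// Prune oblique intra modes: a diagonal prediction mode is only evaluated if
// the best mode found so far is one of its two neighbouring directions.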
937 static int conditional_skipintra(PREDICTION_MODE mode,
938 PREDICTION_MODE best_intra_mode) {
939 if (mode == D117_PRED && best_intra_mode != V_PRED &&
940 best_intra_mode != D135_PRED)
941 return 1;
942 if (mode == D63_PRED && best_intra_mode != V_PRED &&
943 best_intra_mode != D45_PRED)
944 return 1;
945 if (mode == D207_PRED && best_intra_mode != H_PRED &&
946 best_intra_mode != D45_PRED)
947 return 1;
948 if (mode == D153_PRED && best_intra_mode != H_PRED &&
949 best_intra_mode != D135_PRED)
950 return 1;
951 return 0;
952 }
953
954 static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int row,
955 int col, PREDICTION_MODE *best_mode,
956 const int *bmode_costs, ENTROPY_CONTEXT *a,
957 ENTROPY_CONTEXT *l, int *bestrate,
958 int *bestratey, int64_t *bestdistortion,
959 BLOCK_SIZE bsize, int64_t rd_thresh) {
960 PREDICTION_MODE mode;
961 MACROBLOCKD *const xd = &x->e_mbd;
962 int64_t best_rd = rd_thresh;
963 struct macroblock_plane *p = &x->plane[0];
964 struct macroblockd_plane *pd = &xd->plane[0];
965 const int src_stride = p->src.stride;
966 const int dst_stride = pd->dst.stride;
967 const uint8_t *src_init = &p->src.buf[row * 4 * src_stride + col * 4];
968 uint8_t *dst_init = &pd->dst.buf[row * 4 * src_stride + col * 4];
969 ENTROPY_CONTEXT ta[2], tempa[2];
970 ENTROPY_CONTEXT tl[2], templ[2];
971 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
972 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
973 int idx, idy;
974 uint8_t best_dst[8 * 8];
975 #if CONFIG_VP9_HIGHBITDEPTH
976 uint16_t best_dst16[8 * 8];
977 #endif
978 memcpy(ta, a, num_4x4_blocks_wide * sizeof(a[0]));
979 memcpy(tl, l, num_4x4_blocks_high * sizeof(l[0]));
980
981 xd->mi[0]->tx_size = TX_4X4;
982
983 #if CONFIG_VP9_HIGHBITDEPTH
984 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
985 for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
986 int64_t this_rd;
987 int ratey = 0;
988 int64_t distortion = 0;
989 int rate = bmode_costs[mode];
990
991 if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;
992
993 // Only do the oblique modes if the best so far is
994 // one of the neighboring directional modes
995 if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
996 if (conditional_skipintra(mode, *best_mode)) continue;
997 }
998
999 memcpy(tempa, ta, num_4x4_blocks_wide * sizeof(ta[0]));
1000 memcpy(templ, tl, num_4x4_blocks_high * sizeof(tl[0]));
1001
1002 for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
1003 for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
1004 const int block = (row + idy) * 2 + (col + idx);
1005 const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
1006 uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
1007 uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);
1008 int16_t *const src_diff =
1009 vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
1010 tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
1011 xd->mi[0]->bmi[block].as_mode = mode;
1012 vp9_predict_intra_block(xd, 1, TX_4X4, mode,
1013 x->skip_encode ? src : dst,
1014 x->skip_encode ? src_stride : dst_stride, dst,
1015 dst_stride, col + idx, row + idy, 0);
1016 vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, dst,
1017 dst_stride, xd->bd);
1018 if (xd->lossless) {
1019 const scan_order *so = &vp9_default_scan_orders[TX_4X4];
1020 const int coeff_ctx =
1021 combine_entropy_contexts(tempa[idx], templ[idy]);
1022 vp9_highbd_fwht4x4(src_diff, coeff, 8);
1023 vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
1024 ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
1025 so->neighbors, cpi->sf.use_fast_coef_costing);
1026 tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0 ? 1 : 0);
1027 if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
1028 goto next_highbd;
1029 vp9_highbd_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block), dst16,
1030 dst_stride, p->eobs[block], xd->bd);
1031 } else {
1032 int64_t unused;
1033 const TX_TYPE tx_type = get_tx_type_4x4(PLANE_TYPE_Y, xd, block);
1034 const scan_order *so = &vp9_scan_orders[TX_4X4][tx_type];
1035 const int coeff_ctx =
1036 combine_entropy_contexts(tempa[idx], templ[idy]);
1037 if (tx_type == DCT_DCT)
1038 vpx_highbd_fdct4x4(src_diff, coeff, 8);
1039 else
1040 vp9_highbd_fht4x4(src_diff, coeff, 8, tx_type);
1041 vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
1042 ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
1043 so->neighbors, cpi->sf.use_fast_coef_costing);
1044 distortion += vp9_highbd_block_error_dispatch(
1045 coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16,
1046 &unused, xd->bd) >>
1047 2;
1048 tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0 ? 1 : 0);
1049 if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
1050 goto next_highbd;
1051 vp9_highbd_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block),
1052 dst16, dst_stride, p->eobs[block], xd->bd);
1053 }
1054 }
1055 }
1056
1057 rate += ratey;
1058 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
1059
1060 if (this_rd < best_rd) {
1061 *bestrate = rate;
1062 *bestratey = ratey;
1063 *bestdistortion = distortion;
1064 best_rd = this_rd;
1065 *best_mode = mode;
1066 memcpy(a, tempa, num_4x4_blocks_wide * sizeof(tempa[0]));
1067 memcpy(l, templ, num_4x4_blocks_high * sizeof(templ[0]));
1068 for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
1069 memcpy(best_dst16 + idy * 8,
1070 CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
1071 num_4x4_blocks_wide * 4 * sizeof(uint16_t));
1072 }
1073 }
1074 next_highbd : {}
1075 }
1076 if (best_rd >= rd_thresh || x->skip_encode) return best_rd;
1077
1078 for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
1079 memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
1080 best_dst16 + idy * 8, num_4x4_blocks_wide * 4 * sizeof(uint16_t));
1081 }
1082
1083 return best_rd;
1084 }
1085 #endif // CONFIG_VP9_HIGHBITDEPTH
1086
1087 for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
1088 int64_t this_rd;
1089 int ratey = 0;
1090 int64_t distortion = 0;
1091 int rate = bmode_costs[mode];
1092
1093 if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;
1094
1095 // Only do the oblique modes if the best so far is
1096 // one of the neighboring directional modes
1097 if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
1098 if (conditional_skipintra(mode, *best_mode)) continue;
1099 }
1100
1101 memcpy(tempa, ta, num_4x4_blocks_wide * sizeof(ta[0]));
1102 memcpy(templ, tl, num_4x4_blocks_high * sizeof(tl[0]));
1103
1104 for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
1105 for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
1106 const int block = (row + idy) * 2 + (col + idx);
1107 const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
1108 uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
1109 int16_t *const src_diff =
1110 vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
1111 tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
1112 xd->mi[0]->bmi[block].as_mode = mode;
1113 vp9_predict_intra_block(xd, 1, TX_4X4, mode, x->skip_encode ? src : dst,
1114 x->skip_encode ? src_stride : dst_stride, dst,
1115 dst_stride, col + idx, row + idy, 0);
1116 vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
1117
1118 if (xd->lossless) {
1119 const scan_order *so = &vp9_default_scan_orders[TX_4X4];
1120 const int coeff_ctx =
1121 combine_entropy_contexts(tempa[idx], templ[idy]);
1122 vp9_fwht4x4(src_diff, coeff, 8);
1123 vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
1124 ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
1125 so->neighbors, cpi->sf.use_fast_coef_costing);
1126 tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0) ? 1 : 0;
1127 if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
1128 goto next;
1129 vp9_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block), dst, dst_stride,
1130 p->eobs[block]);
1131 } else {
1132 int64_t unused;
1133 const TX_TYPE tx_type = get_tx_type_4x4(PLANE_TYPE_Y, xd, block);
1134 const scan_order *so = &vp9_scan_orders[TX_4X4][tx_type];
1135 const int coeff_ctx =
1136 combine_entropy_contexts(tempa[idx], templ[idy]);
1137 vp9_fht4x4(src_diff, coeff, 8, tx_type);
1138 vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
1139 ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
1140 so->neighbors, cpi->sf.use_fast_coef_costing);
1141 tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0) ? 1 : 0;
1142 distortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
1143 16, &unused) >>
1144 2;
1145 if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
1146 goto next;
1147 vp9_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block), dst,
1148 dst_stride, p->eobs[block]);
1149 }
1150 }
1151 }
1152
1153 rate += ratey;
1154 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
1155
1156 if (this_rd < best_rd) {
1157 *bestrate = rate;
1158 *bestratey = ratey;
1159 *bestdistortion = distortion;
1160 best_rd = this_rd;
1161 *best_mode = mode;
1162 memcpy(a, tempa, num_4x4_blocks_wide * sizeof(tempa[0]));
1163 memcpy(l, templ, num_4x4_blocks_high * sizeof(templ[0]));
1164 for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
1165 memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
1166 num_4x4_blocks_wide * 4);
1167 }
1168 next : {}
1169 }
1170
1171 if (best_rd >= rd_thresh || x->skip_encode) return best_rd;
1172
1173 for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
1174 memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
1175 num_4x4_blocks_wide * 4);
1176
1177 return best_rd;
1178 }
1179
1180 static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP *cpi, MACROBLOCK *mb,
1181 int *rate, int *rate_y,
1182 int64_t *distortion,
1183 int64_t best_rd) {
1184 int i, j;
1185 const MACROBLOCKD *const xd = &mb->e_mbd;
1186 MODE_INFO *const mic = xd->mi[0];
1187 const MODE_INFO *above_mi = xd->above_mi;
1188 const MODE_INFO *left_mi = xd->left_mi;
1189 const BLOCK_SIZE bsize = xd->mi[0]->sb_type;
1190 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
1191 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
1192 int idx, idy;
1193 int cost = 0;
1194 int64_t total_distortion = 0;
1195 int tot_rate_y = 0;
1196 int64_t total_rd = 0;
1197 const int *bmode_costs = cpi->mbmode_cost;
1198
1199 // Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block.
1200 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
1201 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
1202 PREDICTION_MODE best_mode = DC_PRED;
1203 int r = INT_MAX, ry = INT_MAX;
1204 int64_t d = INT64_MAX, this_rd = INT64_MAX;
1205 i = idy * 2 + idx;
1206 if (cpi->common.frame_type == KEY_FRAME) {
1207 const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, i);
1208 const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, i);
1209
1210 bmode_costs = cpi->y_mode_costs[A][L];
1211 }
1212
1213 this_rd = rd_pick_intra4x4block(
1214 cpi, mb, idy, idx, &best_mode, bmode_costs,
1215 xd->plane[0].above_context + idx, xd->plane[0].left_context + idy, &r,
1216 &ry, &d, bsize, best_rd - total_rd);
1217
1218 if (this_rd >= best_rd - total_rd) return INT64_MAX;
1219
1220 total_rd += this_rd;
1221 cost += r;
1222 total_distortion += d;
1223 tot_rate_y += ry;
1224
1225 mic->bmi[i].as_mode = best_mode;
1226 for (j = 1; j < num_4x4_blocks_high; ++j)
1227 mic->bmi[i + j * 2].as_mode = best_mode;
1228 for (j = 1; j < num_4x4_blocks_wide; ++j)
1229 mic->bmi[i + j].as_mode = best_mode;
1230
1231 if (total_rd >= best_rd) return INT64_MAX;
1232 }
1233 }
1234
1235 *rate = cost;
1236 *rate_y = tot_rate_y;
1237 *distortion = total_distortion;
1238 mic->mode = mic->bmi[3].as_mode;
1239
1240 return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion);
1241 }
1242
1243 // This function is used only for intra_only frames
1244 static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
1245 int *rate_tokenonly, int64_t *distortion,
1246 int *skippable, BLOCK_SIZE bsize,
1247 int64_t best_rd) {
1248 PREDICTION_MODE mode;
1249 PREDICTION_MODE mode_selected = DC_PRED;
1250 MACROBLOCKD *const xd = &x->e_mbd;
1251 MODE_INFO *const mic = xd->mi[0];
1252 int this_rate, this_rate_tokenonly, s;
1253 int64_t this_distortion, this_rd;
1254 TX_SIZE best_tx = TX_4X4;
1255 int *bmode_costs;
1256 const MODE_INFO *above_mi = xd->above_mi;
1257 const MODE_INFO *left_mi = xd->left_mi;
1258 const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
1259 const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
1260 bmode_costs = cpi->y_mode_costs[A][L];
1261
1262 memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
1263 /* Y Search for intra prediction mode */
1264 for (mode = DC_PRED; mode <= TM_PRED; mode++) {
1265 if (cpi->sf.use_nonrd_pick_mode) {
1266 // These speed features are turned on in hybrid non-RD and RD mode
1267 // for key frame coding in the context of real-time setting.
1268 if (conditional_skipintra(mode, mode_selected)) continue;
1269 if (*skippable) break;
1270 }
1271
1272 mic->mode = mode;
1273
1274 super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
1275 bsize, best_rd);
1276
1277 if (this_rate_tokenonly == INT_MAX) continue;
1278
1279 this_rate = this_rate_tokenonly + bmode_costs[mode];
1280 this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
1281
1282 if (this_rd < best_rd) {
1283 mode_selected = mode;
1284 best_rd = this_rd;
1285 best_tx = mic->tx_size;
1286 *rate = this_rate;
1287 *rate_tokenonly = this_rate_tokenonly;
1288 *distortion = this_distortion;
1289 *skippable = s;
1290 }
1291 }
1292
1293 mic->mode = mode_selected;
1294 mic->tx_size = best_tx;
1295
1296 return best_rd;
1297 }
1298
1299 // Return value 0: early termination triggered, no valid rd cost available;
1300 // 1: rd cost values are valid.
1301 static int super_block_uvrd(const VP9_COMP *cpi, MACROBLOCK *x, int *rate,
1302 int64_t *distortion, int *skippable, int64_t *sse,
1303 BLOCK_SIZE bsize, int64_t ref_best_rd) {
1304 MACROBLOCKD *const xd = &x->e_mbd;
1305 MODE_INFO *const mi = xd->mi[0];
1306 const TX_SIZE uv_tx_size = get_uv_tx_size(mi, &xd->plane[1]);
1307 int plane;
1308 int pnrate = 0, pnskip = 1;
1309 int64_t pndist = 0, pnsse = 0;
1310 int is_cost_valid = 1;
1311
1312 if (ref_best_rd < 0) is_cost_valid = 0;
1313
1314 if (is_inter_block(mi) && is_cost_valid) {
1315 int plane;
1316 for (plane = 1; plane < MAX_MB_PLANE; ++plane)
1317 vp9_subtract_plane(x, bsize, plane);
1318 }
1319
1320 *rate = 0;
1321 *distortion = 0;
1322 *sse = 0;
1323 *skippable = 1;
1324
1325 for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
1326 txfm_rd_in_plane(cpi, x, &pnrate, &pndist, &pnskip, &pnsse, ref_best_rd,
1327 plane, bsize, uv_tx_size, cpi->sf.use_fast_coef_costing);
1328 if (pnrate == INT_MAX) {
1329 is_cost_valid = 0;
1330 break;
1331 }
1332 *rate += pnrate;
1333 *distortion += pndist;
1334 *sse += pnsse;
1335 *skippable &= pnskip;
1336 }
1337
1338 if (!is_cost_valid) {
1339 // reset cost value
1340 *rate = INT_MAX;
1341 *distortion = INT64_MAX;
1342 *sse = INT64_MAX;
1343 *skippable = 0;
1344 }
1345
1346 return is_cost_valid;
1347 }
1348
1349 static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
1350 PICK_MODE_CONTEXT *ctx, int *rate,
1351 int *rate_tokenonly, int64_t *distortion,
1352 int *skippable, BLOCK_SIZE bsize,
1353 TX_SIZE max_tx_size) {
1354 MACROBLOCKD *xd = &x->e_mbd;
1355 PREDICTION_MODE mode;
1356 PREDICTION_MODE mode_selected = DC_PRED;
1357 int64_t best_rd = INT64_MAX, this_rd;
1358 int this_rate_tokenonly, this_rate, s;
1359 int64_t this_distortion, this_sse;
1360
1361 memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
1362 for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
1363 if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode))) continue;
1364 #if CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
1365 if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) &&
1366 (xd->above_mi == NULL || xd->left_mi == NULL) && need_top_left[mode])
1367 continue;
1368 #endif // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
1369
1370 xd->mi[0]->uv_mode = mode;
1371
1372 if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
1373 &this_sse, bsize, best_rd))
1374 continue;
1375 this_rate =
1376 this_rate_tokenonly +
1377 cpi->intra_uv_mode_cost[cpi->common.frame_type][xd->mi[0]->mode][mode];
1378 this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
1379
1380 if (this_rd < best_rd) {
1381 mode_selected = mode;
1382 best_rd = this_rd;
1383 *rate = this_rate;
1384 *rate_tokenonly = this_rate_tokenonly;
1385 *distortion = this_distortion;
1386 *skippable = s;
1387 if (!x->select_tx_size) swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);
1388 }
1389 }
1390
1391 xd->mi[0]->uv_mode = mode_selected;
1392 return best_rd;
1393 }
1394
1395 static int64_t rd_sbuv_dcpred(const VP9_COMP *cpi, MACROBLOCK *x, int *rate,
1396 int *rate_tokenonly, int64_t *distortion,
1397 int *skippable, BLOCK_SIZE bsize) {
1398 const VP9_COMMON *cm = &cpi->common;
1399 int64_t unused;
1400
1401 x->e_mbd.mi[0]->uv_mode = DC_PRED;
1402 memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
1403 super_block_uvrd(cpi, x, rate_tokenonly, distortion, skippable, &unused,
1404 bsize, INT64_MAX);
1405 *rate =
1406 *rate_tokenonly +
1407 cpi->intra_uv_mode_cost[cm->frame_type][x->e_mbd.mi[0]->mode][DC_PRED];
1408 return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
1409 }
1410
1411 static void choose_intra_uv_mode(VP9_COMP *cpi, MACROBLOCK *const x,
1412 PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
1413 TX_SIZE max_tx_size, int *rate_uv,
1414 int *rate_uv_tokenonly, int64_t *dist_uv,
1415 int *skip_uv, PREDICTION_MODE *mode_uv) {
1416 // Use an estimated rd for uv_intra based on DC_PRED if the
1417 // appropriate speed flag is set.
1418 if (cpi->sf.use_uv_intra_rd_estimate) {
1419 rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
1420 bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
1421 // Else do a proper rd search for each possible transform size that may
1422 // be considered in the main rd loop.
1423 } else {
1424 rd_pick_intra_sbuv_mode(cpi, x, ctx, rate_uv, rate_uv_tokenonly, dist_uv,
1425 skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
1426 max_tx_size);
1427 }
1428 *mode_uv = x->e_mbd.mi[0]->uv_mode;
1429 }
1430
1431 static int cost_mv_ref(const VP9_COMP *cpi, PREDICTION_MODE mode,
1432 int mode_context) {
1433 assert(is_inter_mode(mode));
1434 return cpi->inter_mode_cost[mode_context][INTER_OFFSET(mode)];
1435 }
1436
1437 static int set_and_cost_bmi_mvs(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
1438 int i, PREDICTION_MODE mode, int_mv this_mv[2],
1439 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
1440 int_mv seg_mvs[MAX_REF_FRAMES],
1441 int_mv *best_ref_mv[2], const int *mvjcost,
1442 int *mvcost[2]) {
1443 MODE_INFO *const mi = xd->mi[0];
1444 const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
1445 int thismvcost = 0;
1446 int idx, idy;
1447 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mi->sb_type];
1448 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mi->sb_type];
1449 const int is_compound = has_second_ref(mi);
1450
1451 switch (mode) {
1452 case NEWMV:
1453 this_mv[0].as_int = seg_mvs[mi->ref_frame[0]].as_int;
1454 thismvcost += vp9_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
1455 mvjcost, mvcost, MV_COST_WEIGHT_SUB);
1456 if (is_compound) {
1457 this_mv[1].as_int = seg_mvs[mi->ref_frame[1]].as_int;
1458 thismvcost += vp9_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
1459 mvjcost, mvcost, MV_COST_WEIGHT_SUB);
1460 }
1461 break;
1462 case NEARMV:
1463 case NEARESTMV:
1464 this_mv[0].as_int = frame_mv[mode][mi->ref_frame[0]].as_int;
1465 if (is_compound)
1466 this_mv[1].as_int = frame_mv[mode][mi->ref_frame[1]].as_int;
1467 break;
1468 case ZEROMV:
1469 this_mv[0].as_int = 0;
1470 if (is_compound) this_mv[1].as_int = 0;
1471 break;
1472 default: break;
1473 }
1474
1475 mi->bmi[i].as_mv[0].as_int = this_mv[0].as_int;
1476 if (is_compound) mi->bmi[i].as_mv[1].as_int = this_mv[1].as_int;
1477
1478 mi->bmi[i].as_mode = mode;
1479
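// Replicate the chosen mode and MVs to every 4x4 unit covered by this
// (4x4, 4x8 or 8x4) partition.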
1480 for (idy = 0; idy < num_4x4_blocks_high; ++idy)
1481 for (idx = 0; idx < num_4x4_blocks_wide; ++idx)
1482 memmove(&mi->bmi[i + idy * 2 + idx], &mi->bmi[i], sizeof(mi->bmi[i]));
1483
1484 return cost_mv_ref(cpi, mode, mbmi_ext->mode_context[mi->ref_frame[0]]) +
1485 thismvcost;
1486 }
1487
1488 static int64_t encode_inter_mb_segment(VP9_COMP *cpi, MACROBLOCK *x,
1489 int64_t best_yrd, int i, int *labelyrate,
1490 int64_t *distortion, int64_t *sse,
1491 ENTROPY_CONTEXT *ta, ENTROPY_CONTEXT *tl,
1492 int mi_row, int mi_col) {
1493 int k;
1494 MACROBLOCKD *xd = &x->e_mbd;
1495 struct macroblockd_plane *const pd = &xd->plane[0];
1496 struct macroblock_plane *const p = &x->plane[0];
1497 MODE_INFO *const mi = xd->mi[0];
1498 const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->sb_type, pd);
1499 const int width = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
1500 const int height = 4 * num_4x4_blocks_high_lookup[plane_bsize];
1501 int idx, idy;
1502
1503 const uint8_t *const src =
1504 &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
1505 uint8_t *const dst =
1506 &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
1507 int64_t thisdistortion = 0, thissse = 0;
1508 int thisrate = 0, ref;
1509 const scan_order *so = &vp9_default_scan_orders[TX_4X4];
1510 const int is_compound = has_second_ref(mi);
1511 const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter];
1512
1513 for (ref = 0; ref < 1 + is_compound; ++ref) {
1514 const int bw = b_width_log2_lookup[BLOCK_8X8];
1515 const int h = 4 * (i >> bw);
1516 const int w = 4 * (i & ((1 << bw) - 1));
1517 const struct scale_factors *sf = &xd->block_refs[ref]->sf;
1518 int y_stride = pd->pre[ref].stride;
1519 uint8_t *pre = pd->pre[ref].buf + (h * pd->pre[ref].stride + w);
1520
1521 if (vp9_is_scaled(sf)) {
1522 const int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
1523 const int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
1524
1525 y_stride = xd->block_refs[ref]->buf->y_stride;
1526 pre = xd->block_refs[ref]->buf->y_buffer;
1527 pre += scaled_buffer_offset(x_start + w, y_start + h, y_stride, sf);
1528 }
1529 #if CONFIG_VP9_HIGHBITDEPTH
1530 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1531 vp9_highbd_build_inter_predictor(
1532 CONVERT_TO_SHORTPTR(pre), y_stride, CONVERT_TO_SHORTPTR(dst),
1533 pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv,
1534 &xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3,
1535 mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2),
1536 xd->bd);
1537 } else {
1538 vp9_build_inter_predictor(
1539 pre, y_stride, dst, pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv,
1540 &xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3,
1541 mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2));
1542 }
1543 #else
1544 vp9_build_inter_predictor(
1545 pre, y_stride, dst, pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv,
1546 &xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3,
1547 mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2));
1548 #endif // CONFIG_VP9_HIGHBITDEPTH
1549 }
1550
1551 #if CONFIG_VP9_HIGHBITDEPTH
1552 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1553 vpx_highbd_subtract_block(
1554 height, width, vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
1555 8, src, p->src.stride, dst, pd->dst.stride, xd->bd);
1556 } else {
1557 vpx_subtract_block(height, width,
1558 vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
1559 8, src, p->src.stride, dst, pd->dst.stride);
1560 }
1561 #else
1562 vpx_subtract_block(height, width,
1563 vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
1564 8, src, p->src.stride, dst, pd->dst.stride);
1565 #endif // CONFIG_VP9_HIGHBITDEPTH
1566
1567 k = i;
1568 for (idy = 0; idy < height / 4; ++idy) {
1569 for (idx = 0; idx < width / 4; ++idx) {
1570 #if CONFIG_VP9_HIGHBITDEPTH
1571 const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
1572 #endif
1573 int64_t ssz, rd, rd1, rd2;
1574 tran_low_t *coeff;
1575 int coeff_ctx;
1576 k += (idy * 2 + idx);
1577 coeff_ctx = combine_entropy_contexts(ta[k & 1], tl[k >> 1]);
1578 coeff = BLOCK_OFFSET(p->coeff, k);
1579 x->fwd_txm4x4(vp9_raster_block_offset_int16(BLOCK_8X8, k, p->src_diff),
1580 coeff, 8);
1581 vp9_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan);
1582 #if CONFIG_VP9_HIGHBITDEPTH
1583 thisdistortion += vp9_highbd_block_error_dispatch(
1584 coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz, bd);
1585 #else
1586 thisdistortion +=
1587 vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
1588 #endif // CONFIG_VP9_HIGHBITDEPTH
1589 thissse += ssz;
1590 thisrate += cost_coeffs(x, 0, k, TX_4X4, coeff_ctx, so->scan,
1591 so->neighbors, cpi->sf.use_fast_coef_costing);
1592 ta[k & 1] = tl[k >> 1] = (x->plane[0].eobs[k] > 0) ? 1 : 0;
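// Early termination: rd1 is the cost of coding the residual, rd2 the cost
// of skipping it (zero rate, full sse as distortion). If even the cheaper
// of the two already exceeds the best rd so far, this segment cannot win.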
1593 rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2);
1594 rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2);
1595 rd = VPXMIN(rd1, rd2);
1596 if (rd >= best_yrd) return INT64_MAX;
1597 }
1598 }
1599
1600 *distortion = thisdistortion >> 2;
1601 *labelyrate = thisrate;
1602 *sse = thissse >> 2;
1603
1604 return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
1605 }
1606
1607 typedef struct {
1608 int eobs;
1609 int brate;
1610 int byrate;
1611 int64_t bdist;
1612 int64_t bsse;
1613 int64_t brdcost;
1614 int_mv mvs[2];
1615 ENTROPY_CONTEXT ta[2];
1616 ENTROPY_CONTEXT tl[2];
1617 } SEG_RDSTAT;
1618
1619 typedef struct {
1620 int_mv *ref_mv[2];
1621 int_mv mvp;
1622
1623 int64_t segment_rd;
1624 int r;
1625 int64_t d;
1626 int64_t sse;
1627 int segment_yrate;
1628 PREDICTION_MODE modes[4];
1629 SEG_RDSTAT rdstat[4][INTER_MODES];
1630 int mvthresh;
1631 } BEST_SEG_INFO;
1632
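// Note: motion vectors are stored in 1/8-pel units while mv_limits are in
// full-pel units, hence the >> 3 below. For example, a row component of 33
// (eighth-pels) maps to full-pel row 4 and is out of bounds if row_max is 3.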
1633 static INLINE int mv_check_bounds(const MvLimits *mv_limits, const MV *mv) {
1634 return (mv->row >> 3) < mv_limits->row_min ||
1635 (mv->row >> 3) > mv_limits->row_max ||
1636 (mv->col >> 3) < mv_limits->col_min ||
1637 (mv->col >> 3) > mv_limits->col_max;
1638 }
1639
1640 static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
1641 MODE_INFO *const mi = x->e_mbd.mi[0];
1642 struct macroblock_plane *const p = &x->plane[0];
1643 struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
1644
1645 p->src.buf =
1646 &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
1647 assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
1648 pd->pre[0].buf =
1649 &pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
1650 if (has_second_ref(mi))
1651 pd->pre[1].buf =
1652 &pd->pre[1]
1653 .buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
1654 }
1655
1656 static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
1657 struct buf_2d orig_pre[2]) {
1658 MODE_INFO *mi = x->e_mbd.mi[0];
1659 x->plane[0].src = orig_src;
1660 x->e_mbd.plane[0].pre[0] = orig_pre[0];
1661 if (has_second_ref(mi)) x->e_mbd.plane[0].pre[1] = orig_pre[1];
1662 }
1663
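// The low four eighth-pel bits are tested (rather than three) so that a
// vector only counts as integer-pel when the halved chroma vector is also
// integer-pel; compare the "integer pel for Y and UV" check in
// handle_inter_mode() below.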
1664 static INLINE int mv_has_subpel(const MV *mv) {
1665 return (mv->row & 0x0F) || (mv->col & 0x0F);
1666 }
1667
1668 // Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way to encode zero motion.
1669 // TODO(aconverse): Find out if this is still productive then clean up or remove
1670 static int check_best_zero_mv(const VP9_COMP *cpi,
1671 const uint8_t mode_context[MAX_REF_FRAMES],
1672 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
1673 int this_mode,
1674 const MV_REFERENCE_FRAME ref_frames[2]) {
1675 if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
1676 frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
1677 (ref_frames[1] == NONE ||
1678 frame_mv[this_mode][ref_frames[1]].as_int == 0)) {
1679 int rfc = mode_context[ref_frames[0]];
1680 int c1 = cost_mv_ref(cpi, NEARMV, rfc);
1681 int c2 = cost_mv_ref(cpi, NEARESTMV, rfc);
1682 int c3 = cost_mv_ref(cpi, ZEROMV, rfc);
1683
1684 if (this_mode == NEARMV) {
1685 if (c1 > c3) return 0;
1686 } else if (this_mode == NEARESTMV) {
1687 if (c2 > c3) return 0;
1688 } else {
1689 assert(this_mode == ZEROMV);
1690 if (ref_frames[1] == NONE) {
1691 if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0) ||
1692 (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0))
1693 return 0;
1694 } else {
1695 if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0 &&
1696 frame_mv[NEARESTMV][ref_frames[1]].as_int == 0) ||
1697 (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0 &&
1698 frame_mv[NEARMV][ref_frames[1]].as_int == 0))
1699 return 0;
1700 }
1701 }
1702 }
1703 return 1;
1704 }
1705
1706 static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
1707 int_mv *frame_mv, int mi_row, int mi_col,
1708 int_mv single_newmv[MAX_REF_FRAMES],
1709 int *rate_mv) {
1710 const VP9_COMMON *const cm = &cpi->common;
1711 const int pw = 4 * num_4x4_blocks_wide_lookup[bsize];
1712 const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
1713 MACROBLOCKD *xd = &x->e_mbd;
1714 MODE_INFO *mi = xd->mi[0];
1715 const int refs[2] = { mi->ref_frame[0],
1716 mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1] };
1717 int_mv ref_mv[2];
1718 int ite, ref;
1719 const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter];
1720 struct scale_factors sf;
1721
1722 // Do joint motion search in compound mode to get more accurate mv.
1723 struct buf_2d backup_yv12[2][MAX_MB_PLANE];
1724 uint32_t last_besterr[2] = { UINT_MAX, UINT_MAX };
1725 const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
1726 vp9_get_scaled_ref_frame(cpi, mi->ref_frame[0]),
1727 vp9_get_scaled_ref_frame(cpi, mi->ref_frame[1])
1728 };
1729
1730 // Prediction buffer from second frame.
1731 #if CONFIG_VP9_HIGHBITDEPTH
1732 DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[64 * 64]);
1733 uint8_t *second_pred;
1734 #else
1735 DECLARE_ALIGNED(16, uint8_t, second_pred[64 * 64]);
1736 #endif // CONFIG_VP9_HIGHBITDEPTH
1737
1738 for (ref = 0; ref < 2; ++ref) {
1739 ref_mv[ref] = x->mbmi_ext->ref_mvs[refs[ref]][0];
1740
1741 if (scaled_ref_frame[ref]) {
1742 int i;
1743 // Swap out the reference frame for a version that's been scaled to
1744 // match the resolution of the current frame, allowing the existing
1745 // motion search code to be used without additional modifications.
1746 for (i = 0; i < MAX_MB_PLANE; i++)
1747 backup_yv12[ref][i] = xd->plane[i].pre[ref];
1748 vp9_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
1749 NULL);
1750 }
1751
1752 frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int;
1753 }
1754
1755 // Since we have scaled the reference frames to match the size of the current
1756 // frame we must use a unit scaling factor during mode selection.
1757 #if CONFIG_VP9_HIGHBITDEPTH
1758 vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
1759 cm->height, cm->use_highbitdepth);
1760 #else
1761 vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
1762 cm->height);
1763 #endif // CONFIG_VP9_HIGHBITDEPTH
1764
1765 // Allow joint search to run multiple iterations for each reference frame,
1766 // and break out of the search loop early if it cannot find a better mv.
1767 for (ite = 0; ite < 4; ite++) {
1768 struct buf_2d ref_yv12[2];
1769 uint32_t bestsme = UINT_MAX;
1770 int sadpb = x->sadperbit16;
1771 MV tmp_mv;
1772 int search_range = 3;
1773
1774 const MvLimits tmp_mv_limits = x->mv_limits;
1775 int id = ite % 2; // Even iterations search in the first reference frame,
1776 // odd iterations search in the second. The predictor
1777 // found for the 'other' reference frame is factored in.
1778
1779 // Initialized here because of a compiler problem in Visual Studio.
1780 ref_yv12[0] = xd->plane[0].pre[0];
1781 ref_yv12[1] = xd->plane[0].pre[1];
1782
1783 // Get the prediction block from the 'other' reference frame.
1784 #if CONFIG_VP9_HIGHBITDEPTH
1785 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1786 second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
1787 vp9_highbd_build_inter_predictor(
1788 CONVERT_TO_SHORTPTR(ref_yv12[!id].buf), ref_yv12[!id].stride,
1789 second_pred_alloc_16, pw, &frame_mv[refs[!id]].as_mv, &sf, pw, ph, 0,
1790 kernel, MV_PRECISION_Q3, mi_col * MI_SIZE, mi_row * MI_SIZE, xd->bd);
1791 } else {
1792 second_pred = (uint8_t *)second_pred_alloc_16;
1793 vp9_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
1794 second_pred, pw, &frame_mv[refs[!id]].as_mv,
1795 &sf, pw, ph, 0, kernel, MV_PRECISION_Q3,
1796 mi_col * MI_SIZE, mi_row * MI_SIZE);
1797 }
1798 #else
1799 vp9_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
1800 second_pred, pw, &frame_mv[refs[!id]].as_mv, &sf,
1801 pw, ph, 0, kernel, MV_PRECISION_Q3,
1802 mi_col * MI_SIZE, mi_row * MI_SIZE);
1803 #endif // CONFIG_VP9_HIGHBITDEPTH
1804
1805 // Do compound motion search on the current reference frame.
1806 if (id) xd->plane[0].pre[0] = ref_yv12[id];
1807 vp9_set_mv_search_range(&x->mv_limits, &ref_mv[id].as_mv);
1808
1809 // Use the mv result from the single mode as mv predictor.
1810 tmp_mv = frame_mv[refs[id]].as_mv;
1811
1812 tmp_mv.col >>= 3;
1813 tmp_mv.row >>= 3;
1814
1815 // Small-range full-pixel motion search.
1816 bestsme = vp9_refining_search_8p_c(x, &tmp_mv, sadpb, search_range,
1817 &cpi->fn_ptr[bsize], &ref_mv[id].as_mv,
1818 second_pred);
1819 if (bestsme < UINT_MAX)
1820 bestsme = vp9_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv,
1821 second_pred, &cpi->fn_ptr[bsize], 1);
1822
1823 x->mv_limits = tmp_mv_limits;
1824
1825 if (bestsme < UINT_MAX) {
1826 uint32_t dis; /* TODO: use dis in distortion calculation later. */
1827 uint32_t sse;
1828 bestsme = cpi->find_fractional_mv_step(
1829 x, &tmp_mv, &ref_mv[id].as_mv, cpi->common.allow_high_precision_mv,
1830 x->errorperbit, &cpi->fn_ptr[bsize], 0,
1831 cpi->sf.mv.subpel_iters_per_step, NULL, x->nmvjointcost, x->mvcost,
1832 &dis, &sse, second_pred, pw, ph);
1833 }
1834
1835 // Restore the pointer to the first (possibly scaled) prediction buffer.
1836 if (id) xd->plane[0].pre[0] = ref_yv12[0];
1837
1838 if (bestsme < last_besterr[id]) {
1839 frame_mv[refs[id]].as_mv = tmp_mv;
1840 last_besterr[id] = bestsme;
1841 } else {
1842 break;
1843 }
1844 }
1845
1846 *rate_mv = 0;
1847
1848 for (ref = 0; ref < 2; ++ref) {
1849 if (scaled_ref_frame[ref]) {
1850 // Restore the prediction frame pointers to their unscaled versions.
1851 int i;
1852 for (i = 0; i < MAX_MB_PLANE; i++)
1853 xd->plane[i].pre[ref] = backup_yv12[ref][i];
1854 }
1855
1856 *rate_mv += vp9_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
1857 &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
1858 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
1859 }
1860 }
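// joint_motion_search() is invoked from rd_pick_best_sub8x8_mode() and
// handle_inter_mode() below whenever sf->comp_inter_joint_search_thresh
// permits it for the current block size.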
1861
1862 static int64_t rd_pick_best_sub8x8_mode(
1863 VP9_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
1864 int_mv *second_best_ref_mv, int64_t best_rd, int *returntotrate,
1865 int *returnyrate, int64_t *returndistortion, int *skippable, int64_t *psse,
1866 int mvthresh, int_mv seg_mvs[4][MAX_REF_FRAMES], BEST_SEG_INFO *bsi_buf,
1867 int filter_idx, int mi_row, int mi_col) {
1868 int i;
1869 BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
1870 MACROBLOCKD *xd = &x->e_mbd;
1871 MODE_INFO *mi = xd->mi[0];
1872 int mode_idx;
1873 int k, br = 0, idx, idy;
1874 int64_t bd = 0, block_sse = 0;
1875 PREDICTION_MODE this_mode;
1876 VP9_COMMON *cm = &cpi->common;
1877 struct macroblock_plane *const p = &x->plane[0];
1878 struct macroblockd_plane *const pd = &xd->plane[0];
1879 const int label_count = 4;
1880 int64_t this_segment_rd = 0;
1881 int label_mv_thresh;
1882 int segmentyrate = 0;
1883 const BLOCK_SIZE bsize = mi->sb_type;
1884 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
1885 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
1886 ENTROPY_CONTEXT t_above[2], t_left[2];
1887 int subpelmv = 1, have_ref = 0;
1888 SPEED_FEATURES *const sf = &cpi->sf;
1889 const int has_second_rf = has_second_ref(mi);
1890 const int inter_mode_mask = sf->inter_mode_mask[bsize];
1891 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
1892
1893 vp9_zero(*bsi);
1894
1895 bsi->segment_rd = best_rd;
1896 bsi->ref_mv[0] = best_ref_mv;
1897 bsi->ref_mv[1] = second_best_ref_mv;
1898 bsi->mvp.as_int = best_ref_mv->as_int;
1899 bsi->mvthresh = mvthresh;
1900
1901 for (i = 0; i < 4; i++) bsi->modes[i] = ZEROMV;
1902
1903 memcpy(t_above, pd->above_context, sizeof(t_above));
1904 memcpy(t_left, pd->left_context, sizeof(t_left));
1905
1906 // A multiplier of 64 here would make this threshold so large that
1907 // motion vectors would very rarely be checked on segments; the
1908 // multiplier of 1 used below makes the mv threshold roughly equal
1909 // to what it is for macroblocks.
1910 label_mv_thresh = 1 * bsi->mvthresh / label_count;
1911
1912 // Segmentation method overheads
1913 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
1914 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
1915 // TODO(jingning,rbultje): rewrite the rate-distortion optimization
1916 // loop for 4x4/4x8/8x4 block coding. To be replaced with a new rd loop.
1917 int_mv mode_mv[MB_MODE_COUNT][2];
1918 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
1919 PREDICTION_MODE mode_selected = ZEROMV;
1920 int64_t best_rd = INT64_MAX;
1921 const int i = idy * 2 + idx;
1922 int ref;
1923
1924 for (ref = 0; ref < 1 + has_second_rf; ++ref) {
1925 const MV_REFERENCE_FRAME frame = mi->ref_frame[ref];
1926 frame_mv[ZEROMV][frame].as_int = 0;
1927 vp9_append_sub8x8_mvs_for_idx(
1928 cm, xd, i, ref, mi_row, mi_col, &frame_mv[NEARESTMV][frame],
1929 &frame_mv[NEARMV][frame], mbmi_ext->mode_context);
1930 }
1931
1932 // search for the best motion vector on this segment
1933 for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
1934 const struct buf_2d orig_src = x->plane[0].src;
1935 struct buf_2d orig_pre[2];
1936
1937 mode_idx = INTER_OFFSET(this_mode);
1938 bsi->rdstat[i][mode_idx].brdcost = INT64_MAX;
1939 if (!(inter_mode_mask & (1 << this_mode))) continue;
1940
1941 if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
1942 this_mode, mi->ref_frame))
1943 continue;
1944
1945 memcpy(orig_pre, pd->pre, sizeof(orig_pre));
1946 memcpy(bsi->rdstat[i][mode_idx].ta, t_above,
1947 sizeof(bsi->rdstat[i][mode_idx].ta));
1948 memcpy(bsi->rdstat[i][mode_idx].tl, t_left,
1949 sizeof(bsi->rdstat[i][mode_idx].tl));
1950
1951 // motion search for newmv (single predictor case only)
1952 if (!has_second_rf && this_mode == NEWMV &&
1953 seg_mvs[i][mi->ref_frame[0]].as_int == INVALID_MV) {
1954 MV *const new_mv = &mode_mv[NEWMV][0].as_mv;
1955 int step_param = 0;
1956 uint32_t bestsme = UINT_MAX;
1957 int sadpb = x->sadperbit4;
1958 MV mvp_full;
1959 int max_mv;
1960 int cost_list[5];
1961 const MvLimits tmp_mv_limits = x->mv_limits;
1962
1963 /* Is the best so far sufficiently good that we can't justify doing
1964 * a new motion search? */
1965 if (best_rd < label_mv_thresh) break;
1966
1967 if (cpi->oxcf.mode != BEST) {
1968 // use previous block's result as next block's MV predictor.
1969 if (i > 0) {
1970 bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int;
1971 if (i == 2) bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
1972 }
1973 }
1974 if (i == 0)
1975 max_mv = x->max_mv_context[mi->ref_frame[0]];
1976 else
1977 max_mv =
1978 VPXMAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3;
1979
1980 if (sf->mv.auto_mv_step_size && cm->show_frame) {
1981 // Take a weighted average of the step_params based on the last frame's
1982 // max mv magnitude and the best ref mvs of the current block for
1983 // the given reference.
1984 step_param =
1985 (vp9_init_search_range(max_mv) + cpi->mv_step_param) / 2;
1986 } else {
1987 step_param = cpi->mv_step_param;
1988 }
1989
1990 mvp_full.row = bsi->mvp.as_mv.row >> 3;
1991 mvp_full.col = bsi->mvp.as_mv.col >> 3;
1992
1993 if (sf->adaptive_motion_search) {
1994 mvp_full.row = x->pred_mv[mi->ref_frame[0]].row >> 3;
1995 mvp_full.col = x->pred_mv[mi->ref_frame[0]].col >> 3;
1996 step_param = VPXMAX(step_param, 8);
1997 }
1998
1999 // adjust src pointer for this block
2000 mi_buf_shift(x, i);
2001
2002 vp9_set_mv_search_range(&x->mv_limits, &bsi->ref_mv[0]->as_mv);
2003
2004 bestsme = vp9_full_pixel_search(
2005 cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method,
2006 sadpb,
2007 sf->mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
2008 &bsi->ref_mv[0]->as_mv, new_mv, INT_MAX, 1);
2009
2010 x->mv_limits = tmp_mv_limits;
2011
2012 if (bestsme < UINT_MAX) {
2013 uint32_t distortion;
2014 cpi->find_fractional_mv_step(
2015 x, new_mv, &bsi->ref_mv[0]->as_mv, cm->allow_high_precision_mv,
2016 x->errorperbit, &cpi->fn_ptr[bsize], sf->mv.subpel_force_stop,
2017 sf->mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
2018 x->nmvjointcost, x->mvcost, &distortion,
2019 &x->pred_sse[mi->ref_frame[0]], NULL, 0, 0);
2020
2021 // save motion search result for use in compound prediction
2022 seg_mvs[i][mi->ref_frame[0]].as_mv = *new_mv;
2023 }
2024
2025 if (sf->adaptive_motion_search)
2026 x->pred_mv[mi->ref_frame[0]] = *new_mv;
2027
2028 // restore src pointers
2029 mi_buf_restore(x, orig_src, orig_pre);
2030 }
2031
2032 if (has_second_rf) {
2033 if (seg_mvs[i][mi->ref_frame[1]].as_int == INVALID_MV ||
2034 seg_mvs[i][mi->ref_frame[0]].as_int == INVALID_MV)
2035 continue;
2036 }
2037
2038 if (has_second_rf && this_mode == NEWMV &&
2039 mi->interp_filter == EIGHTTAP) {
2040 // adjust src pointers
2041 mi_buf_shift(x, i);
2042 if (sf->comp_inter_joint_search_thresh <= bsize) {
2043 int rate_mv;
2044 joint_motion_search(cpi, x, bsize, frame_mv[this_mode], mi_row,
2045 mi_col, seg_mvs[i], &rate_mv);
2046 seg_mvs[i][mi->ref_frame[0]].as_int =
2047 frame_mv[this_mode][mi->ref_frame[0]].as_int;
2048 seg_mvs[i][mi->ref_frame[1]].as_int =
2049 frame_mv[this_mode][mi->ref_frame[1]].as_int;
2050 }
2051 // restore src pointers
2052 mi_buf_restore(x, orig_src, orig_pre);
2053 }
2054
2055 bsi->rdstat[i][mode_idx].brate = set_and_cost_bmi_mvs(
2056 cpi, x, xd, i, this_mode, mode_mv[this_mode], frame_mv, seg_mvs[i],
2057 bsi->ref_mv, x->nmvjointcost, x->mvcost);
2058
2059 for (ref = 0; ref < 1 + has_second_rf; ++ref) {
2060 bsi->rdstat[i][mode_idx].mvs[ref].as_int =
2061 mode_mv[this_mode][ref].as_int;
2062 if (num_4x4_blocks_wide > 1)
2063 bsi->rdstat[i + 1][mode_idx].mvs[ref].as_int =
2064 mode_mv[this_mode][ref].as_int;
2065 if (num_4x4_blocks_high > 1)
2066 bsi->rdstat[i + 2][mode_idx].mvs[ref].as_int =
2067 mode_mv[this_mode][ref].as_int;
2068 }
2069
2070 // Trap vectors that reach beyond the UMV borders
2071 if (mv_check_bounds(&x->mv_limits, &mode_mv[this_mode][0].as_mv) ||
2072 (has_second_rf &&
2073 mv_check_bounds(&x->mv_limits, &mode_mv[this_mode][1].as_mv)))
2074 continue;
2075
2076 if (filter_idx > 0) {
2077 BEST_SEG_INFO *ref_bsi = bsi_buf;
2078 subpelmv = 0;
2079 have_ref = 1;
2080
2081 for (ref = 0; ref < 1 + has_second_rf; ++ref) {
2082 subpelmv |= mv_has_subpel(&mode_mv[this_mode][ref].as_mv);
2083 have_ref &= mode_mv[this_mode][ref].as_int ==
2084 ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
2085 }
2086
2087 if (filter_idx > 1 && !subpelmv && !have_ref) {
2088 ref_bsi = bsi_buf + 1;
2089 have_ref = 1;
2090 for (ref = 0; ref < 1 + has_second_rf; ++ref)
2091 have_ref &= mode_mv[this_mode][ref].as_int ==
2092 ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
2093 }
2094
2095 if (!subpelmv && have_ref &&
2096 ref_bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
2097 memcpy(&bsi->rdstat[i][mode_idx], &ref_bsi->rdstat[i][mode_idx],
2098 sizeof(SEG_RDSTAT));
2099 if (num_4x4_blocks_wide > 1)
2100 bsi->rdstat[i + 1][mode_idx].eobs =
2101 ref_bsi->rdstat[i + 1][mode_idx].eobs;
2102 if (num_4x4_blocks_high > 1)
2103 bsi->rdstat[i + 2][mode_idx].eobs =
2104 ref_bsi->rdstat[i + 2][mode_idx].eobs;
2105
2106 if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
2107 mode_selected = this_mode;
2108 best_rd = bsi->rdstat[i][mode_idx].brdcost;
2109 }
2110 continue;
2111 }
2112 }
2113
2114 bsi->rdstat[i][mode_idx].brdcost = encode_inter_mb_segment(
2115 cpi, x, bsi->segment_rd - this_segment_rd, i,
2116 &bsi->rdstat[i][mode_idx].byrate, &bsi->rdstat[i][mode_idx].bdist,
2117 &bsi->rdstat[i][mode_idx].bsse, bsi->rdstat[i][mode_idx].ta,
2118 bsi->rdstat[i][mode_idx].tl, mi_row, mi_col);
2119 if (bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
2120 bsi->rdstat[i][mode_idx].brdcost +=
2121 RDCOST(x->rdmult, x->rddiv, bsi->rdstat[i][mode_idx].brate, 0);
2122 bsi->rdstat[i][mode_idx].brate += bsi->rdstat[i][mode_idx].byrate;
2123 bsi->rdstat[i][mode_idx].eobs = p->eobs[i];
2124 if (num_4x4_blocks_wide > 1)
2125 bsi->rdstat[i + 1][mode_idx].eobs = p->eobs[i + 1];
2126 if (num_4x4_blocks_high > 1)
2127 bsi->rdstat[i + 2][mode_idx].eobs = p->eobs[i + 2];
2128 }
2129
2130 if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
2131 mode_selected = this_mode;
2132 best_rd = bsi->rdstat[i][mode_idx].brdcost;
2133 }
2134 } /*for each 4x4 mode*/
2135
2136 if (best_rd == INT64_MAX) {
2137 int iy, midx;
2138 for (iy = i + 1; iy < 4; ++iy)
2139 for (midx = 0; midx < INTER_MODES; ++midx)
2140 bsi->rdstat[iy][midx].brdcost = INT64_MAX;
2141 bsi->segment_rd = INT64_MAX;
2142 return INT64_MAX;
2143 }
2144
2145 mode_idx = INTER_OFFSET(mode_selected);
2146 memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above));
2147 memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left));
2148
2149 set_and_cost_bmi_mvs(cpi, x, xd, i, mode_selected, mode_mv[mode_selected],
2150 frame_mv, seg_mvs[i], bsi->ref_mv, x->nmvjointcost,
2151 x->mvcost);
2152
2153 br += bsi->rdstat[i][mode_idx].brate;
2154 bd += bsi->rdstat[i][mode_idx].bdist;
2155 block_sse += bsi->rdstat[i][mode_idx].bsse;
2156 segmentyrate += bsi->rdstat[i][mode_idx].byrate;
2157 this_segment_rd += bsi->rdstat[i][mode_idx].brdcost;
2158
2159 if (this_segment_rd > bsi->segment_rd) {
2160 int iy, midx;
2161 for (iy = i + 1; iy < 4; ++iy)
2162 for (midx = 0; midx < INTER_MODES; ++midx)
2163 bsi->rdstat[iy][midx].brdcost = INT64_MAX;
2164 bsi->segment_rd = INT64_MAX;
2165 return INT64_MAX;
2166 }
2167 }
2168 } /* for each label */
2169
2170 bsi->r = br;
2171 bsi->d = bd;
2172 bsi->segment_yrate = segmentyrate;
2173 bsi->segment_rd = this_segment_rd;
2174 bsi->sse = block_sse;
2175
2176 // update the coding decisions
2177 for (k = 0; k < 4; ++k) bsi->modes[k] = mi->bmi[k].as_mode;
2178
2179 if (bsi->segment_rd > best_rd) return INT64_MAX;
2180 /* set it to the best */
2181 for (i = 0; i < 4; i++) {
2182 mode_idx = INTER_OFFSET(bsi->modes[i]);
2183 mi->bmi[i].as_mv[0].as_int = bsi->rdstat[i][mode_idx].mvs[0].as_int;
2184 if (has_second_ref(mi))
2185 mi->bmi[i].as_mv[1].as_int = bsi->rdstat[i][mode_idx].mvs[1].as_int;
2186 x->plane[0].eobs[i] = bsi->rdstat[i][mode_idx].eobs;
2187 mi->bmi[i].as_mode = bsi->modes[i];
2188 }
2189
2190 /*
2191 * used to set mbmi->mv.as_int
2192 */
2193 *returntotrate = bsi->r;
2194 *returndistortion = bsi->d;
2195 *returnyrate = bsi->segment_yrate;
2196 *skippable = vp9_is_skippable_in_plane(x, BLOCK_8X8, 0);
2197 *psse = bsi->sse;
2198 mi->mode = bsi->modes[3];
2199
2200 return bsi->segment_rd;
2201 }
2202
2203 static void estimate_ref_frame_costs(const VP9_COMMON *cm,
2204 const MACROBLOCKD *xd, int segment_id,
2205 unsigned int *ref_costs_single,
2206 unsigned int *ref_costs_comp,
2207 vpx_prob *comp_mode_p) {
2208 int seg_ref_active =
2209 segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
2210 if (seg_ref_active) {
2211 memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
2212 memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
2213 *comp_mode_p = 128;
2214 } else {
2215 vpx_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd);
2216 vpx_prob comp_inter_p = 128;
2217
2218 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
2219 comp_inter_p = vp9_get_reference_mode_prob(cm, xd);
2220 *comp_mode_p = comp_inter_p;
2221 } else {
2222 *comp_mode_p = 128;
2223 }
2224
2225 ref_costs_single[INTRA_FRAME] = vp9_cost_bit(intra_inter_p, 0);
2226
2227 if (cm->reference_mode != COMPOUND_REFERENCE) {
2228 vpx_prob ref_single_p1 = vp9_get_pred_prob_single_ref_p1(cm, xd);
2229 vpx_prob ref_single_p2 = vp9_get_pred_prob_single_ref_p2(cm, xd);
2230 unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1);
2231
2232 if (cm->reference_mode == REFERENCE_MODE_SELECT)
2233 base_cost += vp9_cost_bit(comp_inter_p, 0);
2234
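// Single-reference coding tree: the first bit separates LAST (0) from
// {GOLDEN, ALTREF} (1); the second bit then separates GOLDEN (0) from
// ALTREF (1), matching the per-frame cost additions below.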
2235 ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] =
2236 ref_costs_single[ALTREF_FRAME] = base_cost;
2237 ref_costs_single[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0);
2238 ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p1, 1);
2239 ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p1, 1);
2240 ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p2, 0);
2241 ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p2, 1);
2242 } else {
2243 ref_costs_single[LAST_FRAME] = 512;
2244 ref_costs_single[GOLDEN_FRAME] = 512;
2245 ref_costs_single[ALTREF_FRAME] = 512;
2246 }
2247 if (cm->reference_mode != SINGLE_REFERENCE) {
2248 vpx_prob ref_comp_p = vp9_get_pred_prob_comp_ref_p(cm, xd);
2249 unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1);
2250
2251 if (cm->reference_mode == REFERENCE_MODE_SELECT)
2252 base_cost += vp9_cost_bit(comp_inter_p, 1);
2253
2254 ref_costs_comp[LAST_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 0);
2255 ref_costs_comp[GOLDEN_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 1);
2256 } else {
2257 ref_costs_comp[LAST_FRAME] = 512;
2258 ref_costs_comp[GOLDEN_FRAME] = 512;
2259 }
2260 }
2261 }
2262
2263 static void store_coding_context(
2264 MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int mode_index,
2265 int64_t comp_pred_diff[REFERENCE_MODES],
2266 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS], int skippable) {
2267 MACROBLOCKD *const xd = &x->e_mbd;
2268
2269 // Take a snapshot of the coding context so it can be
2270 // restored if we decide to encode this way
2271 ctx->skip = x->skip;
2272 ctx->skippable = skippable;
2273 ctx->best_mode_index = mode_index;
2274 ctx->mic = *xd->mi[0];
2275 ctx->mbmi_ext = *x->mbmi_ext;
2276 ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
2277 ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
2278 ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
2279
2280 memcpy(ctx->best_filter_diff, best_filter_diff,
2281 sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS);
2282 }
2283
2284 static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
2285 MV_REFERENCE_FRAME ref_frame,
2286 BLOCK_SIZE block_size, int mi_row, int mi_col,
2287 int_mv frame_nearest_mv[MAX_REF_FRAMES],
2288 int_mv frame_near_mv[MAX_REF_FRAMES],
2289 struct buf_2d yv12_mb[4][MAX_MB_PLANE]) {
2290 const VP9_COMMON *cm = &cpi->common;
2291 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
2292 MACROBLOCKD *const xd = &x->e_mbd;
2293 MODE_INFO *const mi = xd->mi[0];
2294 int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
2295 const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
2296 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2297
2298 assert(yv12 != NULL);
2299
2300 // TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
2301 // use the UV scaling factors.
2302 vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
2303
2304 // Gets an initial list of candidate vectors from neighbours and orders them
2305 vp9_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col,
2306 mbmi_ext->mode_context);
2307
2308 // Candidate refinement carried out at encoder and decoder
2309 vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
2310 &frame_nearest_mv[ref_frame],
2311 &frame_near_mv[ref_frame]);
2312
2313 // Further refinement that is encode side only to test the top few candidates
2314 // in full and choose the best as the centre point for subsequent searches.
2315 // The current implementation doesn't support scaling.
2316 if (!vp9_is_scaled(sf) && block_size >= BLOCK_8X8)
2317 vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
2318 block_size);
2319 }
2320
2321 static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
2322 int mi_row, int mi_col, int_mv *tmp_mv,
2323 int *rate_mv) {
2324 MACROBLOCKD *xd = &x->e_mbd;
2325 const VP9_COMMON *cm = &cpi->common;
2326 MODE_INFO *mi = xd->mi[0];
2327 struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
2328 int bestsme = INT_MAX;
2329 int step_param;
2330 int sadpb = x->sadperbit16;
2331 MV mvp_full;
2332 int ref = mi->ref_frame[0];
2333 MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
2334 const MvLimits tmp_mv_limits = x->mv_limits;
2335 int cost_list[5];
2336
2337 const YV12_BUFFER_CONFIG *scaled_ref_frame =
2338 vp9_get_scaled_ref_frame(cpi, ref);
2339
2340 MV pred_mv[3];
2341 pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
2342 pred_mv[1] = x->mbmi_ext->ref_mvs[ref][1].as_mv;
2343 pred_mv[2] = x->pred_mv[ref];
2344
2345 if (scaled_ref_frame) {
2346 int i;
2347 // Swap out the reference frame for a version that's been scaled to
2348 // match the resolution of the current frame, allowing the existing
2349 // motion search code to be used without additional modifications.
2350 for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
2351
2352 vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
2353 }
2354
2355 // Work out the size of the first step in the mv step search.
2356 // 0 here is the maximum-length first step; 1 is the maximum length >> 1, etc.
2357 if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
2358 // Take a weighted average of the step_params based on the last frame's
2359 // max mv magnitude and that based on the best ref mvs of the current
2360 // block for the given reference.
2361 step_param =
2362 (vp9_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
2363 2;
2364 } else {
2365 step_param = cpi->mv_step_param;
2366 }
2367
2368 if (cpi->sf.adaptive_motion_search && bsize < BLOCK_64X64) {
2369 int boffset =
2370 2 * (b_width_log2_lookup[BLOCK_64X64] -
2371 VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
2372 step_param = VPXMAX(step_param, boffset);
2373 }
2374
2375 if (cpi->sf.adaptive_motion_search) {
2376 int bwl = b_width_log2_lookup[bsize];
2377 int bhl = b_height_log2_lookup[bsize];
2378 int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4);
2379
2380 if (tlevel < 5) step_param += 2;
2381
2382 // pred_mv_sad is not set up for dynamically scaled frames.
2383 if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) {
2384 int i;
2385 for (i = LAST_FRAME; i <= ALTREF_FRAME && cm->show_frame; ++i) {
2386 if ((x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[i]) {
2387 x->pred_mv[ref].row = 0;
2388 x->pred_mv[ref].col = 0;
2389 tmp_mv->as_int = INVALID_MV;
2390
2391 if (scaled_ref_frame) {
2392 int i;
2393 for (i = 0; i < MAX_MB_PLANE; ++i)
2394 xd->plane[i].pre[0] = backup_yv12[i];
2395 }
2396 return;
2397 }
2398 }
2399 }
2400 }
2401
2402 // Note: MV limits are modified here. Always restore the original values
2403 // after full-pixel motion search.
2404 vp9_set_mv_search_range(&x->mv_limits, &ref_mv);
2405
2406 mvp_full = pred_mv[x->mv_best_ref_index[ref]];
2407
2408 mvp_full.col >>= 3;
2409 mvp_full.row >>= 3;
2410
2411 bestsme = vp9_full_pixel_search(
2412 cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method, sadpb,
2413 cond_cost_list(cpi, cost_list), &ref_mv, &tmp_mv->as_mv, INT_MAX, 1);
2414
2415 x->mv_limits = tmp_mv_limits;
2416
2417 if (bestsme < INT_MAX) {
2418 uint32_t dis; /* TODO: use dis in distortion calculation later. */
2419 cpi->find_fractional_mv_step(
2420 x, &tmp_mv->as_mv, &ref_mv, cm->allow_high_precision_mv, x->errorperbit,
2421 &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
2422 cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
2423 x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0);
2424 }
2425 *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
2426 x->mvcost, MV_COST_WEIGHT);
2427
2428 if (cpi->sf.adaptive_motion_search) x->pred_mv[ref] = tmp_mv->as_mv;
2429
2430 if (scaled_ref_frame) {
2431 int i;
2432 for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
2433 }
2434 }
2435
2436 static INLINE void restore_dst_buf(MACROBLOCKD *xd,
2437 uint8_t *orig_dst[MAX_MB_PLANE],
2438 int orig_dst_stride[MAX_MB_PLANE]) {
2439 int i;
2440 for (i = 0; i < MAX_MB_PLANE; i++) {
2441 xd->plane[i].dst.buf = orig_dst[i];
2442 xd->plane[i].dst.stride = orig_dst_stride[i];
2443 }
2444 }
2445
2446 // In some situations we want to discount the apparent cost of a new motion
2447 // vector. Where there is a subtle motion field and especially where there is
2448 // low spatial complexity then it can be hard to cover the cost of a new motion
2449 // vector in a single block, even if that motion vector reduces distortion.
2450 // However, once established that vector may be usable through the nearest and
2451 // near mv modes to reduce distortion in subsequent blocks and also improve
2452 // visual quality.
2453 static int discount_newmv_test(const VP9_COMP *cpi, int this_mode,
2454 int_mv this_mv,
2455 int_mv (*mode_mv)[MAX_REF_FRAMES],
2456 int ref_frame) {
2457 return (!cpi->rc.is_src_frame_alt_ref && (this_mode == NEWMV) &&
2458 (this_mv.as_int != 0) &&
2459 ((mode_mv[NEARESTMV][ref_frame].as_int == 0) ||
2460 (mode_mv[NEARESTMV][ref_frame].as_int == INVALID_MV)) &&
2461 ((mode_mv[NEARMV][ref_frame].as_int == 0) ||
2462 (mode_mv[NEARMV][ref_frame].as_int == INVALID_MV)));
2463 }
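// Illustrative arithmetic only (assuming NEW_MV_DISCOUNT_FACTOR is 8): when
// the test above passes, a NEWMV rate of, say, 240 is charged as
// VPXMAX(240 / 8, 1) = 30 in handle_inter_mode() instead of the full 240.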
2464
2465 static int64_t handle_inter_mode(
2466 VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
2467 int64_t *distortion, int *skippable, int *rate_y, int *rate_uv,
2468 int *disable_skip, int_mv (*mode_mv)[MAX_REF_FRAMES], int mi_row,
2469 int mi_col, int_mv single_newmv[MAX_REF_FRAMES],
2470 INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
2471 int (*single_skippable)[MAX_REF_FRAMES], int64_t *psse,
2472 const int64_t ref_best_rd, int64_t *mask_filter, int64_t filter_cache[]) {
2473 VP9_COMMON *cm = &cpi->common;
2474 MACROBLOCKD *xd = &x->e_mbd;
2475 MODE_INFO *mi = xd->mi[0];
2476 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2477 const int is_comp_pred = has_second_ref(mi);
2478 const int this_mode = mi->mode;
2479 int_mv *frame_mv = mode_mv[this_mode];
2480 int i;
2481 int refs[2] = { mi->ref_frame[0],
2482 (mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1]) };
2483 int_mv cur_mv[2];
2484 #if CONFIG_VP9_HIGHBITDEPTH
2485 DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]);
2486 uint8_t *tmp_buf;
2487 #else
2488 DECLARE_ALIGNED(16, uint8_t, tmp_buf[MAX_MB_PLANE * 64 * 64]);
2489 #endif // CONFIG_VP9_HIGHBITDEPTH
2490 int pred_exists = 0;
2491 int intpel_mv;
2492 int64_t rd, tmp_rd, best_rd = INT64_MAX;
2493 int best_needs_copy = 0;
2494 uint8_t *orig_dst[MAX_MB_PLANE];
2495 int orig_dst_stride[MAX_MB_PLANE];
2496 int rs = 0;
2497 INTERP_FILTER best_filter = SWITCHABLE;
2498 uint8_t skip_txfm[MAX_MB_PLANE << 2] = { 0 };
2499 int64_t bsse[MAX_MB_PLANE << 2] = { 0 };
2500
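// When cb_pred_filter_search is enabled, only roughly every other block
// position (alternating frame to frame in a checkerboard pattern) runs the
// neighbour-based filter prediction below.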
2501 int bsl = mi_width_log2_lookup[bsize];
2502 int pred_filter_search =
2503 cpi->sf.cb_pred_filter_search
2504 ? (((mi_row + mi_col) >> bsl) +
2505 get_chessboard_index(cm->current_video_frame)) &
2506 0x1
2507 : 0;
2508
2509 int skip_txfm_sb = 0;
2510 int64_t skip_sse_sb = INT64_MAX;
2511 int64_t distortion_y = 0, distortion_uv = 0;
2512
2513 #if CONFIG_VP9_HIGHBITDEPTH
2514 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
2515 tmp_buf = CONVERT_TO_BYTEPTR(tmp_buf16);
2516 } else {
2517 tmp_buf = (uint8_t *)tmp_buf16;
2518 }
2519 #endif // CONFIG_VP9_HIGHBITDEPTH
2520
2521 if (pred_filter_search) {
2522 INTERP_FILTER af = SWITCHABLE, lf = SWITCHABLE;
2523 if (xd->above_mi && is_inter_block(xd->above_mi))
2524 af = xd->above_mi->interp_filter;
2525 if (xd->left_mi && is_inter_block(xd->left_mi))
2526 lf = xd->left_mi->interp_filter;
2527
2528 if ((this_mode != NEWMV) || (af == lf)) best_filter = af;
2529 }
2530
2531 if (is_comp_pred) {
2532 if (frame_mv[refs[0]].as_int == INVALID_MV ||
2533 frame_mv[refs[1]].as_int == INVALID_MV)
2534 return INT64_MAX;
2535
2536 if (cpi->sf.adaptive_mode_search) {
2537 if (single_filter[this_mode][refs[0]] ==
2538 single_filter[this_mode][refs[1]])
2539 best_filter = single_filter[this_mode][refs[0]];
2540 }
2541 }
2542
2543 if (this_mode == NEWMV) {
2544 int rate_mv;
2545 if (is_comp_pred) {
2546 // Initialize mv using single prediction mode result.
2547 frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
2548 frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
2549
2550 if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
2551 joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col,
2552 single_newmv, &rate_mv);
2553 } else {
2554 rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]].as_mv,
2555 &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
2556 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
2557 rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]].as_mv,
2558 &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
2559 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
2560 }
2561 *rate2 += rate_mv;
2562 } else {
2563 int_mv tmp_mv;
2564 single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv);
2565 if (tmp_mv.as_int == INVALID_MV) return INT64_MAX;
2566
2567 frame_mv[refs[0]].as_int = xd->mi[0]->bmi[0].as_mv[0].as_int =
2568 tmp_mv.as_int;
2569 single_newmv[refs[0]].as_int = tmp_mv.as_int;
2570
2571 // Estimate the rate implications of a new mv but discount this
2572 // under certain circumstances where we want to help initiate a weak
2573 // motion field, where the distortion gain for a single block may not
2574 // be enough to overcome the cost of a new mv.
2575 if (discount_newmv_test(cpi, this_mode, tmp_mv, mode_mv, refs[0])) {
2576 *rate2 += VPXMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
2577 } else {
2578 *rate2 += rate_mv;
2579 }
2580 }
2581 }
2582
2583 for (i = 0; i < is_comp_pred + 1; ++i) {
2584 cur_mv[i] = frame_mv[refs[i]];
2585 // Clip "next_nearest" so that it does not extend too far out of the image.
2586 if (this_mode != NEWMV) clamp_mv2(&cur_mv[i].as_mv, xd);
2587
2588 if (mv_check_bounds(&x->mv_limits, &cur_mv[i].as_mv)) return INT64_MAX;
2589 mi->mv[i].as_int = cur_mv[i].as_int;
2590 }
2591
2592 // Do the first prediction into the destination buffer. Do the next
2593 // prediction into a temporary buffer. Then keep track of which one
2594 // of these currently holds the best predictor, and use the other
2595 // one for future predictions. In the end, copy from tmp_buf to
2596 // dst if necessary.
2597 for (i = 0; i < MAX_MB_PLANE; i++) {
2598 orig_dst[i] = xd->plane[i].dst.buf;
2599 orig_dst_stride[i] = xd->plane[i].dst.stride;
2600 }
2601
2602 // We don't include the cost of the second reference here, because there
2603 // are only two options: Last/ARF or Golden/ARF; the second reference is
2604 // therefore always known to be ARF.
2605 //
2606 // Under some circumstances we discount the cost of new mv mode to encourage
2607 // initiation of a motion field.
2608 if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], mode_mv,
2609 refs[0])) {
2610 *rate2 +=
2611 VPXMIN(cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]),
2612 cost_mv_ref(cpi, NEARESTMV, mbmi_ext->mode_context[refs[0]]));
2613 } else {
2614 *rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]);
2615 }
2616
2617 if (RDCOST(x->rdmult, x->rddiv, *rate2, 0) > ref_best_rd &&
2618 mi->mode != NEARESTMV)
2619 return INT64_MAX;
2620
2621 pred_exists = 0;
2622 // Are all MVs integer pel for Y and UV
2623 intpel_mv = !mv_has_subpel(&mi->mv[0].as_mv);
2624 if (is_comp_pred) intpel_mv &= !mv_has_subpel(&mi->mv[1].as_mv);
2625
2626 // Search for best switchable filter by checking the variance of
2627 // pred error irrespective of whether the filter will be used
2628 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
2629
2630 if (cm->interp_filter != BILINEAR) {
2631 if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) {
2632 best_filter = EIGHTTAP;
2633 } else if (best_filter == SWITCHABLE) {
2634 int newbest;
2635 int tmp_rate_sum = 0;
2636 int64_t tmp_dist_sum = 0;
2637
2638 for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
2639 int j;
2640 int64_t rs_rd;
2641 int tmp_skip_sb = 0;
2642 int64_t tmp_skip_sse = INT64_MAX;
2643
2644 mi->interp_filter = i;
2645 rs = vp9_get_switchable_rate(cpi, xd);
2646 rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
2647
2648 if (i > 0 && intpel_mv) {
2649 rd = RDCOST(x->rdmult, x->rddiv, tmp_rate_sum, tmp_dist_sum);
2650 filter_cache[i] = rd;
2651 filter_cache[SWITCHABLE_FILTERS] =
2652 VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
2653 if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
2654 *mask_filter = VPXMAX(*mask_filter, rd);
2655 } else {
2656 int rate_sum = 0;
2657 int64_t dist_sum = 0;
2658 if (i > 0 && cpi->sf.adaptive_interp_filter_search &&
2659 (cpi->sf.interp_filter_search_mask & (1 << i))) {
2660 rate_sum = INT_MAX;
2661 dist_sum = INT64_MAX;
2662 continue;
2663 }
2664
2665 if ((cm->interp_filter == SWITCHABLE && (!i || best_needs_copy)) ||
2666 (cm->interp_filter != SWITCHABLE &&
2667 (cm->interp_filter == mi->interp_filter ||
2668 (i == 0 && intpel_mv)))) {
2669 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2670 } else {
2671 for (j = 0; j < MAX_MB_PLANE; j++) {
2672 xd->plane[j].dst.buf = tmp_buf + j * 64 * 64;
2673 xd->plane[j].dst.stride = 64;
2674 }
2675 }
2676 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
2677 model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, &tmp_skip_sb,
2678 &tmp_skip_sse);
2679
2680 rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum);
2681 filter_cache[i] = rd;
2682 filter_cache[SWITCHABLE_FILTERS] =
2683 VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
2684 if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
2685 *mask_filter = VPXMAX(*mask_filter, rd);
2686
2687 if (i == 0 && intpel_mv) {
2688 tmp_rate_sum = rate_sum;
2689 tmp_dist_sum = dist_sum;
2690 }
2691 }
2692
2693 if (i == 0 && cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
2694 if (rd / 2 > ref_best_rd) {
2695 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2696 return INT64_MAX;
2697 }
2698 }
2699 newbest = i == 0 || rd < best_rd;
2700
2701 if (newbest) {
2702 best_rd = rd;
2703 best_filter = mi->interp_filter;
2704 if (cm->interp_filter == SWITCHABLE && i && !intpel_mv)
2705 best_needs_copy = !best_needs_copy;
2706 }
2707
2708 if ((cm->interp_filter == SWITCHABLE && newbest) ||
2709 (cm->interp_filter != SWITCHABLE &&
2710 cm->interp_filter == mi->interp_filter)) {
2711 pred_exists = 1;
2712 tmp_rd = best_rd;
2713
2714 skip_txfm_sb = tmp_skip_sb;
2715 skip_sse_sb = tmp_skip_sse;
2716 memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
2717 memcpy(bsse, x->bsse, sizeof(bsse));
2718 }
2719 }
2720 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2721 }
2722 }
2723 // Set the appropriate filter
2724 mi->interp_filter =
2725 cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter;
2726 rs = cm->interp_filter == SWITCHABLE ? vp9_get_switchable_rate(cpi, xd) : 0;
2727
2728 if (pred_exists) {
2729 if (best_needs_copy) {
2730 // again temporarily set the buffers to local memory to prevent a memcpy
2731 for (i = 0; i < MAX_MB_PLANE; i++) {
2732 xd->plane[i].dst.buf = tmp_buf + i * 64 * 64;
2733 xd->plane[i].dst.stride = 64;
2734 }
2735 }
2736 rd = tmp_rd + RDCOST(x->rdmult, x->rddiv, rs, 0);
2737 } else {
2738 int tmp_rate;
2739 int64_t tmp_dist;
2740 // Handles the special case when a filter that is not in the
2741 // switchable list (e.g. bilinear) is indicated at the frame level, or
2742 // the skip condition holds.
2743 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
2744 model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist, &skip_txfm_sb,
2745 &skip_sse_sb);
2746 rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
2747 memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
2748 memcpy(bsse, x->bsse, sizeof(bsse));
2749 }
2750
2751 if (!is_comp_pred) single_filter[this_mode][refs[0]] = mi->interp_filter;
2752
2753 if (cpi->sf.adaptive_mode_search)
2754 if (is_comp_pred)
2755 if (single_skippable[this_mode][refs[0]] &&
2756 single_skippable[this_mode][refs[1]])
2757 memset(skip_txfm, SKIP_TXFM_AC_DC, sizeof(skip_txfm));
2758
2759 if (cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
2760 // If the current pred_error modeled rd is substantially more than the best
2761 // so far, do not bother doing a full rd search.
2762 if (rd / 2 > ref_best_rd) {
2763 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2764 return INT64_MAX;
2765 }
2766 }
2767
2768 if (cm->interp_filter == SWITCHABLE) *rate2 += rs;
2769
2770 memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
2771 memcpy(x->bsse, bsse, sizeof(bsse));
2772
2773 if (!skip_txfm_sb) {
2774 int skippable_y, skippable_uv;
2775 int64_t sseuv = INT64_MAX;
2776 int64_t rdcosty = INT64_MAX;
2777
2778 // Y cost and distortion
2779 vp9_subtract_plane(x, bsize, 0);
2780 super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse, bsize,
2781 ref_best_rd);
2782
2783 if (*rate_y == INT_MAX) {
2784 *rate2 = INT_MAX;
2785 *distortion = INT64_MAX;
2786 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2787 return INT64_MAX;
2788 }
2789
2790 *rate2 += *rate_y;
2791 *distortion += distortion_y;
2792
2793 rdcosty = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
2794 rdcosty = VPXMIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse));
2795
2796 if (!super_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv,
2797 &sseuv, bsize, ref_best_rd - rdcosty)) {
2798 *rate2 = INT_MAX;
2799 *distortion = INT64_MAX;
2800 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2801 return INT64_MAX;
2802 }
2803
2804 *psse += sseuv;
2805 *rate2 += *rate_uv;
2806 *distortion += distortion_uv;
2807 *skippable = skippable_y && skippable_uv;
2808 } else {
2809 x->skip = 1;
2810 *disable_skip = 1;
2811
2812 // The cost of the skip bit needs to be added.
2813 *rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
2814
2815 *distortion = skip_sse_sb;
2816 }
2817
2818 if (!is_comp_pred) single_skippable[this_mode][refs[0]] = *skippable;
2819
2820 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2821 return 0; // The rate-distortion cost will be re-calculated by caller.
2822 }
2823
2824 void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
2825 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
2826 int64_t best_rd) {
2827 VP9_COMMON *const cm = &cpi->common;
2828 MACROBLOCKD *const xd = &x->e_mbd;
2829 struct macroblockd_plane *const pd = xd->plane;
2830 int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
2831 int y_skip = 0, uv_skip = 0;
2832 int64_t dist_y = 0, dist_uv = 0;
2833 TX_SIZE max_uv_tx_size;
2834 x->skip_encode = 0;
2835 ctx->skip = 0;
2836 xd->mi[0]->ref_frame[0] = INTRA_FRAME;
2837 xd->mi[0]->ref_frame[1] = NONE;
2838 // Initialize interp_filter here so we do not have to check for inter block
2839 // modes in get_pred_context_switchable_interp()
2840 xd->mi[0]->interp_filter = SWITCHABLE_FILTERS;
2841
2842 if (bsize >= BLOCK_8X8) {
2843 if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly, &dist_y,
2844 &y_skip, bsize, best_rd) >= best_rd) {
2845 rd_cost->rate = INT_MAX;
2846 return;
2847 }
2848 } else {
2849 y_skip = 0;
2850 if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate_y, &rate_y_tokenonly,
2851 &dist_y, best_rd) >= best_rd) {
2852 rd_cost->rate = INT_MAX;
2853 return;
2854 }
2855 }
2856 max_uv_tx_size = uv_txsize_lookup[bsize][xd->mi[0]->tx_size]
2857 [pd[1].subsampling_x][pd[1].subsampling_y];
2858 rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly, &dist_uv,
2859 &uv_skip, VPXMAX(BLOCK_8X8, bsize), max_uv_tx_size);
2860
2861 if (y_skip && uv_skip) {
2862 rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
2863 vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
2864 rd_cost->dist = dist_y + dist_uv;
2865 } else {
2866 rd_cost->rate =
2867 rate_y + rate_uv + vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
2868 rd_cost->dist = dist_y + dist_uv;
2869 }
2870
2871 ctx->mic = *xd->mi[0];
2872 ctx->mbmi_ext = *x->mbmi_ext;
2873 rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
2874 }
2875
2876 // This function is designed to apply a bias or adjustment to an rd value based
2877 // on the relative variance of the source and reconstruction.
2878 #define LOW_VAR_THRESH 16
2879 #define VLOW_ADJ_MAX 25
2880 #define VHIGH_ADJ_MAX 8
2881 static void rd_variance_adjustment(VP9_COMP *cpi, MACROBLOCK *x,
2882 BLOCK_SIZE bsize, int64_t *this_rd,
2883 MV_REFERENCE_FRAME ref_frame,
2884 unsigned int source_variance) {
2885 MACROBLOCKD *const xd = &x->e_mbd;
2886 unsigned int recon_variance;
2887 unsigned int absvar_diff = 0;
2888 int64_t var_error = 0;
2889 int64_t var_factor = 0;
2890
2891 if (*this_rd == INT64_MAX) return;
2892
2893 #if CONFIG_VP9_HIGHBITDEPTH
2894 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
2895 recon_variance = vp9_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst,
2896 bsize, xd->bd);
2897 } else {
2898 recon_variance =
2899 vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
2900 }
2901 #else
2902 recon_variance = vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
2903 #endif // CONFIG_VP9_HIGHBITDEPTH
2904
2905 if ((source_variance + recon_variance) > LOW_VAR_THRESH) {
2906 absvar_diff = (source_variance > recon_variance)
2907 ? (source_variance - recon_variance)
2908 : (recon_variance - source_variance);
2909
2910 var_error = ((int64_t)200 * source_variance * recon_variance) /
2911 (((int64_t)source_variance * source_variance) +
2912 ((int64_t)recon_variance * recon_variance));
2913 var_error = 100 - var_error;
2914 }
2915
2916 // Source variance above a threshold and ref frame is intra.
2917 // This case is targeted mainly at discouraging intra modes that give rise
2918 // to a predictor with a low spatial complexity compared to the source.
2919 if ((source_variance > LOW_VAR_THRESH) && (ref_frame == INTRA_FRAME) &&
2920 (source_variance > recon_variance)) {
2921 var_factor = VPXMIN(absvar_diff, VPXMIN(VLOW_ADJ_MAX, var_error));
2922 // A second possible case of interest is where the source variance
2923 // is very low and we wish to discourage false texture or motion trails.
2924 } else if ((source_variance < (LOW_VAR_THRESH >> 1)) &&
2925 (recon_variance > source_variance)) {
2926 var_factor = VPXMIN(absvar_diff, VPXMIN(VHIGH_ADJ_MAX, var_error));
2927 }
2928 *this_rd += (*this_rd * var_factor) / 100;
2929 }
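// Illustrative arithmetic only: with source_variance = 64 and
// recon_variance = 16, var_error = 100 - (200 * 64 * 16) / (64 * 64 + 16 * 16)
// = 100 - 47 = 53 and absvar_diff = 48. For an intra ref frame this gives
// var_factor = VPXMIN(48, VPXMIN(VLOW_ADJ_MAX, 53)) = 25, i.e. the rd cost is
// inflated by 25%.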
2930
2931 // Do we have an internal image edge (e.g. formatting bars).
2932 int vp9_internal_image_edge(VP9_COMP *cpi) {
2933 return (cpi->oxcf.pass == 2) &&
2934 ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
2935 (cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
2936 }
2937
2938 // Checks to see if a super block is on a horizontal image edge.
2939 // In most cases this is the "real" edge unless there are formatting
2940 // bars embedded in the stream.
2941 int vp9_active_h_edge(VP9_COMP *cpi, int mi_row, int mi_step) {
2942 int top_edge = 0;
2943 int bottom_edge = cpi->common.mi_rows;
2944 int is_active_h_edge = 0;
2945
2946 // For two pass account for any formatting bars detected.
2947 if (cpi->oxcf.pass == 2) {
2948 TWO_PASS *twopass = &cpi->twopass;
2949
2950 // The inactive region is specified in MBs not mi units.
2951 // The image edge is in the following MB row.
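// (A 16x16 MB spans two 8x8 mi units, hence the factor of two below.)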
2952 top_edge += (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
2953
2954 bottom_edge -= (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
2955 bottom_edge = VPXMAX(top_edge, bottom_edge);
2956 }
2957
2958 if (((top_edge >= mi_row) && (top_edge < (mi_row + mi_step))) ||
2959 ((bottom_edge >= mi_row) && (bottom_edge < (mi_row + mi_step)))) {
2960 is_active_h_edge = 1;
2961 }
2962 return is_active_h_edge;
2963 }
2964
2965 // Checks to see if a super block is on a vertical image edge.
2966 // In most cases this is the "real" edge unless there are formatting
2967 // bars embedded in the stream.
2968 int vp9_active_v_edge(VP9_COMP *cpi, int mi_col, int mi_step) {
2969 int left_edge = 0;
2970 int right_edge = cpi->common.mi_cols;
2971 int is_active_v_edge = 0;
2972
2973 // For two pass account for any formatting bars detected.
2974 if (cpi->oxcf.pass == 2) {
2975 TWO_PASS *twopass = &cpi->twopass;
2976
2977 // The inactive region is specified in MBs not mi units.
2978 // The image edge is in the following MB column.
2979 left_edge += (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
2980
2981 right_edge -= (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
2982 right_edge = VPXMAX(left_edge, right_edge);
2983 }
2984
2985 if (((left_edge >= mi_col) && (left_edge < (mi_col + mi_step))) ||
2986 ((right_edge >= mi_col) && (right_edge < (mi_col + mi_step)))) {
2987 is_active_v_edge = 1;
2988 }
2989 return is_active_v_edge;
2990 }
2991
2992 // Checks to see if a super block is at the edge of the active image.
2993 // In most cases this is the "real" edge unless there are formatting
2994 // bars embedded in the stream.
2995 int vp9_active_edge_sb(VP9_COMP *cpi, int mi_row, int mi_col) {
2996 return vp9_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) ||
2997 vp9_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE);
2998 }
2999
3000 void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, TileDataEnc *tile_data,
3001 MACROBLOCK *x, int mi_row, int mi_col,
3002 RD_COST *rd_cost, BLOCK_SIZE bsize,
3003 PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far) {
3004 VP9_COMMON *const cm = &cpi->common;
3005 TileInfo *const tile_info = &tile_data->tile_info;
3006 RD_OPT *const rd_opt = &cpi->rd;
3007 SPEED_FEATURES *const sf = &cpi->sf;
3008 MACROBLOCKD *const xd = &x->e_mbd;
3009 MODE_INFO *const mi = xd->mi[0];
3010 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
3011 const struct segmentation *const seg = &cm->seg;
3012 PREDICTION_MODE this_mode;
3013 MV_REFERENCE_FRAME ref_frame, second_ref_frame;
3014 unsigned char segment_id = mi->segment_id;
3015 int comp_pred, i, k;
3016 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
3017 struct buf_2d yv12_mb[4][MAX_MB_PLANE];
3018 int_mv single_newmv[MAX_REF_FRAMES] = { { 0 } };
3019 INTERP_FILTER single_inter_filter[MB_MODE_COUNT][MAX_REF_FRAMES];
3020 int single_skippable[MB_MODE_COUNT][MAX_REF_FRAMES];
3021 static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
3022 VP9_ALT_FLAG };
3023 int64_t best_rd = best_rd_so_far;
3024 int64_t best_pred_diff[REFERENCE_MODES];
3025 int64_t best_pred_rd[REFERENCE_MODES];
3026 int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
3027 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
3028 MODE_INFO best_mbmode;
3029 int best_mode_skippable = 0;
3030 int midx, best_mode_index = -1;
3031 unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
3032 vpx_prob comp_mode_p;
3033 int64_t best_intra_rd = INT64_MAX;
3034 unsigned int best_pred_sse = UINT_MAX;
3035 PREDICTION_MODE best_intra_mode = DC_PRED;
3036 int rate_uv_intra[TX_SIZES], rate_uv_tokenonly[TX_SIZES];
3037 int64_t dist_uv[TX_SIZES];
3038 int skip_uv[TX_SIZES];
3039 PREDICTION_MODE mode_uv[TX_SIZES];
3040 const int intra_cost_penalty = vp9_get_intra_cost_penalty(
3041 cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
3042 int best_skip2 = 0;
3043 uint8_t ref_frame_skip_mask[2] = { 0 };
3044 uint16_t mode_skip_mask[MAX_REF_FRAMES] = { 0 };
3045 int mode_skip_start = sf->mode_skip_start + 1;
3046 const int *const rd_threshes = rd_opt->threshes[segment_id][bsize];
3047 const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
3048 int64_t mode_threshold[MAX_MODES];
3049 int *tile_mode_map = tile_data->mode_map[bsize];
3050 int mode_map[MAX_MODES]; // Maintain mode_map information locally to avoid
3051 // the lock mechanism involved with reads from
3052 // tile_mode_map.
3053 const int mode_search_skip_flags = sf->mode_search_skip_flags;
3054 int64_t mask_filter = 0;
3055 int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
3056
3057 vp9_zero(best_mbmode);
3058
3059 x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
3060
3061 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
3062
3063 estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
3064 &comp_mode_p);
3065
3066 for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
3067 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
3068 best_filter_rd[i] = INT64_MAX;
3069 for (i = 0; i < TX_SIZES; i++) rate_uv_intra[i] = INT_MAX;
3070 for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
3071 for (i = 0; i < MB_MODE_COUNT; ++i) {
3072 for (k = 0; k < MAX_REF_FRAMES; ++k) {
3073 single_inter_filter[i][k] = SWITCHABLE;
3074 single_skippable[i][k] = 0;
3075 }
3076 }
3077
3078 rd_cost->rate = INT_MAX;
3079
3080 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
3081 x->pred_mv_sad[ref_frame] = INT_MAX;
3082 if (cpi->ref_frame_flags & flag_list[ref_frame]) {
3083 assert(get_ref_frame_buffer(cpi, ref_frame) != NULL);
3084 setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
3085 frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
3086 }
3087 frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
3088 frame_mv[ZEROMV][ref_frame].as_int = 0;
3089 }
3090
3091 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
3092 if (!(cpi->ref_frame_flags & flag_list[ref_frame])) {
3093 // Skip checking missing references in both single and compound reference
3094 // modes. Note that a mode will be skipped if both reference frames
3095 // are masked out.
3096 ref_frame_skip_mask[0] |= (1 << ref_frame);
3097 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3098 } else if (sf->reference_masking) {
3099 for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
3100 // Skip fixed mv modes for poor references
3101 if ((x->pred_mv_sad[ref_frame] >> 2) > x->pred_mv_sad[i]) {
3102 mode_skip_mask[ref_frame] |= INTER_NEAREST_NEAR_ZERO;
3103 break;
3104 }
3105 }
3106 }
3107 // If the segment level reference frame feature is enabled, mask out the
3108 // current ref frame when it is not the one allowed for this segment.
3109 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
3110 get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
3111 ref_frame_skip_mask[0] |= (1 << ref_frame);
3112 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3113 }
3114 }
3115
3116 // Disable this drop out case if the ref frame
3117 // segment level feature is enabled for this segment. This is to
3118 // prevent the possibility that we end up unable to pick any mode.
3119 if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
3120 // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
3121 // unless ARNR filtering is enabled in which case we want
3122 // an unfiltered alternative. We allow near/nearest as well
3123 // because they may result in zero-zero MVs but be cheaper.
3124 if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
3125 ref_frame_skip_mask[0] = (1 << LAST_FRAME) | (1 << GOLDEN_FRAME);
3126 ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
3127 mode_skip_mask[ALTREF_FRAME] = ~INTER_NEAREST_NEAR_ZERO;
3128 if (frame_mv[NEARMV][ALTREF_FRAME].as_int != 0)
3129 mode_skip_mask[ALTREF_FRAME] |= (1 << NEARMV);
3130 if (frame_mv[NEARESTMV][ALTREF_FRAME].as_int != 0)
3131 mode_skip_mask[ALTREF_FRAME] |= (1 << NEARESTMV);
3132 }
3133 }
3134
3135 if (cpi->rc.is_src_frame_alt_ref) {
3136 if (sf->alt_ref_search_fp) {
3137 mode_skip_mask[ALTREF_FRAME] = 0;
3138 ref_frame_skip_mask[0] = ~(1 << ALTREF_FRAME);
3139 ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
3140 }
3141 }
3142
3143 if (sf->alt_ref_search_fp)
3144 if (!cm->show_frame && x->pred_mv_sad[GOLDEN_FRAME] < INT_MAX)
3145 if (x->pred_mv_sad[ALTREF_FRAME] > (x->pred_mv_sad[GOLDEN_FRAME] << 1))
3146 mode_skip_mask[ALTREF_FRAME] |= INTER_ALL;
3147
3148 if (sf->adaptive_mode_search) {
3149 if (cm->show_frame && !cpi->rc.is_src_frame_alt_ref &&
3150 cpi->rc.frames_since_golden >= 3)
3151 if (x->pred_mv_sad[GOLDEN_FRAME] > (x->pred_mv_sad[LAST_FRAME] << 1))
3152 mode_skip_mask[GOLDEN_FRAME] |= INTER_ALL;
3153 }
3154
3155 if (bsize > sf->max_intra_bsize) {
3156 ref_frame_skip_mask[0] |= (1 << INTRA_FRAME);
3157 ref_frame_skip_mask[1] |= (1 << INTRA_FRAME);
3158 }
3159
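// Prune the intra Y modes that the speed features disallow for this
// block's maximum transform size.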
3160 mode_skip_mask[INTRA_FRAME] |=
3161 ~(sf->intra_y_mode_mask[max_txsize_lookup[bsize]]);
3162
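// Each mode's pruning threshold is its base rd threshold scaled by a
// per-tile frequency factor (in units of 1/32). In the mode loop below a
// candidate is skipped when the best rd so far is already under its
// threshold; the first few modes (up to LAST_NEW_MV_INDEX) are never
// pruned this way.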
3163 for (i = 0; i <= LAST_NEW_MV_INDEX; ++i) mode_threshold[i] = 0;
3164
3165 for (i = LAST_NEW_MV_INDEX + 1; i < MAX_MODES; ++i)
3166 mode_threshold[i] = ((int64_t)rd_threshes[i] * rd_thresh_freq_fact[i]) >> 5;
3167
3168 midx = sf->schedule_mode_search ? mode_skip_start : 0;
3169
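// When mode search scheduling is enabled, run a partial bubble sort over
// the tail of the tile's mode map so that modes with smaller thresholds
// are visited first; the leading entries keep their fixed order.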
3170 while (midx > 4) {
3171 uint8_t end_pos = 0;
3172 for (i = 5; i < midx; ++i) {
3173 if (mode_threshold[tile_mode_map[i - 1]] >
3174 mode_threshold[tile_mode_map[i]]) {
3175 uint8_t tmp = tile_mode_map[i];
3176 tile_mode_map[i] = tile_mode_map[i - 1];
3177 tile_mode_map[i - 1] = tmp;
3178 end_pos = i;
3179 }
3180 }
3181 midx = end_pos;
3182 }
3183
3184 memcpy(mode_map, tile_mode_map, sizeof(mode_map));
3185
3186 for (midx = 0; midx < MAX_MODES; ++midx) {
3187 int mode_index = mode_map[midx];
3188 int mode_excluded = 0;
3189 int64_t this_rd = INT64_MAX;
3190 int disable_skip = 0;
3191 int compmode_cost = 0;
3192 int rate2 = 0, rate_y = 0, rate_uv = 0;
3193 int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
3194 int skippable = 0;
3195 int this_skip2 = 0;
3196 int64_t total_sse = INT64_MAX;
3197 int early_term = 0;
3198
3199 this_mode = vp9_mode_order[mode_index].mode;
3200 ref_frame = vp9_mode_order[mode_index].ref_frame[0];
3201 second_ref_frame = vp9_mode_order[mode_index].ref_frame[1];
3202
3203 vp9_zero(x->sum_y_eobs);
3204
3205 // Look at the reference frame of the best mode so far and set the
3206 // skip mask to look at a subset of the remaining modes.
3207 if (midx == mode_skip_start && best_mode_index >= 0) {
3208 switch (best_mbmode.ref_frame[0]) {
3209 case INTRA_FRAME: break;
3210 case LAST_FRAME:
3211 ref_frame_skip_mask[0] |= LAST_FRAME_MODE_MASK;
3212 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3213 break;
3214 case GOLDEN_FRAME:
3215 ref_frame_skip_mask[0] |= GOLDEN_FRAME_MODE_MASK;
3216 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3217 break;
3218 case ALTREF_FRAME: ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK; break;
3219 case NONE:
3220 case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
3221 }
3222 }
3223
3224 if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
3225 (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
3226 continue;
3227
3228 if (mode_skip_mask[ref_frame] & (1 << this_mode)) continue;
3229
3230 // Test best rd so far against threshold for trying this mode.
3231 if (best_mode_skippable && sf->schedule_mode_search)
3232 mode_threshold[mode_index] <<= 1;
3233
3234 if (best_rd < mode_threshold[mode_index]) continue;
3235
3236 // This is only used in motion vector unit test.
3237 if (cpi->oxcf.motion_vector_unit_test && ref_frame == INTRA_FRAME) continue;
3238
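// Motion field based pruning: if the above and left neighbours all share
// one motion vector and use the candidate reference frame, NEARMV and
// ZEROMV are skipped for it; on alternating "chessboard" blocks, modes
// other than NEARESTMV/NEWMV are also restricted to the neighbours'
// common inter reference frame.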
3239 if (sf->motion_field_mode_search) {
3240 const int mi_width = VPXMIN(num_8x8_blocks_wide_lookup[bsize],
3241 tile_info->mi_col_end - mi_col);
3242 const int mi_height = VPXMIN(num_8x8_blocks_high_lookup[bsize],
3243 tile_info->mi_row_end - mi_row);
3244 const int bsl = mi_width_log2_lookup[bsize];
3245 int cb_partition_search_ctrl =
3246 (((mi_row + mi_col) >> bsl) +
3247 get_chessboard_index(cm->current_video_frame)) &
3248 0x1;
3249 MODE_INFO *ref_mi;
3250 int const_motion = 1;
3251 int skip_ref_frame = !cb_partition_search_ctrl;
3252 MV_REFERENCE_FRAME rf = NONE;
3253 int_mv ref_mv;
3254 ref_mv.as_int = INVALID_MV;
3255
3256 if ((mi_row - 1) >= tile_info->mi_row_start) {
3257 ref_mv = xd->mi[-xd->mi_stride]->mv[0];
3258 rf = xd->mi[-xd->mi_stride]->ref_frame[0];
3259 for (i = 0; i < mi_width; ++i) {
3260 ref_mi = xd->mi[-xd->mi_stride + i];
3261 const_motion &= (ref_mv.as_int == ref_mi->mv[0].as_int) &&
3262 (ref_frame == ref_mi->ref_frame[0]);
3263 skip_ref_frame &= (rf == ref_mi->ref_frame[0]);
3264 }
3265 }
3266
3267 if ((mi_col - 1) >= tile_info->mi_col_start) {
3268 if (ref_mv.as_int == INVALID_MV) ref_mv = xd->mi[-1]->mv[0];
3269 if (rf == NONE) rf = xd->mi[-1]->ref_frame[0];
3270 for (i = 0; i < mi_height; ++i) {
3271 ref_mi = xd->mi[i * xd->mi_stride - 1];
3272 const_motion &= (ref_mv.as_int == ref_mi->mv[0].as_int) &&
3273 (ref_frame == ref_mi->ref_frame[0]);
3274 skip_ref_frame &= (rf == ref_mi->ref_frame[0]);
3275 }
3276 }
3277
3278 if (skip_ref_frame && this_mode != NEARESTMV && this_mode != NEWMV)
3279 if (rf > INTRA_FRAME)
3280 if (ref_frame != rf) continue;
3281
3282 if (const_motion)
3283 if (this_mode == NEARMV || this_mode == ZEROMV) continue;
3284 }
3285
3286 comp_pred = second_ref_frame > INTRA_FRAME;
3287 if (comp_pred) {
3288 if (!cpi->allow_comp_inter_inter) continue;
3289
3290 // Skip compound inter modes if ARF is not available.
3291 if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
3292
3293 // Do not allow compound prediction if the segment level reference frame
3294 // feature is in use as in this case there can only be one reference.
3295 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
3296
3297 if ((mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
3298 best_mode_index >= 0 && best_mbmode.ref_frame[0] == INTRA_FRAME)
3299 continue;
3300
3301 mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
3302 } else {
3303 if (ref_frame != INTRA_FRAME)
3304 mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;
3305 }
3306
3307 if (ref_frame == INTRA_FRAME) {
3308 if (sf->adaptive_mode_search)
3309 if ((x->source_variance << num_pels_log2_lookup[bsize]) > best_pred_sse)
3310 continue;
3311
3312 if (this_mode != DC_PRED) {
3313 // Disable intra modes other than DC_PRED for blocks with low variance
3314 // Threshold for intra skipping based on source variance
3315 // TODO(debargha): Specialize the threshold for super block sizes
3316 const unsigned int skip_intra_var_thresh = 64;
3317 if ((mode_search_skip_flags & FLAG_SKIP_INTRA_LOWVAR) &&
3318 x->source_variance < skip_intra_var_thresh)
3319 continue;
3320 // Only search the oblique modes if the best so far is
3321 // one of the neighboring directional modes
3322 if ((mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) &&
3323 (this_mode >= D45_PRED && this_mode <= TM_PRED)) {
3324 if (best_mode_index >= 0 && best_mbmode.ref_frame[0] > INTRA_FRAME)
3325 continue;
3326 }
3327 if (mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
3328 if (conditional_skipintra(this_mode, best_intra_mode)) continue;
3329 }
3330 }
3331 } else {
3332 const MV_REFERENCE_FRAME ref_frames[2] = { ref_frame, second_ref_frame };
3333 if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv, this_mode,
3334 ref_frames))
3335 continue;
3336 }
3337
3338 mi->mode = this_mode;
3339 mi->uv_mode = DC_PRED;
3340 mi->ref_frame[0] = ref_frame;
3341 mi->ref_frame[1] = second_ref_frame;
3342 // Evaluate all sub-pel filters irrespective of whether we can use
3343 // them for this frame.
3344 mi->interp_filter =
3345 cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
3346 mi->mv[0].as_int = mi->mv[1].as_int = 0;
3347
3348 x->skip = 0;
3349 set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
3350
3351 // Select prediction reference frames.
3352 for (i = 0; i < MAX_MB_PLANE; i++) {
3353 xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
3354 if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
3355 }
3356
3357 if (ref_frame == INTRA_FRAME) {
3358 TX_SIZE uv_tx;
3359 struct macroblockd_plane *const pd = &xd->plane[1];
3360 memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
3361 super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, NULL, bsize,
3362 best_rd);
3363 if (rate_y == INT_MAX) continue;
3364
3365 uv_tx = uv_txsize_lookup[bsize][mi->tx_size][pd->subsampling_x]
3366 [pd->subsampling_y];
3367 if (rate_uv_intra[uv_tx] == INT_MAX) {
3368 choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, &rate_uv_intra[uv_tx],
3369 &rate_uv_tokenonly[uv_tx], &dist_uv[uv_tx],
3370 &skip_uv[uv_tx], &mode_uv[uv_tx]);
3371 }
3372
3373 rate_uv = rate_uv_tokenonly[uv_tx];
3374 distortion_uv = dist_uv[uv_tx];
3375 skippable = skippable && skip_uv[uv_tx];
3376 mi->uv_mode = mode_uv[uv_tx];
3377
3378 rate2 = rate_y + cpi->mbmode_cost[mi->mode] + rate_uv_intra[uv_tx];
3379 if (this_mode != DC_PRED && this_mode != TM_PRED)
3380 rate2 += intra_cost_penalty;
3381 distortion2 = distortion_y + distortion_uv;
3382 } else {
3383 this_rd = handle_inter_mode(
3384 cpi, x, bsize, &rate2, &distortion2, &skippable, &rate_y, &rate_uv,
3385 &disable_skip, frame_mv, mi_row, mi_col, single_newmv,
3386 single_inter_filter, single_skippable, &total_sse, best_rd,
3387 &mask_filter, filter_cache);
3388 if (this_rd == INT64_MAX) continue;
3389
3390 compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred);
3391
3392 if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
3393 }
3394
3395 // Estimate the reference frame signaling cost and add it
3396 // to the rolling cost variable.
3397 if (comp_pred) {
3398 rate2 += ref_costs_comp[ref_frame];
3399 } else {
3400 rate2 += ref_costs_single[ref_frame];
3401 }
3402
3403 if (!disable_skip) {
3404 const vpx_prob skip_prob = vp9_get_skip_prob(cm, xd);
3405 const int skip_cost0 = vp9_cost_bit(skip_prob, 0);
3406 const int skip_cost1 = vp9_cost_bit(skip_prob, 1);
3407
3408 if (skippable) {
3409 // Back out the coefficient coding costs
3410 rate2 -= (rate_y + rate_uv);
3411
3412 // Cost the skip mb case
3413 rate2 += skip_cost1;
3414 } else if (ref_frame != INTRA_FRAME && !xd->lossless) {
3415 if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv + skip_cost0,
3416 distortion2) <
3417 RDCOST(x->rdmult, x->rddiv, skip_cost1, total_sse)) {
3418 // Add in the cost of the no skip flag.
3419 rate2 += skip_cost0;
3420 } else {
3421 // FIXME(rbultje) make this work for splitmv also
3422 assert(total_sse >= 0);
3423
3424 rate2 += skip_cost1;
3425 distortion2 = total_sse;
3426 rate2 -= (rate_y + rate_uv);
3427 this_skip2 = 1;
3428 }
3429 } else {
3430 // Add in the cost of the no skip flag.
3431 rate2 += skip_cost0;
3432 }
3433
3434 // Calculate the final RD estimate for this mode.
3435 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
3436 }
3437
3438 // Apply an adjustment to the rd value based on the similarity of the
3439 // source variance and reconstructed variance.
3440 rd_variance_adjustment(cpi, x, bsize, &this_rd, ref_frame,
3441 x->source_variance);
3442
3443 if (ref_frame == INTRA_FRAME) {
3444 // Keep record of best intra rd
3445 if (this_rd < best_intra_rd) {
3446 best_intra_rd = this_rd;
3447 best_intra_mode = mi->mode;
3448 }
3449 }
3450
3451 if (!disable_skip && ref_frame == INTRA_FRAME) {
3452 for (i = 0; i < REFERENCE_MODES; ++i)
3453 best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
3454 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
3455 best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd);
3456 }
3457
3458 // Did this mode help, i.e. is it the new best mode so far?
3459 if (this_rd < best_rd || x->skip) {
3460 int max_plane = MAX_MB_PLANE;
3461 if (!mode_excluded) {
3462 // Note index of best mode so far
3463 best_mode_index = mode_index;
3464
3465 if (ref_frame == INTRA_FRAME) {
3466 /* required for left and above block mv */
3467 mi->mv[0].as_int = 0;
3468 max_plane = 1;
3469 // Initialize interp_filter here so we do not have to check for
3470 // inter block modes in get_pred_context_switchable_interp()
3471 mi->interp_filter = SWITCHABLE_FILTERS;
3472 } else {
3473 best_pred_sse = x->pred_sse[ref_frame];
3474 }
3475
3476 rd_cost->rate = rate2;
3477 rd_cost->dist = distortion2;
3478 rd_cost->rdcost = this_rd;
3479 best_rd = this_rd;
3480 best_mbmode = *mi;
3481 best_skip2 = this_skip2;
3482 best_mode_skippable = skippable;
3483
3484 if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
3485 memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mi->tx_size],
3486 sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
3487 ctx->sum_y_eobs = x->sum_y_eobs[mi->tx_size];
3488
3489 // TODO(debargha): enhance this test with a better distortion prediction
3490 // based on qp, activity mask and history
3491 if ((mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
3492 (mode_index > MIN_EARLY_TERM_INDEX)) {
3493 int qstep = xd->plane[0].dequant[1];
3494 // TODO(debargha): Enhance this by specializing for each mode_index
3495 int scale = 4;
3496 #if CONFIG_VP9_HIGHBITDEPTH
3497 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
3498 qstep >>= (xd->bd - 8);
3499 }
3500 #endif // CONFIG_VP9_HIGHBITDEPTH
3501 if (x->source_variance < UINT_MAX) {
3502 const int var_adjust = (x->source_variance < 16);
3503 scale -= var_adjust;
3504 }
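// Early termination heuristic: if the scaled distortion is already below
// the squared quantiser step size, further mode search is unlikely to
// help, so the mode loop is terminated early (inter modes only).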
3505 if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
3506 early_term = 1;
3507 }
3508 }
3509 }
3510 }
3511
3512 /* keep record of best compound/single-only prediction */
3513 if (!disable_skip && ref_frame != INTRA_FRAME) {
3514 int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;
3515
3516 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
3517 single_rate = rate2 - compmode_cost;
3518 hybrid_rate = rate2;
3519 } else {
3520 single_rate = rate2;
3521 hybrid_rate = rate2 + compmode_cost;
3522 }
3523
3524 single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
3525 hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
3526
3527 if (!comp_pred) {
3528 if (single_rd < best_pred_rd[SINGLE_REFERENCE])
3529 best_pred_rd[SINGLE_REFERENCE] = single_rd;
3530 } else {
3531 if (single_rd < best_pred_rd[COMPOUND_REFERENCE])
3532 best_pred_rd[COMPOUND_REFERENCE] = single_rd;
3533 }
3534 if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
3535 best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
3536
3537 /* keep record of best filter type */
3538 if (!mode_excluded && cm->interp_filter != BILINEAR) {
3539 int64_t ref =
3540 filter_cache[cm->interp_filter == SWITCHABLE ? SWITCHABLE_FILTERS
3541 : cm->interp_filter];
3542
3543 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
3544 int64_t adj_rd;
3545 if (ref == INT64_MAX)
3546 adj_rd = 0;
3547 else if (filter_cache[i] == INT64_MAX)
3548 // When early termination is triggered, the encoder does not have
3549 // access to the rate-distortion cost. It only knows that the cost
3550 // should be above the maximum valid value, so it takes the known
3551 // maximum plus an arbitrary constant as the rate-distortion cost.
3552 adj_rd = mask_filter - ref + 10;
3553 else
3554 adj_rd = filter_cache[i] - ref;
3555
3556 adj_rd += this_rd;
3557 best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
3558 }
3559 }
3560 }
3561
3562 if (early_term) break;
3563
3564 if (x->skip && !comp_pred) break;
3565 }
3566
3567 // The inter modes' rate costs are not calculated precisely in some cases.
3568 // Therefore, sometimes, NEWMV is chosen instead of NEARESTMV, NEARMV, and
3569 // ZEROMV. Here, checks are added for those cases, and the mode decisions
3570 // are corrected.
3571 if (best_mbmode.mode == NEWMV) {
3572 const MV_REFERENCE_FRAME refs[2] = { best_mbmode.ref_frame[0],
3573 best_mbmode.ref_frame[1] };
3574 int comp_pred_mode = refs[1] > INTRA_FRAME;
3575
3576 if (frame_mv[NEARESTMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
3577 ((comp_pred_mode &&
3578 frame_mv[NEARESTMV][refs[1]].as_int == best_mbmode.mv[1].as_int) ||
3579 !comp_pred_mode))
3580 best_mbmode.mode = NEARESTMV;
3581 else if (frame_mv[NEARMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
3582 ((comp_pred_mode &&
3583 frame_mv[NEARMV][refs[1]].as_int == best_mbmode.mv[1].as_int) ||
3584 !comp_pred_mode))
3585 best_mbmode.mode = NEARMV;
3586 else if (best_mbmode.mv[0].as_int == 0 &&
3587 ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) ||
3588 !comp_pred_mode))
3589 best_mbmode.mode = ZEROMV;
3590 }
3591
3592 if (best_mode_index < 0 || best_rd >= best_rd_so_far) {
3593 // If adaptive interp filter is enabled, then the current leaf node of 8x8
3594 // data is needed for sub8x8. Hence preserve the context.
3595 if (cpi->row_mt && bsize == BLOCK_8X8) ctx->mic = *xd->mi[0];
3596 rd_cost->rate = INT_MAX;
3597 rd_cost->rdcost = INT64_MAX;
3598 return;
3599 }
3600
3601 // If we used an estimate for the uv intra rd in the loop above...
3602 if (sf->use_uv_intra_rd_estimate) {
3603 // Do Intra UV best rd mode selection if best mode choice above was intra.
3604 if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
3605 TX_SIZE uv_tx_size;
3606 *mi = best_mbmode;
3607 uv_tx_size = get_uv_tx_size(mi, &xd->plane[1]);
3608 rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra[uv_tx_size],
3609 &rate_uv_tokenonly[uv_tx_size],
3610 &dist_uv[uv_tx_size], &skip_uv[uv_tx_size],
3611 bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
3612 uv_tx_size);
3613 }
3614 }
3615
3616 assert((cm->interp_filter == SWITCHABLE) ||
3617 (cm->interp_filter == best_mbmode.interp_filter) ||
3618 !is_inter_block(&best_mbmode));
3619
3620 if (!cpi->rc.is_src_frame_alt_ref)
3621 vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
3622 sf->adaptive_rd_thresh, bsize, best_mode_index);
3623
3624 // macroblock modes
3625 *mi = best_mbmode;
3626 x->skip |= best_skip2;
3627
3628 for (i = 0; i < REFERENCE_MODES; ++i) {
3629 if (best_pred_rd[i] == INT64_MAX)
3630 best_pred_diff[i] = INT_MIN;
3631 else
3632 best_pred_diff[i] = best_rd - best_pred_rd[i];
3633 }
3634
3635 if (!x->skip) {
3636 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
3637 if (best_filter_rd[i] == INT64_MAX)
3638 best_filter_diff[i] = 0;
3639 else
3640 best_filter_diff[i] = best_rd - best_filter_rd[i];
3641 }
3642 if (cm->interp_filter == SWITCHABLE)
3643 assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
3644 } else {
3645 vp9_zero(best_filter_diff);
3646 }
3647
3648 // TODO(yunqingwang): Moving this line in front of the above best_filter_diff
3649 // updating code causes PSNR loss. Need to figure out the conflict.
3650 x->skip |= best_mode_skippable;
3651
3652 if (!x->skip && !x->select_tx_size) {
3653 int has_high_freq_coeff = 0;
3654 int plane;
3655 int max_plane = is_inter_block(xd->mi[0]) ? MAX_MB_PLANE : 1;
3656 for (plane = 0; plane < max_plane; ++plane) {
3657 x->plane[plane].eobs = ctx->eobs_pbuf[plane][1];
3658 has_high_freq_coeff |= vp9_has_high_freq_in_plane(x, bsize, plane);
3659 }
3660
3661 for (plane = max_plane; plane < MAX_MB_PLANE; ++plane) {
3662 x->plane[plane].eobs = ctx->eobs_pbuf[plane][2];
3663 has_high_freq_coeff |= vp9_has_high_freq_in_plane(x, bsize, plane);
3664 }
3665
3666 best_mode_skippable |= !has_high_freq_coeff;
3667 }
3668
3669 assert(best_mode_index >= 0);
3670
3671 store_coding_context(x, ctx, best_mode_index, best_pred_diff,
3672 best_filter_diff, best_mode_skippable);
3673 }
3674
3675 void vp9_rd_pick_inter_mode_sb_seg_skip(VP9_COMP *cpi, TileDataEnc *tile_data,
3676 MACROBLOCK *x, RD_COST *rd_cost,
3677 BLOCK_SIZE bsize,
3678 PICK_MODE_CONTEXT *ctx,
3679 int64_t best_rd_so_far) {
3680 VP9_COMMON *const cm = &cpi->common;
3681 MACROBLOCKD *const xd = &x->e_mbd;
3682 MODE_INFO *const mi = xd->mi[0];
3683 unsigned char segment_id = mi->segment_id;
3684 const int comp_pred = 0;
3685 int i;
3686 int64_t best_pred_diff[REFERENCE_MODES];
3687 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
3688 unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
3689 vpx_prob comp_mode_p;
3690 INTERP_FILTER best_filter = SWITCHABLE;
3691 int64_t this_rd = INT64_MAX;
3692 int rate2 = 0;
3693 const int64_t distortion2 = 0;
3694
3695 x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
3696
3697 estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
3698 &comp_mode_p);
3699
3700 for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
3701 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i) x->pred_mv_sad[i] = INT_MAX;
3702
3703 rd_cost->rate = INT_MAX;
3704
3705 assert(segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP));
3706
3707 mi->mode = ZEROMV;
3708 mi->uv_mode = DC_PRED;
3709 mi->ref_frame[0] = LAST_FRAME;
3710 mi->ref_frame[1] = NONE;
3711 mi->mv[0].as_int = 0;
3712 x->skip = 1;
3713
3714 ctx->sum_y_eobs = 0;
3715
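// For a skip segment only the interpolation filter has to be chosen; when
// the frame level filter is SWITCHABLE, pick the filter that is cheapest
// to signal in this context, since the prediction residual is not coded.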
3716 if (cm->interp_filter != BILINEAR) {
3717 best_filter = EIGHTTAP;
3718 if (cm->interp_filter == SWITCHABLE &&
3719 x->source_variance >= cpi->sf.disable_filter_search_var_thresh) {
3720 int rs;
3721 int best_rs = INT_MAX;
3722 for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
3723 mi->interp_filter = i;
3724 rs = vp9_get_switchable_rate(cpi, xd);
3725 if (rs < best_rs) {
3726 best_rs = rs;
3727 best_filter = mi->interp_filter;
3728 }
3729 }
3730 }
3731 }
3732 // Set the appropriate filter
3733 if (cm->interp_filter == SWITCHABLE) {
3734 mi->interp_filter = best_filter;
3735 rate2 += vp9_get_switchable_rate(cpi, xd);
3736 } else {
3737 mi->interp_filter = cm->interp_filter;
3738 }
3739
3740 if (cm->reference_mode == REFERENCE_MODE_SELECT)
3741 rate2 += vp9_cost_bit(comp_mode_p, comp_pred);
3742
3743 // Estimate the reference frame signaling cost and add it
3744 // to the rolling cost variable.
3745 rate2 += ref_costs_single[LAST_FRAME];
3746 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
3747
3748 rd_cost->rate = rate2;
3749 rd_cost->dist = distortion2;
3750 rd_cost->rdcost = this_rd;
3751
3752 if (this_rd >= best_rd_so_far) {
3753 rd_cost->rate = INT_MAX;
3754 rd_cost->rdcost = INT64_MAX;
3755 return;
3756 }
3757
3758 assert((cm->interp_filter == SWITCHABLE) ||
3759 (cm->interp_filter == mi->interp_filter));
3760
3761 vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
3762 cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
3763
3764 vp9_zero(best_pred_diff);
3765 vp9_zero(best_filter_diff);
3766
3767 if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
3768 store_coding_context(x, ctx, THR_ZEROMV, best_pred_diff, best_filter_diff, 0);
3769 }
3770
3771 void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, TileDataEnc *tile_data,
3772 MACROBLOCK *x, int mi_row, int mi_col,
3773 RD_COST *rd_cost, BLOCK_SIZE bsize,
3774 PICK_MODE_CONTEXT *ctx,
3775 int64_t best_rd_so_far) {
3776 VP9_COMMON *const cm = &cpi->common;
3777 RD_OPT *const rd_opt = &cpi->rd;
3778 SPEED_FEATURES *const sf = &cpi->sf;
3779 MACROBLOCKD *const xd = &x->e_mbd;
3780 MODE_INFO *const mi = xd->mi[0];
3781 const struct segmentation *const seg = &cm->seg;
3782 MV_REFERENCE_FRAME ref_frame, second_ref_frame;
3783 unsigned char segment_id = mi->segment_id;
3784 int comp_pred, i;
3785 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
3786 struct buf_2d yv12_mb[4][MAX_MB_PLANE];
3787 static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
3788 VP9_ALT_FLAG };
3789 int64_t best_rd = best_rd_so_far;
3790 int64_t best_yrd = best_rd_so_far; // FIXME(rbultje) more precise
3791 int64_t best_pred_diff[REFERENCE_MODES];
3792 int64_t best_pred_rd[REFERENCE_MODES];
3793 int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
3794 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
3795 MODE_INFO best_mbmode;
3796 int ref_index, best_ref_index = 0;
3797 unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
3798 vpx_prob comp_mode_p;
3799 INTERP_FILTER tmp_best_filter = SWITCHABLE;
3800 int rate_uv_intra, rate_uv_tokenonly;
3801 int64_t dist_uv;
3802 int skip_uv;
3803 PREDICTION_MODE mode_uv = DC_PRED;
3804 const int intra_cost_penalty = vp9_get_intra_cost_penalty(
3805 cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
3806 int_mv seg_mvs[4][MAX_REF_FRAMES];
3807 b_mode_info best_bmodes[4];
3808 int best_skip2 = 0;
3809 int ref_frame_skip_mask[2] = { 0 };
3810 int64_t mask_filter = 0;
3811 int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
3812 int internal_active_edge =
3813 vp9_active_edge_sb(cpi, mi_row, mi_col) && vp9_internal_image_edge(cpi);
3814 const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
3815
3816 x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
3817 memset(x->zcoeff_blk[TX_4X4], 0, 4);
3818 vp9_zero(best_mbmode);
3819
3820 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
3821
3822 for (i = 0; i < 4; i++) {
3823 int j;
3824 for (j = 0; j < MAX_REF_FRAMES; j++) seg_mvs[i][j].as_int = INVALID_MV;
3825 }
3826
3827 estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
3828 &comp_mode_p);
3829
3830 for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
3831 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
3832 best_filter_rd[i] = INT64_MAX;
3833 rate_uv_intra = INT_MAX;
3834
3835 rd_cost->rate = INT_MAX;
3836
3837 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
3838 if (cpi->ref_frame_flags & flag_list[ref_frame]) {
3839 setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
3840 frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
3841 } else {
3842 ref_frame_skip_mask[0] |= (1 << ref_frame);
3843 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3844 }
3845 frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
3846 frame_mv[ZEROMV][ref_frame].as_int = 0;
3847 }
3848
3849 for (ref_index = 0; ref_index < MAX_REFS; ++ref_index) {
3850 int mode_excluded = 0;
3851 int64_t this_rd = INT64_MAX;
3852 int disable_skip = 0;
3853 int compmode_cost = 0;
3854 int rate2 = 0, rate_y = 0, rate_uv = 0;
3855 int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
3856 int skippable = 0;
3857 int i;
3858 int this_skip2 = 0;
3859 int64_t total_sse = INT_MAX;
3860 int early_term = 0;
3861 struct buf_2d backup_yv12[2][MAX_MB_PLANE];
3862
3863 ref_frame = vp9_ref_order[ref_index].ref_frame[0];
3864 second_ref_frame = vp9_ref_order[ref_index].ref_frame[1];
3865
3866 vp9_zero(x->sum_y_eobs);
3867
3868 #if CONFIG_BETTER_HW_COMPATIBILITY
3869 // Forbid 8X4 and 4X8 partitions if any reference frame is scaled.
3870 if (bsize == BLOCK_8X4 || bsize == BLOCK_4X8) {
3871 int ref_scaled = vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf);
3872 if (second_ref_frame > INTRA_FRAME)
3873 ref_scaled += vp9_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf);
3874 if (ref_scaled) continue;
3875 }
3876 #endif
3877 // Look at the reference frame of the best mode so far and set the
3878 // skip mask to look at a subset of the remaining modes.
3879 if (ref_index > 2 && sf->mode_skip_start < MAX_MODES) {
3880 if (ref_index == 3) {
3881 switch (best_mbmode.ref_frame[0]) {
3882 case INTRA_FRAME: break;
3883 case LAST_FRAME:
3884 ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME);
3885 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3886 break;
3887 case GOLDEN_FRAME:
3888 ref_frame_skip_mask[0] |= (1 << LAST_FRAME) | (1 << ALTREF_FRAME);
3889 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3890 break;
3891 case ALTREF_FRAME:
3892 ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << LAST_FRAME);
3893 break;
3894 case NONE:
3895 case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
3896 }
3897 }
3898 }
3899
3900 if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
3901 (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
3902 continue;
3903
3904 // Test best rd so far against threshold for trying this mode.
3905 if (!internal_active_edge &&
3906 rd_less_than_thresh(best_rd,
3907 rd_opt->threshes[segment_id][bsize][ref_index],
3908 &rd_thresh_freq_fact[ref_index]))
3909 continue;
3910
3911 // This is only used in motion vector unit test.
3912 if (cpi->oxcf.motion_vector_unit_test && ref_frame == INTRA_FRAME) continue;
3913
3914 comp_pred = second_ref_frame > INTRA_FRAME;
3915 if (comp_pred) {
3916 if (!cpi->allow_comp_inter_inter) continue;
3917 if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
3918 // Do not allow compound prediction if the segment level reference frame
3919 // feature is in use as in this case there can only be one reference.
3920 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
3921
3922 if ((sf->mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
3923 best_mbmode.ref_frame[0] == INTRA_FRAME)
3924 continue;
3925 }
3926
3927 if (comp_pred)
3928 mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
3929 else if (ref_frame != INTRA_FRAME)
3930 mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;
3931
3932 // If the segment level reference frame feature is enabled, skip the
3933 // current ref frame when it is not the one allowed for this segment.
3934 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
3935 get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
3936 continue;
3937 // Disable this drop out case if the ref frame
3938 // segment level feature is enabled for this segment. This is to
3939 // prevent the possibility that we end up unable to pick any mode.
3940 } else if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
3941 // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
3942 // unless ARNR filtering is enabled in which case we want
3943 // an unfiltered alternative. We allow near/nearest as well
3944 // because they may result in zero-zero MVs but be cheaper.
3945 if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
3946 continue;
3947 }
3948
3949 mi->tx_size = TX_4X4;
3950 mi->uv_mode = DC_PRED;
3951 mi->ref_frame[0] = ref_frame;
3952 mi->ref_frame[1] = second_ref_frame;
3953 // Evaluate all sub-pel filters irrespective of whether we can use
3954 // them for this frame.
3955 mi->interp_filter =
3956 cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
3957 x->skip = 0;
3958 set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
3959
3960 // Select prediction reference frames.
3961 for (i = 0; i < MAX_MB_PLANE; i++) {
3962 xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
3963 if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
3964 }
3965
3966 if (ref_frame == INTRA_FRAME) {
3967 int rate;
3968 if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y, &distortion_y,
3969 best_rd) >= best_rd)
3970 continue;
3971 rate2 += rate;
3972 rate2 += intra_cost_penalty;
3973 distortion2 += distortion_y;
3974
3975 if (rate_uv_intra == INT_MAX) {
3976 choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4, &rate_uv_intra,
3977 &rate_uv_tokenonly, &dist_uv, &skip_uv, &mode_uv);
3978 }
3979 rate2 += rate_uv_intra;
3980 rate_uv = rate_uv_tokenonly;
3981 distortion2 += dist_uv;
3982 distortion_uv = dist_uv;
3983 mi->uv_mode = mode_uv;
3984 } else {
3985 int rate;
3986 int64_t distortion;
3987 int64_t this_rd_thresh;
3988 int64_t tmp_rd, tmp_best_rd = INT64_MAX, tmp_best_rdu = INT64_MAX;
3989 int tmp_best_rate = INT_MAX, tmp_best_ratey = INT_MAX;
3990 int64_t tmp_best_distortion = INT_MAX, tmp_best_sse, uv_sse;
3991 int tmp_best_skippable = 0;
3992 int switchable_filter_index;
3993 int_mv *second_ref =
3994 comp_pred ? &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL;
3995 b_mode_info tmp_best_bmodes[16];
3996 MODE_INFO tmp_best_mbmode;
3997 BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
3998 int pred_exists = 0;
3999 int uv_skippable;
4000
4001 YV12_BUFFER_CONFIG *scaled_ref_frame[2] = { NULL, NULL };
4002 int ref;
4003
4004 for (ref = 0; ref < 2; ++ref) {
4005 scaled_ref_frame[ref] =
4006 mi->ref_frame[ref] > INTRA_FRAME
4007 ? vp9_get_scaled_ref_frame(cpi, mi->ref_frame[ref])
4008 : NULL;
4009
4010 if (scaled_ref_frame[ref]) {
4011 int i;
4012 // Swap out the reference frame for a version that's been scaled to
4013 // match the resolution of the current frame, allowing the existing
4014 // motion search code to be used without additional modifications.
4015 for (i = 0; i < MAX_MB_PLANE; i++)
4016 backup_yv12[ref][i] = xd->plane[i].pre[ref];
4017 vp9_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
4018 NULL);
4019 }
4020 }
4021
4022 this_rd_thresh = (ref_frame == LAST_FRAME)
4023 ? rd_opt->threshes[segment_id][bsize][THR_LAST]
4024 : rd_opt->threshes[segment_id][bsize][THR_ALTR];
4025 this_rd_thresh = (ref_frame == GOLDEN_FRAME)
4026 ? rd_opt->threshes[segment_id][bsize][THR_GOLD]
4027 : this_rd_thresh;
4028 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
4029 filter_cache[i] = INT64_MAX;
4030
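// Choose the interpolation filter for the sub8x8 search: fall back to
// EIGHTTAP for low variance sources, reuse the filter predicted from the
// neighbouring context when the adaptive_pred_interp_filter speed feature
// allows it, and otherwise evaluate every switchable filter below.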
4031 if (cm->interp_filter != BILINEAR) {
4032 tmp_best_filter = EIGHTTAP;
4033 if (x->source_variance < sf->disable_filter_search_var_thresh) {
4034 tmp_best_filter = EIGHTTAP;
4035 } else if (sf->adaptive_pred_interp_filter == 1 &&
4036 ctx->pred_interp_filter < SWITCHABLE) {
4037 tmp_best_filter = ctx->pred_interp_filter;
4038 } else if (sf->adaptive_pred_interp_filter == 2) {
4039 tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE
4040 ? ctx->pred_interp_filter
4041 : 0;
4042 } else {
4043 for (switchable_filter_index = 0;
4044 switchable_filter_index < SWITCHABLE_FILTERS;
4045 ++switchable_filter_index) {
4046 int newbest, rs;
4047 int64_t rs_rd;
4048 MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
4049 mi->interp_filter = switchable_filter_index;
4050 tmp_rd = rd_pick_best_sub8x8_mode(
4051 cpi, x, &mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
4052 &rate, &rate_y, &distortion, &skippable, &total_sse,
4053 (int)this_rd_thresh, seg_mvs, bsi, switchable_filter_index,
4054 mi_row, mi_col);
4055
4056 if (tmp_rd == INT64_MAX) continue;
4057 rs = vp9_get_switchable_rate(cpi, xd);
4058 rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
4059 filter_cache[switchable_filter_index] = tmp_rd;
4060 filter_cache[SWITCHABLE_FILTERS] =
4061 VPXMIN(filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd);
4062 if (cm->interp_filter == SWITCHABLE) tmp_rd += rs_rd;
4063
4064 mask_filter = VPXMAX(mask_filter, tmp_rd);
4065
4066 newbest = (tmp_rd < tmp_best_rd);
4067 if (newbest) {
4068 tmp_best_filter = mi->interp_filter;
4069 tmp_best_rd = tmp_rd;
4070 }
4071 if ((newbest && cm->interp_filter == SWITCHABLE) ||
4072 (mi->interp_filter == cm->interp_filter &&
4073 cm->interp_filter != SWITCHABLE)) {
4074 tmp_best_rdu = tmp_rd;
4075 tmp_best_rate = rate;
4076 tmp_best_ratey = rate_y;
4077 tmp_best_distortion = distortion;
4078 tmp_best_sse = total_sse;
4079 tmp_best_skippable = skippable;
4080 tmp_best_mbmode = *mi;
4081 for (i = 0; i < 4; i++) {
4082 tmp_best_bmodes[i] = xd->mi[0]->bmi[i];
4083 x->zcoeff_blk[TX_4X4][i] = !x->plane[0].eobs[i];
4084 x->sum_y_eobs[TX_4X4] += x->plane[0].eobs[i];
4085 }
4086 pred_exists = 1;
4087 if (switchable_filter_index == 0 && sf->use_rd_breakout &&
4088 best_rd < INT64_MAX) {
4089 if (tmp_best_rdu / 2 > best_rd) {
4090 // Skip searching the other filters if the first one's rd cost is
4091 // already substantially larger than the best so far.
4092 tmp_best_filter = mi->interp_filter;
4093 tmp_best_rdu = INT64_MAX;
4094 break;
4095 }
4096 }
4097 }
4098 } // switchable_filter_index loop
4099 }
4100 }
4101
4102 if (tmp_best_rdu == INT64_MAX && pred_exists) continue;
4103
4104 mi->interp_filter = (cm->interp_filter == SWITCHABLE ? tmp_best_filter
4105 : cm->interp_filter);
4106 if (!pred_exists) {
4107 // Handles the special case when a filter that is not in the
4108 // switchable list (bilinear, 6-tap) is indicated at the frame level
4109 tmp_rd = rd_pick_best_sub8x8_mode(
4110 cpi, x, &x->mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
4111 &rate, &rate_y, &distortion, &skippable, &total_sse,
4112 (int)this_rd_thresh, seg_mvs, bsi, 0, mi_row, mi_col);
4113 if (tmp_rd == INT64_MAX) continue;
4114 } else {
4115 total_sse = tmp_best_sse;
4116 rate = tmp_best_rate;
4117 rate_y = tmp_best_ratey;
4118 distortion = tmp_best_distortion;
4119 skippable = tmp_best_skippable;
4120 *mi = tmp_best_mbmode;
4121 for (i = 0; i < 4; i++) xd->mi[0]->bmi[i] = tmp_best_bmodes[i];
4122 }
4123
4124 rate2 += rate;
4125 distortion2 += distortion;
4126
4127 if (cm->interp_filter == SWITCHABLE)
4128 rate2 += vp9_get_switchable_rate(cpi, xd);
4129
4130 if (!mode_excluded)
4131 mode_excluded = comp_pred ? cm->reference_mode == SINGLE_REFERENCE
4132 : cm->reference_mode == COMPOUND_REFERENCE;
4133
4134 compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred);
4135
4136 tmp_best_rdu =
4137 best_rd - VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
4138 RDCOST(x->rdmult, x->rddiv, 0, total_sse));
4139
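// tmp_best_rdu is the rd budget left for the UV planes after charging the
// Y cost (or the cheaper skip / total_sse alternative) against the best
// rd so far.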
4140 if (tmp_best_rdu > 0) {
4141 // If even the 'Y' rd value of split is higher than best so far
4142 // then don't bother looking at UV
4143 vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
4144 memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
4145 if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
4146 &uv_sse, BLOCK_8X8, tmp_best_rdu)) {
4147 for (ref = 0; ref < 2; ++ref) {
4148 if (scaled_ref_frame[ref]) {
4149 int i;
4150 for (i = 0; i < MAX_MB_PLANE; ++i)
4151 xd->plane[i].pre[ref] = backup_yv12[ref][i];
4152 }
4153 }
4154 continue;
4155 }
4156
4157 rate2 += rate_uv;
4158 distortion2 += distortion_uv;
4159 skippable = skippable && uv_skippable;
4160 total_sse += uv_sse;
4161 }
4162
4163 for (ref = 0; ref < 2; ++ref) {
4164 if (scaled_ref_frame[ref]) {
4165 // Restore the prediction frame pointers to their unscaled versions.
4166 int i;
4167 for (i = 0; i < MAX_MB_PLANE; ++i)
4168 xd->plane[i].pre[ref] = backup_yv12[ref][i];
4169 }
4170 }
4171 }
4172
4173 if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
4174
4175 // Estimate the reference frame signaling cost and add it
4176 // to the rolling cost variable.
4177 if (second_ref_frame > INTRA_FRAME) {
4178 rate2 += ref_costs_comp[ref_frame];
4179 } else {
4180 rate2 += ref_costs_single[ref_frame];
4181 }
4182
4183 if (!disable_skip) {
4184 const vpx_prob skip_prob = vp9_get_skip_prob(cm, xd);
4185 const int skip_cost0 = vp9_cost_bit(skip_prob, 0);
4186 const int skip_cost1 = vp9_cost_bit(skip_prob, 1);
4187
4188 // Skip is never coded at the segment level for sub8x8 blocks and instead
4189 // always coded in the bitstream at the mode info level.
4190 if (ref_frame != INTRA_FRAME && !xd->lossless) {
4191 if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv + skip_cost0,
4192 distortion2) <
4193 RDCOST(x->rdmult, x->rddiv, skip_cost1, total_sse)) {
4194 // Add in the cost of the no skip flag.
4195 rate2 += skip_cost0;
4196 } else {
4197 // FIXME(rbultje) make this work for splitmv also
4198 rate2 += skip_cost1;
4199 distortion2 = total_sse;
4200 assert(total_sse >= 0);
4201 rate2 -= (rate_y + rate_uv);
4202 rate_y = 0;
4203 rate_uv = 0;
4204 this_skip2 = 1;
4205 }
4206 } else {
4207 // Add in the cost of the no skip flag.
4208 rate2 += skip_cost0;
4209 }
4210
4211 // Calculate the final RD estimate for this mode.
4212 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
4213 }
4214
4215 if (!disable_skip && ref_frame == INTRA_FRAME) {
4216 for (i = 0; i < REFERENCE_MODES; ++i)
4217 best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
4218 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
4219 best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd);
4220 }
4221
4222 // Did this mode help, i.e. is it the new best mode so far?
4223 if (this_rd < best_rd || x->skip) {
4224 if (!mode_excluded) {
4225 int max_plane = MAX_MB_PLANE;
4226 // Note index of best mode so far
4227 best_ref_index = ref_index;
4228
4229 if (ref_frame == INTRA_FRAME) {
4230 /* required for left and above block mv */
4231 mi->mv[0].as_int = 0;
4232 max_plane = 1;
4233 // Initialize interp_filter here so we do not have to check for
4234 // inter block modes in get_pred_context_switchable_interp()
4235 mi->interp_filter = SWITCHABLE_FILTERS;
4236 }
4237
4238 rd_cost->rate = rate2;
4239 rd_cost->dist = distortion2;
4240 rd_cost->rdcost = this_rd;
4241 best_rd = this_rd;
4242 best_yrd =
4243 best_rd - RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
4244 best_mbmode = *mi;
4245 best_skip2 = this_skip2;
4246 if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
4247 memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
4248 sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
4249 ctx->sum_y_eobs = x->sum_y_eobs[TX_4X4];
4250
4251 for (i = 0; i < 4; i++) best_bmodes[i] = xd->mi[0]->bmi[i];
4252
4253 // TODO(debargha): enhance this test with a better distortion prediction
4254 // based on qp, activity mask and history
4255 if ((sf->mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
4256 (ref_index > MIN_EARLY_TERM_INDEX)) {
4257 int qstep = xd->plane[0].dequant[1];
4258 // TODO(debargha): Enhance this by specializing for each mode_index
4259 int scale = 4;
4260 #if CONFIG_VP9_HIGHBITDEPTH
4261 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
4262 qstep >>= (xd->bd - 8);
4263 }
4264 #endif // CONFIG_VP9_HIGHBITDEPTH
4265 if (x->source_variance < UINT_MAX) {
4266 const int var_adjust = (x->source_variance < 16);
4267 scale -= var_adjust;
4268 }
4269 if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
4270 early_term = 1;
4271 }
4272 }
4273 }
4274 }
4275
4276 /* keep record of best compound/single-only prediction */
4277 if (!disable_skip && ref_frame != INTRA_FRAME) {
4278 int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;
4279
4280 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
4281 single_rate = rate2 - compmode_cost;
4282 hybrid_rate = rate2;
4283 } else {
4284 single_rate = rate2;
4285 hybrid_rate = rate2 + compmode_cost;
4286 }
4287
4288 single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
4289 hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
4290
4291 if (!comp_pred && single_rd < best_pred_rd[SINGLE_REFERENCE])
4292 best_pred_rd[SINGLE_REFERENCE] = single_rd;
4293 else if (comp_pred && single_rd < best_pred_rd[COMPOUND_REFERENCE])
4294 best_pred_rd[COMPOUND_REFERENCE] = single_rd;
4295
4296 if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
4297 best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
4298 }
4299
4300 /* keep record of best filter type */
4301 if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME &&
4302 cm->interp_filter != BILINEAR) {
4303 int64_t ref =
4304 filter_cache[cm->interp_filter == SWITCHABLE ? SWITCHABLE_FILTERS
4305 : cm->interp_filter];
4306 int64_t adj_rd;
4307 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
4308 if (ref == INT64_MAX)
4309 adj_rd = 0;
4310 else if (filter_cache[i] == INT64_MAX)
4311 // When early termination is triggered, the encoder does not have
4312 // access to the rate-distortion cost. It only knows that the cost
4313 // should be above the maximum valid value, so it takes the known
4314 // maximum plus an arbitrary constant as the rate-distortion cost.
4315 adj_rd = mask_filter - ref + 10;
4316 else
4317 adj_rd = filter_cache[i] - ref;
4318
4319 adj_rd += this_rd;
4320 best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
4321 }
4322 }
4323
4324 if (early_term) break;
4325
4326 if (x->skip && !comp_pred) break;
4327 }
4328
4329 if (best_rd >= best_rd_so_far) {
4330 rd_cost->rate = INT_MAX;
4331 rd_cost->rdcost = INT64_MAX;
4332 return;
4333 }
4334
4335 // If we used an estimate for the uv intra rd in the loop above...
4336 if (sf->use_uv_intra_rd_estimate) {
4337 // Do Intra UV best rd mode selection if best mode choice above was intra.
4338 if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
4339 *mi = best_mbmode;
4340 rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra, &rate_uv_tokenonly,
4341 &dist_uv, &skip_uv, BLOCK_8X8, TX_4X4);
4342 }
4343 }
4344
4345 if (best_rd == INT64_MAX) {
4346 rd_cost->rate = INT_MAX;
4347 rd_cost->dist = INT64_MAX;
4348 rd_cost->rdcost = INT64_MAX;
4349 return;
4350 }
4351
4352 assert((cm->interp_filter == SWITCHABLE) ||
4353 (cm->interp_filter == best_mbmode.interp_filter) ||
4354 !is_inter_block(&best_mbmode));
4355
4356 vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact, sf->adaptive_rd_thresh,
4357 bsize, best_ref_index);
4358
4359 // macroblock modes
4360 *mi = best_mbmode;
4361 x->skip |= best_skip2;
4362 if (!is_inter_block(&best_mbmode)) {
4363 for (i = 0; i < 4; i++) xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
4364 } else {
4365 for (i = 0; i < 4; ++i)
4366 memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i], sizeof(b_mode_info));
4367
4368 mi->mv[0].as_int = xd->mi[0]->bmi[3].as_mv[0].as_int;
4369 mi->mv[1].as_int = xd->mi[0]->bmi[3].as_mv[1].as_int;
4370 }
4371
4372 for (i = 0; i < REFERENCE_MODES; ++i) {
4373 if (best_pred_rd[i] == INT64_MAX)
4374 best_pred_diff[i] = INT_MIN;
4375 else
4376 best_pred_diff[i] = best_rd - best_pred_rd[i];
4377 }
4378
4379 if (!x->skip) {
4380 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
4381 if (best_filter_rd[i] == INT64_MAX)
4382 best_filter_diff[i] = 0;
4383 else
4384 best_filter_diff[i] = best_rd - best_filter_rd[i];
4385 }
4386 if (cm->interp_filter == SWITCHABLE)
4387 assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
4388 } else {
4389 vp9_zero(best_filter_diff);
4390 }
4391
4392 store_coding_context(x, ctx, best_ref_index, best_pred_diff, best_filter_diff,
4393 0);
4394 }
4395