1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include <assert.h>
12 #include <math.h>
13
14 #include "./vp9_rtcd.h"
15 #include "./vpx_dsp_rtcd.h"
16
17 #include "vpx_dsp/vpx_dsp_common.h"
18 #include "vpx_mem/vpx_mem.h"
19 #include "vpx_ports/mem.h"
20 #include "vpx_ports/system_state.h"
21
22 #include "vp9/common/vp9_common.h"
23 #include "vp9/common/vp9_entropy.h"
24 #include "vp9/common/vp9_entropymode.h"
25 #include "vp9/common/vp9_idct.h"
26 #include "vp9/common/vp9_mvref_common.h"
27 #include "vp9/common/vp9_pred_common.h"
28 #include "vp9/common/vp9_quant_common.h"
29 #include "vp9/common/vp9_reconinter.h"
30 #include "vp9/common/vp9_reconintra.h"
31 #include "vp9/common/vp9_scan.h"
32 #include "vp9/common/vp9_seg_common.h"
33
34 #include "vp9/encoder/vp9_cost.h"
35 #include "vp9/encoder/vp9_encodemb.h"
36 #include "vp9/encoder/vp9_encodemv.h"
37 #include "vp9/encoder/vp9_encoder.h"
38 #include "vp9/encoder/vp9_mcomp.h"
39 #include "vp9/encoder/vp9_quantize.h"
40 #include "vp9/encoder/vp9_ratectrl.h"
41 #include "vp9/encoder/vp9_rd.h"
42 #include "vp9/encoder/vp9_rdopt.h"
43 #include "vp9/encoder/vp9_aq_variance.h"
44
45 #define LAST_FRAME_MODE_MASK ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | \
46 (1 << INTRA_FRAME))
47 #define GOLDEN_FRAME_MODE_MASK ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | \
48 (1 << INTRA_FRAME))
49 #define ALT_REF_MODE_MASK ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | \
50 (1 << INTRA_FRAME))
51
52 #define SECOND_REF_FRAME_MASK ((1 << ALTREF_FRAME) | 0x01)
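/* Illustrative note (a sketch, not part of the encoder logic): each of the
 * masks above is a bitmask over MV_REFERENCE_FRAME values, so a candidate can
 * presumably be pruned with a test of the form
 *
 *   if (mode_mask & (1 << ref_frame))  // skip modes using this reference
 *
 * e.g. LAST_FRAME_MODE_MASK masks out GOLDEN, ALTREF and INTRA when only the
 * last frame is meant to be searched.
 */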
53
54 #define MIN_EARLY_TERM_INDEX 3
55 #define NEW_MV_DISCOUNT_FACTOR 8
56
57 typedef struct {
58 PREDICTION_MODE mode;
59 MV_REFERENCE_FRAME ref_frame[2];
60 } MODE_DEFINITION;
61
62 typedef struct {
63 MV_REFERENCE_FRAME ref_frame[2];
64 } REF_DEFINITION;
65
66 struct rdcost_block_args {
67 MACROBLOCK *x;
68 ENTROPY_CONTEXT t_above[16];
69 ENTROPY_CONTEXT t_left[16];
70 int this_rate;
71 int64_t this_dist;
72 int64_t this_sse;
73 int64_t this_rd;
74 int64_t best_rd;
75 int exit_early;
76 int use_fast_coef_costing;
77 const scan_order *so;
78 uint8_t skippable;
79 };
80
81 #define LAST_NEW_MV_INDEX 6
82 static const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
83 {NEARESTMV, {LAST_FRAME, NONE}},
84 {NEARESTMV, {ALTREF_FRAME, NONE}},
85 {NEARESTMV, {GOLDEN_FRAME, NONE}},
86
87 {DC_PRED, {INTRA_FRAME, NONE}},
88
89 {NEWMV, {LAST_FRAME, NONE}},
90 {NEWMV, {ALTREF_FRAME, NONE}},
91 {NEWMV, {GOLDEN_FRAME, NONE}},
92
93 {NEARMV, {LAST_FRAME, NONE}},
94 {NEARMV, {ALTREF_FRAME, NONE}},
95 {NEARMV, {GOLDEN_FRAME, NONE}},
96
97 {ZEROMV, {LAST_FRAME, NONE}},
98 {ZEROMV, {GOLDEN_FRAME, NONE}},
99 {ZEROMV, {ALTREF_FRAME, NONE}},
100
101 {NEARESTMV, {LAST_FRAME, ALTREF_FRAME}},
102 {NEARESTMV, {GOLDEN_FRAME, ALTREF_FRAME}},
103
104 {TM_PRED, {INTRA_FRAME, NONE}},
105
106 {NEARMV, {LAST_FRAME, ALTREF_FRAME}},
107 {NEWMV, {LAST_FRAME, ALTREF_FRAME}},
108 {NEARMV, {GOLDEN_FRAME, ALTREF_FRAME}},
109 {NEWMV, {GOLDEN_FRAME, ALTREF_FRAME}},
110
111 {ZEROMV, {LAST_FRAME, ALTREF_FRAME}},
112 {ZEROMV, {GOLDEN_FRAME, ALTREF_FRAME}},
113
114 {H_PRED, {INTRA_FRAME, NONE}},
115 {V_PRED, {INTRA_FRAME, NONE}},
116 {D135_PRED, {INTRA_FRAME, NONE}},
117 {D207_PRED, {INTRA_FRAME, NONE}},
118 {D153_PRED, {INTRA_FRAME, NONE}},
119 {D63_PRED, {INTRA_FRAME, NONE}},
120 {D117_PRED, {INTRA_FRAME, NONE}},
121 {D45_PRED, {INTRA_FRAME, NONE}},
122 };
123
124 static const REF_DEFINITION vp9_ref_order[MAX_REFS] = {
125 {{LAST_FRAME, NONE}},
126 {{GOLDEN_FRAME, NONE}},
127 {{ALTREF_FRAME, NONE}},
128 {{LAST_FRAME, ALTREF_FRAME}},
129 {{GOLDEN_FRAME, ALTREF_FRAME}},
130 {{INTRA_FRAME, NONE}},
131 };
132
133 static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
134 int m, int n, int min_plane, int max_plane) {
135 int i;
136
137 for (i = min_plane; i < max_plane; ++i) {
138 struct macroblock_plane *const p = &x->plane[i];
139 struct macroblockd_plane *const pd = &x->e_mbd.plane[i];
140
141 p->coeff = ctx->coeff_pbuf[i][m];
142 p->qcoeff = ctx->qcoeff_pbuf[i][m];
143 pd->dqcoeff = ctx->dqcoeff_pbuf[i][m];
144 p->eobs = ctx->eobs_pbuf[i][m];
145
146 ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n];
147 ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n];
148 ctx->dqcoeff_pbuf[i][m] = ctx->dqcoeff_pbuf[i][n];
149 ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n];
150
151 ctx->coeff_pbuf[i][n] = p->coeff;
152 ctx->qcoeff_pbuf[i][n] = p->qcoeff;
153 ctx->dqcoeff_pbuf[i][n] = pd->dqcoeff;
154 ctx->eobs_pbuf[i][n] = p->eobs;
155 }
156 }
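/* Usage sketch (illustrative only): this swaps the coefficient buffers held in
 * context slots m and n and leaves the macroblock planes pointing at the
 * buffers that were in slot m, e.g.
 *
 *   swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);
 *
 * as used by rd_pick_intra_sbuv_mode() below to keep a best mode's
 * coefficients around without copying the data.
 */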
157
158 static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize,
159 MACROBLOCK *x, MACROBLOCKD *xd,
160 int *out_rate_sum, int64_t *out_dist_sum,
161 int *skip_txfm_sb, int64_t *skip_sse_sb) {
162 // Note that our transform coefficients are scaled by 8 relative to an
163 // orthogonal transform, so the quantizer step is also scaled by 8. To get
164 // the effective quantizer we divide by 8 before calling the modeling function.
165 int i;
166 int64_t rate_sum = 0;
167 int64_t dist_sum = 0;
168 const int ref = xd->mi[0]->mbmi.ref_frame[0];
169 unsigned int sse;
170 unsigned int var = 0;
171 unsigned int sum_sse = 0;
172 int64_t total_sse = 0;
173 int skip_flag = 1;
174 const int shift = 6;
175 int rate;
176 int64_t dist;
177 const int dequant_shift =
178 #if CONFIG_VP9_HIGHBITDEPTH
179 (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ?
180 xd->bd - 5 :
181 #endif // CONFIG_VP9_HIGHBITDEPTH
182 3;
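// Illustrative note: pd->dequant[] carries the same x8 scaling, so the
// effective quantizer used below is dequant >> dequant_shift, i.e.
// dequant >> 3 for 8-bit and dequant >> (bd - 5) for the high-bitdepth path.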
183
184 x->pred_sse[ref] = 0;
185
186 for (i = 0; i < MAX_MB_PLANE; ++i) {
187 struct macroblock_plane *const p = &x->plane[i];
188 struct macroblockd_plane *const pd = &xd->plane[i];
189 const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
190 const TX_SIZE max_tx_size = max_txsize_lookup[bs];
191 const BLOCK_SIZE unit_size = txsize_to_bsize[max_tx_size];
192 const int64_t dc_thr = p->quant_thred[0] >> shift;
193 const int64_t ac_thr = p->quant_thred[1] >> shift;
194 // The low thresholds are used to measure if the prediction errors are
195 // low enough so that we can skip the mode search.
196 const int64_t low_dc_thr = VPXMIN(50, dc_thr >> 2);
197 const int64_t low_ac_thr = VPXMIN(80, ac_thr >> 2);
198 int bw = 1 << (b_width_log2_lookup[bs] - b_width_log2_lookup[unit_size]);
199 int bh = 1 << (b_height_log2_lookup[bs] - b_width_log2_lookup[unit_size]);
200 int idx, idy;
201 int lw = b_width_log2_lookup[unit_size] + 2;
202 int lh = b_height_log2_lookup[unit_size] + 2;
203
204 sum_sse = 0;
205
206 for (idy = 0; idy < bh; ++idy) {
207 for (idx = 0; idx < bw; ++idx) {
208 uint8_t *src = p->src.buf + (idy * p->src.stride << lh) + (idx << lw);
209 uint8_t *dst = pd->dst.buf + (idy * pd->dst.stride << lh) + (idx << lh);
210 int block_idx = (idy << 1) + idx;
211 int low_err_skip = 0;
212
213 var = cpi->fn_ptr[unit_size].vf(src, p->src.stride,
214 dst, pd->dst.stride, &sse);
215 x->bsse[(i << 2) + block_idx] = sse;
216 sum_sse += sse;
217
218 x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_NONE;
219 if (!x->select_tx_size) {
220 // Check if all ac coefficients can be quantized to zero.
221 if (var < ac_thr || var == 0) {
222 x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_ONLY;
223
224 // Check if dc coefficient can be quantized to zero.
225 if (sse - var < dc_thr || sse == var) {
226 x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_DC;
227
228 if (!sse || (var < low_ac_thr && sse - var < low_dc_thr))
229 low_err_skip = 1;
230 }
231 }
232 }
233
234 if (skip_flag && !low_err_skip)
235 skip_flag = 0;
236
237 if (i == 0)
238 x->pred_sse[ref] += sse;
239 }
240 }
241
242 total_sse += sum_sse;
243
244 // Fast approximation of the modelling function.
245 if (cpi->sf.simple_model_rd_from_var) {
246 int64_t rate;
247 const int64_t square_error = sum_sse;
248 int quantizer = (pd->dequant[1] >> dequant_shift);
249
250 if (quantizer < 120)
251 rate = (square_error * (280 - quantizer)) >> 8;
252 else
253 rate = 0;
254 dist = (square_error * quantizer) >> 8;
255 rate_sum += rate;
256 dist_sum += dist;
257 } else {
258 vp9_model_rd_from_var_lapndz(sum_sse, num_pels_log2_lookup[bs],
259 pd->dequant[1] >> dequant_shift,
260 &rate, &dist);
261 rate_sum += rate;
262 dist_sum += dist;
263 }
264 }
265
266 *skip_txfm_sb = skip_flag;
267 *skip_sse_sb = total_sse << 4;
268 *out_rate_sum = (int)rate_sum;
269 *out_dist_sum = dist_sum << 4;
270 }
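/* Worked example for the simple model above (illustrative numbers only): with
 * sum_sse = 1000 and an effective quantizer of 40,
 *   rate = (1000 * (280 - 40)) >> 8 = 937
 *   dist = (1000 * 40) >> 8        = 156
 * and the accumulated distortion/SSE are finally returned scaled by 16 (<< 4)
 * to match the distortion units used elsewhere in the RD code.
 */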
271
272 #if CONFIG_VP9_HIGHBITDEPTH
273 int64_t vp9_highbd_block_error_c(const tran_low_t *coeff,
274 const tran_low_t *dqcoeff,
275 intptr_t block_size,
276 int64_t *ssz, int bd) {
277 int i;
278 int64_t error = 0, sqcoeff = 0;
279 int shift = 2 * (bd - 8);
280 int rounding = shift > 0 ? 1 << (shift - 1) : 0;
281
282 for (i = 0; i < block_size; i++) {
283 const int64_t diff = coeff[i] - dqcoeff[i];
284 error += diff * diff;
285 sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i];
286 }
287 assert(error >= 0 && sqcoeff >= 0);
288 error = (error + rounding) >> shift;
289 sqcoeff = (sqcoeff + rounding) >> shift;
290
291 *ssz = sqcoeff;
292 return error;
293 }
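/* Illustrative note: for bd = 10 the shift above is 2 * (10 - 8) = 4 with a
 * rounding term of 1 << 3 = 8, so an accumulated error of 1000 is returned as
 * (1000 + 8) >> 4 = 63, rescaling the high-bitdepth error back to the 8-bit
 * distortion scale used by the RD computations.
 */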
294
295 int64_t vp9_highbd_block_error_8bit_c(const tran_low_t *coeff,
296 const tran_low_t *dqcoeff,
297 intptr_t block_size,
298 int64_t *ssz) {
299 // Note that the C versions of these 2 functions (vp9_block_error and
300 // vp9_highbd_block_error_8bit) are the same, but the optimized assembly
301 // routines are not compatible in the non-high-bitdepth configuration, so
302 // they still cannot share the same name.
303 return vp9_block_error_c(coeff, dqcoeff, block_size, ssz);
304 }
305
306 static int64_t vp9_highbd_block_error_dispatch(const tran_low_t *coeff,
307 const tran_low_t *dqcoeff,
308 intptr_t block_size,
309 int64_t *ssz, int bd) {
310 if (bd == 8) {
311 return vp9_highbd_block_error_8bit(coeff, dqcoeff, block_size, ssz);
312 } else {
313 return vp9_highbd_block_error(coeff, dqcoeff, block_size, ssz, bd);
314 }
315 }
316 #endif // CONFIG_VP9_HIGHBITDEPTH
317
318 int64_t vp9_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
319 intptr_t block_size, int64_t *ssz) {
320 int i;
321 int64_t error = 0, sqcoeff = 0;
322
323 for (i = 0; i < block_size; i++) {
324 const int diff = coeff[i] - dqcoeff[i];
325 error += diff * diff;
326 sqcoeff += coeff[i] * coeff[i];
327 }
328
329 *ssz = sqcoeff;
330 return error;
331 }
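// Illustrative note: *ssz returns the sum of squared unquantized coefficients,
// i.e. the distortion that would result from zeroing the whole block, which
// callers such as dist_block() report as the skip SSE.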
332
333 int64_t vp9_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
334 int block_size) {
335 int i;
336 int64_t error = 0;
337
338 for (i = 0; i < block_size; i++) {
339 const int diff = coeff[i] - dqcoeff[i];
340 error += diff * diff;
341 }
342
343 return error;
344 }
345
346 /* The trailing '0' is a terminator which is used inside cost_coeffs() to
347 * decide whether to include the cost of a trailing EOB node or not (i.e. we
348 * can skip this if the last coefficient in this transform block, e.g. the
349 * 16th coefficient in a 4x4 block or the 64th coefficient in an 8x8 block,
350 * is non-zero). */
351 static const int16_t band_counts[TX_SIZES][8] = {
352 { 1, 2, 3, 4, 3, 16 - 13, 0 },
353 { 1, 2, 3, 4, 11, 64 - 21, 0 },
354 { 1, 2, 3, 4, 11, 256 - 21, 0 },
355 { 1, 2, 3, 4, 11, 1024 - 21, 0 },
356 };
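/* Illustrative note: each row gives the number of coefficients in each of the
 * six coefficient bands for one transform size; e.g. for TX_8X8 the bands hold
 * 1, 2, 3, 4, 11 and 64 - 21 = 43 coefficients (1 + 2 + 3 + 4 + 11 = 21), and
 * the trailing 0 terminates the band walk in cost_coeffs().
 */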
357 static int cost_coeffs(MACROBLOCK *x,
358 int plane, int block,
359 ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L,
360 TX_SIZE tx_size,
361 const int16_t *scan, const int16_t *nb,
362 int use_fast_coef_costing) {
363 MACROBLOCKD *const xd = &x->e_mbd;
364 MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
365 const struct macroblock_plane *p = &x->plane[plane];
366 const PLANE_TYPE type = get_plane_type(plane);
367 const int16_t *band_count = &band_counts[tx_size][1];
368 const int eob = p->eobs[block];
369 const tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
370 unsigned int (*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
371 x->token_costs[tx_size][type][is_inter_block(mbmi)];
372 uint8_t token_cache[32 * 32];
373 int pt = combine_entropy_contexts(*A, *L);
374 int c, cost;
375 #if CONFIG_VP9_HIGHBITDEPTH
376 const int16_t *cat6_high_cost = vp9_get_high_cost_table(xd->bd);
377 #else
378 const int16_t *cat6_high_cost = vp9_get_high_cost_table(8);
379 #endif
380
381 // Check for consistency of tx_size with mode info
382 assert(type == PLANE_TYPE_Y ? mbmi->tx_size == tx_size :
383 get_uv_tx_size(mbmi, &xd->plane[plane]) == tx_size);
384
385 if (eob == 0) {
386 // single eob token
387 cost = token_costs[0][0][pt][EOB_TOKEN];
388 c = 0;
389 } else {
390 int band_left = *band_count++;
391
392 // dc token
393 int v = qcoeff[0];
394 int16_t prev_t;
395 EXTRABIT e;
396 vp9_get_token_extra(v, &prev_t, &e);
397 cost = (*token_costs)[0][pt][prev_t] +
398 vp9_get_cost(prev_t, e, cat6_high_cost);
399
400 token_cache[0] = vp9_pt_energy_class[prev_t];
401 ++token_costs;
402
403 // ac tokens
404 for (c = 1; c < eob; c++) {
405 const int rc = scan[c];
406 int16_t t;
407
408 v = qcoeff[rc];
409 vp9_get_token_extra(v, &t, &e);
410 if (use_fast_coef_costing) {
411 cost += (*token_costs)[!prev_t][!prev_t][t] +
412 vp9_get_cost(t, e, cat6_high_cost);
413 } else {
414 pt = get_coef_context(nb, token_cache, c);
415 cost += (*token_costs)[!prev_t][pt][t] +
416 vp9_get_cost(t, e, cat6_high_cost);
417 token_cache[rc] = vp9_pt_energy_class[t];
418 }
419 prev_t = t;
420 if (!--band_left) {
421 band_left = *band_count++;
422 ++token_costs;
423 }
424 }
425
426 // eob token
427 if (band_left) {
428 if (use_fast_coef_costing) {
429 cost += (*token_costs)[0][!prev_t][EOB_TOKEN];
430 } else {
431 pt = get_coef_context(nb, token_cache, c);
432 cost += (*token_costs)[0][pt][EOB_TOKEN];
433 }
434 }
435 }
436
437 // Set the contexts to whether the eob is past the first coefficient.
438 *A = *L = (c > 0);
439
440 return cost;
441 }
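/* Usage sketch (illustrative): the rate returned here is folded into the
 * rate-distortion decision with the RDCOST macro, roughly
 *
 *   rd = RDCOST(x->rdmult, x->rddiv, cost_coeffs(...), dist);
 *
 * which is how rate_block()/block_rd_txfm() below combine coefficient cost
 * with block distortion.
 */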
442
443 static void dist_block(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
444 int64_t *out_dist, int64_t *out_sse) {
445 const int ss_txfrm_size = tx_size << 1;
446 MACROBLOCKD* const xd = &x->e_mbd;
447 const struct macroblock_plane *const p = &x->plane[plane];
448 const struct macroblockd_plane *const pd = &xd->plane[plane];
449 int64_t this_sse;
450 int shift = tx_size == TX_32X32 ? 0 : 2;
451 tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
452 tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
453 #if CONFIG_VP9_HIGHBITDEPTH
454 const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
455 *out_dist = vp9_highbd_block_error_dispatch(coeff, dqcoeff,
456 16 << ss_txfrm_size,
457 &this_sse, bd) >> shift;
458 #else
459 *out_dist = vp9_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
460 &this_sse) >> shift;
461 #endif // CONFIG_VP9_HIGHBITDEPTH
462 *out_sse = this_sse >> shift;
463
464 if (x->skip_encode && !is_inter_block(&xd->mi[0]->mbmi)) {
465 // TODO(jingning): tune the model to better capture the distortion.
466 int64_t p = (pd->dequant[1] * pd->dequant[1] *
467 (1 << ss_txfrm_size)) >>
468 #if CONFIG_VP9_HIGHBITDEPTH
469 (shift + 2 + (bd - 8) * 2);
470 #else
471 (shift + 2);
472 #endif // CONFIG_VP9_HIGHBITDEPTH
473 *out_dist += (p >> 4);
474 *out_sse += p;
475 }
476 }
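// Illustrative note: out_dist and out_sse are produced on the same >> shift
// scale, so callers such as block_rd_txfm() can compare
// RDCOST(rdmult, rddiv, rate, dist) against RDCOST(rdmult, rddiv, 0, sse)
// directly when deciding whether coding the coefficients beats skipping them.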
477
478 static int rate_block(int plane, int block, BLOCK_SIZE plane_bsize,
479 TX_SIZE tx_size, struct rdcost_block_args* args) {
480 int x_idx, y_idx;
481 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x_idx, &y_idx);
482
483 return cost_coeffs(args->x, plane, block, args->t_above + x_idx,
484 args->t_left + y_idx, tx_size,
485 args->so->scan, args->so->neighbors,
486 args->use_fast_coef_costing);
487 }
488
489 static void block_rd_txfm(int plane, int block, BLOCK_SIZE plane_bsize,
490 TX_SIZE tx_size, void *arg) {
491 struct rdcost_block_args *args = arg;
492 MACROBLOCK *const x = args->x;
493 MACROBLOCKD *const xd = &x->e_mbd;
494 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
495 int64_t rd1, rd2, rd;
496 int rate;
497 int64_t dist;
498 int64_t sse;
499
500 if (args->exit_early)
501 return;
502
503 if (!is_inter_block(mbmi)) {
504 struct encode_b_args arg = {x, NULL, &mbmi->skip};
505 vp9_encode_block_intra(plane, block, plane_bsize, tx_size, &arg);
506 dist_block(x, plane, block, tx_size, &dist, &sse);
507 } else if (max_txsize_lookup[plane_bsize] == tx_size) {
508 if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
509 SKIP_TXFM_NONE) {
510 // full forward transform and quantization
511 vp9_xform_quant(x, plane, block, plane_bsize, tx_size);
512 dist_block(x, plane, block, tx_size, &dist, &sse);
513 } else if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
514 SKIP_TXFM_AC_ONLY) {
515 // compute DC coefficient
516 tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block);
517 tran_low_t *const dqcoeff = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block);
518 vp9_xform_quant_dc(x, plane, block, plane_bsize, tx_size);
519 sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
520 dist = sse;
521 if (x->plane[plane].eobs[block]) {
522 const int64_t orig_sse = (int64_t)coeff[0] * coeff[0];
523 const int64_t resd_sse = coeff[0] - dqcoeff[0];
524 int64_t dc_correct = orig_sse - resd_sse * resd_sse;
525 #if CONFIG_VP9_HIGHBITDEPTH
526 dc_correct >>= ((xd->bd - 8) * 2);
527 #endif
528 if (tx_size != TX_32X32)
529 dc_correct >>= 2;
530
531 dist = VPXMAX(0, sse - dc_correct);
532 }
533 } else {
534 // SKIP_TXFM_AC_DC
535 // skip forward transform
536 x->plane[plane].eobs[block] = 0;
537 sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
538 dist = sse;
539 }
540 } else {
541 // full forward transform and quantization
542 vp9_xform_quant(x, plane, block, plane_bsize, tx_size);
543 dist_block(x, plane, block, tx_size, &dist, &sse);
544 }
545
546 rd = RDCOST(x->rdmult, x->rddiv, 0, dist);
547 if (args->this_rd + rd > args->best_rd) {
548 args->exit_early = 1;
549 return;
550 }
551
552 rate = rate_block(plane, block, plane_bsize, tx_size, args);
553 rd1 = RDCOST(x->rdmult, x->rddiv, rate, dist);
554 rd2 = RDCOST(x->rdmult, x->rddiv, 0, sse);
555
556 // TODO(jingning): temporarily enabled only for luma component
557 rd = VPXMIN(rd1, rd2);
558 if (plane == 0)
559 x->zcoeff_blk[tx_size][block] = !x->plane[plane].eobs[block] ||
560 (rd1 > rd2 && !xd->lossless);
561
562 args->this_rate += rate;
563 args->this_dist += dist;
564 args->this_sse += sse;
565 args->this_rd += rd;
566
567 if (args->this_rd > args->best_rd) {
568 args->exit_early = 1;
569 return;
570 }
571
572 args->skippable &= !x->plane[plane].eobs[block];
573 }
574
575 static void txfm_rd_in_plane(MACROBLOCK *x,
576 int *rate, int64_t *distortion,
577 int *skippable, int64_t *sse,
578 int64_t ref_best_rd, int plane,
579 BLOCK_SIZE bsize, TX_SIZE tx_size,
580 int use_fast_coef_costing) {
581 MACROBLOCKD *const xd = &x->e_mbd;
582 const struct macroblockd_plane *const pd = &xd->plane[plane];
583 struct rdcost_block_args args;
584 vp9_zero(args);
585 args.x = x;
586 args.best_rd = ref_best_rd;
587 args.use_fast_coef_costing = use_fast_coef_costing;
588 args.skippable = 1;
589
590 if (plane == 0)
591 xd->mi[0]->mbmi.tx_size = tx_size;
592
593 vp9_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
594
595 args.so = get_scan(xd, tx_size, get_plane_type(plane), 0);
596
597 vp9_foreach_transformed_block_in_plane(xd, bsize, plane,
598 block_rd_txfm, &args);
599 if (args.exit_early) {
600 *rate = INT_MAX;
601 *distortion = INT64_MAX;
602 *sse = INT64_MAX;
603 *skippable = 0;
604 } else {
605 *distortion = args.this_dist;
606 *rate = args.this_rate;
607 *sse = args.this_sse;
608 *skippable = args.skippable;
609 }
610 }
611
612 static void choose_largest_tx_size(VP9_COMP *cpi, MACROBLOCK *x,
613 int *rate, int64_t *distortion,
614 int *skip, int64_t *sse,
615 int64_t ref_best_rd,
616 BLOCK_SIZE bs) {
617 const TX_SIZE max_tx_size = max_txsize_lookup[bs];
618 VP9_COMMON *const cm = &cpi->common;
619 const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
620 MACROBLOCKD *const xd = &x->e_mbd;
621 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
622
623 mbmi->tx_size = VPXMIN(max_tx_size, largest_tx_size);
624
625 txfm_rd_in_plane(x, rate, distortion, skip,
626 sse, ref_best_rd, 0, bs,
627 mbmi->tx_size, cpi->sf.use_fast_coef_costing);
628 }
629
630 static void choose_tx_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
631 int *rate,
632 int64_t *distortion,
633 int *skip,
634 int64_t *psse,
635 int64_t ref_best_rd,
636 BLOCK_SIZE bs) {
637 const TX_SIZE max_tx_size = max_txsize_lookup[bs];
638 VP9_COMMON *const cm = &cpi->common;
639 MACROBLOCKD *const xd = &x->e_mbd;
640 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
641 vpx_prob skip_prob = vp9_get_skip_prob(cm, xd);
642 int r[TX_SIZES][2], s[TX_SIZES];
643 int64_t d[TX_SIZES], sse[TX_SIZES];
644 int64_t rd[TX_SIZES][2] = {{INT64_MAX, INT64_MAX},
645 {INT64_MAX, INT64_MAX},
646 {INT64_MAX, INT64_MAX},
647 {INT64_MAX, INT64_MAX}};
648 int n, m;
649 int s0, s1;
650 int64_t best_rd = INT64_MAX;
651 TX_SIZE best_tx = max_tx_size;
652 int start_tx, end_tx;
653
654 const vpx_prob *tx_probs = get_tx_probs2(max_tx_size, xd, &cm->fc->tx_probs);
655 assert(skip_prob > 0);
656 s0 = vp9_cost_bit(skip_prob, 0);
657 s1 = vp9_cost_bit(skip_prob, 1);
658
659 if (cm->tx_mode == TX_MODE_SELECT) {
660 start_tx = max_tx_size;
661 end_tx = 0;
662 } else {
663 TX_SIZE chosen_tx_size = VPXMIN(max_tx_size,
664 tx_mode_to_biggest_tx_size[cm->tx_mode]);
665 start_tx = chosen_tx_size;
666 end_tx = chosen_tx_size;
667 }
668
669 for (n = start_tx; n >= end_tx; n--) {
670 int r_tx_size = 0;
671 for (m = 0; m <= n - (n == (int) max_tx_size); m++) {
672 if (m == n)
673 r_tx_size += vp9_cost_zero(tx_probs[m]);
674 else
675 r_tx_size += vp9_cost_one(tx_probs[m]);
676 }
677 txfm_rd_in_plane(x, &r[n][0], &d[n], &s[n],
678 &sse[n], ref_best_rd, 0, bs, n,
679 cpi->sf.use_fast_coef_costing);
680 r[n][1] = r[n][0];
681 if (r[n][0] < INT_MAX) {
682 r[n][1] += r_tx_size;
683 }
684 if (d[n] == INT64_MAX || r[n][0] == INT_MAX) {
685 rd[n][0] = rd[n][1] = INT64_MAX;
686 } else if (s[n]) {
687 if (is_inter_block(mbmi)) {
688 rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, sse[n]);
689 r[n][1] -= r_tx_size;
690 } else {
691 rd[n][0] = RDCOST(x->rdmult, x->rddiv, s1, sse[n]);
692 rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1 + r_tx_size, sse[n]);
693 }
694 } else {
695 rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]);
696 rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]);
697 }
698
699 if (is_inter_block(mbmi) && !xd->lossless && !s[n] && sse[n] != INT64_MAX) {
700 rd[n][0] = VPXMIN(rd[n][0], RDCOST(x->rdmult, x->rddiv, s1, sse[n]));
701 rd[n][1] = VPXMIN(rd[n][1], RDCOST(x->rdmult, x->rddiv, s1, sse[n]));
702 }
703
704 // Early termination in transform size search.
705 if (cpi->sf.tx_size_search_breakout &&
706 (rd[n][1] == INT64_MAX ||
707 (n < (int) max_tx_size && rd[n][1] > rd[n + 1][1]) ||
708 s[n] == 1))
709 break;
710
711 if (rd[n][1] < best_rd) {
712 best_tx = n;
713 best_rd = rd[n][1];
714 }
715 }
716 mbmi->tx_size = best_tx;
717
718 *distortion = d[mbmi->tx_size];
719 *rate = r[mbmi->tx_size][cm->tx_mode == TX_MODE_SELECT];
720 *skip = s[mbmi->tx_size];
721 *psse = sse[mbmi->tx_size];
722 }
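/* Illustrative note: rd[n][0] is the RD cost of transform size n without the
 * cost of signalling that size, while rd[n][1] adds the tx_size bits
 * (r_tx_size).  The [1] column drives the search, and the final *rate picks
 * the matching column via (cm->tx_mode == TX_MODE_SELECT).
 */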
723
724 static void super_block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
725 int64_t *distortion, int *skip,
726 int64_t *psse, BLOCK_SIZE bs,
727 int64_t ref_best_rd) {
728 MACROBLOCKD *xd = &x->e_mbd;
729 int64_t sse;
730 int64_t *ret_sse = psse ? psse : &sse;
731
732 assert(bs == xd->mi[0]->mbmi.sb_type);
733
734 if (cpi->sf.tx_size_search_method == USE_LARGESTALL || xd->lossless) {
735 choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
736 bs);
737 } else {
738 choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse,
739 ref_best_rd, bs);
740 }
741 }
742
743 static int conditional_skipintra(PREDICTION_MODE mode,
744 PREDICTION_MODE best_intra_mode) {
745 if (mode == D117_PRED &&
746 best_intra_mode != V_PRED &&
747 best_intra_mode != D135_PRED)
748 return 1;
749 if (mode == D63_PRED &&
750 best_intra_mode != V_PRED &&
751 best_intra_mode != D45_PRED)
752 return 1;
753 if (mode == D207_PRED &&
754 best_intra_mode != H_PRED &&
755 best_intra_mode != D45_PRED)
756 return 1;
757 if (mode == D153_PRED &&
758 best_intra_mode != H_PRED &&
759 best_intra_mode != D135_PRED)
760 return 1;
761 return 0;
762 }
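/* Illustrative example: with best_intra_mode == DC_PRED all four oblique modes
 * listed above (D117, D63, D207, D153) are skipped, while with
 * best_intra_mode == V_PRED the V-adjacent modes D117 and D63 are still
 * searched.  This implements the FLAG_SKIP_INTRA_DIRMISMATCH speed feature
 * used by the intra searches below.
 */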
763
764 static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x,
765 int row, int col,
766 PREDICTION_MODE *best_mode,
767 const int *bmode_costs,
768 ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
769 int *bestrate, int *bestratey,
770 int64_t *bestdistortion,
771 BLOCK_SIZE bsize, int64_t rd_thresh) {
772 PREDICTION_MODE mode;
773 MACROBLOCKD *const xd = &x->e_mbd;
774 int64_t best_rd = rd_thresh;
775 struct macroblock_plane *p = &x->plane[0];
776 struct macroblockd_plane *pd = &xd->plane[0];
777 const int src_stride = p->src.stride;
778 const int dst_stride = pd->dst.stride;
779 const uint8_t *src_init = &p->src.buf[row * 4 * src_stride + col * 4];
780 uint8_t *dst_init = &pd->dst.buf[row * 4 * src_stride + col * 4];
781 ENTROPY_CONTEXT ta[2], tempa[2];
782 ENTROPY_CONTEXT tl[2], templ[2];
783 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
784 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
785 int idx, idy;
786 uint8_t best_dst[8 * 8];
787 #if CONFIG_VP9_HIGHBITDEPTH
788 uint16_t best_dst16[8 * 8];
789 #endif
790
791 memcpy(ta, a, sizeof(ta));
792 memcpy(tl, l, sizeof(tl));
793 xd->mi[0]->mbmi.tx_size = TX_4X4;
794
795 #if CONFIG_VP9_HIGHBITDEPTH
796 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
797 for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
798 int64_t this_rd;
799 int ratey = 0;
800 int64_t distortion = 0;
801 int rate = bmode_costs[mode];
802
803 if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode)))
804 continue;
805
806 // Only do the oblique modes if the best so far is
807 // one of the neighboring directional modes
808 if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
809 if (conditional_skipintra(mode, *best_mode))
810 continue;
811 }
812
813 memcpy(tempa, ta, sizeof(ta));
814 memcpy(templ, tl, sizeof(tl));
815
816 for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
817 for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
818 const int block = (row + idy) * 2 + (col + idx);
819 const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
820 uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
821 int16_t *const src_diff = vp9_raster_block_offset_int16(BLOCK_8X8,
822 block,
823 p->src_diff);
824 tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
825 xd->mi[0]->bmi[block].as_mode = mode;
826 vp9_predict_intra_block(xd, 1, TX_4X4, mode,
827 x->skip_encode ? src : dst,
828 x->skip_encode ? src_stride : dst_stride,
829 dst, dst_stride,
830 col + idx, row + idy, 0);
831 vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride,
832 dst, dst_stride, xd->bd);
833 if (xd->lossless) {
834 const scan_order *so = &vp9_default_scan_orders[TX_4X4];
835 vp9_highbd_fwht4x4(src_diff, coeff, 8);
836 vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
837 ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
838 so->scan, so->neighbors,
839 cpi->sf.use_fast_coef_costing);
840 if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
841 goto next_highbd;
842 vp9_highbd_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block),
843 dst, dst_stride,
844 p->eobs[block], xd->bd);
845 } else {
846 int64_t unused;
847 const TX_TYPE tx_type = get_tx_type_4x4(PLANE_TYPE_Y, xd, block);
848 const scan_order *so = &vp9_scan_orders[TX_4X4][tx_type];
849 if (tx_type == DCT_DCT)
850 vpx_highbd_fdct4x4(src_diff, coeff, 8);
851 else
852 vp9_highbd_fht4x4(src_diff, coeff, 8, tx_type);
853 vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
854 ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
855 so->scan, so->neighbors,
856 cpi->sf.use_fast_coef_costing);
857 distortion += vp9_highbd_block_error_dispatch(
858 coeff, BLOCK_OFFSET(pd->dqcoeff, block),
859 16, &unused, xd->bd) >> 2;
860 if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
861 goto next_highbd;
862 vp9_highbd_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block),
863 dst, dst_stride, p->eobs[block], xd->bd);
864 }
865 }
866 }
867
868 rate += ratey;
869 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
870
871 if (this_rd < best_rd) {
872 *bestrate = rate;
873 *bestratey = ratey;
874 *bestdistortion = distortion;
875 best_rd = this_rd;
876 *best_mode = mode;
877 memcpy(a, tempa, sizeof(tempa));
878 memcpy(l, templ, sizeof(templ));
879 for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
880 memcpy(best_dst16 + idy * 8,
881 CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
882 num_4x4_blocks_wide * 4 * sizeof(uint16_t));
883 }
884 }
885 next_highbd:
886 {}
887 }
888 if (best_rd >= rd_thresh || x->skip_encode)
889 return best_rd;
890
891 for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
892 memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
893 best_dst16 + idy * 8,
894 num_4x4_blocks_wide * 4 * sizeof(uint16_t));
895 }
896
897 return best_rd;
898 }
899 #endif // CONFIG_VP9_HIGHBITDEPTH
900
901 for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
902 int64_t this_rd;
903 int ratey = 0;
904 int64_t distortion = 0;
905 int rate = bmode_costs[mode];
906
907 if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode)))
908 continue;
909
910 // Only do the oblique modes if the best so far is
911 // one of the neighboring directional modes
912 if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
913 if (conditional_skipintra(mode, *best_mode))
914 continue;
915 }
916
917 memcpy(tempa, ta, sizeof(ta));
918 memcpy(templ, tl, sizeof(tl));
919
920 for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
921 for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
922 const int block = (row + idy) * 2 + (col + idx);
923 const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
924 uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
925 int16_t *const src_diff =
926 vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
927 tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
928 xd->mi[0]->bmi[block].as_mode = mode;
929 vp9_predict_intra_block(xd, 1, TX_4X4, mode,
930 x->skip_encode ? src : dst,
931 x->skip_encode ? src_stride : dst_stride,
932 dst, dst_stride, col + idx, row + idy, 0);
933 vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
934
935 if (xd->lossless) {
936 const scan_order *so = &vp9_default_scan_orders[TX_4X4];
937 vp9_fwht4x4(src_diff, coeff, 8);
938 vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
939 ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
940 so->scan, so->neighbors,
941 cpi->sf.use_fast_coef_costing);
942 if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
943 goto next;
944 vp9_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block), dst, dst_stride,
945 p->eobs[block]);
946 } else {
947 int64_t unused;
948 const TX_TYPE tx_type = get_tx_type_4x4(PLANE_TYPE_Y, xd, block);
949 const scan_order *so = &vp9_scan_orders[TX_4X4][tx_type];
950 vp9_fht4x4(src_diff, coeff, 8, tx_type);
951 vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
952 ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
953 so->scan, so->neighbors,
954 cpi->sf.use_fast_coef_costing);
955 #if CONFIG_VP9_HIGHBITDEPTH
956 distortion += vp9_highbd_block_error_8bit(
957 coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16, &unused) >> 2;
958 #else
959 distortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
960 16, &unused) >> 2;
961 #endif
962 if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
963 goto next;
964 vp9_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block),
965 dst, dst_stride, p->eobs[block]);
966 }
967 }
968 }
969
970 rate += ratey;
971 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
972
973 if (this_rd < best_rd) {
974 *bestrate = rate;
975 *bestratey = ratey;
976 *bestdistortion = distortion;
977 best_rd = this_rd;
978 *best_mode = mode;
979 memcpy(a, tempa, sizeof(tempa));
980 memcpy(l, templ, sizeof(templ));
981 for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
982 memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
983 num_4x4_blocks_wide * 4);
984 }
985 next:
986 {}
987 }
988
989 if (best_rd >= rd_thresh || x->skip_encode)
990 return best_rd;
991
992 for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
993 memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
994 num_4x4_blocks_wide * 4);
995
996 return best_rd;
997 }
998
999 static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP *cpi, MACROBLOCK *mb,
1000 int *rate, int *rate_y,
1001 int64_t *distortion,
1002 int64_t best_rd) {
1003 int i, j;
1004 const MACROBLOCKD *const xd = &mb->e_mbd;
1005 MODE_INFO *const mic = xd->mi[0];
1006 const MODE_INFO *above_mi = xd->above_mi;
1007 const MODE_INFO *left_mi = xd->left_mi;
1008 const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
1009 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
1010 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
1011 int idx, idy;
1012 int cost = 0;
1013 int64_t total_distortion = 0;
1014 int tot_rate_y = 0;
1015 int64_t total_rd = 0;
1016 ENTROPY_CONTEXT t_above[4], t_left[4];
1017 const int *bmode_costs = cpi->mbmode_cost;
1018
1019 memcpy(t_above, xd->plane[0].above_context, sizeof(t_above));
1020 memcpy(t_left, xd->plane[0].left_context, sizeof(t_left));
1021
1022 // Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block.
1023 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
1024 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
1025 PREDICTION_MODE best_mode = DC_PRED;
1026 int r = INT_MAX, ry = INT_MAX;
1027 int64_t d = INT64_MAX, this_rd = INT64_MAX;
1028 i = idy * 2 + idx;
1029 if (cpi->common.frame_type == KEY_FRAME) {
1030 const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, i);
1031 const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, i);
1032
1033 bmode_costs = cpi->y_mode_costs[A][L];
1034 }
1035
1036 this_rd = rd_pick_intra4x4block(cpi, mb, idy, idx, &best_mode,
1037 bmode_costs, t_above + idx, t_left + idy,
1038 &r, &ry, &d, bsize, best_rd - total_rd);
1039 if (this_rd >= best_rd - total_rd)
1040 return INT64_MAX;
1041
1042 total_rd += this_rd;
1043 cost += r;
1044 total_distortion += d;
1045 tot_rate_y += ry;
1046
1047 mic->bmi[i].as_mode = best_mode;
1048 for (j = 1; j < num_4x4_blocks_high; ++j)
1049 mic->bmi[i + j * 2].as_mode = best_mode;
1050 for (j = 1; j < num_4x4_blocks_wide; ++j)
1051 mic->bmi[i + j].as_mode = best_mode;
1052
1053 if (total_rd >= best_rd)
1054 return INT64_MAX;
1055 }
1056 }
1057
1058 *rate = cost;
1059 *rate_y = tot_rate_y;
1060 *distortion = total_distortion;
1061 mic->mbmi.mode = mic->bmi[3].as_mode;
1062
1063 return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion);
1064 }
1065
1066 // This function is used only for intra_only frames
1067 static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
1068 int *rate, int *rate_tokenonly,
1069 int64_t *distortion, int *skippable,
1070 BLOCK_SIZE bsize,
1071 int64_t best_rd) {
1072 PREDICTION_MODE mode;
1073 PREDICTION_MODE mode_selected = DC_PRED;
1074 MACROBLOCKD *const xd = &x->e_mbd;
1075 MODE_INFO *const mic = xd->mi[0];
1076 int this_rate, this_rate_tokenonly, s;
1077 int64_t this_distortion, this_rd;
1078 TX_SIZE best_tx = TX_4X4;
1079 int *bmode_costs;
1080 const MODE_INFO *above_mi = xd->above_mi;
1081 const MODE_INFO *left_mi = xd->left_mi;
1082 const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
1083 const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
1084 bmode_costs = cpi->y_mode_costs[A][L];
1085
1086 memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
1087 /* Y Search for intra prediction mode */
1088 for (mode = DC_PRED; mode <= TM_PRED; mode++) {
1089 if (cpi->sf.use_nonrd_pick_mode) {
1090 // These speed features are turned on in hybrid non-RD and RD mode
1091 // for key frame coding in the context of real-time setting.
1092 if (conditional_skipintra(mode, mode_selected))
1093 continue;
1094 if (*skippable)
1095 break;
1096 }
1097
1098 mic->mbmi.mode = mode;
1099
1100 super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion,
1101 &s, NULL, bsize, best_rd);
1102
1103 if (this_rate_tokenonly == INT_MAX)
1104 continue;
1105
1106 this_rate = this_rate_tokenonly + bmode_costs[mode];
1107 this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
1108
1109 if (this_rd < best_rd) {
1110 mode_selected = mode;
1111 best_rd = this_rd;
1112 best_tx = mic->mbmi.tx_size;
1113 *rate = this_rate;
1114 *rate_tokenonly = this_rate_tokenonly;
1115 *distortion = this_distortion;
1116 *skippable = s;
1117 }
1118 }
1119
1120 mic->mbmi.mode = mode_selected;
1121 mic->mbmi.tx_size = best_tx;
1122
1123 return best_rd;
1124 }
1125
1126 // Return value 0: early termination triggered, no valid rd cost available;
1127 // 1: rd cost values are valid.
1128 static int super_block_uvrd(const VP9_COMP *cpi, MACROBLOCK *x,
1129 int *rate, int64_t *distortion, int *skippable,
1130 int64_t *sse, BLOCK_SIZE bsize,
1131 int64_t ref_best_rd) {
1132 MACROBLOCKD *const xd = &x->e_mbd;
1133 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
1134 const TX_SIZE uv_tx_size = get_uv_tx_size(mbmi, &xd->plane[1]);
1135 int plane;
1136 int pnrate = 0, pnskip = 1;
1137 int64_t pndist = 0, pnsse = 0;
1138 int is_cost_valid = 1;
1139
1140 if (ref_best_rd < 0)
1141 is_cost_valid = 0;
1142
1143 if (is_inter_block(mbmi) && is_cost_valid) {
1144 int plane;
1145 for (plane = 1; plane < MAX_MB_PLANE; ++plane)
1146 vp9_subtract_plane(x, bsize, plane);
1147 }
1148
1149 *rate = 0;
1150 *distortion = 0;
1151 *sse = 0;
1152 *skippable = 1;
1153
1154 for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
1155 txfm_rd_in_plane(x, &pnrate, &pndist, &pnskip, &pnsse,
1156 ref_best_rd, plane, bsize, uv_tx_size,
1157 cpi->sf.use_fast_coef_costing);
1158 if (pnrate == INT_MAX) {
1159 is_cost_valid = 0;
1160 break;
1161 }
1162 *rate += pnrate;
1163 *distortion += pndist;
1164 *sse += pnsse;
1165 *skippable &= pnskip;
1166 }
1167
1168 if (!is_cost_valid) {
1169 // reset cost value
1170 *rate = INT_MAX;
1171 *distortion = INT64_MAX;
1172 *sse = INT64_MAX;
1173 *skippable = 0;
1174 }
1175
1176 return is_cost_valid;
1177 }
1178
1179 static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
1180 PICK_MODE_CONTEXT *ctx,
1181 int *rate, int *rate_tokenonly,
1182 int64_t *distortion, int *skippable,
1183 BLOCK_SIZE bsize, TX_SIZE max_tx_size) {
1184 MACROBLOCKD *xd = &x->e_mbd;
1185 PREDICTION_MODE mode;
1186 PREDICTION_MODE mode_selected = DC_PRED;
1187 int64_t best_rd = INT64_MAX, this_rd;
1188 int this_rate_tokenonly, this_rate, s;
1189 int64_t this_distortion, this_sse;
1190
1191 memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
1192 for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
1193 if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode)))
1194 continue;
1195
1196 xd->mi[0]->mbmi.uv_mode = mode;
1197
1198 if (!super_block_uvrd(cpi, x, &this_rate_tokenonly,
1199 &this_distortion, &s, &this_sse, bsize, best_rd))
1200 continue;
1201 this_rate = this_rate_tokenonly +
1202 cpi->intra_uv_mode_cost[cpi->common.frame_type][mode];
1203 this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
1204
1205 if (this_rd < best_rd) {
1206 mode_selected = mode;
1207 best_rd = this_rd;
1208 *rate = this_rate;
1209 *rate_tokenonly = this_rate_tokenonly;
1210 *distortion = this_distortion;
1211 *skippable = s;
1212 if (!x->select_tx_size)
1213 swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);
1214 }
1215 }
1216
1217 xd->mi[0]->mbmi.uv_mode = mode_selected;
1218 return best_rd;
1219 }
1220
1221 static int64_t rd_sbuv_dcpred(const VP9_COMP *cpi, MACROBLOCK *x,
1222 int *rate, int *rate_tokenonly,
1223 int64_t *distortion, int *skippable,
1224 BLOCK_SIZE bsize) {
1225 const VP9_COMMON *cm = &cpi->common;
1226 int64_t unused;
1227
1228 x->e_mbd.mi[0]->mbmi.uv_mode = DC_PRED;
1229 memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
1230 super_block_uvrd(cpi, x, rate_tokenonly, distortion,
1231 skippable, &unused, bsize, INT64_MAX);
1232 *rate = *rate_tokenonly + cpi->intra_uv_mode_cost[cm->frame_type][DC_PRED];
1233 return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
1234 }
1235
1236 static void choose_intra_uv_mode(VP9_COMP *cpi, MACROBLOCK *const x,
1237 PICK_MODE_CONTEXT *ctx,
1238 BLOCK_SIZE bsize, TX_SIZE max_tx_size,
1239 int *rate_uv, int *rate_uv_tokenonly,
1240 int64_t *dist_uv, int *skip_uv,
1241 PREDICTION_MODE *mode_uv) {
1242 // Use an estimated rd for uv_intra based on DC_PRED if the
1243 // appropriate speed flag is set.
1244 if (cpi->sf.use_uv_intra_rd_estimate) {
1245 rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv,
1246 skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
1247 // Else do a proper rd search for each possible transform size that may
1248 // be considered in the main rd loop.
1249 } else {
1250 rd_pick_intra_sbuv_mode(cpi, x, ctx,
1251 rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
1252 bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize, max_tx_size);
1253 }
1254 *mode_uv = x->e_mbd.mi[0]->mbmi.uv_mode;
1255 }
1256
1257 static int cost_mv_ref(const VP9_COMP *cpi, PREDICTION_MODE mode,
1258 int mode_context) {
1259 assert(is_inter_mode(mode));
1260 return cpi->inter_mode_cost[mode_context][INTER_OFFSET(mode)];
1261 }
1262
1263 static int set_and_cost_bmi_mvs(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
1264 int i,
1265 PREDICTION_MODE mode, int_mv this_mv[2],
1266 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
1267 int_mv seg_mvs[MAX_REF_FRAMES],
1268 int_mv *best_ref_mv[2], const int *mvjcost,
1269 int *mvcost[2]) {
1270 MODE_INFO *const mic = xd->mi[0];
1271 const MB_MODE_INFO *const mbmi = &mic->mbmi;
1272 const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
1273 int thismvcost = 0;
1274 int idx, idy;
1275 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
1276 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
1277 const int is_compound = has_second_ref(mbmi);
1278
1279 switch (mode) {
1280 case NEWMV:
1281 this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
1282 thismvcost += vp9_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
1283 mvjcost, mvcost, MV_COST_WEIGHT_SUB);
1284 if (is_compound) {
1285 this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
1286 thismvcost += vp9_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
1287 mvjcost, mvcost, MV_COST_WEIGHT_SUB);
1288 }
1289 break;
1290 case NEARMV:
1291 case NEARESTMV:
1292 this_mv[0].as_int = frame_mv[mode][mbmi->ref_frame[0]].as_int;
1293 if (is_compound)
1294 this_mv[1].as_int = frame_mv[mode][mbmi->ref_frame[1]].as_int;
1295 break;
1296 case ZEROMV:
1297 this_mv[0].as_int = 0;
1298 if (is_compound)
1299 this_mv[1].as_int = 0;
1300 break;
1301 default:
1302 break;
1303 }
1304
1305 mic->bmi[i].as_mv[0].as_int = this_mv[0].as_int;
1306 if (is_compound)
1307 mic->bmi[i].as_mv[1].as_int = this_mv[1].as_int;
1308
1309 mic->bmi[i].as_mode = mode;
1310
1311 for (idy = 0; idy < num_4x4_blocks_high; ++idy)
1312 for (idx = 0; idx < num_4x4_blocks_wide; ++idx)
1313 memmove(&mic->bmi[i + idy * 2 + idx], &mic->bmi[i], sizeof(mic->bmi[i]));
1314
1315 return cost_mv_ref(cpi, mode, mbmi_ext->mode_context[mbmi->ref_frame[0]]) +
1316 thismvcost;
1317 }
1318
1319 static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
1320 MACROBLOCK *x,
1321 int64_t best_yrd,
1322 int i,
1323 int *labelyrate,
1324 int64_t *distortion, int64_t *sse,
1325 ENTROPY_CONTEXT *ta,
1326 ENTROPY_CONTEXT *tl,
1327 int mi_row, int mi_col) {
1328 int k;
1329 MACROBLOCKD *xd = &x->e_mbd;
1330 struct macroblockd_plane *const pd = &xd->plane[0];
1331 struct macroblock_plane *const p = &x->plane[0];
1332 MODE_INFO *const mi = xd->mi[0];
1333 const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->mbmi.sb_type, pd);
1334 const int width = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
1335 const int height = 4 * num_4x4_blocks_high_lookup[plane_bsize];
1336 int idx, idy;
1337
1338 const uint8_t *const src =
1339 &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
1340 uint8_t *const dst = &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i,
1341 pd->dst.stride)];
1342 int64_t thisdistortion = 0, thissse = 0;
1343 int thisrate = 0, ref;
1344 const scan_order *so = &vp9_default_scan_orders[TX_4X4];
1345 const int is_compound = has_second_ref(&mi->mbmi);
1346 const InterpKernel *kernel = vp9_filter_kernels[mi->mbmi.interp_filter];
1347
1348 for (ref = 0; ref < 1 + is_compound; ++ref) {
1349 const uint8_t *pre = &pd->pre[ref].buf[vp9_raster_block_offset(BLOCK_8X8, i,
1350 pd->pre[ref].stride)];
1351 #if CONFIG_VP9_HIGHBITDEPTH
1352 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1353 vp9_highbd_build_inter_predictor(pre, pd->pre[ref].stride,
1354 dst, pd->dst.stride,
1355 &mi->bmi[i].as_mv[ref].as_mv,
1356 &xd->block_refs[ref]->sf, width, height,
1357 ref, kernel, MV_PRECISION_Q3,
1358 mi_col * MI_SIZE + 4 * (i % 2),
1359 mi_row * MI_SIZE + 4 * (i / 2), xd->bd);
1360 } else {
1361 vp9_build_inter_predictor(pre, pd->pre[ref].stride,
1362 dst, pd->dst.stride,
1363 &mi->bmi[i].as_mv[ref].as_mv,
1364 &xd->block_refs[ref]->sf, width, height, ref,
1365 kernel, MV_PRECISION_Q3,
1366 mi_col * MI_SIZE + 4 * (i % 2),
1367 mi_row * MI_SIZE + 4 * (i / 2));
1368 }
1369 #else
1370 vp9_build_inter_predictor(pre, pd->pre[ref].stride,
1371 dst, pd->dst.stride,
1372 &mi->bmi[i].as_mv[ref].as_mv,
1373 &xd->block_refs[ref]->sf, width, height, ref,
1374 kernel, MV_PRECISION_Q3,
1375 mi_col * MI_SIZE + 4 * (i % 2),
1376 mi_row * MI_SIZE + 4 * (i / 2));
1377 #endif // CONFIG_VP9_HIGHBITDEPTH
1378 }
1379
1380 #if CONFIG_VP9_HIGHBITDEPTH
1381 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1382 vpx_highbd_subtract_block(
1383 height, width, vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
1384 8, src, p->src.stride, dst, pd->dst.stride, xd->bd);
1385 } else {
1386 vpx_subtract_block(
1387 height, width, vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
1388 8, src, p->src.stride, dst, pd->dst.stride);
1389 }
1390 #else
1391 vpx_subtract_block(height, width,
1392 vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
1393 8, src, p->src.stride, dst, pd->dst.stride);
1394 #endif // CONFIG_VP9_HIGHBITDEPTH
1395
1396 k = i;
1397 for (idy = 0; idy < height / 4; ++idy) {
1398 for (idx = 0; idx < width / 4; ++idx) {
1399 #if CONFIG_VP9_HIGHBITDEPTH
1400 const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
1401 #endif
1402 int64_t ssz, rd, rd1, rd2;
1403 tran_low_t* coeff;
1404
1405 k += (idy * 2 + idx);
1406 coeff = BLOCK_OFFSET(p->coeff, k);
1407 x->fwd_txm4x4(vp9_raster_block_offset_int16(BLOCK_8X8, k, p->src_diff),
1408 coeff, 8);
1409 vp9_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan);
1410 #if CONFIG_VP9_HIGHBITDEPTH
1411 thisdistortion += vp9_highbd_block_error_dispatch(
1412 coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz, bd);
1413 #else
1414 thisdistortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k),
1415 16, &ssz);
1416 #endif // CONFIG_VP9_HIGHBITDEPTH
1417 thissse += ssz;
1418 thisrate += cost_coeffs(x, 0, k, ta + (k & 1), tl + (k >> 1), TX_4X4,
1419 so->scan, so->neighbors,
1420 cpi->sf.use_fast_coef_costing);
1421 rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2);
1422 rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2);
1423 rd = VPXMIN(rd1, rd2);
1424 if (rd >= best_yrd)
1425 return INT64_MAX;
1426 }
1427 }
1428
1429 *distortion = thisdistortion >> 2;
1430 *labelyrate = thisrate;
1431 *sse = thissse >> 2;
1432
1433 return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
1434 }
1435
1436 typedef struct {
1437 int eobs;
1438 int brate;
1439 int byrate;
1440 int64_t bdist;
1441 int64_t bsse;
1442 int64_t brdcost;
1443 int_mv mvs[2];
1444 ENTROPY_CONTEXT ta[2];
1445 ENTROPY_CONTEXT tl[2];
1446 } SEG_RDSTAT;
1447
1448 typedef struct {
1449 int_mv *ref_mv[2];
1450 int_mv mvp;
1451
1452 int64_t segment_rd;
1453 int r;
1454 int64_t d;
1455 int64_t sse;
1456 int segment_yrate;
1457 PREDICTION_MODE modes[4];
1458 SEG_RDSTAT rdstat[4][INTER_MODES];
1459 int mvthresh;
1460 } BEST_SEG_INFO;
1461
1462 static INLINE int mv_check_bounds(const MACROBLOCK *x, const MV *mv) {
1463 return (mv->row >> 3) < x->mv_row_min ||
1464 (mv->row >> 3) > x->mv_row_max ||
1465 (mv->col >> 3) < x->mv_col_min ||
1466 (mv->col >> 3) > x->mv_col_max;
1467 }
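// Illustrative note: the >> 3 converts the 1/8-pel motion vector components to
// full-pel units before comparing against the full-pel search bounds, so a
// non-zero return means the mv falls outside the allowed search window.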
1468
1469 static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
1470 MB_MODE_INFO *const mbmi = &x->e_mbd.mi[0]->mbmi;
1471 struct macroblock_plane *const p = &x->plane[0];
1472 struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
1473
1474 p->src.buf = &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i,
1475 p->src.stride)];
1476 assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
1477 pd->pre[0].buf = &pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8, i,
1478 pd->pre[0].stride)];
1479 if (has_second_ref(mbmi))
1480 pd->pre[1].buf = &pd->pre[1].buf[vp9_raster_block_offset(BLOCK_8X8, i,
1481 pd->pre[1].stride)];
1482 }
1483
1484 static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
1485 struct buf_2d orig_pre[2]) {
1486 MB_MODE_INFO *mbmi = &x->e_mbd.mi[0]->mbmi;
1487 x->plane[0].src = orig_src;
1488 x->e_mbd.plane[0].pre[0] = orig_pre[0];
1489 if (has_second_ref(mbmi))
1490 x->e_mbd.plane[0].pre[1] = orig_pre[1];
1491 }
1492
1493 static INLINE int mv_has_subpel(const MV *mv) {
1494 return (mv->row & 0x0F) || (mv->col & 0x0F);
1495 }
1496
1497 // Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way to encode zero motion.
1498 // TODO(aconverse): Find out if this is still productive, then clean up or remove.
1499 static int check_best_zero_mv(
1500 const VP9_COMP *cpi, const uint8_t mode_context[MAX_REF_FRAMES],
1501 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int this_mode,
1502 const MV_REFERENCE_FRAME ref_frames[2]) {
1503 if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
1504 frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
1505 (ref_frames[1] == NONE ||
1506 frame_mv[this_mode][ref_frames[1]].as_int == 0)) {
1507 int rfc = mode_context[ref_frames[0]];
1508 int c1 = cost_mv_ref(cpi, NEARMV, rfc);
1509 int c2 = cost_mv_ref(cpi, NEARESTMV, rfc);
1510 int c3 = cost_mv_ref(cpi, ZEROMV, rfc);
1511
1512 if (this_mode == NEARMV) {
1513 if (c1 > c3) return 0;
1514 } else if (this_mode == NEARESTMV) {
1515 if (c2 > c3) return 0;
1516 } else {
1517 assert(this_mode == ZEROMV);
1518 if (ref_frames[1] == NONE) {
1519 if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0) ||
1520 (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0))
1521 return 0;
1522 } else {
1523 if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0 &&
1524 frame_mv[NEARESTMV][ref_frames[1]].as_int == 0) ||
1525 (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0 &&
1526 frame_mv[NEARMV][ref_frames[1]].as_int == 0))
1527 return 0;
1528 }
1529 }
1530 }
1531 return 1;
1532 }
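/* Illustrative note: c1/c2/c3 are the signalling costs of NEARMV, NEARESTMV
 * and ZEROMV under this mode context.  A zero-motion candidate is rejected
 * (return 0) whenever one of the other modes would code the same zero mv more
 * cheaply, e.g. a zero NEARMV with c1 > c3 loses to ZEROMV.
 */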
1533
1534 static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
1535 BLOCK_SIZE bsize,
1536 int_mv *frame_mv,
1537 int mi_row, int mi_col,
1538 int_mv single_newmv[MAX_REF_FRAMES],
1539 int *rate_mv) {
1540 const VP9_COMMON *const cm = &cpi->common;
1541 const int pw = 4 * num_4x4_blocks_wide_lookup[bsize];
1542 const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
1543 MACROBLOCKD *xd = &x->e_mbd;
1544 MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
1545 const int refs[2] = {mbmi->ref_frame[0],
1546 mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]};
1547 int_mv ref_mv[2];
1548 int ite, ref;
1549 const InterpKernel *kernel = vp9_filter_kernels[mbmi->interp_filter];
1550 struct scale_factors sf;
1551
1552 // Do joint motion search in compound mode to get more accurate mv.
1553 struct buf_2d backup_yv12[2][MAX_MB_PLANE];
1554 int last_besterr[2] = {INT_MAX, INT_MAX};
1555 const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
1556 vp9_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]),
1557 vp9_get_scaled_ref_frame(cpi, mbmi->ref_frame[1])
1558 };
1559
1560 // Prediction buffer from second frame.
1561 #if CONFIG_VP9_HIGHBITDEPTH
1562 DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[64 * 64]);
1563 uint8_t *second_pred;
1564 #else
1565 DECLARE_ALIGNED(16, uint8_t, second_pred[64 * 64]);
1566 #endif // CONFIG_VP9_HIGHBITDEPTH
1567
1568 for (ref = 0; ref < 2; ++ref) {
1569 ref_mv[ref] = x->mbmi_ext->ref_mvs[refs[ref]][0];
1570
1571 if (scaled_ref_frame[ref]) {
1572 int i;
1573 // Swap out the reference frame for a version that's been scaled to
1574 // match the resolution of the current frame, allowing the existing
1575 // motion search code to be used without additional modifications.
1576 for (i = 0; i < MAX_MB_PLANE; i++)
1577 backup_yv12[ref][i] = xd->plane[i].pre[ref];
1578 vp9_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
1579 NULL);
1580 }
1581
1582 frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int;
1583 }
1584
1585 // Since we have scaled the reference frames to match the size of the current
1586 // frame we must use a unit scaling factor during mode selection.
1587 #if CONFIG_VP9_HIGHBITDEPTH
1588 vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height,
1589 cm->width, cm->height,
1590 cm->use_highbitdepth);
1591 #else
1592 vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height,
1593 cm->width, cm->height);
1594 #endif // CONFIG_VP9_HIGHBITDEPTH
1595
1596 // Allow joint search multiple times iteratively for each reference frame
1597 // and break out of the search loop if it cannot find a better mv.
1598 for (ite = 0; ite < 4; ite++) {
1599 struct buf_2d ref_yv12[2];
1600 int bestsme = INT_MAX;
1601 int sadpb = x->sadperbit16;
1602 MV tmp_mv;
1603 int search_range = 3;
1604
1605 int tmp_col_min = x->mv_col_min;
1606 int tmp_col_max = x->mv_col_max;
1607 int tmp_row_min = x->mv_row_min;
1608 int tmp_row_max = x->mv_row_max;
1609 int id = ite % 2; // Even iterations search in the first reference frame,
1610 // odd iterations search in the second. The predictor
1611 // found for the 'other' reference frame is factored in.
1612
1613 // Initialized here because of a compiler problem in Visual Studio.
1614 ref_yv12[0] = xd->plane[0].pre[0];
1615 ref_yv12[1] = xd->plane[0].pre[1];
1616
1617 // Get the prediction block from the 'other' reference frame.
1618 #if CONFIG_VP9_HIGHBITDEPTH
1619 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1620 second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
1621 vp9_highbd_build_inter_predictor(ref_yv12[!id].buf,
1622 ref_yv12[!id].stride,
1623 second_pred, pw,
1624 &frame_mv[refs[!id]].as_mv,
1625 &sf, pw, ph, 0,
1626 kernel, MV_PRECISION_Q3,
1627 mi_col * MI_SIZE, mi_row * MI_SIZE,
1628 xd->bd);
1629 } else {
1630 second_pred = (uint8_t *)second_pred_alloc_16;
1631 vp9_build_inter_predictor(ref_yv12[!id].buf,
1632 ref_yv12[!id].stride,
1633 second_pred, pw,
1634 &frame_mv[refs[!id]].as_mv,
1635 &sf, pw, ph, 0,
1636 kernel, MV_PRECISION_Q3,
1637 mi_col * MI_SIZE, mi_row * MI_SIZE);
1638 }
1639 #else
1640 vp9_build_inter_predictor(ref_yv12[!id].buf,
1641 ref_yv12[!id].stride,
1642 second_pred, pw,
1643 &frame_mv[refs[!id]].as_mv,
1644 &sf, pw, ph, 0,
1645 kernel, MV_PRECISION_Q3,
1646 mi_col * MI_SIZE, mi_row * MI_SIZE);
1647 #endif // CONFIG_VP9_HIGHBITDEPTH
1648
1649 // Do compound motion search on the current reference frame.
1650 if (id)
1651 xd->plane[0].pre[0] = ref_yv12[id];
1652 vp9_set_mv_search_range(x, &ref_mv[id].as_mv);
1653
1654 // Use the mv result from the single mode as mv predictor.
1655 tmp_mv = frame_mv[refs[id]].as_mv;
1656
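// (The >> 3 below converts the 1/8-pel mv to full-pel units for the
// integer motion search.)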
1657 tmp_mv.col >>= 3;
1658 tmp_mv.row >>= 3;
1659
1660 // Small-range full-pixel motion search.
1661 bestsme = vp9_refining_search_8p_c(x, &tmp_mv, sadpb,
1662 search_range,
1663 &cpi->fn_ptr[bsize],
1664 &ref_mv[id].as_mv, second_pred);
1665 if (bestsme < INT_MAX)
1666 bestsme = vp9_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv,
1667 second_pred, &cpi->fn_ptr[bsize], 1);
1668
1669 x->mv_col_min = tmp_col_min;
1670 x->mv_col_max = tmp_col_max;
1671 x->mv_row_min = tmp_row_min;
1672 x->mv_row_max = tmp_row_max;
1673
1674 if (bestsme < INT_MAX) {
1675 int dis; /* TODO: use dis in distortion calculation later. */
1676 unsigned int sse;
1677 bestsme = cpi->find_fractional_mv_step(
1678 x, &tmp_mv,
1679 &ref_mv[id].as_mv,
1680 cpi->common.allow_high_precision_mv,
1681 x->errorperbit,
1682 &cpi->fn_ptr[bsize],
1683 0, cpi->sf.mv.subpel_iters_per_step,
1684 NULL,
1685 x->nmvjointcost, x->mvcost,
1686 &dis, &sse, second_pred,
1687 pw, ph);
1688 }
1689
1690 // Restore the pointer to the first (possibly scaled) prediction buffer.
1691 if (id)
1692 xd->plane[0].pre[0] = ref_yv12[0];
1693
1694 if (bestsme < last_besterr[id]) {
1695 frame_mv[refs[id]].as_mv = tmp_mv;
1696 last_besterr[id] = bestsme;
1697 } else {
1698 break;
1699 }
1700 }
1701
1702 *rate_mv = 0;
1703
1704 for (ref = 0; ref < 2; ++ref) {
1705 if (scaled_ref_frame[ref]) {
1706 // Restore the prediction frame pointers to their unscaled versions.
1707 int i;
1708 for (i = 0; i < MAX_MB_PLANE; i++)
1709 xd->plane[i].pre[ref] = backup_yv12[ref][i];
1710 }
1711
1712 *rate_mv += vp9_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
1713 &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
1714 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
1715 }
1716 }
1717
1718 static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
1719 int_mv *best_ref_mv,
1720 int_mv *second_best_ref_mv,
1721 int64_t best_rd, int *returntotrate,
1722 int *returnyrate,
1723 int64_t *returndistortion,
1724 int *skippable, int64_t *psse,
1725 int mvthresh,
1726 int_mv seg_mvs[4][MAX_REF_FRAMES],
1727 BEST_SEG_INFO *bsi_buf, int filter_idx,
1728 int mi_row, int mi_col) {
1729 int i;
1730 BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
1731 MACROBLOCKD *xd = &x->e_mbd;
1732 MODE_INFO *mi = xd->mi[0];
1733 MB_MODE_INFO *mbmi = &mi->mbmi;
1734 int mode_idx;
1735 int k, br = 0, idx, idy;
1736 int64_t bd = 0, block_sse = 0;
1737 PREDICTION_MODE this_mode;
1738 VP9_COMMON *cm = &cpi->common;
1739 struct macroblock_plane *const p = &x->plane[0];
1740 struct macroblockd_plane *const pd = &xd->plane[0];
1741 const int label_count = 4;
1742 int64_t this_segment_rd = 0;
1743 int label_mv_thresh;
1744 int segmentyrate = 0;
1745 const BLOCK_SIZE bsize = mbmi->sb_type;
1746 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
1747 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
1748 ENTROPY_CONTEXT t_above[2], t_left[2];
1749 int subpelmv = 1, have_ref = 0;
1750 const int has_second_rf = has_second_ref(mbmi);
1751 const int inter_mode_mask = cpi->sf.inter_mode_mask[bsize];
1752 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
1753
1754 vp9_zero(*bsi);
1755
1756 bsi->segment_rd = best_rd;
1757 bsi->ref_mv[0] = best_ref_mv;
1758 bsi->ref_mv[1] = second_best_ref_mv;
1759 bsi->mvp.as_int = best_ref_mv->as_int;
1760 bsi->mvthresh = mvthresh;
1761
1762 for (i = 0; i < 4; i++)
1763 bsi->modes[i] = ZEROMV;
1764
1765 memcpy(t_above, pd->above_context, sizeof(t_above));
1766 memcpy(t_left, pd->left_context, sizeof(t_left));
1767
1768 // A multiplier of 64 would make this threshold so big that we would
1769 // very rarely check mvs on segments; setting it to 1 makes the mv
1770 // threshold roughly equal to what it is for whole macroblocks, scaled
1771 // down by the label count.
1772 label_mv_thresh = 1 * bsi->mvthresh / label_count;
1773
1774 // Segmentation method overheads
1775 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
1776 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
1777 // TODO(jingning,rbultje): rewrite the rate-distortion optimization
1778 // loop for 4x4/4x8/8x4 block coding. To be replaced with a new rd loop.
1779 int_mv mode_mv[MB_MODE_COUNT][2];
1780 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
1781 PREDICTION_MODE mode_selected = ZEROMV;
1782 int64_t best_rd = INT64_MAX;
1783 const int i = idy * 2 + idx;
1784 int ref;
1785
1786 for (ref = 0; ref < 1 + has_second_rf; ++ref) {
1787 const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
1788 frame_mv[ZEROMV][frame].as_int = 0;
1789 vp9_append_sub8x8_mvs_for_idx(cm, xd, i, ref, mi_row, mi_col,
1790 &frame_mv[NEARESTMV][frame],
1791 &frame_mv[NEARMV][frame],
1792 mbmi_ext->mode_context);
1793 }
1794
1795 // search for the best motion vector on this segment
1796 for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
1797 const struct buf_2d orig_src = x->plane[0].src;
1798 struct buf_2d orig_pre[2];
1799
1800 mode_idx = INTER_OFFSET(this_mode);
1801 bsi->rdstat[i][mode_idx].brdcost = INT64_MAX;
1802 if (!(inter_mode_mask & (1 << this_mode)))
1803 continue;
1804
1805 if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
1806 this_mode, mbmi->ref_frame))
1807 continue;
1808
1809 memcpy(orig_pre, pd->pre, sizeof(orig_pre));
1810 memcpy(bsi->rdstat[i][mode_idx].ta, t_above,
1811 sizeof(bsi->rdstat[i][mode_idx].ta));
1812 memcpy(bsi->rdstat[i][mode_idx].tl, t_left,
1813 sizeof(bsi->rdstat[i][mode_idx].tl));
1814
1815 // motion search for newmv (single predictor case only)
1816 if (!has_second_rf && this_mode == NEWMV &&
1817 seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV) {
1818 MV *const new_mv = &mode_mv[NEWMV][0].as_mv;
1819 int step_param = 0;
1820 int thissme, bestsme = INT_MAX;
1821 int sadpb = x->sadperbit4;
1822 MV mvp_full;
1823 int max_mv;
1824 int cost_list[5];
1825
1826 /* Is the best so far sufficiently good that we can't justify doing
1827 * a new motion search? */
1828 if (best_rd < label_mv_thresh)
1829 break;
1830
1831 if (cpi->oxcf.mode != BEST) {
1832 // use previous block's result as next block's MV predictor.
1833 if (i > 0) {
1834 bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int;
1835 if (i == 2)
1836 bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
1837 }
1838 }
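// (For block 2 the predictor is taken from block 0, the block directly
// above it in raster order, rather than from block 1.)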
1839 if (i == 0)
1840 max_mv = x->max_mv_context[mbmi->ref_frame[0]];
1841 else
1842 max_mv =
1843 VPXMAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3;
1844
1845 if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
1846 // Take a weighted average of the step_params based on the last
1847 // frame's max mv magnitude and the best ref mvs of the current block
1848 // for the given reference.
1849 step_param = (vp9_init_search_range(max_mv) +
1850 cpi->mv_step_param) / 2;
1851 } else {
1852 step_param = cpi->mv_step_param;
1853 }
1854
1855 mvp_full.row = bsi->mvp.as_mv.row >> 3;
1856 mvp_full.col = bsi->mvp.as_mv.col >> 3;
1857
1858 if (cpi->sf.adaptive_motion_search) {
1859 mvp_full.row = x->pred_mv[mbmi->ref_frame[0]].row >> 3;
1860 mvp_full.col = x->pred_mv[mbmi->ref_frame[0]].col >> 3;
1861 step_param = VPXMAX(step_param, 8);
1862 }
1863
1864 // adjust src pointer for this block
1865 mi_buf_shift(x, i);
1866
1867 vp9_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv);
1868
1869 bestsme = vp9_full_pixel_search(
1870 cpi, x, bsize, &mvp_full, step_param, sadpb,
1871 cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
1872 &bsi->ref_mv[0]->as_mv, new_mv,
1873 INT_MAX, 1);
1874
1875 // Should we do a full search (best quality only)?
1876 if (cpi->oxcf.mode == BEST) {
1877 int_mv *const best_mv = &mi->bmi[i].as_mv[0];
1878 /* Check if mvp_full is within the range. */
1879 clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max,
1880 x->mv_row_min, x->mv_row_max);
1881 thissme = cpi->full_search_sad(x, &mvp_full,
1882 sadpb, 16, &cpi->fn_ptr[bsize],
1883 &bsi->ref_mv[0]->as_mv,
1884 &best_mv->as_mv);
1885 cost_list[1] = cost_list[2] = cost_list[3] = cost_list[4] = INT_MAX;
1886 if (thissme < bestsme) {
1887 bestsme = thissme;
1888 *new_mv = best_mv->as_mv;
1889 } else {
1890 // The full search result is actually worse so re-instate the
1891 // previous best vector
1892 best_mv->as_mv = *new_mv;
1893 }
1894 }
1895
1896 if (bestsme < INT_MAX) {
1897 int distortion;
1898 cpi->find_fractional_mv_step(
1899 x,
1900 new_mv,
1901 &bsi->ref_mv[0]->as_mv,
1902 cm->allow_high_precision_mv,
1903 x->errorperbit, &cpi->fn_ptr[bsize],
1904 cpi->sf.mv.subpel_force_stop,
1905 cpi->sf.mv.subpel_iters_per_step,
1906 cond_cost_list(cpi, cost_list),
1907 x->nmvjointcost, x->mvcost,
1908 &distortion,
1909 &x->pred_sse[mbmi->ref_frame[0]],
1910 NULL, 0, 0);
1911
1912 // save motion search result for use in compound prediction
1913 seg_mvs[i][mbmi->ref_frame[0]].as_mv = *new_mv;
1914 }
1915
1916 if (cpi->sf.adaptive_motion_search)
1917 x->pred_mv[mbmi->ref_frame[0]] = *new_mv;
1918
1919 // restore src pointers
1920 mi_buf_restore(x, orig_src, orig_pre);
1921 }
1922
1923 if (has_second_rf) {
1924 if (seg_mvs[i][mbmi->ref_frame[1]].as_int == INVALID_MV ||
1925 seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV)
1926 continue;
1927 }
1928
1929 if (has_second_rf && this_mode == NEWMV &&
1930 mbmi->interp_filter == EIGHTTAP) {
1931 // adjust src pointers
1932 mi_buf_shift(x, i);
1933 if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
1934 int rate_mv;
1935 joint_motion_search(cpi, x, bsize, frame_mv[this_mode],
1936 mi_row, mi_col, seg_mvs[i],
1937 &rate_mv);
1938 seg_mvs[i][mbmi->ref_frame[0]].as_int =
1939 frame_mv[this_mode][mbmi->ref_frame[0]].as_int;
1940 seg_mvs[i][mbmi->ref_frame[1]].as_int =
1941 frame_mv[this_mode][mbmi->ref_frame[1]].as_int;
1942 }
1943 // restore src pointers
1944 mi_buf_restore(x, orig_src, orig_pre);
1945 }
1946
1947 bsi->rdstat[i][mode_idx].brate =
1948 set_and_cost_bmi_mvs(cpi, x, xd, i, this_mode, mode_mv[this_mode],
1949 frame_mv, seg_mvs[i], bsi->ref_mv,
1950 x->nmvjointcost, x->mvcost);
1951
1952 for (ref = 0; ref < 1 + has_second_rf; ++ref) {
1953 bsi->rdstat[i][mode_idx].mvs[ref].as_int =
1954 mode_mv[this_mode][ref].as_int;
1955 if (num_4x4_blocks_wide > 1)
1956 bsi->rdstat[i + 1][mode_idx].mvs[ref].as_int =
1957 mode_mv[this_mode][ref].as_int;
1958 if (num_4x4_blocks_high > 1)
1959 bsi->rdstat[i + 2][mode_idx].mvs[ref].as_int =
1960 mode_mv[this_mode][ref].as_int;
1961 }
1962
1963 // Trap vectors that reach beyond the UMV borders
1964 if (mv_check_bounds(x, &mode_mv[this_mode][0].as_mv) ||
1965 (has_second_rf &&
1966 mv_check_bounds(x, &mode_mv[this_mode][1].as_mv)))
1967 continue;
1968
1969 if (filter_idx > 0) {
1970 BEST_SEG_INFO *ref_bsi = bsi_buf;
1971 subpelmv = 0;
1972 have_ref = 1;
1973
1974 for (ref = 0; ref < 1 + has_second_rf; ++ref) {
1975 subpelmv |= mv_has_subpel(&mode_mv[this_mode][ref].as_mv);
1976 have_ref &= mode_mv[this_mode][ref].as_int ==
1977 ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
1978 }
1979
1980 if (filter_idx > 1 && !subpelmv && !have_ref) {
1981 ref_bsi = bsi_buf + 1;
1982 have_ref = 1;
1983 for (ref = 0; ref < 1 + has_second_rf; ++ref)
1984 have_ref &= mode_mv[this_mode][ref].as_int ==
1985 ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
1986 }
1987
1988 if (!subpelmv && have_ref &&
1989 ref_bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
1990 memcpy(&bsi->rdstat[i][mode_idx], &ref_bsi->rdstat[i][mode_idx],
1991 sizeof(SEG_RDSTAT));
1992 if (num_4x4_blocks_wide > 1)
1993 bsi->rdstat[i + 1][mode_idx].eobs =
1994 ref_bsi->rdstat[i + 1][mode_idx].eobs;
1995 if (num_4x4_blocks_high > 1)
1996 bsi->rdstat[i + 2][mode_idx].eobs =
1997 ref_bsi->rdstat[i + 2][mode_idx].eobs;
1998
1999 if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
2000 mode_selected = this_mode;
2001 best_rd = bsi->rdstat[i][mode_idx].brdcost;
2002 }
2003 continue;
2004 }
2005 }
2006
2007 bsi->rdstat[i][mode_idx].brdcost =
2008 encode_inter_mb_segment(cpi, x,
2009 bsi->segment_rd - this_segment_rd, i,
2010 &bsi->rdstat[i][mode_idx].byrate,
2011 &bsi->rdstat[i][mode_idx].bdist,
2012 &bsi->rdstat[i][mode_idx].bsse,
2013 bsi->rdstat[i][mode_idx].ta,
2014 bsi->rdstat[i][mode_idx].tl,
2015 mi_row, mi_col);
2016 if (bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
2017 bsi->rdstat[i][mode_idx].brdcost += RDCOST(x->rdmult, x->rddiv,
2018 bsi->rdstat[i][mode_idx].brate, 0);
2019 bsi->rdstat[i][mode_idx].brate += bsi->rdstat[i][mode_idx].byrate;
2020 bsi->rdstat[i][mode_idx].eobs = p->eobs[i];
2021 if (num_4x4_blocks_wide > 1)
2022 bsi->rdstat[i + 1][mode_idx].eobs = p->eobs[i + 1];
2023 if (num_4x4_blocks_high > 1)
2024 bsi->rdstat[i + 2][mode_idx].eobs = p->eobs[i + 2];
2025 }
2026
2027 if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
2028 mode_selected = this_mode;
2029 best_rd = bsi->rdstat[i][mode_idx].brdcost;
2030 }
2031 } /*for each 4x4 mode*/
2032
2033 if (best_rd == INT64_MAX) {
2034 int iy, midx;
2035 for (iy = i + 1; iy < 4; ++iy)
2036 for (midx = 0; midx < INTER_MODES; ++midx)
2037 bsi->rdstat[iy][midx].brdcost = INT64_MAX;
2038 bsi->segment_rd = INT64_MAX;
2039 return INT64_MAX;
2040 }
2041
2042 mode_idx = INTER_OFFSET(mode_selected);
2043 memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above));
2044 memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left));
2045
2046 set_and_cost_bmi_mvs(cpi, x, xd, i, mode_selected, mode_mv[mode_selected],
2047 frame_mv, seg_mvs[i], bsi->ref_mv, x->nmvjointcost,
2048 x->mvcost);
2049
2050 br += bsi->rdstat[i][mode_idx].brate;
2051 bd += bsi->rdstat[i][mode_idx].bdist;
2052 block_sse += bsi->rdstat[i][mode_idx].bsse;
2053 segmentyrate += bsi->rdstat[i][mode_idx].byrate;
2054 this_segment_rd += bsi->rdstat[i][mode_idx].brdcost;
2055
2056 if (this_segment_rd > bsi->segment_rd) {
2057 int iy, midx;
2058 for (iy = i + 1; iy < 4; ++iy)
2059 for (midx = 0; midx < INTER_MODES; ++midx)
2060 bsi->rdstat[iy][midx].brdcost = INT64_MAX;
2061 bsi->segment_rd = INT64_MAX;
2062 return INT64_MAX;
2063 }
2064 }
2065 } /* for each label */
2066
2067 bsi->r = br;
2068 bsi->d = bd;
2069 bsi->segment_yrate = segmentyrate;
2070 bsi->segment_rd = this_segment_rd;
2071 bsi->sse = block_sse;
2072
2073 // update the coding decisions
2074 for (k = 0; k < 4; ++k)
2075 bsi->modes[k] = mi->bmi[k].as_mode;
2076
2077 if (bsi->segment_rd > best_rd)
2078 return INT64_MAX;
2079 /* set it to the best */
2080 for (i = 0; i < 4; i++) {
2081 mode_idx = INTER_OFFSET(bsi->modes[i]);
2082 mi->bmi[i].as_mv[0].as_int = bsi->rdstat[i][mode_idx].mvs[0].as_int;
2083 if (has_second_ref(mbmi))
2084 mi->bmi[i].as_mv[1].as_int = bsi->rdstat[i][mode_idx].mvs[1].as_int;
2085 x->plane[0].eobs[i] = bsi->rdstat[i][mode_idx].eobs;
2086 mi->bmi[i].as_mode = bsi->modes[i];
2087 }
2088
2089 /*
2090 * used to set mbmi->mv.as_int
2091 */
2092 *returntotrate = bsi->r;
2093 *returndistortion = bsi->d;
2094 *returnyrate = bsi->segment_yrate;
2095 *skippable = vp9_is_skippable_in_plane(x, BLOCK_8X8, 0);
2096 *psse = bsi->sse;
2097 mbmi->mode = bsi->modes[3];
2098
2099 return bsi->segment_rd;
2100 }
2101
2102 static void estimate_ref_frame_costs(const VP9_COMMON *cm,
2103 const MACROBLOCKD *xd,
2104 int segment_id,
2105 unsigned int *ref_costs_single,
2106 unsigned int *ref_costs_comp,
2107 vpx_prob *comp_mode_p) {
2108 int seg_ref_active = segfeature_active(&cm->seg, segment_id,
2109 SEG_LVL_REF_FRAME);
2110 if (seg_ref_active) {
2111 memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
2112 memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
2113 *comp_mode_p = 128;
2114 } else {
2115 vpx_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd);
2116 vpx_prob comp_inter_p = 128;
2117
2118 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
2119 comp_inter_p = vp9_get_reference_mode_prob(cm, xd);
2120 *comp_mode_p = comp_inter_p;
2121 } else {
2122 *comp_mode_p = 128;
2123 }
2124
2125 ref_costs_single[INTRA_FRAME] = vp9_cost_bit(intra_inter_p, 0);
2126
2127 if (cm->reference_mode != COMPOUND_REFERENCE) {
2128 vpx_prob ref_single_p1 = vp9_get_pred_prob_single_ref_p1(cm, xd);
2129 vpx_prob ref_single_p2 = vp9_get_pred_prob_single_ref_p2(cm, xd);
2130 unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1);
2131
2132 if (cm->reference_mode == REFERENCE_MODE_SELECT)
2133 base_cost += vp9_cost_bit(comp_inter_p, 0);
2134
2135 ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] =
2136 ref_costs_single[ALTREF_FRAME] = base_cost;
2137 ref_costs_single[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0);
2138 ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p1, 1);
2139 ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p1, 1);
2140 ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p2, 0);
2141 ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p2, 1);
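// Note: these mirror the two-node single-reference tree in the bitstream,
// e.g. GOLDEN_FRAME costs base_cost + cost(ref_single_p1, 1) +
// cost(ref_single_p2, 0).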
2142 } else {
2143 ref_costs_single[LAST_FRAME] = 512;
2144 ref_costs_single[GOLDEN_FRAME] = 512;
2145 ref_costs_single[ALTREF_FRAME] = 512;
2146 }
2147 if (cm->reference_mode != SINGLE_REFERENCE) {
2148 vpx_prob ref_comp_p = vp9_get_pred_prob_comp_ref_p(cm, xd);
2149 unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1);
2150
2151 if (cm->reference_mode == REFERENCE_MODE_SELECT)
2152 base_cost += vp9_cost_bit(comp_inter_p, 1);
2153
2154 ref_costs_comp[LAST_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 0);
2155 ref_costs_comp[GOLDEN_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 1);
2156 } else {
2157 ref_costs_comp[LAST_FRAME] = 512;
2158 ref_costs_comp[GOLDEN_FRAME] = 512;
2159 }
2160 }
2161 }
2162
2163 static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
2164 int mode_index,
2165 int64_t comp_pred_diff[REFERENCE_MODES],
2166 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS],
2167 int skippable) {
2168 MACROBLOCKD *const xd = &x->e_mbd;
2169
2170 // Take a snapshot of the coding context so it can be
2171 // restored if we decide to encode this way
2172 ctx->skip = x->skip;
2173 ctx->skippable = skippable;
2174 ctx->best_mode_index = mode_index;
2175 ctx->mic = *xd->mi[0];
2176 ctx->mbmi_ext = *x->mbmi_ext;
2177 ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
2178 ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
2179 ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
2180
2181 memcpy(ctx->best_filter_diff, best_filter_diff,
2182 sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS);
2183 }
2184
2185 static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
2186 MV_REFERENCE_FRAME ref_frame,
2187 BLOCK_SIZE block_size,
2188 int mi_row, int mi_col,
2189 int_mv frame_nearest_mv[MAX_REF_FRAMES],
2190 int_mv frame_near_mv[MAX_REF_FRAMES],
2191 struct buf_2d yv12_mb[4][MAX_MB_PLANE]) {
2192 const VP9_COMMON *cm = &cpi->common;
2193 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
2194 MACROBLOCKD *const xd = &x->e_mbd;
2195 MODE_INFO *const mi = xd->mi[0];
2196 int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
2197 const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
2198 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2199
2200 assert(yv12 != NULL);
2201
2202 // TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
2203 // use the UV scaling factors.
2204 vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
2205
2206 // Gets an initial list of candidate vectors from neighbours and orders them
2207 vp9_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col,
2208 NULL, NULL, mbmi_ext->mode_context);
2209
2210 // Candidate refinement carried out at encoder and decoder
2211 vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
2212 &frame_nearest_mv[ref_frame],
2213 &frame_near_mv[ref_frame]);
2214
2215 // Further refinement that is encode side only to test the top few candidates
2216 // in full and choose the best as the centre point for subsequent searches.
2217 // The current implementation doesn't support scaling.
2218 if (!vp9_is_scaled(sf) && block_size >= BLOCK_8X8)
2219 vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride,
2220 ref_frame, block_size);
2221 }
2222
2223 static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
2224 BLOCK_SIZE bsize,
2225 int mi_row, int mi_col,
2226 int_mv *tmp_mv, int *rate_mv) {
2227 MACROBLOCKD *xd = &x->e_mbd;
2228 const VP9_COMMON *cm = &cpi->common;
2229 MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
2230 struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}};
2231 int bestsme = INT_MAX;
2232 int step_param;
2233 int sadpb = x->sadperbit16;
2234 MV mvp_full;
2235 int ref = mbmi->ref_frame[0];
2236 MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
2237
2238 int tmp_col_min = x->mv_col_min;
2239 int tmp_col_max = x->mv_col_max;
2240 int tmp_row_min = x->mv_row_min;
2241 int tmp_row_max = x->mv_row_max;
2242 int cost_list[5];
2243
2244 const YV12_BUFFER_CONFIG *scaled_ref_frame = vp9_get_scaled_ref_frame(cpi,
2245 ref);
2246
2247 MV pred_mv[3];
2248 pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
2249 pred_mv[1] = x->mbmi_ext->ref_mvs[ref][1].as_mv;
2250 pred_mv[2] = x->pred_mv[ref];
2251
2252 if (scaled_ref_frame) {
2253 int i;
2254 // Swap out the reference frame for a version that's been scaled to
2255 // match the resolution of the current frame, allowing the existing
2256 // motion search code to be used without additional modifications.
2257 for (i = 0; i < MAX_MB_PLANE; i++)
2258 backup_yv12[i] = xd->plane[i].pre[0];
2259
2260 vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
2261 }
2262
2263 vp9_set_mv_search_range(x, &ref_mv);
2264
2265 // Work out the size of the first step in the mv step search.
2266 // 0 here gives the maximum-length first step; 1 gives half of that, etc.
2267 if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
2268 // Take a weighted average of the step_params based on the last frame's
2269 // max mv magnitude and that based on the best ref mvs of the current
2270 // block for the given reference.
2271 step_param = (vp9_init_search_range(x->max_mv_context[ref]) +
2272 cpi->mv_step_param) / 2;
2273 } else {
2274 step_param = cpi->mv_step_param;
2275 }
2276
2277 if (cpi->sf.adaptive_motion_search && bsize < BLOCK_64X64) {
2278 int boffset =
2279 2 * (b_width_log2_lookup[BLOCK_64X64] -
2280 VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
2281 step_param = VPXMAX(step_param, boffset);
2282 }
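// boffset grows as the block shrinks (it is 0 for 64x64), so smaller
// blocks start with a larger first step; e.g. for an 8x8 block,
// boffset = 2 * (4 - 1) = 6.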
2283
2284 if (cpi->sf.adaptive_motion_search) {
2285 int bwl = b_width_log2_lookup[bsize];
2286 int bhl = b_height_log2_lookup[bsize];
2287 int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4);
2288
2289 if (tlevel < 5)
2290 step_param += 2;
2291
2292 // prev_mv_sad is not setup for dynamically scaled frames.
2293 if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) {
2294 int i;
2295 for (i = LAST_FRAME; i <= ALTREF_FRAME && cm->show_frame; ++i) {
2296 if ((x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[i]) {
2297 x->pred_mv[ref].row = 0;
2298 x->pred_mv[ref].col = 0;
2299 tmp_mv->as_int = INVALID_MV;
2300
2301 if (scaled_ref_frame) {
2302 int i;
2303 for (i = 0; i < MAX_MB_PLANE; ++i)
2304 xd->plane[i].pre[0] = backup_yv12[i];
2305 }
2306 return;
2307 }
2308 }
2309 }
2310 }
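// (Returning with tmp_mv->as_int == INVALID_MV above tells the caller to
// drop NEWMV for this reference; see the early return in
// handle_inter_mode().)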
2311
2312 mvp_full = pred_mv[x->mv_best_ref_index[ref]];
2313
2314 mvp_full.col >>= 3;
2315 mvp_full.row >>= 3;
2316
2317 bestsme = vp9_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
2318 cond_cost_list(cpi, cost_list),
2319 &ref_mv, &tmp_mv->as_mv, INT_MAX, 1);
2320
2321 x->mv_col_min = tmp_col_min;
2322 x->mv_col_max = tmp_col_max;
2323 x->mv_row_min = tmp_row_min;
2324 x->mv_row_max = tmp_row_max;
2325
2326 if (bestsme < INT_MAX) {
2327 int dis; /* TODO: use dis in distortion calculation later. */
2328 cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv,
2329 cm->allow_high_precision_mv,
2330 x->errorperbit,
2331 &cpi->fn_ptr[bsize],
2332 cpi->sf.mv.subpel_force_stop,
2333 cpi->sf.mv.subpel_iters_per_step,
2334 cond_cost_list(cpi, cost_list),
2335 x->nmvjointcost, x->mvcost,
2336 &dis, &x->pred_sse[ref], NULL, 0, 0);
2337 }
2338 *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv,
2339 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
2340
2341 if (cpi->sf.adaptive_motion_search)
2342 x->pred_mv[ref] = tmp_mv->as_mv;
2343
2344 if (scaled_ref_frame) {
2345 int i;
2346 for (i = 0; i < MAX_MB_PLANE; i++)
2347 xd->plane[i].pre[0] = backup_yv12[i];
2348 }
2349 }
2350
2351
2352
2353 static INLINE void restore_dst_buf(MACROBLOCKD *xd,
2354 uint8_t *orig_dst[MAX_MB_PLANE],
2355 int orig_dst_stride[MAX_MB_PLANE]) {
2356 int i;
2357 for (i = 0; i < MAX_MB_PLANE; i++) {
2358 xd->plane[i].dst.buf = orig_dst[i];
2359 xd->plane[i].dst.stride = orig_dst_stride[i];
2360 }
2361 }
2362
2363 // In some situations we want to discount the apparent cost of a new motion
2364 // vector. Where there is a subtle motion field and especially where there is
2365 // low spatial complexity then it can be hard to cover the cost of a new motion
2366 // vector in a single block, even if that motion vector reduces distortion.
2367 // However, once established that vector may be usable through the nearest and
2368 // near mv modes to reduce distortion in subsequent blocks and also improve
2369 // visual quality.
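// When this test passes, callers charge only VPXMAX(rate_mv /
// NEW_MV_DISCOUNT_FACTOR, 1) instead of the full rate_mv; with the factor
// of 8, e.g., a new-mv rate of 400 is discounted to 50 so that a useful
// vector can get established.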
2370 static int discount_newmv_test(const VP9_COMP *cpi,
2371 int this_mode,
2372 int_mv this_mv,
2373 int_mv (*mode_mv)[MAX_REF_FRAMES],
2374 int ref_frame) {
2375 return (!cpi->rc.is_src_frame_alt_ref &&
2376 (this_mode == NEWMV) &&
2377 (this_mv.as_int != 0) &&
2378 ((mode_mv[NEARESTMV][ref_frame].as_int == 0) ||
2379 (mode_mv[NEARESTMV][ref_frame].as_int == INVALID_MV)) &&
2380 ((mode_mv[NEARMV][ref_frame].as_int == 0) ||
2381 (mode_mv[NEARMV][ref_frame].as_int == INVALID_MV)));
2382 }
2383
2384 static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
2385 BLOCK_SIZE bsize,
2386 int *rate2, int64_t *distortion,
2387 int *skippable,
2388 int *rate_y, int *rate_uv,
2389 int *disable_skip,
2390 int_mv (*mode_mv)[MAX_REF_FRAMES],
2391 int mi_row, int mi_col,
2392 int_mv single_newmv[MAX_REF_FRAMES],
2393 INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
2394 int (*single_skippable)[MAX_REF_FRAMES],
2395 int64_t *psse,
2396 const int64_t ref_best_rd,
2397 int64_t *mask_filter,
2398 int64_t filter_cache[]) {
2399 VP9_COMMON *cm = &cpi->common;
2400 MACROBLOCKD *xd = &x->e_mbd;
2401 MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
2402 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2403 const int is_comp_pred = has_second_ref(mbmi);
2404 const int this_mode = mbmi->mode;
2405 int_mv *frame_mv = mode_mv[this_mode];
2406 int i;
2407 int refs[2] = { mbmi->ref_frame[0],
2408 (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
2409 int_mv cur_mv[2];
2410 #if CONFIG_VP9_HIGHBITDEPTH
2411 DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]);
2412 uint8_t *tmp_buf;
2413 #else
2414 DECLARE_ALIGNED(16, uint8_t, tmp_buf[MAX_MB_PLANE * 64 * 64]);
2415 #endif // CONFIG_VP9_HIGHBITDEPTH
2416 int pred_exists = 0;
2417 int intpel_mv;
2418 int64_t rd, tmp_rd, best_rd = INT64_MAX;
2419 int best_needs_copy = 0;
2420 uint8_t *orig_dst[MAX_MB_PLANE];
2421 int orig_dst_stride[MAX_MB_PLANE];
2422 int rs = 0;
2423 INTERP_FILTER best_filter = SWITCHABLE;
2424 uint8_t skip_txfm[MAX_MB_PLANE << 2] = {0};
2425 int64_t bsse[MAX_MB_PLANE << 2] = {0};
2426
2427 int bsl = mi_width_log2_lookup[bsize];
2428 int pred_filter_search = cpi->sf.cb_pred_filter_search ?
2429 (((mi_row + mi_col) >> bsl) +
2430 get_chessboard_index(cm->current_video_frame)) & 0x1 : 0;
2431
2432 int skip_txfm_sb = 0;
2433 int64_t skip_sse_sb = INT64_MAX;
2434 int64_t distortion_y = 0, distortion_uv = 0;
2435
2436 #if CONFIG_VP9_HIGHBITDEPTH
2437 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
2438 tmp_buf = CONVERT_TO_BYTEPTR(tmp_buf16);
2439 } else {
2440 tmp_buf = (uint8_t *)tmp_buf16;
2441 }
2442 #endif // CONFIG_VP9_HIGHBITDEPTH
2443
2444 if (pred_filter_search) {
2445 INTERP_FILTER af = SWITCHABLE, lf = SWITCHABLE;
2446 if (xd->up_available)
2447 af = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
2448 if (xd->left_available)
2449 lf = xd->mi[-1]->mbmi.interp_filter;
2450
2451 if ((this_mode != NEWMV) || (af == lf))
2452 best_filter = af;
2453 }
2454
2455 if (is_comp_pred) {
2456 if (frame_mv[refs[0]].as_int == INVALID_MV ||
2457 frame_mv[refs[1]].as_int == INVALID_MV)
2458 return INT64_MAX;
2459
2460 if (cpi->sf.adaptive_mode_search) {
2461 if (single_filter[this_mode][refs[0]] ==
2462 single_filter[this_mode][refs[1]])
2463 best_filter = single_filter[this_mode][refs[0]];
2464 }
2465 }
2466
2467 if (this_mode == NEWMV) {
2468 int rate_mv;
2469 if (is_comp_pred) {
2470 // Initialize mv using single prediction mode result.
2471 frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
2472 frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
2473
2474 if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
2475 joint_motion_search(cpi, x, bsize, frame_mv,
2476 mi_row, mi_col, single_newmv, &rate_mv);
2477 } else {
2478 rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]].as_mv,
2479 &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
2480 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
2481 rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]].as_mv,
2482 &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
2483 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
2484 }
2485 *rate2 += rate_mv;
2486 } else {
2487 int_mv tmp_mv;
2488 single_motion_search(cpi, x, bsize, mi_row, mi_col,
2489 &tmp_mv, &rate_mv);
2490 if (tmp_mv.as_int == INVALID_MV)
2491 return INT64_MAX;
2492
2493 frame_mv[refs[0]].as_int =
2494 xd->mi[0]->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
2495 single_newmv[refs[0]].as_int = tmp_mv.as_int;
2496
2497 // Estimate the rate implications of a new mv but discount this
2498 // under certain circumstances where we want to help initiate a weak
2499 // motion field, where the distortion gain for a single block may not
2500 // be enough to overcome the cost of a new mv.
2501 if (discount_newmv_test(cpi, this_mode, tmp_mv, mode_mv, refs[0])) {
2502 *rate2 += VPXMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
2503 } else {
2504 *rate2 += rate_mv;
2505 }
2506 }
2507 }
2508
2509 for (i = 0; i < is_comp_pred + 1; ++i) {
2510 cur_mv[i] = frame_mv[refs[i]];
2511 // Clip "next_nearest" so that it does not extend too far out of the image.
2512 if (this_mode != NEWMV)
2513 clamp_mv2(&cur_mv[i].as_mv, xd);
2514
2515 if (mv_check_bounds(x, &cur_mv[i].as_mv))
2516 return INT64_MAX;
2517 mbmi->mv[i].as_int = cur_mv[i].as_int;
2518 }
2519
2520 // Do the first prediction into the destination buffer and the next
2521 // prediction into a temporary buffer. Then keep track of which one
2522 // of these currently holds the best predictor, and use the other
2523 // one for future predictions. In the end, copy from tmp_buf to
2524 // dst if necessary.
2525 for (i = 0; i < MAX_MB_PLANE; i++) {
2526 orig_dst[i] = xd->plane[i].dst.buf;
2527 orig_dst_stride[i] = xd->plane[i].dst.stride;
2528 }
2529
2530 // We don't include the cost of the second reference here, because there
2531 // are only two options: Last/ARF or Golden/ARF; The second one is always
2532 // known, which is ARF.
2533 //
2534 // Under some circumstances we discount the cost of new mv mode to encourage
2535 // initiation of a motion field.
2536 if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]],
2537 mode_mv, refs[0])) {
2538 *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode,
2539 mbmi_ext->mode_context[refs[0]]),
2540 cost_mv_ref(cpi, NEARESTMV,
2541 mbmi_ext->mode_context[refs[0]]));
2542 } else {
2543 *rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]);
2544 }
2545
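// RDCOST() folds rate and distortion into a single Lagrangian cost
// (defined in vp9_rd.h, roughly (rate * rdmult) >> 8 plus a shifted
// distortion term); the breakout below fires when the rate term alone
// already exceeds the best rd so far (NEARESTMV is exempted).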
2546 if (RDCOST(x->rdmult, x->rddiv, *rate2, 0) > ref_best_rd &&
2547 mbmi->mode != NEARESTMV)
2548 return INT64_MAX;
2549
2550 pred_exists = 0;
2551 // Are all MVs integer pel for both Y and UV?
2552 intpel_mv = !mv_has_subpel(&mbmi->mv[0].as_mv);
2553 if (is_comp_pred)
2554 intpel_mv &= !mv_has_subpel(&mbmi->mv[1].as_mv);
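// If all mvs are integer pel for both Y and UV, every interpolation
// filter yields the same prediction, so the filter loop below can reuse
// the filter-0 modeled rate/distortion for the remaining filters.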
2555
2556 // Search for best switchable filter by checking the variance of
2557 // pred error irrespective of whether the filter will be used
2558 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
2559 filter_cache[i] = INT64_MAX;
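// filter_cache[i] holds the modeled rd for filter i without the filter
// signalling cost; filter_cache[SWITCHABLE_FILTERS] tracks the best
// rd + signalling cost over all filters, and *mask_filter the worst rd
// seen.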
2560
2561 if (cm->interp_filter != BILINEAR) {
2562 if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) {
2563 best_filter = EIGHTTAP;
2564 } else if (best_filter == SWITCHABLE) {
2565 int newbest;
2566 int tmp_rate_sum = 0;
2567 int64_t tmp_dist_sum = 0;
2568
2569 for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
2570 int j;
2571 int64_t rs_rd;
2572 int tmp_skip_sb = 0;
2573 int64_t tmp_skip_sse = INT64_MAX;
2574
2575 mbmi->interp_filter = i;
2576 rs = vp9_get_switchable_rate(cpi, xd);
2577 rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
2578
2579 if (i > 0 && intpel_mv) {
2580 rd = RDCOST(x->rdmult, x->rddiv, tmp_rate_sum, tmp_dist_sum);
2581 filter_cache[i] = rd;
2582 filter_cache[SWITCHABLE_FILTERS] =
2583 VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
2584 if (cm->interp_filter == SWITCHABLE)
2585 rd += rs_rd;
2586 *mask_filter = VPXMAX(*mask_filter, rd);
2587 } else {
2588 int rate_sum = 0;
2589 int64_t dist_sum = 0;
2590 if (i > 0 && cpi->sf.adaptive_interp_filter_search &&
2591 (cpi->sf.interp_filter_search_mask & (1 << i))) {
2592 rate_sum = INT_MAX;
2593 dist_sum = INT64_MAX;
2594 continue;
2595 }
2596
2597 if ((cm->interp_filter == SWITCHABLE &&
2598 (!i || best_needs_copy)) ||
2599 (cm->interp_filter != SWITCHABLE &&
2600 (cm->interp_filter == mbmi->interp_filter ||
2601 (i == 0 && intpel_mv)))) {
2602 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2603 } else {
2604 for (j = 0; j < MAX_MB_PLANE; j++) {
2605 xd->plane[j].dst.buf = tmp_buf + j * 64 * 64;
2606 xd->plane[j].dst.stride = 64;
2607 }
2608 }
2609 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
2610 model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
2611 &tmp_skip_sb, &tmp_skip_sse);
2612
2613 rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum);
2614 filter_cache[i] = rd;
2615 filter_cache[SWITCHABLE_FILTERS] =
2616 VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
2617 if (cm->interp_filter == SWITCHABLE)
2618 rd += rs_rd;
2619 *mask_filter = VPXMAX(*mask_filter, rd);
2620
2621 if (i == 0 && intpel_mv) {
2622 tmp_rate_sum = rate_sum;
2623 tmp_dist_sum = dist_sum;
2624 }
2625 }
2626
2627 if (i == 0 && cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
2628 if (rd / 2 > ref_best_rd) {
2629 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2630 return INT64_MAX;
2631 }
2632 }
2633 newbest = i == 0 || rd < best_rd;
2634
2635 if (newbest) {
2636 best_rd = rd;
2637 best_filter = mbmi->interp_filter;
2638 if (cm->interp_filter == SWITCHABLE && i && !intpel_mv)
2639 best_needs_copy = !best_needs_copy;
2640 }
2641
2642 if ((cm->interp_filter == SWITCHABLE && newbest) ||
2643 (cm->interp_filter != SWITCHABLE &&
2644 cm->interp_filter == mbmi->interp_filter)) {
2645 pred_exists = 1;
2646 tmp_rd = best_rd;
2647
2648 skip_txfm_sb = tmp_skip_sb;
2649 skip_sse_sb = tmp_skip_sse;
2650 memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
2651 memcpy(bsse, x->bsse, sizeof(bsse));
2652 }
2653 }
2654 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2655 }
2656 }
2657 // Set the appropriate filter
2658 mbmi->interp_filter = cm->interp_filter != SWITCHABLE ?
2659 cm->interp_filter : best_filter;
2660 rs = cm->interp_filter == SWITCHABLE ? vp9_get_switchable_rate(cpi, xd) : 0;
2661
2662 if (pred_exists) {
2663 if (best_needs_copy) {
2664 // again temporarily set the buffers to local memory to prevent a memcpy
2665 for (i = 0; i < MAX_MB_PLANE; i++) {
2666 xd->plane[i].dst.buf = tmp_buf + i * 64 * 64;
2667 xd->plane[i].dst.stride = 64;
2668 }
2669 }
2670 rd = tmp_rd + RDCOST(x->rdmult, x->rddiv, rs, 0);
2671 } else {
2672 int tmp_rate;
2673 int64_t tmp_dist;
2674 // Handles the special case when a filter that is not in the
2675 // switchable list (ex. bilinear) is indicated at the frame level, or
2676 // skip condition holds.
2677 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
2678 model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist,
2679 &skip_txfm_sb, &skip_sse_sb);
2680 rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
2681 memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
2682 memcpy(bsse, x->bsse, sizeof(bsse));
2683 }
2684
2685 if (!is_comp_pred)
2686 single_filter[this_mode][refs[0]] = mbmi->interp_filter;
2687
2688 if (cpi->sf.adaptive_mode_search)
2689 if (is_comp_pred)
2690 if (single_skippable[this_mode][refs[0]] &&
2691 single_skippable[this_mode][refs[1]])
2692 memset(skip_txfm, SKIP_TXFM_AC_DC, sizeof(skip_txfm));
2693
2694 if (cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
2695 // If the current pred_error modeled rd is substantially more than the
2696 // best so far, do not bother doing a full rd.
2697 if (rd / 2 > ref_best_rd) {
2698 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2699 return INT64_MAX;
2700 }
2701 }
2702
2703 if (cm->interp_filter == SWITCHABLE)
2704 *rate2 += rs;
2705
2706 memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
2707 memcpy(x->bsse, bsse, sizeof(bsse));
2708
2709 if (!skip_txfm_sb) {
2710 int skippable_y, skippable_uv;
2711 int64_t sseuv = INT64_MAX;
2712 int64_t rdcosty = INT64_MAX;
2713
2714 // Y cost and distortion
2715 vp9_subtract_plane(x, bsize, 0);
2716 super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse,
2717 bsize, ref_best_rd);
2718
2719 if (*rate_y == INT_MAX) {
2720 *rate2 = INT_MAX;
2721 *distortion = INT64_MAX;
2722 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2723 return INT64_MAX;
2724 }
2725
2726 *rate2 += *rate_y;
2727 *distortion += distortion_y;
2728
2729 rdcosty = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
2730 rdcosty = VPXMIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse));
2731
2732 if (!super_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv,
2733 &sseuv, bsize, ref_best_rd - rdcosty)) {
2734 *rate2 = INT_MAX;
2735 *distortion = INT64_MAX;
2736 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2737 return INT64_MAX;
2738 }
2739
2740 *psse += sseuv;
2741 *rate2 += *rate_uv;
2742 *distortion += distortion_uv;
2743 *skippable = skippable_y && skippable_uv;
2744 } else {
2745 x->skip = 1;
2746 *disable_skip = 1;
2747
2748 // The cost of the skip bit needs to be added.
2749 *rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
2750
2751 *distortion = skip_sse_sb;
2752 }
2753
2754 if (!is_comp_pred)
2755 single_skippable[this_mode][refs[0]] = *skippable;
2756
2757 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2758 return 0; // The rate-distortion cost will be re-calculated by caller.
2759 }
2760
2761 void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
2762 RD_COST *rd_cost, BLOCK_SIZE bsize,
2763 PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
2764 VP9_COMMON *const cm = &cpi->common;
2765 MACROBLOCKD *const xd = &x->e_mbd;
2766 struct macroblockd_plane *const pd = xd->plane;
2767 int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
2768 int y_skip = 0, uv_skip = 0;
2769 int64_t dist_y = 0, dist_uv = 0;
2770 TX_SIZE max_uv_tx_size;
2771 x->skip_encode = 0;
2772 ctx->skip = 0;
2773 xd->mi[0]->mbmi.ref_frame[0] = INTRA_FRAME;
2774 xd->mi[0]->mbmi.ref_frame[1] = NONE;
2775
2776 if (bsize >= BLOCK_8X8) {
2777 if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
2778 &dist_y, &y_skip, bsize,
2779 best_rd) >= best_rd) {
2780 rd_cost->rate = INT_MAX;
2781 return;
2782 }
2783 } else {
2784 y_skip = 0;
2785 if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate_y, &rate_y_tokenonly,
2786 &dist_y, best_rd) >= best_rd) {
2787 rd_cost->rate = INT_MAX;
2788 return;
2789 }
2790 }
2791 max_uv_tx_size = get_uv_tx_size_impl(xd->mi[0]->mbmi.tx_size, bsize,
2792 pd[1].subsampling_x,
2793 pd[1].subsampling_y);
2794 rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly,
2795 &dist_uv, &uv_skip, VPXMAX(BLOCK_8X8, bsize),
2796 max_uv_tx_size);
2797
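// If both planes can be coded as skipped, drop the token-only portion of
// the rates (no residual is coded) and pay for a set skip flag instead;
// otherwise pay for a zero skip flag on top of the full rates.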
2798 if (y_skip && uv_skip) {
2799 rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
2800 vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
2801 rd_cost->dist = dist_y + dist_uv;
2802 } else {
2803 rd_cost->rate = rate_y + rate_uv +
2804 vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
2805 rd_cost->dist = dist_y + dist_uv;
2806 }
2807
2808 ctx->mic = *xd->mi[0];
2809 ctx->mbmi_ext = *x->mbmi_ext;
2810 rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
2811 }
2812
2813 // This function is designed to apply a bias or adjustment to an rd value based
2814 // on the relative variance of the source and reconstruction.
2815 #define LOW_VAR_THRESH 16
2816 #define VLOW_ADJ_MAX 25
2817 #define VHIGH_ADJ_MAX 8
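// The var_error term below is a 0-100 dissimilarity measure:
//   var_error = 100 - (200 * s * r) / (s^2 + r^2)
// It is 0 when source and recon variance match and approaches 100 as they
// diverge; e.g. s = 100, r = 25 gives 100 - 47 = 53. The applied bias is
// further capped by absvar_diff and the VLOW/VHIGH_ADJ_MAX limits.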
2818 static void rd_variance_adjustment(VP9_COMP *cpi,
2819 MACROBLOCK *x,
2820 BLOCK_SIZE bsize,
2821 int64_t *this_rd,
2822 MV_REFERENCE_FRAME ref_frame,
2823 unsigned int source_variance) {
2824 MACROBLOCKD *const xd = &x->e_mbd;
2825 unsigned int recon_variance;
2826 unsigned int absvar_diff = 0;
2827 int64_t var_error = 0;
2828 int64_t var_factor = 0;
2829
2830 if (*this_rd == INT64_MAX)
2831 return;
2832
2833 #if CONFIG_VP9_HIGHBITDEPTH
2834 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
2835 recon_variance =
2836 vp9_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize, xd->bd);
2837 } else {
2838 recon_variance =
2839 vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
2840 }
2841 #else
2842 recon_variance =
2843 vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
2844 #endif // CONFIG_VP9_HIGHBITDEPTH
2845
2846 if ((source_variance + recon_variance) > LOW_VAR_THRESH) {
2847 absvar_diff = (source_variance > recon_variance)
2848 ? (source_variance - recon_variance)
2849 : (recon_variance - source_variance);
2850
2851 var_error = (200 * source_variance * recon_variance) /
2852 ((source_variance * source_variance) +
2853 (recon_variance * recon_variance));
2854 var_error = 100 - var_error;
2855 }
2856
2857 // Source variance above a threshold and ref frame is intra.
2858 // This case is targeted mainly at discouraging intra modes that give rise
2859 // to a predictor with a low spatial complexity compared to the source.
2860 if ((source_variance > LOW_VAR_THRESH) && (ref_frame == INTRA_FRAME) &&
2861 (source_variance > recon_variance)) {
2862 var_factor = VPXMIN(absvar_diff, VPXMIN(VLOW_ADJ_MAX, var_error));
2863 // A second possible case of interest is where the source variance
2864 // is very low and we wish to discourage false texture or motion trails.
2865 } else if ((source_variance < (LOW_VAR_THRESH >> 1)) &&
2866 (recon_variance > source_variance)) {
2867 var_factor = VPXMIN(absvar_diff, VPXMIN(VHIGH_ADJ_MAX, var_error));
2868 }
2869 *this_rd += (*this_rd * var_factor) / 100;
2870 }
2871
2872
2873 // Do we have an internal image edge (e.g. formatting bars)?
2874 int vp9_internal_image_edge(VP9_COMP *cpi) {
2875 return (cpi->oxcf.pass == 2) &&
2876 ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
2877 (cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
2878 }
2879
2880 // Checks to see if a super block is on a horizontal image edge.
2881 // In most cases this is the "real" edge unless there are formatting
2882 // bars embedded in the stream.
2883 int vp9_active_h_edge(VP9_COMP *cpi, int mi_row, int mi_step) {
2884 int top_edge = 0;
2885 int bottom_edge = cpi->common.mi_rows;
2886 int is_active_h_edge = 0;
2887
2888 // For two pass account for any formatting bars detected.
2889 if (cpi->oxcf.pass == 2) {
2890 TWO_PASS *twopass = &cpi->twopass;
2891
2892 // The inactive region is specified in MBs, not mi units.
2893 // The image edge is in the following MB row.
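// (Each 16-pixel MB spans two 8-pixel mi rows, hence the factor of 2;
// e.g. 3 inactive MB rows give top_edge = 6 mi units.)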
2894 top_edge += (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
2895
2896 bottom_edge -= (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
2897 bottom_edge = VPXMAX(top_edge, bottom_edge);
2898 }
2899
2900 if (((top_edge >= mi_row) && (top_edge < (mi_row + mi_step))) ||
2901 ((bottom_edge >= mi_row) && (bottom_edge < (mi_row + mi_step)))) {
2902 is_active_h_edge = 1;
2903 }
2904 return is_active_h_edge;
2905 }
2906
2907 // Checks to see if a super block is on a vertical image edge.
2908 // In most cases this is the "real" edge unless there are formatting
2909 // bars embedded in the stream.
2910 int vp9_active_v_edge(VP9_COMP *cpi, int mi_col, int mi_step) {
2911 int left_edge = 0;
2912 int right_edge = cpi->common.mi_cols;
2913 int is_active_v_edge = 0;
2914
2915 // For two pass account for any formatting bars detected.
2916 if (cpi->oxcf.pass == 2) {
2917 TWO_PASS *twopass = &cpi->twopass;
2918
2919 // The inactive region is specified in MBs, not mi units.
2920 // The image edge is in the following MB column.
2921 left_edge += (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
2922
2923 right_edge -= (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
2924 right_edge = VPXMAX(left_edge, right_edge);
2925 }
2926
2927 if (((left_edge >= mi_col) && (left_edge < (mi_col + mi_step))) ||
2928 ((right_edge >= mi_col) && (right_edge < (mi_col + mi_step)))) {
2929 is_active_v_edge = 1;
2930 }
2931 return is_active_v_edge;
2932 }
2933
2934 // Checks to see if a super block is at the edge of the active image.
2935 // In most cases this is the "real" edge unless there are formatting
2936 // bars embedded in the stream.
2937 int vp9_active_edge_sb(VP9_COMP *cpi,
2938 int mi_row, int mi_col) {
2939 return vp9_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) ||
2940 vp9_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE);
2941 }
2942
2943 void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi,
2944 TileDataEnc *tile_data,
2945 MACROBLOCK *x,
2946 int mi_row, int mi_col,
2947 RD_COST *rd_cost, BLOCK_SIZE bsize,
2948 PICK_MODE_CONTEXT *ctx,
2949 int64_t best_rd_so_far) {
2950 VP9_COMMON *const cm = &cpi->common;
2951 TileInfo *const tile_info = &tile_data->tile_info;
2952 RD_OPT *const rd_opt = &cpi->rd;
2953 SPEED_FEATURES *const sf = &cpi->sf;
2954 MACROBLOCKD *const xd = &x->e_mbd;
2955 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
2956 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2957 const struct segmentation *const seg = &cm->seg;
2958 PREDICTION_MODE this_mode;
2959 MV_REFERENCE_FRAME ref_frame, second_ref_frame;
2960 unsigned char segment_id = mbmi->segment_id;
2961 int comp_pred, i, k;
2962 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
2963 struct buf_2d yv12_mb[4][MAX_MB_PLANE];
2964 int_mv single_newmv[MAX_REF_FRAMES] = { { 0 } };
2965 INTERP_FILTER single_inter_filter[MB_MODE_COUNT][MAX_REF_FRAMES];
2966 int single_skippable[MB_MODE_COUNT][MAX_REF_FRAMES];
2967 static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
2968 VP9_ALT_FLAG };
2969 int64_t best_rd = best_rd_so_far;
2970 int64_t best_pred_diff[REFERENCE_MODES];
2971 int64_t best_pred_rd[REFERENCE_MODES];
2972 int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
2973 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
2974 MB_MODE_INFO best_mbmode;
2975 int best_mode_skippable = 0;
2976 int midx, best_mode_index = -1;
2977 unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
2978 vpx_prob comp_mode_p;
2979 int64_t best_intra_rd = INT64_MAX;
2980 unsigned int best_pred_sse = UINT_MAX;
2981 PREDICTION_MODE best_intra_mode = DC_PRED;
2982 int rate_uv_intra[TX_SIZES], rate_uv_tokenonly[TX_SIZES];
2983 int64_t dist_uv[TX_SIZES];
2984 int skip_uv[TX_SIZES];
2985 PREDICTION_MODE mode_uv[TX_SIZES];
2986 const int intra_cost_penalty = vp9_get_intra_cost_penalty(
2987 cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
2988 int best_skip2 = 0;
2989 uint8_t ref_frame_skip_mask[2] = { 0 };
2990 uint16_t mode_skip_mask[MAX_REF_FRAMES] = { 0 };
2991 int mode_skip_start = sf->mode_skip_start + 1;
2992 const int *const rd_threshes = rd_opt->threshes[segment_id][bsize];
2993 const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
2994 int64_t mode_threshold[MAX_MODES];
2995 int *mode_map = tile_data->mode_map[bsize];
2996 const int mode_search_skip_flags = sf->mode_search_skip_flags;
2997 int64_t mask_filter = 0;
2998 int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
2999
3000 vp9_zero(best_mbmode);
3001
3002 x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
3003
3004 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
3005 filter_cache[i] = INT64_MAX;
3006
3007 estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
3008 &comp_mode_p);
3009
3010 for (i = 0; i < REFERENCE_MODES; ++i)
3011 best_pred_rd[i] = INT64_MAX;
3012 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
3013 best_filter_rd[i] = INT64_MAX;
3014 for (i = 0; i < TX_SIZES; i++)
3015 rate_uv_intra[i] = INT_MAX;
3016 for (i = 0; i < MAX_REF_FRAMES; ++i)
3017 x->pred_sse[i] = INT_MAX;
3018 for (i = 0; i < MB_MODE_COUNT; ++i) {
3019 for (k = 0; k < MAX_REF_FRAMES; ++k) {
3020 single_inter_filter[i][k] = SWITCHABLE;
3021 single_skippable[i][k] = 0;
3022 }
3023 }
3024
3025 rd_cost->rate = INT_MAX;
3026
3027 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
3028 x->pred_mv_sad[ref_frame] = INT_MAX;
3029 if (cpi->ref_frame_flags & flag_list[ref_frame]) {
3030 assert(get_ref_frame_buffer(cpi, ref_frame) != NULL);
3031 setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
3032 frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
3033 }
3034 frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
3035 frame_mv[ZEROMV][ref_frame].as_int = 0;
3036 }
3037
3038 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
3039 if (!(cpi->ref_frame_flags & flag_list[ref_frame])) {
3040 // Skip checking missing references in both single and compound reference
3041 // modes. Note that a mode will be skipped iff both reference frames
3042 // are masked out.
3043 ref_frame_skip_mask[0] |= (1 << ref_frame);
3044 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3045 } else if (sf->reference_masking) {
3046 for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
3047 // Skip fixed mv modes for poor references
3048 if ((x->pred_mv_sad[ref_frame] >> 2) > x->pred_mv_sad[i]) {
3049 mode_skip_mask[ref_frame] |= INTER_NEAREST_NEAR_ZERO;
3050 break;
3051 }
3052 }
3053 }
3054 // If the segment reference frame feature is enabled,
3055 // then do nothing if the current ref frame is not allowed.
3056 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
3057 get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
3058 ref_frame_skip_mask[0] |= (1 << ref_frame);
3059 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3060 }
3061 }
3062
3063 // Disable this drop out case if the ref frame
3064 // segment level feature is enabled for this segment. This is to
3065 // prevent the possibility that we end up unable to pick any mode.
3066 if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
3067 // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
3068 // unless ARNR filtering is enabled in which case we want
3069 // an unfiltered alternative. We allow near/nearest as well
3070 // because they may result in zero-zero MVs but be cheaper.
3071 if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
3072 ref_frame_skip_mask[0] = (1 << LAST_FRAME) | (1 << GOLDEN_FRAME);
3073 ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
3074 mode_skip_mask[ALTREF_FRAME] = ~INTER_NEAREST_NEAR_ZERO;
3075 if (frame_mv[NEARMV][ALTREF_FRAME].as_int != 0)
3076 mode_skip_mask[ALTREF_FRAME] |= (1 << NEARMV);
3077 if (frame_mv[NEARESTMV][ALTREF_FRAME].as_int != 0)
3078 mode_skip_mask[ALTREF_FRAME] |= (1 << NEARESTMV);
3079 }
3080 }
3081
3082 if (cpi->rc.is_src_frame_alt_ref) {
3083 if (sf->alt_ref_search_fp) {
3084 mode_skip_mask[ALTREF_FRAME] = 0;
3085 ref_frame_skip_mask[0] = ~(1 << ALTREF_FRAME);
3086 ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
3087 }
3088 }
3089
  if (sf->alt_ref_search_fp)
    if (!cm->show_frame && x->pred_mv_sad[GOLDEN_FRAME] < INT_MAX)
      if (x->pred_mv_sad[ALTREF_FRAME] > (x->pred_mv_sad[GOLDEN_FRAME] << 1))
        mode_skip_mask[ALTREF_FRAME] |= INTER_ALL;

  if (sf->adaptive_mode_search) {
    if (cm->show_frame && !cpi->rc.is_src_frame_alt_ref &&
        cpi->rc.frames_since_golden >= 3)
      if (x->pred_mv_sad[GOLDEN_FRAME] > (x->pred_mv_sad[LAST_FRAME] << 1))
        mode_skip_mask[GOLDEN_FRAME] |= INTER_ALL;
  }

  if (bsize > sf->max_intra_bsize) {
    ref_frame_skip_mask[0] |= (1 << INTRA_FRAME);
    ref_frame_skip_mask[1] |= (1 << INTRA_FRAME);
  }

  mode_skip_mask[INTRA_FRAME] |=
      ~(sf->intra_y_mode_mask[max_txsize_lookup[bsize]]);

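  // Seed the per-mode RD pruning thresholds. rd_thresh_freq_fact is a
  // frequency-based factor in units of 1/32 (hence the >> 5), so modes that
  // rarely win locally get proportionally larger thresholds. The first few
  // modes (up to LAST_NEW_MV_INDEX) are never pruned this way.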
  for (i = 0; i <= LAST_NEW_MV_INDEX; ++i)
    mode_threshold[i] = 0;
  for (i = LAST_NEW_MV_INDEX + 1; i < MAX_MODES; ++i)
    mode_threshold[i] = ((int64_t)rd_threshes[i] * rd_thresh_freq_fact[i]) >> 5;

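  // When mode-search scheduling is enabled, bubble-sort mode_map entries from
  // index 4 up to mode_skip_start - 1 into ascending threshold order so that
  // likelier (cheaper) modes are visited first; each pass stops at the last
  // position that swapped.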
  midx = sf->schedule_mode_search ? mode_skip_start : 0;
  while (midx > 4) {
    uint8_t end_pos = 0;
    for (i = 5; i < midx; ++i) {
      if (mode_threshold[mode_map[i - 1]] > mode_threshold[mode_map[i]]) {
        uint8_t tmp = mode_map[i];
        mode_map[i] = mode_map[i - 1];
        mode_map[i - 1] = tmp;
        end_pos = i;
      }
    }
    midx = end_pos;
  }

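  // Main mode-decision loop: evaluate each candidate in the (possibly
  // re-ordered) mode_map and keep the candidate with the lowest RD cost.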
  for (midx = 0; midx < MAX_MODES; ++midx) {
    int mode_index = mode_map[midx];
    int mode_excluded = 0;
    int64_t this_rd = INT64_MAX;
    int disable_skip = 0;
    int compmode_cost = 0;
    int rate2 = 0, rate_y = 0, rate_uv = 0;
    int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
    int skippable = 0;
    int this_skip2 = 0;
    int64_t total_sse = INT64_MAX;
    int early_term = 0;

    this_mode = vp9_mode_order[mode_index].mode;
    ref_frame = vp9_mode_order[mode_index].ref_frame[0];
    second_ref_frame = vp9_mode_order[mode_index].ref_frame[1];

    // Look at the reference frame of the best mode so far and set the
    // skip mask to look at a subset of the remaining modes.
    if (midx == mode_skip_start && best_mode_index >= 0) {
      switch (best_mbmode.ref_frame[0]) {
        case INTRA_FRAME:
          break;
        case LAST_FRAME:
          ref_frame_skip_mask[0] |= LAST_FRAME_MODE_MASK;
          ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
          break;
        case GOLDEN_FRAME:
          ref_frame_skip_mask[0] |= GOLDEN_FRAME_MODE_MASK;
          ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
          break;
        case ALTREF_FRAME:
          ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK;
          break;
        case NONE:
        case MAX_REF_FRAMES:
          assert(0 && "Invalid Reference frame");
          break;
      }
    }

    if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
        (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
      continue;

    if (mode_skip_mask[ref_frame] & (1 << this_mode))
      continue;

    // Test best rd so far against threshold for trying this mode.
    if (best_mode_skippable && sf->schedule_mode_search)
      mode_threshold[mode_index] <<= 1;

    if (best_rd < mode_threshold[mode_index])
      continue;

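    // Motion-field mode search: scan the row above and the column to the
    // left. If those neighbors share one reference (and, on blocks where the
    // chessboard pattern disables the full search, identical motion), prune
    // the references and NEARMV/ZEROMV modes the neighborhood makes redundant.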
    if (sf->motion_field_mode_search) {
      const int mi_width = VPXMIN(num_8x8_blocks_wide_lookup[bsize],
                                  tile_info->mi_col_end - mi_col);
      const int mi_height = VPXMIN(num_8x8_blocks_high_lookup[bsize],
                                   tile_info->mi_row_end - mi_row);
      const int bsl = mi_width_log2_lookup[bsize];
      int cb_partition_search_ctrl = (((mi_row + mi_col) >> bsl)
          + get_chessboard_index(cm->current_video_frame)) & 0x1;
      MB_MODE_INFO *ref_mbmi;
      int const_motion = 1;
      int skip_ref_frame = !cb_partition_search_ctrl;
      MV_REFERENCE_FRAME rf = NONE;
      int_mv ref_mv;
      ref_mv.as_int = INVALID_MV;

      if ((mi_row - 1) >= tile_info->mi_row_start) {
        ref_mv = xd->mi[-xd->mi_stride]->mbmi.mv[0];
        rf = xd->mi[-xd->mi_stride]->mbmi.ref_frame[0];
        for (i = 0; i < mi_width; ++i) {
          ref_mbmi = &xd->mi[-xd->mi_stride + i]->mbmi;
          const_motion &= (ref_mv.as_int == ref_mbmi->mv[0].as_int) &&
              (ref_frame == ref_mbmi->ref_frame[0]);
          skip_ref_frame &= (rf == ref_mbmi->ref_frame[0]);
        }
      }

      if ((mi_col - 1) >= tile_info->mi_col_start) {
        if (ref_mv.as_int == INVALID_MV)
          ref_mv = xd->mi[-1]->mbmi.mv[0];
        if (rf == NONE)
          rf = xd->mi[-1]->mbmi.ref_frame[0];
        for (i = 0; i < mi_height; ++i) {
          ref_mbmi = &xd->mi[i * xd->mi_stride - 1]->mbmi;
          const_motion &= (ref_mv.as_int == ref_mbmi->mv[0].as_int) &&
              (ref_frame == ref_mbmi->ref_frame[0]);
          skip_ref_frame &= (rf == ref_mbmi->ref_frame[0]);
        }
      }

      if (skip_ref_frame && this_mode != NEARESTMV && this_mode != NEWMV)
        if (rf > INTRA_FRAME)
          if (ref_frame != rf)
            continue;

      if (const_motion)
        if (this_mode == NEARMV || this_mode == ZEROMV)
          continue;
    }

    comp_pred = second_ref_frame > INTRA_FRAME;
    if (comp_pred) {
      if (!cpi->allow_comp_inter_inter)
        continue;

      // Skip compound inter modes if ARF is not available.
      if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
        continue;

      // Do not allow compound prediction if the segment level reference frame
      // feature is in use as in this case there can only be one reference.
      if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
        continue;

      if ((mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
          best_mode_index >= 0 && best_mbmode.ref_frame[0] == INTRA_FRAME)
        continue;

      mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
    } else {
      if (ref_frame != INTRA_FRAME)
        mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;
    }

    if (ref_frame == INTRA_FRAME) {
      if (sf->adaptive_mode_search)
        if ((x->source_variance << num_pels_log2_lookup[bsize]) > best_pred_sse)
          continue;

      if (this_mode != DC_PRED) {
        // Disable intra modes other than DC_PRED for blocks with low variance
        // Threshold for intra skipping based on source variance
        // TODO(debargha): Specialize the threshold for super block sizes
        const unsigned int skip_intra_var_thresh = 64;
        if ((mode_search_skip_flags & FLAG_SKIP_INTRA_LOWVAR) &&
            x->source_variance < skip_intra_var_thresh)
          continue;
        // Only search the oblique modes if the best so far is
        // one of the neighboring directional modes
        if ((mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) &&
            (this_mode >= D45_PRED && this_mode <= TM_PRED)) {
          if (best_mode_index >= 0 &&
              best_mbmode.ref_frame[0] > INTRA_FRAME)
            continue;
        }
        if (mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
          if (conditional_skipintra(this_mode, best_intra_mode))
            continue;
        }
      }
    } else {
      const MV_REFERENCE_FRAME ref_frames[2] = {ref_frame, second_ref_frame};
      if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
                              this_mode, ref_frames))
        continue;
    }

    mbmi->mode = this_mode;
    mbmi->uv_mode = DC_PRED;
    mbmi->ref_frame[0] = ref_frame;
    mbmi->ref_frame[1] = second_ref_frame;
    // Evaluate all sub-pel filters irrespective of whether we can use
    // them for this frame.
    mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
                                                          : cm->interp_filter;
    mbmi->mv[0].as_int = mbmi->mv[1].as_int = 0;

    x->skip = 0;
    set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);

    // Select prediction reference frames.
    for (i = 0; i < MAX_MB_PLANE; i++) {
      xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
      if (comp_pred)
        xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
    }

    if (ref_frame == INTRA_FRAME) {
      TX_SIZE uv_tx;
      struct macroblockd_plane *const pd = &xd->plane[1];
      memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
      super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable,
                      NULL, bsize, best_rd);
      if (rate_y == INT_MAX)
        continue;

      uv_tx = get_uv_tx_size_impl(mbmi->tx_size, bsize, pd->subsampling_x,
                                  pd->subsampling_y);
      if (rate_uv_intra[uv_tx] == INT_MAX) {
        choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx,
                             &rate_uv_intra[uv_tx], &rate_uv_tokenonly[uv_tx],
                             &dist_uv[uv_tx], &skip_uv[uv_tx], &mode_uv[uv_tx]);
      }

      rate_uv = rate_uv_tokenonly[uv_tx];
      distortion_uv = dist_uv[uv_tx];
      skippable = skippable && skip_uv[uv_tx];
      mbmi->uv_mode = mode_uv[uv_tx];

      rate2 = rate_y + cpi->mbmode_cost[mbmi->mode] + rate_uv_intra[uv_tx];
      if (this_mode != DC_PRED && this_mode != TM_PRED)
        rate2 += intra_cost_penalty;
      distortion2 = distortion_y + distortion_uv;
    } else {
      this_rd = handle_inter_mode(cpi, x, bsize,
                                  &rate2, &distortion2, &skippable,
                                  &rate_y, &rate_uv,
                                  &disable_skip, frame_mv,
                                  mi_row, mi_col,
                                  single_newmv, single_inter_filter,
                                  single_skippable, &total_sse, best_rd,
                                  &mask_filter, filter_cache);
      if (this_rd == INT64_MAX)
        continue;

      compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred);

      if (cm->reference_mode == REFERENCE_MODE_SELECT)
        rate2 += compmode_cost;
    }

    // Estimate the reference frame signaling cost and add it
    // to the rolling cost variable.
    if (comp_pred) {
      rate2 += ref_costs_comp[ref_frame];
    } else {
      rate2 += ref_costs_single[ref_frame];
    }

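    // RDCOST() forms the Lagrangian cost of a mode; in this encoder it is
    // roughly (rate * x->rdmult) >> 8 plus distortion << x->rddiv (see
    // vp9_rd.h). The comparisons below use it to decide whether coding the
    // residual beats signaling skip and accepting the prediction SSE.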
    if (!disable_skip) {
      if (skippable) {
        // Back out the coefficient coding costs
        rate2 -= (rate_y + rate_uv);

        // Cost the skip mb case
        rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
      } else if (ref_frame != INTRA_FRAME && !xd->lossless) {
        if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
            RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
          // Add in the cost of the no skip flag.
          rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
        } else {
          // FIXME(rbultje) make this work for splitmv also
          rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
          distortion2 = total_sse;
          assert(total_sse >= 0);
          rate2 -= (rate_y + rate_uv);
          this_skip2 = 1;
        }
      } else {
        // Add in the cost of the no skip flag.
        rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
      }

      // Calculate the final RD estimate for this mode.
      this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
    }

    // Apply an adjustment to the rd value based on the similarity of the
    // source variance and reconstructed variance.
    rd_variance_adjustment(cpi, x, bsize, &this_rd,
                           ref_frame, x->source_variance);

    if (ref_frame == INTRA_FRAME) {
      // Keep record of best intra rd
      if (this_rd < best_intra_rd) {
        best_intra_rd = this_rd;
        best_intra_mode = mbmi->mode;
      }
    }

    if (!disable_skip && ref_frame == INTRA_FRAME) {
      for (i = 0; i < REFERENCE_MODES; ++i)
        best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
        best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd);
    }

    // Did this mode help, i.e. is it the new best mode so far?
    if (this_rd < best_rd || x->skip) {
      int max_plane = MAX_MB_PLANE;
      if (!mode_excluded) {
        // Note index of best mode so far
        best_mode_index = mode_index;

        if (ref_frame == INTRA_FRAME) {
          /* required for left and above block mv */
          mbmi->mv[0].as_int = 0;
          max_plane = 1;
        } else {
          best_pred_sse = x->pred_sse[ref_frame];
        }

        rd_cost->rate = rate2;
        rd_cost->dist = distortion2;
        rd_cost->rdcost = this_rd;
        best_rd = this_rd;
        best_mbmode = *mbmi;
        best_skip2 = this_skip2;
        best_mode_skippable = skippable;

        if (!x->select_tx_size)
          swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
        memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mbmi->tx_size],
               sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);

        // TODO(debargha): enhance this test with a better distortion prediction
        // based on qp, activity mask and history
        if ((mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
            (mode_index > MIN_EARLY_TERM_INDEX)) {
          int qstep = xd->plane[0].dequant[1];
          // TODO(debargha): Enhance this by specializing for each mode_index
          int scale = 4;
#if CONFIG_VP9_HIGHBITDEPTH
          if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
            qstep >>= (xd->bd - 8);
          }
#endif  // CONFIG_VP9_HIGHBITDEPTH
          if (x->source_variance < UINT_MAX) {
            const int var_adjust = (x->source_variance < 16);
            scale -= var_adjust;
          }
          if (ref_frame > INTRA_FRAME &&
              distortion2 * scale < qstep * qstep) {
            early_term = 1;
          }
        }
      }
    }

    /* keep record of best compound/single-only prediction */
    if (!disable_skip && ref_frame != INTRA_FRAME) {
      int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;

      if (cm->reference_mode == REFERENCE_MODE_SELECT) {
        single_rate = rate2 - compmode_cost;
        hybrid_rate = rate2;
      } else {
        single_rate = rate2;
        hybrid_rate = rate2 + compmode_cost;
      }

      single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
      hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);

      if (!comp_pred) {
        if (single_rd < best_pred_rd[SINGLE_REFERENCE])
          best_pred_rd[SINGLE_REFERENCE] = single_rd;
      } else {
        if (single_rd < best_pred_rd[COMPOUND_REFERENCE])
          best_pred_rd[COMPOUND_REFERENCE] = single_rd;
      }
      if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
        best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;

      /* keep record of best filter type */
      if (!mode_excluded && cm->interp_filter != BILINEAR) {
        int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ?
                                   SWITCHABLE_FILTERS : cm->interp_filter];

        for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
          int64_t adj_rd;
          if (ref == INT64_MAX)
            adj_rd = 0;
          else if (filter_cache[i] == INT64_MAX)
            // When early termination is triggered, the encoder does not have
            // access to the rate-distortion cost. It only knows that the cost
            // should be above the maximum valid value, hence it takes the
            // known maximum plus an arbitrary constant as the cost.
            adj_rd = mask_filter - ref + 10;
          else
            adj_rd = filter_cache[i] - ref;

          adj_rd += this_rd;
          best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
        }
      }
    }

    if (early_term)
      break;

    if (x->skip && !comp_pred)
      break;
  }

  // The inter modes' rate costs are not calculated precisely in some cases.
  // Therefore, sometimes, NEWMV is chosen instead of NEARESTMV, NEARMV, and
  // ZEROMV. Here, checks are added for those cases, and the mode decisions
  // are corrected.
  if (best_mbmode.mode == NEWMV) {
    const MV_REFERENCE_FRAME refs[2] = {best_mbmode.ref_frame[0],
                                        best_mbmode.ref_frame[1]};
    int comp_pred_mode = refs[1] > INTRA_FRAME;

    if (frame_mv[NEARESTMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
        ((comp_pred_mode && frame_mv[NEARESTMV][refs[1]].as_int ==
            best_mbmode.mv[1].as_int) || !comp_pred_mode))
      best_mbmode.mode = NEARESTMV;
    else if (frame_mv[NEARMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
        ((comp_pred_mode && frame_mv[NEARMV][refs[1]].as_int ==
            best_mbmode.mv[1].as_int) || !comp_pred_mode))
      best_mbmode.mode = NEARMV;
    else if (best_mbmode.mv[0].as_int == 0 &&
        ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) || !comp_pred_mode))
      best_mbmode.mode = ZEROMV;
  }

  if (best_mode_index < 0 || best_rd >= best_rd_so_far) {
    rd_cost->rate = INT_MAX;
    rd_cost->rdcost = INT64_MAX;
    return;
  }

  // If we used an estimate for the uv intra rd in the loop above...
  if (sf->use_uv_intra_rd_estimate) {
    // Do Intra UV best rd mode selection if best mode choice above was intra.
    if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
      TX_SIZE uv_tx_size;
      *mbmi = best_mbmode;
      uv_tx_size = get_uv_tx_size(mbmi, &xd->plane[1]);
      rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra[uv_tx_size],
                              &rate_uv_tokenonly[uv_tx_size],
                              &dist_uv[uv_tx_size],
                              &skip_uv[uv_tx_size],
                              bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
                              uv_tx_size);
    }
  }

  assert((cm->interp_filter == SWITCHABLE) ||
         (cm->interp_filter == best_mbmode.interp_filter) ||
         !is_inter_block(&best_mbmode));

  if (!cpi->rc.is_src_frame_alt_ref)
    vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
                              sf->adaptive_rd_thresh, bsize, best_mode_index);

  // macroblock modes
  *mbmi = best_mbmode;
  x->skip |= best_skip2;

  for (i = 0; i < REFERENCE_MODES; ++i) {
    if (best_pred_rd[i] == INT64_MAX)
      best_pred_diff[i] = INT_MIN;
    else
      best_pred_diff[i] = best_rd - best_pred_rd[i];
  }

  if (!x->skip) {
    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
      if (best_filter_rd[i] == INT64_MAX)
        best_filter_diff[i] = 0;
      else
        best_filter_diff[i] = best_rd - best_filter_rd[i];
    }
    if (cm->interp_filter == SWITCHABLE)
      assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
  } else {
    vp9_zero(best_filter_diff);
  }

  // TODO(yunqingwang): Moving this line in front of the above best_filter_diff
  // updating code causes PSNR loss. Need to figure out the conflict.
  x->skip |= best_mode_skippable;

  if (!x->skip && !x->select_tx_size) {
    int has_high_freq_coeff = 0;
    int plane;
    int max_plane = is_inter_block(&xd->mi[0]->mbmi)
                        ? MAX_MB_PLANE : 1;
    for (plane = 0; plane < max_plane; ++plane) {
      x->plane[plane].eobs = ctx->eobs_pbuf[plane][1];
      has_high_freq_coeff |= vp9_has_high_freq_in_plane(x, bsize, plane);
    }

    for (plane = max_plane; plane < MAX_MB_PLANE; ++plane) {
      x->plane[plane].eobs = ctx->eobs_pbuf[plane][2];
      has_high_freq_coeff |= vp9_has_high_freq_in_plane(x, bsize, plane);
    }

    best_mode_skippable |= !has_high_freq_coeff;
  }

  assert(best_mode_index >= 0);

  store_coding_context(x, ctx, best_mode_index, best_pred_diff,
                       best_filter_diff, best_mode_skippable);
}

void vp9_rd_pick_inter_mode_sb_seg_skip(VP9_COMP *cpi,
                                        TileDataEnc *tile_data,
                                        MACROBLOCK *x,
                                        RD_COST *rd_cost,
                                        BLOCK_SIZE bsize,
                                        PICK_MODE_CONTEXT *ctx,
                                        int64_t best_rd_so_far) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  unsigned char segment_id = mbmi->segment_id;
  const int comp_pred = 0;
  int i;
  int64_t best_pred_diff[REFERENCE_MODES];
  int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
  unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
  vpx_prob comp_mode_p;
  INTERP_FILTER best_filter = SWITCHABLE;
  int64_t this_rd = INT64_MAX;
  int rate2 = 0;
  const int64_t distortion2 = 0;

  x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;

  estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
                           &comp_mode_p);

  for (i = 0; i < MAX_REF_FRAMES; ++i)
    x->pred_sse[i] = INT_MAX;
  for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
    x->pred_mv_sad[i] = INT_MAX;

  rd_cost->rate = INT_MAX;

  assert(segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP));

  mbmi->mode = ZEROMV;
  mbmi->uv_mode = DC_PRED;
  mbmi->ref_frame[0] = LAST_FRAME;
  mbmi->ref_frame[1] = NONE;
  mbmi->mv[0].as_int = 0;
  x->skip = 1;

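  // With SEG_LVL_SKIP active there is no residual to code, so only the
  // signaling cost of the filter, reference mode and reference frame matters.
  // Pick the switchable filter with the lowest signaling rate.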
  if (cm->interp_filter != BILINEAR) {
    best_filter = EIGHTTAP;
    if (cm->interp_filter == SWITCHABLE &&
        x->source_variance >= cpi->sf.disable_filter_search_var_thresh) {
      int rs;
      int best_rs = INT_MAX;
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        mbmi->interp_filter = i;
        rs = vp9_get_switchable_rate(cpi, xd);
        if (rs < best_rs) {
          best_rs = rs;
          best_filter = mbmi->interp_filter;
        }
      }
    }
  }
  // Set the appropriate filter
  if (cm->interp_filter == SWITCHABLE) {
    mbmi->interp_filter = best_filter;
    rate2 += vp9_get_switchable_rate(cpi, xd);
  } else {
    mbmi->interp_filter = cm->interp_filter;
  }

  if (cm->reference_mode == REFERENCE_MODE_SELECT)
    rate2 += vp9_cost_bit(comp_mode_p, comp_pred);

  // Estimate the reference frame signaling cost and add it
  // to the rolling cost variable.
  rate2 += ref_costs_single[LAST_FRAME];
  this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);

  rd_cost->rate = rate2;
  rd_cost->dist = distortion2;
  rd_cost->rdcost = this_rd;

  if (this_rd >= best_rd_so_far) {
    rd_cost->rate = INT_MAX;
    rd_cost->rdcost = INT64_MAX;
    return;
  }

  assert((cm->interp_filter == SWITCHABLE) ||
         (cm->interp_filter == mbmi->interp_filter));

  vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
                            cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);

  vp9_zero(best_pred_diff);
  vp9_zero(best_filter_diff);

  if (!x->select_tx_size)
    swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
  store_coding_context(x, ctx, THR_ZEROMV,
                       best_pred_diff, best_filter_diff, 0);
}

void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi,
                                   TileDataEnc *tile_data,
                                   MACROBLOCK *x,
                                   int mi_row, int mi_col,
                                   RD_COST *rd_cost,
                                   BLOCK_SIZE bsize,
                                   PICK_MODE_CONTEXT *ctx,
                                   int64_t best_rd_so_far) {
  VP9_COMMON *const cm = &cpi->common;
  RD_OPT *const rd_opt = &cpi->rd;
  SPEED_FEATURES *const sf = &cpi->sf;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const struct segmentation *const seg = &cm->seg;
  MV_REFERENCE_FRAME ref_frame, second_ref_frame;
  unsigned char segment_id = mbmi->segment_id;
  int comp_pred, i;
  int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
  struct buf_2d yv12_mb[4][MAX_MB_PLANE];
  static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
                                    VP9_ALT_FLAG };
  int64_t best_rd = best_rd_so_far;
  int64_t best_yrd = best_rd_so_far;  // FIXME(rbultje) more precise
  int64_t best_pred_diff[REFERENCE_MODES];
  int64_t best_pred_rd[REFERENCE_MODES];
  int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
  int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
  MB_MODE_INFO best_mbmode;
  int ref_index, best_ref_index = 0;
  unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
  vpx_prob comp_mode_p;
  INTERP_FILTER tmp_best_filter = SWITCHABLE;
  int rate_uv_intra, rate_uv_tokenonly;
  int64_t dist_uv;
  int skip_uv;
  PREDICTION_MODE mode_uv = DC_PRED;
  const int intra_cost_penalty = vp9_get_intra_cost_penalty(
      cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
  int_mv seg_mvs[4][MAX_REF_FRAMES];
  b_mode_info best_bmodes[4];
  int best_skip2 = 0;
  int ref_frame_skip_mask[2] = { 0 };
  int64_t mask_filter = 0;
  int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
  int internal_active_edge =
      vp9_active_edge_sb(cpi, mi_row, mi_col) && vp9_internal_image_edge(cpi);

  x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
  memset(x->zcoeff_blk[TX_4X4], 0, 4);
  vp9_zero(best_mbmode);

  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
    filter_cache[i] = INT64_MAX;

  for (i = 0; i < 4; i++) {
    int j;
    for (j = 0; j < MAX_REF_FRAMES; j++)
      seg_mvs[i][j].as_int = INVALID_MV;
  }

  estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
                           &comp_mode_p);

  for (i = 0; i < REFERENCE_MODES; ++i)
    best_pred_rd[i] = INT64_MAX;
  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
    best_filter_rd[i] = INT64_MAX;
  rate_uv_intra = INT_MAX;

  rd_cost->rate = INT_MAX;

  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
    if (cpi->ref_frame_flags & flag_list[ref_frame]) {
      setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
                         frame_mv[NEARESTMV], frame_mv[NEARMV],
                         yv12_mb);
    } else {
      ref_frame_skip_mask[0] |= (1 << ref_frame);
      ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
    }
    frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
    frame_mv[ZEROMV][ref_frame].as_int = 0;
  }

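  // Iterate over the reference-frame combinations in vp9_ref_order. For
  // sub-8x8 blocks the prediction mode is chosen per sub-block inside
  // rd_pick_best_sub8x8_mode() below, so this loop only decides the
  // references and the interpolation filter.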
  for (ref_index = 0; ref_index < MAX_REFS; ++ref_index) {
    int mode_excluded = 0;
    int64_t this_rd = INT64_MAX;
    int disable_skip = 0;
    int compmode_cost = 0;
    int rate2 = 0, rate_y = 0, rate_uv = 0;
    int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
    int skippable = 0;
    int i;
    int this_skip2 = 0;
    int64_t total_sse = INT_MAX;
    int early_term = 0;
    struct buf_2d backup_yv12[2][MAX_MB_PLANE];

    ref_frame = vp9_ref_order[ref_index].ref_frame[0];
    second_ref_frame = vp9_ref_order[ref_index].ref_frame[1];

    // Look at the reference frame of the best mode so far and set the
    // skip mask to look at a subset of the remaining modes.
    if (ref_index > 2 && sf->mode_skip_start < MAX_MODES) {
      if (ref_index == 3) {
        switch (best_mbmode.ref_frame[0]) {
          case INTRA_FRAME:
            break;
          case LAST_FRAME:
            ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME);
            ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
            break;
          case GOLDEN_FRAME:
            ref_frame_skip_mask[0] |= (1 << LAST_FRAME) | (1 << ALTREF_FRAME);
            ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
            break;
          case ALTREF_FRAME:
            ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << LAST_FRAME);
            break;
          case NONE:
          case MAX_REF_FRAMES:
            assert(0 && "Invalid Reference frame");
            break;
        }
      }
    }

    if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
        (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
      continue;

    // Test best rd so far against threshold for trying this mode.
    if (!internal_active_edge &&
        rd_less_than_thresh(best_rd,
                            rd_opt->threshes[segment_id][bsize][ref_index],
                            tile_data->thresh_freq_fact[bsize][ref_index]))
      continue;

    comp_pred = second_ref_frame > INTRA_FRAME;
    if (comp_pred) {
      if (!cpi->allow_comp_inter_inter)
        continue;
      if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
        continue;
      // Do not allow compound prediction if the segment level reference frame
      // feature is in use as in this case there can only be one reference.
      if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
        continue;

      if ((sf->mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
          best_mbmode.ref_frame[0] == INTRA_FRAME)
        continue;
    }

    if (comp_pred)
      mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
    else if (ref_frame != INTRA_FRAME)
      mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;

    // If the segment level reference frame feature is enabled, skip the
    // current reference frame if it is not allowed for this segment.
    if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
        get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
      continue;
    // Disable this drop out case if the ref frame
    // segment level feature is enabled for this segment. This is to
    // prevent the possibility that we end up unable to pick any mode.
    } else if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
      // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
      // unless ARNR filtering is enabled in which case we want
      // an unfiltered alternative. We allow near/nearest as well
      // because they may result in zero-zero MVs but be cheaper.
      if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
        continue;
    }

    mbmi->tx_size = TX_4X4;
    mbmi->uv_mode = DC_PRED;
    mbmi->ref_frame[0] = ref_frame;
    mbmi->ref_frame[1] = second_ref_frame;
    // Evaluate all sub-pel filters irrespective of whether we can use
    // them for this frame.
    mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
                                                          : cm->interp_filter;
    x->skip = 0;
    set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);

    // Select prediction reference frames.
    for (i = 0; i < MAX_MB_PLANE; i++) {
      xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
      if (comp_pred)
        xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
    }

    if (ref_frame == INTRA_FRAME) {
      int rate;
      if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y,
                                       &distortion_y, best_rd) >= best_rd)
        continue;
      rate2 += rate;
      rate2 += intra_cost_penalty;
      distortion2 += distortion_y;

      if (rate_uv_intra == INT_MAX) {
        choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4,
                             &rate_uv_intra,
                             &rate_uv_tokenonly,
                             &dist_uv, &skip_uv,
                             &mode_uv);
      }
      rate2 += rate_uv_intra;
      rate_uv = rate_uv_tokenonly;
      distortion2 += dist_uv;
      distortion_uv = dist_uv;
      mbmi->uv_mode = mode_uv;
    } else {
      int rate;
      int64_t distortion;
      int64_t this_rd_thresh;
      int64_t tmp_rd, tmp_best_rd = INT64_MAX, tmp_best_rdu = INT64_MAX;
      int tmp_best_rate = INT_MAX, tmp_best_ratey = INT_MAX;
      int64_t tmp_best_distortion = INT_MAX, tmp_best_sse, uv_sse;
      int tmp_best_skippable = 0;
      int switchable_filter_index;
      int_mv *second_ref = comp_pred ?
          &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL;
      b_mode_info tmp_best_bmodes[16];
      MB_MODE_INFO tmp_best_mbmode;
      BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
      int pred_exists = 0;
      int uv_skippable;

      YV12_BUFFER_CONFIG *scaled_ref_frame[2] = {NULL, NULL};
      int ref;

      for (ref = 0; ref < 2; ++ref) {
        scaled_ref_frame[ref] = mbmi->ref_frame[ref] > INTRA_FRAME ?
            vp9_get_scaled_ref_frame(cpi, mbmi->ref_frame[ref]) : NULL;

        if (scaled_ref_frame[ref]) {
          int i;
          // Swap out the reference frame for a version that's been scaled to
          // match the resolution of the current frame, allowing the existing
          // motion search code to be used without additional modifications.
          for (i = 0; i < MAX_MB_PLANE; i++)
            backup_yv12[ref][i] = xd->plane[i].pre[ref];
          vp9_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
                               NULL);
        }
      }

      this_rd_thresh = (ref_frame == LAST_FRAME) ?
          rd_opt->threshes[segment_id][bsize][THR_LAST] :
          rd_opt->threshes[segment_id][bsize][THR_ALTR];
      this_rd_thresh = (ref_frame == GOLDEN_FRAME) ?
          rd_opt->threshes[segment_id][bsize][THR_GOLD] : this_rd_thresh;
      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
        filter_cache[i] = INT64_MAX;

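      // Choose the interpolation filter for the split mode. Speed features
      // may pin it to EIGHTTAP or to the filter predicted from the coding
      // context; otherwise every switchable filter is searched and the RD
      // cost of each is cached in filter_cache.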
      if (cm->interp_filter != BILINEAR) {
        tmp_best_filter = EIGHTTAP;
        if (x->source_variance < sf->disable_filter_search_var_thresh) {
          tmp_best_filter = EIGHTTAP;
        } else if (sf->adaptive_pred_interp_filter == 1 &&
                   ctx->pred_interp_filter < SWITCHABLE) {
          tmp_best_filter = ctx->pred_interp_filter;
        } else if (sf->adaptive_pred_interp_filter == 2) {
          tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE ?
              ctx->pred_interp_filter : 0;
        } else {
          for (switchable_filter_index = 0;
               switchable_filter_index < SWITCHABLE_FILTERS;
               ++switchable_filter_index) {
            int newbest, rs;
            int64_t rs_rd;
            MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
            mbmi->interp_filter = switchable_filter_index;
            tmp_rd = rd_pick_best_sub8x8_mode(cpi, x,
                                              &mbmi_ext->ref_mvs[ref_frame][0],
                                              second_ref, best_yrd, &rate,
                                              &rate_y, &distortion,
                                              &skippable, &total_sse,
                                              (int) this_rd_thresh, seg_mvs,
                                              bsi, switchable_filter_index,
                                              mi_row, mi_col);

            if (tmp_rd == INT64_MAX)
              continue;
            rs = vp9_get_switchable_rate(cpi, xd);
            rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
            filter_cache[switchable_filter_index] = tmp_rd;
            filter_cache[SWITCHABLE_FILTERS] =
                VPXMIN(filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd);
            if (cm->interp_filter == SWITCHABLE)
              tmp_rd += rs_rd;

            mask_filter = VPXMAX(mask_filter, tmp_rd);

            newbest = (tmp_rd < tmp_best_rd);
            if (newbest) {
              tmp_best_filter = mbmi->interp_filter;
              tmp_best_rd = tmp_rd;
            }
            if ((newbest && cm->interp_filter == SWITCHABLE) ||
                (mbmi->interp_filter == cm->interp_filter &&
                 cm->interp_filter != SWITCHABLE)) {
              tmp_best_rdu = tmp_rd;
              tmp_best_rate = rate;
              tmp_best_ratey = rate_y;
              tmp_best_distortion = distortion;
              tmp_best_sse = total_sse;
              tmp_best_skippable = skippable;
              tmp_best_mbmode = *mbmi;
              for (i = 0; i < 4; i++) {
                tmp_best_bmodes[i] = xd->mi[0]->bmi[i];
                x->zcoeff_blk[TX_4X4][i] = !x->plane[0].eobs[i];
              }
              pred_exists = 1;
              if (switchable_filter_index == 0 &&
                  sf->use_rd_breakout &&
                  best_rd < INT64_MAX) {
                if (tmp_best_rdu / 2 > best_rd) {
                  // Skip searching the other filters if the first is
                  // already substantially larger than the best so far.
                  tmp_best_filter = mbmi->interp_filter;
                  tmp_best_rdu = INT64_MAX;
                  break;
                }
              }
            }
          }  // switchable_filter_index loop
        }
      }

      if (tmp_best_rdu == INT64_MAX && pred_exists)
        continue;

      mbmi->interp_filter = (cm->interp_filter == SWITCHABLE ?
                             tmp_best_filter : cm->interp_filter);
      if (!pred_exists) {
        // Handles the special case when a filter that is not in the
        // switchable list (bilinear, 6-tap) is indicated at the frame level
        tmp_rd = rd_pick_best_sub8x8_mode(cpi, x,
                                          &x->mbmi_ext->ref_mvs[ref_frame][0],
                                          second_ref, best_yrd, &rate, &rate_y,
                                          &distortion, &skippable, &total_sse,
                                          (int) this_rd_thresh, seg_mvs, bsi, 0,
                                          mi_row, mi_col);
        if (tmp_rd == INT64_MAX)
          continue;
      } else {
        total_sse = tmp_best_sse;
        rate = tmp_best_rate;
        rate_y = tmp_best_ratey;
        distortion = tmp_best_distortion;
        skippable = tmp_best_skippable;
        *mbmi = tmp_best_mbmode;
        for (i = 0; i < 4; i++)
          xd->mi[0]->bmi[i] = tmp_best_bmodes[i];
      }

      rate2 += rate;
      distortion2 += distortion;

      if (cm->interp_filter == SWITCHABLE)
        rate2 += vp9_get_switchable_rate(cpi, xd);

      if (!mode_excluded)
        mode_excluded = comp_pred ? cm->reference_mode == SINGLE_REFERENCE
                                  : cm->reference_mode == COMPOUND_REFERENCE;

      compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred);

      tmp_best_rdu =
          best_rd - VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
                           RDCOST(x->rdmult, x->rddiv, 0, total_sse));

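      // tmp_best_rdu is the RD budget left for the UV planes: the gap between
      // the best cost so far and the cheaper of coding the Y residual or
      // skipping it entirely.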
      if (tmp_best_rdu > 0) {
        // If even the 'Y' rd value of split is higher than best so far
        // then don't bother looking at UV.
        vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col,
                                        BLOCK_8X8);
        memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
        if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
                              &uv_sse, BLOCK_8X8, tmp_best_rdu)) {
          for (ref = 0; ref < 2; ++ref) {
            if (scaled_ref_frame[ref]) {
              int i;
              for (i = 0; i < MAX_MB_PLANE; ++i)
                xd->plane[i].pre[ref] = backup_yv12[ref][i];
            }
          }
          continue;
        }

        rate2 += rate_uv;
        distortion2 += distortion_uv;
        skippable = skippable && uv_skippable;
        total_sse += uv_sse;
      }

      for (ref = 0; ref < 2; ++ref) {
        if (scaled_ref_frame[ref]) {
          // Restore the prediction frame pointers to their unscaled versions.
          int i;
          for (i = 0; i < MAX_MB_PLANE; ++i)
            xd->plane[i].pre[ref] = backup_yv12[ref][i];
        }
      }
    }

    if (cm->reference_mode == REFERENCE_MODE_SELECT)
      rate2 += compmode_cost;

    // Estimate the reference frame signaling cost and add it
    // to the rolling cost variable.
    if (second_ref_frame > INTRA_FRAME) {
      rate2 += ref_costs_comp[ref_frame];
    } else {
      rate2 += ref_costs_single[ref_frame];
    }

    if (!disable_skip) {
      // Skip is never coded at the segment level for sub8x8 blocks and instead
      // always coded in the bitstream at the mode info level.

      if (ref_frame != INTRA_FRAME && !xd->lossless) {
        if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
            RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
          // Add in the cost of the no skip flag.
          rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
        } else {
          // FIXME(rbultje) make this work for splitmv also
          rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
          distortion2 = total_sse;
          assert(total_sse >= 0);
          rate2 -= (rate_y + rate_uv);
          rate_y = 0;
          rate_uv = 0;
          this_skip2 = 1;
        }
      } else {
        // Add in the cost of the no skip flag.
        rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
      }

      // Calculate the final RD estimate for this mode.
      this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
    }

    if (!disable_skip && ref_frame == INTRA_FRAME) {
      for (i = 0; i < REFERENCE_MODES; ++i)
        best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
        best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd);
    }

    // Did this mode help, i.e. is it the new best mode so far?
    if (this_rd < best_rd || x->skip) {
      if (!mode_excluded) {
        int max_plane = MAX_MB_PLANE;
        // Note index of best mode so far
        best_ref_index = ref_index;

        if (ref_frame == INTRA_FRAME) {
          /* required for left and above block mv */
          mbmi->mv[0].as_int = 0;
          max_plane = 1;
        }

        rd_cost->rate = rate2;
        rd_cost->dist = distortion2;
        rd_cost->rdcost = this_rd;
        best_rd = this_rd;
        best_yrd = best_rd -
            RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
        best_mbmode = *mbmi;
        best_skip2 = this_skip2;
        if (!x->select_tx_size)
          swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
        memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
               sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);

        for (i = 0; i < 4; i++)
          best_bmodes[i] = xd->mi[0]->bmi[i];

        // TODO(debargha): enhance this test with a better distortion prediction
        // based on qp, activity mask and history
        if ((sf->mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
            (ref_index > MIN_EARLY_TERM_INDEX)) {
          int qstep = xd->plane[0].dequant[1];
          // TODO(debargha): Enhance this by specializing for each mode_index
          int scale = 4;
#if CONFIG_VP9_HIGHBITDEPTH
          if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
            qstep >>= (xd->bd - 8);
          }
#endif  // CONFIG_VP9_HIGHBITDEPTH
          if (x->source_variance < UINT_MAX) {
            const int var_adjust = (x->source_variance < 16);
            scale -= var_adjust;
          }
          if (ref_frame > INTRA_FRAME &&
              distortion2 * scale < qstep * qstep) {
            early_term = 1;
          }
        }
      }
    }

    /* keep record of best compound/single-only prediction */
    if (!disable_skip && ref_frame != INTRA_FRAME) {
      int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;

      if (cm->reference_mode == REFERENCE_MODE_SELECT) {
        single_rate = rate2 - compmode_cost;
        hybrid_rate = rate2;
      } else {
        single_rate = rate2;
        hybrid_rate = rate2 + compmode_cost;
      }

      single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
      hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);

      if (!comp_pred && single_rd < best_pred_rd[SINGLE_REFERENCE])
        best_pred_rd[SINGLE_REFERENCE] = single_rd;
      else if (comp_pred && single_rd < best_pred_rd[COMPOUND_REFERENCE])
        best_pred_rd[COMPOUND_REFERENCE] = single_rd;

      if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
        best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
    }

    /* keep record of best filter type */
    if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME &&
        cm->interp_filter != BILINEAR) {
      int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ?
                                 SWITCHABLE_FILTERS : cm->interp_filter];
      int64_t adj_rd;
      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
        if (ref == INT64_MAX)
          adj_rd = 0;
        else if (filter_cache[i] == INT64_MAX)
          // When early termination is triggered, the encoder does not have
          // access to the rate-distortion cost. It only knows that the cost
          // should be above the maximum valid value, hence it takes the
          // known maximum plus an arbitrary constant as the cost.
          adj_rd = mask_filter - ref + 10;
        else
          adj_rd = filter_cache[i] - ref;

        adj_rd += this_rd;
        best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
      }
    }

    if (early_term)
      break;

    if (x->skip && !comp_pred)
      break;
  }

  if (best_rd >= best_rd_so_far) {
    rd_cost->rate = INT_MAX;
    rd_cost->rdcost = INT64_MAX;
    return;
  }

  // If we used an estimate for the uv intra rd in the loop above...
  if (sf->use_uv_intra_rd_estimate) {
    // Do Intra UV best rd mode selection if best mode choice above was intra.
    if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
      *mbmi = best_mbmode;
      rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra,
                              &rate_uv_tokenonly,
                              &dist_uv,
                              &skip_uv,
                              BLOCK_8X8, TX_4X4);
    }
  }

  if (best_rd == INT64_MAX) {
    rd_cost->rate = INT_MAX;
    rd_cost->dist = INT64_MAX;
    rd_cost->rdcost = INT64_MAX;
    return;
  }

  assert((cm->interp_filter == SWITCHABLE) ||
         (cm->interp_filter == best_mbmode.interp_filter) ||
         !is_inter_block(&best_mbmode));

  vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
                            sf->adaptive_rd_thresh, bsize, best_ref_index);

  // macroblock modes
  *mbmi = best_mbmode;
  x->skip |= best_skip2;
  if (!is_inter_block(&best_mbmode)) {
    for (i = 0; i < 4; i++)
      xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
  } else {
    for (i = 0; i < 4; ++i)
      memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i], sizeof(b_mode_info));

    mbmi->mv[0].as_int = xd->mi[0]->bmi[3].as_mv[0].as_int;
    mbmi->mv[1].as_int = xd->mi[0]->bmi[3].as_mv[1].as_int;
  }

  for (i = 0; i < REFERENCE_MODES; ++i) {
    if (best_pred_rd[i] == INT64_MAX)
      best_pred_diff[i] = INT_MIN;
    else
      best_pred_diff[i] = best_rd - best_pred_rd[i];
  }

  if (!x->skip) {
    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
      if (best_filter_rd[i] == INT64_MAX)
        best_filter_diff[i] = 0;
      else
        best_filter_diff[i] = best_rd - best_filter_rd[i];
    }
    if (cm->interp_filter == SWITCHABLE)
      assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
  } else {
    vp9_zero(best_filter_diff);
  }

  store_coding_context(x, ctx, best_ref_index,
                       best_pred_diff, best_filter_diff, 0);
}