1 /*
2 * Copyright (c) 2020, Alliance for Open Media. All rights reserved
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 */
11
12 #include "av1/common/reconinter.h"
13
14 #include "av1/encoder/encodemv.h"
15 #include "av1/encoder/encoder.h"
16 #include "av1/encoder/interp_search.h"
17 #include "av1/encoder/mcomp.h"
18 #include "av1/encoder/motion_search_facade.h"
19 #include "av1/encoder/partition_strategy.h"
20 #include "av1/encoder/reconinter_enc.h"
21 #include "av1/encoder/tpl_model.h"
22 #include "av1/encoder/tx_search.h"
23
24 #define RIGHT_SHIFT_MV(x) (((x) + 3 + ((x) >= 0)) >> 3)
25
// A full-pel motion vector candidate collected from the TPL model, together
// with a vote count used to rank candidates before the full-pel search.
typedef struct {
  FULLPEL_MV fmv;  // Candidate motion vector at full-pel precision.
  int weight;      // Number of TPL blocks that voted for (roughly) this MV.
} cand_mv_t;
30
compare_weight(const void * a,const void * b)31 static int compare_weight(const void *a, const void *b) {
32 const int diff = ((cand_mv_t *)a)->weight - ((cand_mv_t *)b)->weight;
33 if (diff < 0)
34 return 1;
35 else if (diff > 0)
36 return -1;
37 return 0;
38 }
39
40 // Allow more mesh searches for screen content type on the ARF.
use_fine_search_interval(const AV1_COMP * const cpi)41 static int use_fine_search_interval(const AV1_COMP *const cpi) {
42 return cpi->is_screen_content_type &&
43 cpi->ppi->gf_group.update_type[cpi->gf_frame_index] == ARF_UPDATE &&
44 cpi->oxcf.speed <= 2;
45 }
46
// Iterate through the tpl and collect the mvs to be used as candidates.
//
// Walks the TPL motion field covering the current block (nw x nh TPL units)
// for reference frame 'ref' and accumulates unique full-pel MV candidates
// into cand[]. Candidates are considered duplicates when equal after
// RIGHT_SHIFT_MV rounding; duplicates increment the existing entry's weight
// instead of adding a new entry. cand[0] is expected to already hold the
// search start MV on entry; it is given a dominating weight (nw * nh) so it
// is always tested by the caller.
//
// If any TPL MV in the covered area is INVALID_MV, collection is aborted and
// *total_cand_weight is left untouched. Otherwise *total_cand_weight is set
// to 2 * nh * nw and, when more than two candidates were gathered, cand[] is
// sorted by descending weight.
static INLINE void get_mv_candidate_from_tpl(const AV1_COMP *const cpi,
                                             const MACROBLOCK *x,
                                             BLOCK_SIZE bsize, int ref,
                                             cand_mv_t *cand, int *cand_count,
                                             int *total_cand_weight) {
  const SuperBlockEnc *sb_enc = &x->sb_enc;
  // No TPL stats were gathered for this superblock; nothing to collect.
  if (!sb_enc->tpl_data_count) {
    return;
  }

  const AV1_COMMON *cm = &cpi->common;
  const MACROBLOCKD *xd = &x->e_mbd;
  const int mi_row = xd->mi_row;
  const int mi_col = xd->mi_col;

  const BLOCK_SIZE tpl_bsize =
      convert_length_to_bsize(cpi->ppi->tpl_data.tpl_bsize_1d);
  const int tplw = mi_size_wide[tpl_bsize];
  const int tplh = mi_size_high[tpl_bsize];
  // Number of TPL units spanned by the current block, horizontally and
  // vertically. Zero when the block is smaller than one TPL unit.
  const int nw = mi_size_wide[bsize] / tplw;
  const int nh = mi_size_high[bsize] / tplh;

  if (nw >= 1 && nh >= 1) {
    // Offset of the block within its superblock, then the index of the
    // first covering TPL unit in the superblock's TPL grid.
    const int of_h = mi_row % mi_size_high[cm->seq_params->sb_size];
    const int of_w = mi_col % mi_size_wide[cm->seq_params->sb_size];
    const int start = of_h / tplh * sb_enc->tpl_stride + of_w / tplw;
    int valid = 1;

    // Assign large weight to start_mv, so it is always tested.
    cand[0].weight = nw * nh;

    for (int k = 0; k < nh; k++) {
      for (int l = 0; l < nw; l++) {
        const int_mv mv =
            sb_enc
                ->tpl_mv[start + k * sb_enc->tpl_stride + l][ref - LAST_FRAME];
        if (mv.as_int == INVALID_MV) {
          valid = 0;
          break;
        }

        const FULLPEL_MV fmv = { GET_MV_RAWPEL(mv.as_mv.row),
                                 GET_MV_RAWPEL(mv.as_mv.col) };
        // Deduplicate at the coarser RIGHT_SHIFT_MV precision: near-equal
        // MVs merge into one candidate with an increased vote count.
        int unique = 1;
        for (int m = 0; m < *cand_count; m++) {
          if (RIGHT_SHIFT_MV(fmv.row) == RIGHT_SHIFT_MV(cand[m].fmv.row) &&
              RIGHT_SHIFT_MV(fmv.col) == RIGHT_SHIFT_MV(cand[m].fmv.col)) {
            unique = 0;
            cand[m].weight++;
            break;
          }
        }

        if (unique) {
          cand[*cand_count].fmv = fmv;
          cand[*cand_count].weight = 1;
          (*cand_count)++;
        }
      }
      if (!valid) break;
    }

    if (valid) {
      // Total weight is twice the TPL-unit count: cand[0] contributes
      // nw * nh and the collected TPL votes contribute another nw * nh.
      *total_cand_weight = 2 * nh * nw;
      if (*cand_count > 2)
        qsort(cand, *cand_count, sizeof(cand[0]), &compare_weight);
    }
  }
}
117
// Single-reference motion search for one reference frame of the current
// block. Runs a full-pel search (seeded with TPL MV candidates for
// SIMPLE_TRANSLATION, or a dedicated OBMC search for OBMC_CAUSAL), then an
// optional subpel refinement, with early-termination checks against results
// recorded for previous ref_mv_idx values.
//
// Outputs: *best_mv holds the chosen MV, or INVALID_MV to signal the caller
// to stop searching this mode; *rate_mv holds the rate cost of signaling
// the chosen MV relative to the block's reference MV. mode_info[] and (via
// args) the per-ref_mv_idx bookkeeping may also be updated.
void av1_single_motion_search(const AV1_COMP *const cpi, MACROBLOCK *x,
                              BLOCK_SIZE bsize, int ref_idx, int *rate_mv,
                              int search_range, inter_mode_info *mode_info,
                              int_mv *best_mv,
                              struct HandleInterModeArgs *const args) {
  MACROBLOCKD *xd = &x->e_mbd;
  const AV1_COMMON *cm = &cpi->common;
  const MotionVectorSearchParams *mv_search_params = &cpi->mv_search_params;
  const int num_planes = av1_num_planes(cm);
  MB_MODE_INFO *mbmi = xd->mi[0];
  struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } };
  int bestsme = INT_MAX;
  const int ref = mbmi->ref_frame[ref_idx];
  const YV12_BUFFER_CONFIG *scaled_ref_frame =
      av1_get_scaled_ref_frame(cpi, ref);
  const int mi_row = xd->mi_row;
  const int mi_col = xd->mi_col;
  const MvCosts *mv_costs = x->mv_costs;

  if (scaled_ref_frame) {
    // Swap out the reference frame for a version that's been scaled to
    // match the resolution of the current frame, allowing the existing
    // full-pixel motion search code to be used without additional
    // modifications.
    for (int i = 0; i < num_planes; i++) {
      backup_yv12[i] = xd->plane[i].pre[ref_idx];
    }
    av1_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL,
                         num_planes);
  }

  // Work out the size of the first step in the mv step search.
  // 0 here is maximum length first step. 1 is AOMMAX >> 1 etc.
  int step_param;
  if (cpi->sf.mv_sf.auto_mv_step_size && cm->show_frame) {
    // Take the weighted average of the step_params based on the last frame's
    // max mv magnitude and that based on the best ref mvs of the current
    // block for the given reference.
    step_param = (av1_init_search_range(x->max_mv_context[ref]) +
                  mv_search_params->mv_step_param) /
                 2;
  } else {
    step_param = mv_search_params->mv_step_param;
  }

  const MV ref_mv = av1_get_ref_mv(x, ref_idx).as_mv;
  FULLPEL_MV start_mv;
  // For non-translational modes the MV has already been initialized by the
  // caller; otherwise start from the reference MV.
  if (mbmi->motion_mode != SIMPLE_TRANSLATION)
    start_mv = get_fullmv_from_mv(&mbmi->mv[0].as_mv);
  else
    start_mv = get_fullmv_from_mv(&ref_mv);

  // cand stores start_mv and all possible MVs in a SB.
  cand_mv_t cand[MAX_TPL_BLK_IN_SB * MAX_TPL_BLK_IN_SB + 1] = {
    { { 0, 0 }, 0 }
  };
  cand[0].fmv = start_mv;
  int cnt = 1;
  int total_weight = 0;

  if (!cpi->sf.mv_sf.full_pixel_search_level &&
      mbmi->motion_mode == SIMPLE_TRANSLATION) {
    get_mv_candidate_from_tpl(cpi, x, bsize, ref, cand, &cnt, &total_weight);
  }

  // Further reduce the search range.
  if (search_range < INT_MAX) {
    const search_site_config *search_site_cfg =
        &mv_search_params
             ->search_site_cfg[SS_CFG_SRC][cpi->sf.mv_sf.search_method];
    // Max step_param is search_site_cfg->num_search_steps.
    if (search_range < 1) {
      step_param = search_site_cfg->num_search_steps;
    } else {
      // Raise step_param (skipping the largest search steps) until the
      // remaining search radius fits inside the requested range.
      while (search_site_cfg->radius[search_site_cfg->num_search_steps -
                                     step_param - 1] > (search_range << 1) &&
             search_site_cfg->num_search_steps - step_param - 1 > 0)
        step_param++;
    }
  }

  int cost_list[5];
  int_mv second_best_mv;
  best_mv->as_int = second_best_mv.as_int = INVALID_MV;

  // Allow more mesh searches for screen content type on the ARF.
  const int fine_search_interval = use_fine_search_interval(cpi);
  const search_site_config *src_search_sites =
      mv_search_params->search_site_cfg[SS_CFG_SRC];
  FULLPEL_MOTION_SEARCH_PARAMS full_ms_params;
  av1_make_default_fullpel_ms_params(&full_ms_params, cpi, x, bsize, &ref_mv,
                                     src_search_sites, fine_search_interval);

  switch (mbmi->motion_mode) {
    case SIMPLE_TRANSLATION: {
      // Perform a search with the top 2 candidates
      int sum_weight = 0;
      for (int m = 0; m < AOMMIN(2, cnt); m++) {
        FULLPEL_MV smv = cand[m].fmv;
        FULLPEL_MV this_best_mv, this_second_best_mv;

        int thissme = av1_full_pixel_search(
            smv, &full_ms_params, step_param, cond_cost_list(cpi, cost_list),
            &this_best_mv, &this_second_best_mv);

        if (thissme < bestsme) {
          bestsme = thissme;
          best_mv->as_fullmv = this_best_mv;
          second_best_mv.as_fullmv = this_second_best_mv;
        }

        // Stop early once the tested candidates cover more than 3/4 of the
        // total candidate weight.
        sum_weight += cand[m].weight;
        if (4 * sum_weight > 3 * total_weight) break;
      }
    } break;
    case OBMC_CAUSAL:
      bestsme = av1_obmc_full_pixel_search(start_mv, &full_ms_params,
                                           step_param, &best_mv->as_fullmv);
      break;
    default: assert(0 && "Invalid motion mode!\n");
  }

  if (scaled_ref_frame) {
    // Swap back the original buffers for subpel motion search.
    for (int i = 0; i < num_planes; i++) {
      xd->plane[i].pre[ref_idx] = backup_yv12[i];
    }
  }

  // Terminate search with the current ref_idx based on fullpel mv, rate cost,
  // and other known cost.
  if (cpi->sf.inter_sf.skip_newmv_in_drl >= 2 &&
      mbmi->motion_mode == SIMPLE_TRANSLATION &&
      best_mv->as_int != INVALID_MV) {
    int_mv this_mv;
    this_mv.as_mv = get_mv_from_fullmv(&best_mv->as_fullmv);
    const int ref_mv_idx = mbmi->ref_mv_idx;
    const int this_mv_rate =
        av1_mv_bit_cost(&this_mv.as_mv, &ref_mv, mv_costs->nmv_joint_cost,
                        mv_costs->mv_cost_stack, MV_COST_WEIGHT);
    // Record this ref_mv_idx's full-pel result for comparison by later
    // ref_mv_idx evaluations.
    mode_info[ref_mv_idx].full_search_mv.as_int = this_mv.as_int;
    mode_info[ref_mv_idx].full_mv_rate = this_mv_rate;
    mode_info[ref_mv_idx].full_mv_bestsme = bestsme;

    for (int prev_ref_idx = 0; prev_ref_idx < ref_mv_idx; ++prev_ref_idx) {
      // Check if the motion search result same as previous results
      if (this_mv.as_int == mode_info[prev_ref_idx].full_search_mv.as_int) {
        // Compare the rate cost
        const int prev_rate_cost = mode_info[prev_ref_idx].full_mv_rate +
                                   mode_info[prev_ref_idx].drl_cost;
        const int this_rate_cost =
            this_mv_rate + mode_info[ref_mv_idx].drl_cost;

        if (prev_rate_cost <= this_rate_cost) {
          // If the current rate_cost is worse than the previous rate_cost, then
          // we terminate the search. Since av1_single_motion_search is only
          // called by handle_new_mv in SIMPLE_TRANSLATION mode, we set the
          // best_mv to INVALID mv to signal that we wish to terminate search
          // for the current mode.
          best_mv->as_int = INVALID_MV;
          return;
        }
      }

      // Terminate the evaluation of current ref_mv_idx based on bestsme and
      // drl_cost.
      const int psme = mode_info[prev_ref_idx].full_mv_bestsme;
      if (psme == INT_MAX) continue;
      // At level 3 allow a 25% slack over the previous error before
      // terminating; at level 2 terminate on any regression.
      const int thr =
          cpi->sf.inter_sf.skip_newmv_in_drl == 3 ? (psme + (psme >> 2)) : psme;
      if (cpi->sf.inter_sf.skip_newmv_in_drl >= 3 &&
          mode_info[ref_mv_idx].full_mv_bestsme > thr &&
          mode_info[prev_ref_idx].drl_cost < mode_info[ref_mv_idx].drl_cost) {
        best_mv->as_int = INVALID_MV;
        return;
      }
    }
  }

  if (cpi->common.features.cur_frame_force_integer_mv) {
    convert_fullmv_to_mv(best_mv);
  }

  const int use_fractional_mv =
      bestsme < INT_MAX && cpi->common.features.cur_frame_force_integer_mv == 0;
  int best_mv_rate = 0;
  int mv_rate_calculated = 0;
  if (use_fractional_mv) {
    int_mv fractional_ms_list[3];
    av1_set_fractional_mv(fractional_ms_list);
    int dis; /* TODO: use dis in distortion calculation later. */

    SUBPEL_MOTION_SEARCH_PARAMS ms_params;
    av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize, &ref_mv,
                                      cost_list);
    MV subpel_start_mv = get_mv_from_fullmv(&best_mv->as_fullmv);

    switch (mbmi->motion_mode) {
      case SIMPLE_TRANSLATION:
        if (cpi->sf.mv_sf.use_accurate_subpel_search) {
          // Optionally refine around the second-best full-pel MV as well.
          const int try_second = second_best_mv.as_int != INVALID_MV &&
                                 second_best_mv.as_int != best_mv->as_int &&
                                 (cpi->sf.mv_sf.disable_second_mv <= 1);
          const int best_mv_var = mv_search_params->find_fractional_mv_step(
              xd, cm, &ms_params, subpel_start_mv, &best_mv->as_mv, &dis,
              &x->pred_sse[ref], fractional_ms_list);

          if (try_second) {
            struct macroblockd_plane *p = xd->plane;
            const BUFFER_SET orig_dst = {
              { p[0].dst.buf, p[1].dst.buf, p[2].dst.buf },
              { p[0].dst.stride, p[1].dst.stride, p[2].dst.stride },
            };
            int64_t rd = INT64_MAX;
            if (!cpi->sf.mv_sf.disable_second_mv) {
              // Calculate actual rd cost.
              mbmi->mv[0].as_mv = best_mv->as_mv;
              av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, &orig_dst,
                                            bsize, 0, 0);
              av1_subtract_plane(x, bsize, 0);
              RD_STATS this_rd_stats;
              av1_init_rd_stats(&this_rd_stats);
              av1_estimate_txfm_yrd(cpi, x, &this_rd_stats, INT64_MAX, bsize,
                                    max_txsize_rect_lookup[bsize]);
              int this_mv_rate = av1_mv_bit_cost(
                  &best_mv->as_mv, &ref_mv, mv_costs->nmv_joint_cost,
                  mv_costs->mv_cost_stack, MV_COST_WEIGHT);
              rd = RDCOST(x->rdmult, this_mv_rate + this_rd_stats.rate,
                          this_rd_stats.dist);
            }

            MV this_best_mv;
            subpel_start_mv = get_mv_from_fullmv(&second_best_mv.as_fullmv);
            if (av1_is_subpelmv_in_range(&ms_params.mv_limits,
                                         subpel_start_mv)) {
              unsigned int sse;
              const int this_var = mv_search_params->find_fractional_mv_step(
                  xd, cm, &ms_params, subpel_start_mv, &this_best_mv, &dis,
                  &sse, fractional_ms_list);

              if (!cpi->sf.mv_sf.disable_second_mv) {
                // If cpi->sf.mv_sf.disable_second_mv is 0, use actual rd cost
                // to choose the better MV.
                mbmi->mv[0].as_mv = this_best_mv;
                av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, &orig_dst,
                                              bsize, 0, 0);
                av1_subtract_plane(x, bsize, 0);
                RD_STATS tmp_rd_stats;
                av1_init_rd_stats(&tmp_rd_stats);
                av1_estimate_txfm_yrd(cpi, x, &tmp_rd_stats, INT64_MAX, bsize,
                                      max_txsize_rect_lookup[bsize]);
                int tmp_mv_rate = av1_mv_bit_cost(
                    &this_best_mv, &ref_mv, mv_costs->nmv_joint_cost,
                    mv_costs->mv_cost_stack, MV_COST_WEIGHT);
                int64_t tmp_rd =
                    RDCOST(x->rdmult, tmp_rd_stats.rate + tmp_mv_rate,
                           tmp_rd_stats.dist);
                if (tmp_rd < rd) {
                  best_mv->as_mv = this_best_mv;
                  x->pred_sse[ref] = sse;
                }
              } else {
                // If cpi->sf.mv_sf.disable_second_mv = 1, use var to decide the
                // best MV.
                if (this_var < best_mv_var) {
                  best_mv->as_mv = this_best_mv;
                  x->pred_sse[ref] = sse;
                }
              }
            }
          }
        } else {
          mv_search_params->find_fractional_mv_step(
              xd, cm, &ms_params, subpel_start_mv, &best_mv->as_mv, &dis,
              &x->pred_sse[ref], NULL);
        }
        break;
      case OBMC_CAUSAL:
        av1_find_best_obmc_sub_pixel_tree_up(xd, cm, &ms_params,
                                             subpel_start_mv, &best_mv->as_mv,
                                             &dis, &x->pred_sse[ref], NULL);
        break;
      default: assert(0 && "Invalid motion mode!\n");
    }

    // Terminate search with the current ref_idx based on subpel mv and rate
    // cost.
    if (cpi->sf.inter_sf.skip_newmv_in_drl >= 1 && args != NULL &&
        mbmi->motion_mode == SIMPLE_TRANSLATION &&
        best_mv->as_int != INVALID_MV) {
      const int ref_mv_idx = mbmi->ref_mv_idx;
      best_mv_rate =
          av1_mv_bit_cost(&best_mv->as_mv, &ref_mv, mv_costs->nmv_joint_cost,
                          mv_costs->mv_cost_stack, MV_COST_WEIGHT);
      mv_rate_calculated = 1;

      for (int prev_ref_idx = 0; prev_ref_idx < ref_mv_idx; ++prev_ref_idx) {
        if (!args->single_newmv_valid[prev_ref_idx][ref]) continue;
        // Check if the motion vectors are the same.
        if (best_mv->as_int == args->single_newmv[prev_ref_idx][ref].as_int) {
          // Skip this evaluation if the previous one is skipped.
          if (mode_info[prev_ref_idx].skip) {
            mode_info[ref_mv_idx].skip = 1;
            break;
          }
          // Compare the rate cost that we currently know.
          const int prev_rate_cost =
              args->single_newmv_rate[prev_ref_idx][ref] +
              mode_info[prev_ref_idx].drl_cost;
          const int this_rate_cost =
              best_mv_rate + mode_info[ref_mv_idx].drl_cost;

          if (prev_rate_cost <= this_rate_cost) {
            // If the current rate_cost is worse than the previous rate_cost,
            // then we terminate the search for this ref_mv_idx.
            mode_info[ref_mv_idx].skip = 1;
            break;
          }
        }
      }
    }
  }

  if (mv_rate_calculated) {
    *rate_mv = best_mv_rate;
  } else {
    *rate_mv =
        av1_mv_bit_cost(&best_mv->as_mv, &ref_mv, mv_costs->nmv_joint_cost,
                        mv_costs->mv_cost_stack, MV_COST_WEIGHT);
  }
}
448
// Iterative joint motion search for compound (two-reference) prediction.
// Alternately refines the MV of one reference while holding the other
// reference's prediction fixed, up to 4 iterations, stopping early when an
// iteration fails to improve. cur_mv[] is updated in place with the refined
// MVs, *rate_mv receives the combined MV signaling cost, and the best
// prediction error found is returned.
int av1_joint_motion_search(const AV1_COMP *cpi, MACROBLOCK *x,
                            BLOCK_SIZE bsize, int_mv *cur_mv,
                            const uint8_t *mask, int mask_stride, int *rate_mv,
                            int allow_second_mv) {
  const AV1_COMMON *const cm = &cpi->common;
  const int num_planes = av1_num_planes(cm);
  const int pw = block_size_wide[bsize];
  const int ph = block_size_high[bsize];
  const int plane = 0;
  MACROBLOCKD *xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = xd->mi[0];
  // This function should only ever be called for compound modes
  assert(has_second_ref(mbmi));
  const int_mv init_mv[2] = { cur_mv[0], cur_mv[1] };
  const int refs[2] = { mbmi->ref_frame[0], mbmi->ref_frame[1] };
  const MvCosts *mv_costs = x->mv_costs;
  int_mv ref_mv[2];
  int ite, ref;

  // Get the prediction block from the 'other' reference frame.
  const int_interpfilters interp_filters =
      av1_broadcast_interp_filter(EIGHTTAP_REGULAR);

  InterPredParams inter_pred_params;
  const int mi_row = xd->mi_row;
  const int mi_col = xd->mi_col;

  // Do joint motion search in compound mode to get more accurate mv.
  struct buf_2d backup_yv12[2][MAX_MB_PLANE];
  int last_besterr[2] = { INT_MAX, INT_MAX };
  const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
    av1_get_scaled_ref_frame(cpi, refs[0]),
    av1_get_scaled_ref_frame(cpi, refs[1])
  };

  // Prediction buffer from second frame.
  DECLARE_ALIGNED(16, uint8_t, second_pred16[MAX_SB_SQUARE * sizeof(uint16_t)]);
  uint8_t *second_pred = get_buf_by_bd(xd, second_pred16);

  int_mv best_mv, second_best_mv;

  // Allow joint search multiple times iteratively for each reference frame
  // and break out of the search loop if it couldn't find a better mv.
  for (ite = 0; ite < 4; ite++) {
    struct buf_2d ref_yv12[2];
    int bestsme = INT_MAX;
    int id = ite % 2;  // Even iterations search in the first reference frame,
                       // odd iterations search in the second. The predictor
                       // found for the 'other' reference frame is factored in.
    // After the first pass over both references, stop if neither MV has
    // moved (at least at integer precision) from its initial value.
    if (ite >= 2 && cur_mv[!id].as_int == init_mv[!id].as_int) {
      if (cur_mv[id].as_int == init_mv[id].as_int) {
        break;
      } else {
        int_mv cur_int_mv, init_int_mv;
        cur_int_mv.as_mv.col = cur_mv[id].as_mv.col >> 3;
        cur_int_mv.as_mv.row = cur_mv[id].as_mv.row >> 3;
        init_int_mv.as_mv.row = init_mv[id].as_mv.row >> 3;
        init_int_mv.as_mv.col = init_mv[id].as_mv.col >> 3;
        if (cur_int_mv.as_int == init_int_mv.as_int) {
          break;
        }
      }
    }
    for (ref = 0; ref < 2; ++ref) {
      ref_mv[ref] = av1_get_ref_mv(x, ref);
      // Swap out the reference frame for a version that's been scaled to
      // match the resolution of the current frame, allowing the existing
      // motion search code to be used without additional modifications.
      if (scaled_ref_frame[ref]) {
        int i;
        for (i = 0; i < num_planes; i++)
          backup_yv12[ref][i] = xd->plane[i].pre[ref];
        av1_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
                             NULL, num_planes);
      }
    }

    assert(IMPLIES(scaled_ref_frame[0] != NULL,
                   cm->width == scaled_ref_frame[0]->y_crop_width &&
                       cm->height == scaled_ref_frame[0]->y_crop_height));
    assert(IMPLIES(scaled_ref_frame[1] != NULL,
                   cm->width == scaled_ref_frame[1]->y_crop_width &&
                       cm->height == scaled_ref_frame[1]->y_crop_height));

    // Initialize based on (possibly scaled) prediction buffers.
    ref_yv12[0] = xd->plane[plane].pre[0];
    ref_yv12[1] = xd->plane[plane].pre[1];

    av1_init_inter_params(&inter_pred_params, pw, ph, mi_row * MI_SIZE,
                          mi_col * MI_SIZE, 0, 0, xd->bd, is_cur_buf_hbd(xd), 0,
                          &cm->sf_identity, &ref_yv12[!id], interp_filters);
    inter_pred_params.conv_params = get_conv_params(0, 0, xd->bd);

    // Since we have scaled the reference frames to match the size of the
    // current frame we must use a unit scaling factor during mode selection.
    av1_enc_build_one_inter_predictor(second_pred, pw, &cur_mv[!id].as_mv,
                                      &inter_pred_params);

    // Do full-pixel compound motion search on the current reference frame.
    if (id) xd->plane[plane].pre[0] = ref_yv12[id];

    // Make motion search params
    FULLPEL_MOTION_SEARCH_PARAMS full_ms_params;
    const search_site_config *src_search_sites =
        cpi->mv_search_params.search_site_cfg[SS_CFG_SRC];
    av1_make_default_fullpel_ms_params(&full_ms_params, cpi, x, bsize,
                                       &ref_mv[id].as_mv, src_search_sites,
                                       /*fine_search_interval=*/0);

    av1_set_ms_compound_refs(&full_ms_params.ms_buffers, second_pred, mask,
                             mask_stride, id);

    // Use the mv result from the single mode as mv predictor.
    const FULLPEL_MV start_fullmv = get_fullmv_from_mv(&cur_mv[id].as_mv);

    // Small-range full-pixel motion search.
    if (!cpi->sf.mv_sf.disable_extensive_joint_motion_search &&
        mbmi->interinter_comp.type != COMPOUND_WEDGE) {
      bestsme =
          av1_full_pixel_search(start_fullmv, &full_ms_params, 5, NULL,
                                &best_mv.as_fullmv, &second_best_mv.as_fullmv);
    } else {
      bestsme = av1_refining_search_8p_c(&full_ms_params, start_fullmv,
                                         &best_mv.as_fullmv);
      second_best_mv = best_mv;
    }

    const int try_second = second_best_mv.as_int != INVALID_MV &&
                           second_best_mv.as_int != best_mv.as_int &&
                           allow_second_mv;

    // Restore the pointer to the first (possibly scaled) prediction buffer.
    if (id) xd->plane[plane].pre[0] = ref_yv12[0];

    for (ref = 0; ref < 2; ++ref) {
      if (scaled_ref_frame[ref]) {
        // Swap back the original buffers for subpel motion search.
        for (int i = 0; i < num_planes; i++) {
          xd->plane[i].pre[ref] = backup_yv12[ref][i];
        }
        // Re-initialize based on unscaled prediction buffers.
        ref_yv12[ref] = xd->plane[plane].pre[ref];
      }
    }

    // Do sub-pixel compound motion search on the current reference frame.
    if (id) xd->plane[plane].pre[0] = ref_yv12[id];

    if (cpi->common.features.cur_frame_force_integer_mv) {
      convert_fullmv_to_mv(&best_mv);
    }
    if (bestsme < INT_MAX &&
        cpi->common.features.cur_frame_force_integer_mv == 0) {
      int dis; /* TODO: use dis in distortion calculation later. */
      unsigned int sse;
      SUBPEL_MOTION_SEARCH_PARAMS ms_params;
      av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize,
                                        &ref_mv[id].as_mv, NULL);
      av1_set_ms_compound_refs(&ms_params.var_params.ms_buffers, second_pred,
                               mask, mask_stride, id);
      ms_params.forced_stop = EIGHTH_PEL;
      MV start_mv = get_mv_from_fullmv(&best_mv.as_fullmv);
      bestsme = cpi->mv_search_params.find_fractional_mv_step(
          xd, cm, &ms_params, start_mv, &best_mv.as_mv, &dis, &sse, NULL);

      // Also refine around the second-best full-pel MV when allowed, and
      // keep whichever subpel result has the lower error.
      if (try_second) {
        MV this_best_mv;
        MV subpel_start_mv = get_mv_from_fullmv(&second_best_mv.as_fullmv);
        if (av1_is_subpelmv_in_range(&ms_params.mv_limits, subpel_start_mv)) {
          const int thissme = cpi->mv_search_params.find_fractional_mv_step(
              xd, cm, &ms_params, subpel_start_mv, &this_best_mv, &dis, &sse,
              NULL);
          if (thissme < bestsme) {
            best_mv.as_mv = this_best_mv;
            bestsme = thissme;
          }
        }
      }
    }

    // Restore the pointer to the first prediction buffer.
    if (id) xd->plane[plane].pre[0] = ref_yv12[0];
    if (bestsme < last_besterr[id]) {
      cur_mv[id] = best_mv;
      last_besterr[id] = bestsme;
    } else {
      // No improvement this iteration; stop the joint search.
      break;
    }
  }

  *rate_mv = 0;

  // Total signaling cost is the sum of both references' MV bit costs.
  for (ref = 0; ref < 2; ++ref) {
    const int_mv curr_ref_mv = av1_get_ref_mv(x, ref);
    *rate_mv += av1_mv_bit_cost(&cur_mv[ref].as_mv, &curr_ref_mv.as_mv,
                                mv_costs->nmv_joint_cost,
                                mv_costs->mv_cost_stack, MV_COST_WEIGHT);
  }

  return AOMMIN(last_besterr[0], last_besterr[1]);
}
650
// Search for the best mv for one component of a compound,
// given that the other component is fixed.
//
// Refines *this_mv (for reference ref_idx) against the already-built
// prediction of the other component (second_pred), optionally blended via
// mask. Updates *this_mv in place when a valid MV was found, writes the MV
// signaling cost to *rate_mv, and returns the best prediction error (or
// INT_MAX when the full-pel search failed).
int av1_compound_single_motion_search(const AV1_COMP *cpi, MACROBLOCK *x,
                                      BLOCK_SIZE bsize, MV *this_mv,
                                      const uint8_t *second_pred,
                                      const uint8_t *mask, int mask_stride,
                                      int *rate_mv, int ref_idx) {
  const AV1_COMMON *const cm = &cpi->common;
  const int num_planes = av1_num_planes(cm);
  MACROBLOCKD *xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = xd->mi[0];
  const int ref = mbmi->ref_frame[ref_idx];
  const int_mv ref_mv = av1_get_ref_mv(x, ref_idx);
  struct macroblockd_plane *const pd = &xd->plane[0];
  const MvCosts *mv_costs = x->mv_costs;

  struct buf_2d backup_yv12[MAX_MB_PLANE];
  const YV12_BUFFER_CONFIG *const scaled_ref_frame =
      av1_get_scaled_ref_frame(cpi, ref);

  // Check that this is either an interinter or an interintra block
  assert(has_second_ref(mbmi) || (ref_idx == 0 && is_interintra_mode(mbmi)));

  // Store the first prediction buffer.
  struct buf_2d orig_yv12;
  if (ref_idx) {
    // Temporarily point pre[0] at the searched reference so the search code
    // (which operates on pre[0]) sees the right buffer; restored at the end.
    orig_yv12 = pd->pre[0];
    pd->pre[0] = pd->pre[ref_idx];
  }

  if (scaled_ref_frame) {
    // Swap out the reference frame for a version that's been scaled to
    // match the resolution of the current frame, allowing the existing
    // full-pixel motion search code to be used without additional
    // modifications.
    for (int i = 0; i < num_planes; i++) {
      backup_yv12[i] = xd->plane[i].pre[ref_idx];
    }
    const int mi_row = xd->mi_row;
    const int mi_col = xd->mi_col;
    av1_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL,
                         num_planes);
  }

  int bestsme = INT_MAX;
  int_mv best_mv;

  // Make motion search params
  FULLPEL_MOTION_SEARCH_PARAMS full_ms_params;
  const search_site_config *src_search_sites =
      cpi->mv_search_params.search_site_cfg[SS_CFG_SRC];
  av1_make_default_fullpel_ms_params(&full_ms_params, cpi, x, bsize,
                                     &ref_mv.as_mv, src_search_sites,
                                     /*fine_search_interval=*/0);

  av1_set_ms_compound_refs(&full_ms_params.ms_buffers, second_pred, mask,
                           mask_stride, ref_idx);

  // Use the mv result from the single mode as mv predictor.
  const FULLPEL_MV start_fullmv = get_fullmv_from_mv(this_mv);

  // Small-range full-pixel motion search.
  bestsme = av1_full_pixel_search(start_fullmv, &full_ms_params, 5, NULL,
                                  &best_mv.as_fullmv, NULL);

  if (scaled_ref_frame) {
    // Swap back the original buffers for subpel motion search.
    for (int i = 0; i < num_planes; i++) {
      xd->plane[i].pre[ref_idx] = backup_yv12[i];
    }
  }

  if (cpi->common.features.cur_frame_force_integer_mv) {
    convert_fullmv_to_mv(&best_mv);
  }
  const int use_fractional_mv =
      bestsme < INT_MAX && cpi->common.features.cur_frame_force_integer_mv == 0;
  if (use_fractional_mv) {
    int dis; /* TODO: use dis in distortion calculation later. */
    unsigned int sse;
    SUBPEL_MOTION_SEARCH_PARAMS ms_params;
    av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize, &ref_mv.as_mv,
                                      NULL);
    av1_set_ms_compound_refs(&ms_params.var_params.ms_buffers, second_pred,
                             mask, mask_stride, ref_idx);
    ms_params.forced_stop = EIGHTH_PEL;
    MV start_mv = get_mv_from_fullmv(&best_mv.as_fullmv);
    bestsme = cpi->mv_search_params.find_fractional_mv_step(
        xd, cm, &ms_params, start_mv, &best_mv.as_mv, &dis, &sse, NULL);
  }

  // Restore the pointer to the first unscaled prediction buffer.
  if (ref_idx) pd->pre[0] = orig_yv12;

  if (bestsme < INT_MAX) *this_mv = best_mv.as_mv;

  *rate_mv = 0;

  *rate_mv += av1_mv_bit_cost(this_mv, &ref_mv.as_mv, mv_costs->nmv_joint_cost,
                              mv_costs->mv_cost_stack, MV_COST_WEIGHT);
  return bestsme;
}
753
build_second_inter_pred(const AV1_COMP * cpi,MACROBLOCK * x,BLOCK_SIZE bsize,const MV * other_mv,int ref_idx,uint8_t * second_pred)754 static AOM_INLINE void build_second_inter_pred(const AV1_COMP *cpi,
755 MACROBLOCK *x, BLOCK_SIZE bsize,
756 const MV *other_mv, int ref_idx,
757 uint8_t *second_pred) {
758 const AV1_COMMON *const cm = &cpi->common;
759 const int pw = block_size_wide[bsize];
760 const int ph = block_size_high[bsize];
761 MACROBLOCKD *xd = &x->e_mbd;
762 MB_MODE_INFO *mbmi = xd->mi[0];
763 struct macroblockd_plane *const pd = &xd->plane[0];
764 const int mi_row = xd->mi_row;
765 const int mi_col = xd->mi_col;
766 const int p_col = ((mi_col * MI_SIZE) >> pd->subsampling_x);
767 const int p_row = ((mi_row * MI_SIZE) >> pd->subsampling_y);
768
769 // This function should only ever be called for compound modes
770 assert(has_second_ref(mbmi));
771
772 const int plane = 0;
773 struct buf_2d ref_yv12 = xd->plane[plane].pre[!ref_idx];
774
775 struct scale_factors sf;
776 av1_setup_scale_factors_for_frame(&sf, ref_yv12.width, ref_yv12.height,
777 cm->width, cm->height);
778
779 InterPredParams inter_pred_params;
780
781 av1_init_inter_params(&inter_pred_params, pw, ph, p_row, p_col,
782 pd->subsampling_x, pd->subsampling_y, xd->bd,
783 is_cur_buf_hbd(xd), 0, &sf, &ref_yv12,
784 mbmi->interp_filters);
785 inter_pred_params.conv_params = get_conv_params(0, plane, xd->bd);
786
787 // Get the prediction block from the 'other' reference frame.
788 av1_enc_build_one_inter_predictor(second_pred, pw, other_mv,
789 &inter_pred_params);
790 }
791
792 // Wrapper for av1_compound_single_motion_search, for the common case
793 // where the second prediction is also an inter mode.
av1_compound_single_motion_search_interinter(const AV1_COMP * cpi,MACROBLOCK * x,BLOCK_SIZE bsize,int_mv * cur_mv,const uint8_t * mask,int mask_stride,int * rate_mv,int ref_idx)794 int av1_compound_single_motion_search_interinter(
795 const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int_mv *cur_mv,
796 const uint8_t *mask, int mask_stride, int *rate_mv, int ref_idx) {
797 MACROBLOCKD *xd = &x->e_mbd;
798 // This function should only ever be called for compound modes
799 assert(has_second_ref(xd->mi[0]));
800
801 // Prediction buffer from second frame.
802 DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[MAX_SB_SQUARE]);
803 uint8_t *second_pred;
804 if (is_cur_buf_hbd(xd))
805 second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
806 else
807 second_pred = (uint8_t *)second_pred_alloc_16;
808
809 MV *this_mv = &cur_mv[ref_idx].as_mv;
810 const MV *other_mv = &cur_mv[!ref_idx].as_mv;
811 build_second_inter_pred(cpi, x, bsize, other_mv, ref_idx, second_pred);
812 return av1_compound_single_motion_search(cpi, x, bsize, this_mv, second_pred,
813 mask, mask_stride, rate_mv, ref_idx);
814 }
815
do_masked_motion_search_indexed(const AV1_COMP * const cpi,MACROBLOCK * x,const int_mv * const cur_mv,const INTERINTER_COMPOUND_DATA * const comp_data,BLOCK_SIZE bsize,int_mv * tmp_mv,int * rate_mv,int which)816 static AOM_INLINE void do_masked_motion_search_indexed(
817 const AV1_COMP *const cpi, MACROBLOCK *x, const int_mv *const cur_mv,
818 const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE bsize,
819 int_mv *tmp_mv, int *rate_mv, int which) {
820 // NOTE: which values: 0 - 0 only, 1 - 1 only, 2 - both
821 MACROBLOCKD *xd = &x->e_mbd;
822 MB_MODE_INFO *mbmi = xd->mi[0];
823 BLOCK_SIZE sb_type = mbmi->bsize;
824 const uint8_t *mask;
825 const int mask_stride = block_size_wide[bsize];
826
827 mask = av1_get_compound_type_mask(comp_data, sb_type);
828
829 tmp_mv[0].as_int = cur_mv[0].as_int;
830 tmp_mv[1].as_int = cur_mv[1].as_int;
831 if (which == 0 || which == 1) {
832 av1_compound_single_motion_search_interinter(cpi, x, bsize, tmp_mv, mask,
833 mask_stride, rate_mv, which);
834 } else if (which == 2) {
835 av1_joint_motion_search(cpi, x, bsize, tmp_mv, mask, mask_stride, rate_mv,
836 !cpi->sf.mv_sf.disable_second_mv);
837 }
838 }
839
int av1_interinter_compound_motion_search(const AV1_COMP *const cpi,
                                          MACROBLOCK *x,
                                          const int_mv *const cur_mv,
                                          const BLOCK_SIZE bsize,
                                          const PREDICTION_MODE this_mode) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = xd->mi[0];
  int_mv refined_mv[2];
  int refined_rate_mv = 0;
  // COMPOUND_AVERAGE is signalled by a NULL seg_mask; the other compound
  // types use the per-block segmentation mask.
  // TODO(jingning): The average compound mode has proper SAD and variance
  // functions implemented, and is triggerd by setting the mask pointer as
  // Null. Need to further implement those for frame distance weighted mode.
  mbmi->interinter_comp.seg_mask =
      mbmi->interinter_comp.type == COMPOUND_AVERAGE ? NULL : xd->seg_mask;
  const INTERINTER_COMPOUND_DATA *const compound_data = &mbmi->interinter_comp;

  if (this_mode == NEW_NEWMV) {
    // Both MVs are new: refine the pair jointly (which == 2).
    do_masked_motion_search_indexed(cpi, x, cur_mv, compound_data, bsize,
                                    refined_mv, &refined_rate_mv, 2);
    mbmi->mv[0].as_int = refined_mv[0].as_int;
    mbmi->mv[1].as_int = refined_mv[1].as_int;
  } else if (this_mode >= NEAREST_NEWMV && this_mode <= NEW_NEARMV) {
    // Exactly one MV is new; search only that reference:
    //   which = 1 for NEAREST_NEWMV / NEAR_NEWMV (ref 1 carries NEWMV)
    //   which = 0 for NEW_NEARESTMV / NEW_NEARMV (ref 0 carries NEWMV)
    const int which = (NEWMV == compound_ref1_mode(this_mode));
    do_masked_motion_search_indexed(cpi, x, cur_mv, compound_data, bsize,
                                    refined_mv, &refined_rate_mv, which);
    mbmi->mv[which].as_int = refined_mv[which].as_int;
  }
  return refined_rate_mv;
}
871
av1_simple_motion_search(AV1_COMP * const cpi,MACROBLOCK * x,int mi_row,int mi_col,BLOCK_SIZE bsize,int ref,FULLPEL_MV start_mv,int num_planes,int use_subpixel)872 int_mv av1_simple_motion_search(AV1_COMP *const cpi, MACROBLOCK *x, int mi_row,
873 int mi_col, BLOCK_SIZE bsize, int ref,
874 FULLPEL_MV start_mv, int num_planes,
875 int use_subpixel) {
876 assert(num_planes == 1 &&
877 "Currently simple_motion_search only supports luma plane");
878 assert(!frame_is_intra_only(&cpi->common) &&
879 "Simple motion search only enabled for non-key frames");
880 AV1_COMMON *const cm = &cpi->common;
881 MACROBLOCKD *xd = &x->e_mbd;
882
883 set_offsets_for_motion_search(cpi, x, mi_row, mi_col, bsize);
884
885 MB_MODE_INFO *mbmi = xd->mi[0];
886 mbmi->bsize = bsize;
887 mbmi->ref_frame[0] = ref;
888 mbmi->ref_frame[1] = NONE_FRAME;
889 mbmi->motion_mode = SIMPLE_TRANSLATION;
890 mbmi->interp_filters = av1_broadcast_interp_filter(EIGHTTAP_REGULAR);
891
892 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_yv12_buf(cm, ref);
893 const YV12_BUFFER_CONFIG *scaled_ref_frame =
894 av1_get_scaled_ref_frame(cpi, ref);
895 struct buf_2d backup_yv12;
896 // ref_mv is used to calculate the cost of the motion vector
897 const MV ref_mv = kZeroMv;
898 const int step_param =
899 AOMMIN(cpi->mv_search_params.mv_step_param +
900 cpi->sf.part_sf.simple_motion_search_reduce_search_steps,
901 MAX_MVSEARCH_STEPS - 2);
902 const search_site_config *src_search_sites =
903 cpi->mv_search_params.search_site_cfg[SS_CFG_SRC];
904 int cost_list[5];
905 const int ref_idx = 0;
906 int var;
907 int_mv best_mv;
908
909 av1_setup_pre_planes(xd, ref_idx, yv12, mi_row, mi_col,
910 get_ref_scale_factors(cm, ref), num_planes);
911 set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
912 if (scaled_ref_frame) {
913 backup_yv12 = xd->plane[AOM_PLANE_Y].pre[ref_idx];
914 av1_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL,
915 num_planes);
916 }
917
918 // Allow more mesh searches for screen content type on the ARF.
919 const int fine_search_interval = use_fine_search_interval(cpi);
920 FULLPEL_MOTION_SEARCH_PARAMS full_ms_params;
921 av1_make_default_fullpel_ms_params(&full_ms_params, cpi, x, bsize, &ref_mv,
922 src_search_sites, fine_search_interval);
923
924 var = av1_full_pixel_search(start_mv, &full_ms_params, step_param,
925 cond_cost_list(cpi, cost_list),
926 &best_mv.as_fullmv, NULL);
927
928 const int use_subpel_search =
929 var < INT_MAX && !cpi->common.features.cur_frame_force_integer_mv &&
930 use_subpixel;
931 if (scaled_ref_frame) {
932 xd->plane[AOM_PLANE_Y].pre[ref_idx] = backup_yv12;
933 }
934 if (use_subpel_search) {
935 int not_used = 0;
936
937 SUBPEL_MOTION_SEARCH_PARAMS ms_params;
938 av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize, &ref_mv,
939 cost_list);
940 // TODO(yunqing): integrate this into av1_make_default_subpel_ms_params().
941 ms_params.forced_stop = cpi->sf.mv_sf.simple_motion_subpel_force_stop;
942
943 MV subpel_start_mv = get_mv_from_fullmv(&best_mv.as_fullmv);
944
945 cpi->mv_search_params.find_fractional_mv_step(
946 xd, cm, &ms_params, subpel_start_mv, &best_mv.as_mv, ¬_used,
947 &x->pred_sse[ref], NULL);
948 } else {
949 // Manually convert from units of pixel to 1/8-pixels if we are not doing
950 // subpel search
951 convert_fullmv_to_mv(&best_mv);
952 }
953
954 mbmi->mv[0] = best_mv;
955
956 // Get a copy of the prediction output
957 av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize,
958 AOM_PLANE_Y, AOM_PLANE_Y);
959
960 if (scaled_ref_frame) {
961 xd->plane[AOM_PLANE_Y].pre[ref_idx] = backup_yv12;
962 }
963
964 return best_mv;
965 }
966
av1_simple_motion_sse_var(AV1_COMP * cpi,MACROBLOCK * x,int mi_row,int mi_col,BLOCK_SIZE bsize,const FULLPEL_MV start_mv,int use_subpixel,unsigned int * sse,unsigned int * var)967 int_mv av1_simple_motion_sse_var(AV1_COMP *cpi, MACROBLOCK *x, int mi_row,
968 int mi_col, BLOCK_SIZE bsize,
969 const FULLPEL_MV start_mv, int use_subpixel,
970 unsigned int *sse, unsigned int *var) {
971 MACROBLOCKD *xd = &x->e_mbd;
972 const MV_REFERENCE_FRAME ref =
973 cpi->rc.is_src_frame_alt_ref ? ALTREF_FRAME : LAST_FRAME;
974
975 int_mv best_mv = av1_simple_motion_search(cpi, x, mi_row, mi_col, bsize, ref,
976 start_mv, 1, use_subpixel);
977
978 const uint8_t *src = x->plane[0].src.buf;
979 const int src_stride = x->plane[0].src.stride;
980 const uint8_t *dst = xd->plane[0].dst.buf;
981 const int dst_stride = xd->plane[0].dst.stride;
982
983 *var = cpi->ppi->fn_ptr[bsize].vf(src, src_stride, dst, dst_stride, sse);
984
985 return best_mv;
986 }
987