1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include <assert.h>
12 #include <limits.h>
13 #include <math.h>
14 #include <stdio.h>
15
16 #include "./vpx_config.h"
17 #include "./vpx_dsp_rtcd.h"
18
19 #include "vpx_dsp/vpx_dsp_common.h"
20 #include "vpx_mem/vpx_mem.h"
21 #include "vpx_ports/mem.h"
22
23 #include "vp9/common/vp9_common.h"
24 #include "vp9/common/vp9_mvref_common.h"
25 #include "vp9/common/vp9_reconinter.h"
26
27 #include "vp9/encoder/vp9_encoder.h"
28 #include "vp9/encoder/vp9_mcomp.h"
29
30 // #define NEW_DIAMOND_SEARCH
31
32 void vp9_set_mv_search_range(MvLimits *mv_limits, const MV *mv) {
33 int col_min = (mv->col >> 3) - MAX_FULL_PEL_VAL + (mv->col & 7 ? 1 : 0);
34 int row_min = (mv->row >> 3) - MAX_FULL_PEL_VAL + (mv->row & 7 ? 1 : 0);
35 int col_max = (mv->col >> 3) + MAX_FULL_PEL_VAL;
36 int row_max = (mv->row >> 3) + MAX_FULL_PEL_VAL;
37
38 col_min = VPXMAX(col_min, (MV_LOW >> 3) + 1);
39 row_min = VPXMAX(row_min, (MV_LOW >> 3) + 1);
40 col_max = VPXMIN(col_max, (MV_UPP >> 3) - 1);
41 row_max = VPXMIN(row_max, (MV_UPP >> 3) - 1);
42
43 // Get intersection of UMV window and valid MV window to reduce # of checks
44 // in diamond search.
45 if (mv_limits->col_min < col_min) mv_limits->col_min = col_min;
46 if (mv_limits->col_max > col_max) mv_limits->col_max = col_max;
47 if (mv_limits->row_min < row_min) mv_limits->row_min = row_min;
48 if (mv_limits->row_max > row_max) mv_limits->row_max = row_max;
49 }
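// Illustrative example for vp9_set_mv_search_range(): with mv->col == 21
// (2 5/8 pel) the initial col limits become
// [(21 >> 3) - MAX_FULL_PEL_VAL + 1, (21 >> 3) + MAX_FULL_PEL_VAL], before
// clamping to MV_LOW/MV_UPP and intersecting with the existing limits. The
// (& 7) term compensates for the truncation of >> 3, keeping the lower bound
// within MAX_FULL_PEL_VAL full pels of the sub-pel reference position.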
50
51 void vp9_set_subpel_mv_search_range(MvLimits *subpel_mv_limits,
52 const MvLimits *umv_window_limits,
53 const MV *ref_mv) {
54 subpel_mv_limits->col_min = VPXMAX(umv_window_limits->col_min * 8,
55 ref_mv->col - MAX_FULL_PEL_VAL * 8);
56 subpel_mv_limits->col_max = VPXMIN(umv_window_limits->col_max * 8,
57 ref_mv->col + MAX_FULL_PEL_VAL * 8);
58 subpel_mv_limits->row_min = VPXMAX(umv_window_limits->row_min * 8,
59 ref_mv->row - MAX_FULL_PEL_VAL * 8);
60 subpel_mv_limits->row_max = VPXMIN(umv_window_limits->row_max * 8,
61 ref_mv->row + MAX_FULL_PEL_VAL * 8);
62
63 subpel_mv_limits->col_min = VPXMAX(MV_LOW + 1, subpel_mv_limits->col_min);
64 subpel_mv_limits->col_max = VPXMIN(MV_UPP - 1, subpel_mv_limits->col_max);
65 subpel_mv_limits->row_min = VPXMAX(MV_LOW + 1, subpel_mv_limits->row_min);
66 subpel_mv_limits->row_max = VPXMIN(MV_UPP - 1, subpel_mv_limits->row_max);
67 }
68
69 int vp9_init_search_range(int size) {
70 int sr = 0;
71 // Enforce a minimum search size regardless of the passed-in value.
72 size = VPXMAX(16, size);
73
74 while ((size << sr) < MAX_FULL_PEL_VAL) sr++;
75
76 sr = VPXMIN(sr, MAX_MVSEARCH_STEPS - 2);
77 return sr;
78 }
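// Illustrative example for vp9_init_search_range(), assuming MAX_FULL_PEL_VAL
// is (1 << (MAX_MVSEARCH_STEPS - 1)) - 1 == 1023: a passed-in size of 64
// yields sr == 4, since 64 << 4 == 1024 is the first shifted value not below
// 1023, and 4 is under the MAX_MVSEARCH_STEPS - 2 cap.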
79
80 static INLINE int mv_cost(const MV *mv, const int *joint_cost,
81 int *const comp_cost[2]) {
82 assert(mv->row >= -MV_MAX && mv->row < MV_MAX);
83 assert(mv->col >= -MV_MAX && mv->col < MV_MAX);
84 return joint_cost[vp9_get_mv_joint(mv)] + comp_cost[0][mv->row] +
85 comp_cost[1][mv->col];
86 }
87
88 int vp9_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
89 int *mvcost[2], int weight) {
90 const MV diff = { mv->row - ref->row, mv->col - ref->col };
91 return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * weight, 7);
92 }
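// vp9_mv_bit_cost() scales the rate from mv_cost() by weight / 128 with
// rounding: ROUND_POWER_OF_TWO(x, 7) adds 64 and shifts right by 7.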
93
94 #define PIXEL_TRANSFORM_ERROR_SCALE 4
95 static int mv_err_cost(const MV *mv, const MV *ref, const int *mvjcost,
96 int *mvcost[2], int error_per_bit) {
97 if (mvcost) {
98 const MV diff = { mv->row - ref->row, mv->col - ref->col };
99 return (int)ROUND64_POWER_OF_TWO(
100 (int64_t)mv_cost(&diff, mvjcost, mvcost) * error_per_bit,
101 RDDIV_BITS + VP9_PROB_COST_SHIFT - RD_EPB_SHIFT +
102 PIXEL_TRANSFORM_ERROR_SCALE);
103 }
104 return 0;
105 }
106
107 static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,
108 int sad_per_bit) {
109 const MV diff = { mv->row - ref->row, mv->col - ref->col };
110 return ROUND_POWER_OF_TWO(
111 (unsigned)mv_cost(&diff, x->nmvjointsadcost, x->nmvsadcost) * sad_per_bit,
112 VP9_PROB_COST_SHIFT);
113 }
114
115 void vp9_init_dsmotion_compensation(search_site_config *cfg, int stride) {
116 int len;
117 int ss_count = 0;
118
119 for (len = MAX_FIRST_STEP; len > 0; len /= 2) {
120 // Generate offsets for 4 search sites per step.
121 const MV ss_mvs[] = { { -len, 0 }, { len, 0 }, { 0, -len }, { 0, len } };
122 int i;
123 for (i = 0; i < 4; ++i, ++ss_count) {
124 cfg->ss_mv[ss_count] = ss_mvs[i];
125 cfg->ss_os[ss_count] = ss_mvs[i].row * stride + ss_mvs[i].col;
126 }
127 }
128
129 cfg->searches_per_step = 4;
130 cfg->total_steps = ss_count / cfg->searches_per_step;
131 }
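// In vp9_init_dsmotion_compensation() the four candidates at each step form a
// diamond at distance 'len' (up, down, left, right). ss_mv stores the
// motion-vector offset and ss_os the matching buffer offset
// (row * stride + col), so the diamond search can reach a candidate's
// predictor with a single add.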
132
133 void vp9_init3smotion_compensation(search_site_config *cfg, int stride) {
134 int len;
135 int ss_count = 0;
136
137 for (len = MAX_FIRST_STEP; len > 0; len /= 2) {
138 // Generate offsets for 8 search sites per step.
139 const MV ss_mvs[8] = { { -len, 0 }, { len, 0 }, { 0, -len },
140 { 0, len }, { -len, -len }, { -len, len },
141 { len, -len }, { len, len } };
142 int i;
143 for (i = 0; i < 8; ++i, ++ss_count) {
144 cfg->ss_mv[ss_count] = ss_mvs[i];
145 cfg->ss_os[ss_count] = ss_mvs[i].row * stride + ss_mvs[i].col;
146 }
147 }
148
149 cfg->searches_per_step = 8;
150 cfg->total_steps = ss_count / cfg->searches_per_step;
151 }
152
153 // convert motion vector component to offset for sv[a]f calc
154 static INLINE int sp(int x) { return x & 7; }
155
156 static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) {
157 return &buf[(r >> 3) * stride + (c >> 3)];
158 }
159
160 #if CONFIG_VP9_HIGHBITDEPTH
161 /* checks if (r, c) has better score than previous best */
162 #define CHECK_BETTER(v, r, c) \
163 if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
164 int64_t tmpmse; \
165 const MV mv = { r, c }; \
166 const MV ref_mv = { rr, rc }; \
167 if (second_pred == NULL) { \
168 thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
169 src_stride, &sse); \
170 } else { \
171 thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
172 src_stride, &sse, second_pred); \
173 } \
174 tmpmse = thismse; \
175 tmpmse += mv_err_cost(&mv, &ref_mv, mvjcost, mvcost, error_per_bit); \
176 if (tmpmse >= INT_MAX) { \
177 v = INT_MAX; \
178 } else if ((v = (uint32_t)tmpmse) < besterr) { \
179 besterr = v; \
180 br = r; \
181 bc = c; \
182 *distortion = thismse; \
183 *sse1 = sse; \
184 } \
185 } else { \
186 v = INT_MAX; \
187 }
188 #else
189 /* checks if (r, c) has better score than previous best */
190 #define CHECK_BETTER(v, r, c) \
191 if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
192 const MV mv = { r, c }; \
193 const MV ref_mv = { rr, rc }; \
194 if (second_pred == NULL) \
195 thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
196 src_stride, &sse); \
197 else \
198 thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
199 src_stride, &sse, second_pred); \
200 if ((v = mv_err_cost(&mv, &ref_mv, mvjcost, mvcost, error_per_bit) + \
201 thismse) < besterr) { \
202 besterr = v; \
203 br = r; \
204 bc = c; \
205 *distortion = thismse; \
206 *sse1 = sse; \
207 } \
208 } else { \
209 v = INT_MAX; \
210 }
211
212 #endif
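// Both CHECK_BETTER variants above rely on names from the enclosing scope:
// minc/maxc/minr/maxr (sub-pel limits), besterr/br/bc (current best),
// thismse/sse (scratch) and the output pointers distortion/sse1. When
// second_pred is non-NULL the compound variance function svaf is used instead
// of svf, and out-of-range candidates are assigned a cost of INT_MAX.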
213 #define FIRST_LEVEL_CHECKS \
214 { \
215 unsigned int left, right, up, down, diag; \
216 CHECK_BETTER(left, tr, tc - hstep); \
217 CHECK_BETTER(right, tr, tc + hstep); \
218 CHECK_BETTER(up, tr - hstep, tc); \
219 CHECK_BETTER(down, tr + hstep, tc); \
220 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2); \
221 switch (whichdir) { \
222 case 0: CHECK_BETTER(diag, tr - hstep, tc - hstep); break; \
223 case 1: CHECK_BETTER(diag, tr - hstep, tc + hstep); break; \
224 case 2: CHECK_BETTER(diag, tr + hstep, tc - hstep); break; \
225 case 3: CHECK_BETTER(diag, tr + hstep, tc + hstep); break; \
226 } \
227 }
228
229 #define SECOND_LEVEL_CHECKS \
230 { \
231 int kr, kc; \
232 unsigned int second; \
233 if (tr != br && tc != bc) { \
234 kr = br - tr; \
235 kc = bc - tc; \
236 CHECK_BETTER(second, tr + kr, tc + 2 * kc); \
237 CHECK_BETTER(second, tr + 2 * kr, tc + kc); \
238 } else if (tr == br && tc != bc) { \
239 kc = bc - tc; \
240 CHECK_BETTER(second, tr + hstep, tc + 2 * kc); \
241 CHECK_BETTER(second, tr - hstep, tc + 2 * kc); \
242 switch (whichdir) { \
243 case 0: \
244 case 1: CHECK_BETTER(second, tr + hstep, tc + kc); break; \
245 case 2: \
246 case 3: CHECK_BETTER(second, tr - hstep, tc + kc); break; \
247 } \
248 } else if (tr != br && tc == bc) { \
249 kr = br - tr; \
250 CHECK_BETTER(second, tr + 2 * kr, tc + hstep); \
251 CHECK_BETTER(second, tr + 2 * kr, tc - hstep); \
252 switch (whichdir) { \
253 case 0: \
254 case 2: CHECK_BETTER(second, tr + kr, tc + hstep); break; \
255 case 1: \
256 case 3: CHECK_BETTER(second, tr + kr, tc - hstep); break; \
257 } \
258 } \
259 }
260
261 #define SETUP_SUBPEL_SEARCH \
262 const uint8_t *const z = x->plane[0].src.buf; \
263 const int src_stride = x->plane[0].src.stride; \
264 const MACROBLOCKD *xd = &x->e_mbd; \
265 unsigned int besterr = UINT_MAX; \
266 unsigned int sse; \
267 unsigned int whichdir; \
268 int thismse; \
269 const unsigned int halfiters = iters_per_step; \
270 const unsigned int quarteriters = iters_per_step; \
271 const unsigned int eighthiters = iters_per_step; \
272 const int y_stride = xd->plane[0].pre[0].stride; \
273 const int offset = bestmv->row * y_stride + bestmv->col; \
274 const uint8_t *const y = xd->plane[0].pre[0].buf; \
275 \
276 int rr = ref_mv->row; \
277 int rc = ref_mv->col; \
278 int br = bestmv->row * 8; \
279 int bc = bestmv->col * 8; \
280 int hstep = 4; \
281 int minc, maxc, minr, maxr; \
282 int tr = br; \
283 int tc = bc; \
284 MvLimits subpel_mv_limits; \
285 \
286 vp9_set_subpel_mv_search_range(&subpel_mv_limits, &x->mv_limits, ref_mv); \
287 minc = subpel_mv_limits.col_min; \
288 maxc = subpel_mv_limits.col_max; \
289 minr = subpel_mv_limits.row_min; \
290 maxr = subpel_mv_limits.row_max; \
291 \
292 bestmv->row *= 8; \
293 bestmv->col *= 8;
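// The SETUP_SUBPEL_SEARCH macro above declares the state shared by the
// sub-pixel search variants: source/predictor pointers, besterr, the
// per-precision iteration counts (all equal to iters_per_step), the best and
// current positions br/bc and tr/tc in 1/8-pel units, the half-pel step
// hstep, and the clamped sub-pel limits. It also converts *bestmv from
// full-pel to 1/8-pel units.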
294
295 static unsigned int setup_center_error(
296 const MACROBLOCKD *xd, const MV *bestmv, const MV *ref_mv,
297 int error_per_bit, const vp9_variance_fn_ptr_t *vfp,
298 const uint8_t *const src, const int src_stride, const uint8_t *const y,
299 int y_stride, const uint8_t *second_pred, int w, int h, int offset,
300 int *mvjcost, int *mvcost[2], uint32_t *sse1, uint32_t *distortion) {
301 #if CONFIG_VP9_HIGHBITDEPTH
302 uint64_t besterr;
303 if (second_pred != NULL) {
304 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
305 DECLARE_ALIGNED(16, uint16_t, comp_pred16[64 * 64]);
306 vpx_highbd_comp_avg_pred(comp_pred16, CONVERT_TO_SHORTPTR(second_pred), w,
307 h, CONVERT_TO_SHORTPTR(y + offset), y_stride);
308 besterr =
309 vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride, sse1);
310 } else {
311 DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
312 vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
313 besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);
314 }
315 } else {
316 besterr = vfp->vf(y + offset, y_stride, src, src_stride, sse1);
317 }
318 *distortion = (uint32_t)besterr;
319 besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
320 if (besterr >= UINT_MAX) return UINT_MAX;
321 return (uint32_t)besterr;
322 #else
323 uint32_t besterr;
324 (void)xd;
325 if (second_pred != NULL) {
326 DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
327 vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
328 besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);
329 } else {
330 besterr = vfp->vf(y + offset, y_stride, src, src_stride, sse1);
331 }
332 *distortion = besterr;
333 besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
334 return besterr;
335 #endif // CONFIG_VP9_HIGHBITDEPTH
336 }
337
338 static INLINE int64_t divide_and_round(const int64_t n, const int64_t d) {
339 return ((n < 0) ^ (d < 0)) ? ((n - d / 2) / d) : ((n + d / 2) / d);
340 }
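// Rounds the quotient to the nearest integer, halves away from zero, e.g.
// divide_and_round(7, 2) == 4 and divide_and_round(-7, 2) == -4.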
341
342 static INLINE int is_cost_list_wellbehaved(int *cost_list) {
343 return cost_list[0] < cost_list[1] && cost_list[0] < cost_list[2] &&
344 cost_list[0] < cost_list[3] && cost_list[0] < cost_list[4];
345 }
346
347 // Returns a surface minimum estimate at the given precision (1/2^n units).
348 // Assume a model for the cost surface: S = A(x - x0)^2 + B(y - y0)^2 + C
349 // For a given set of costs S0, S1, S2, S3, S4 at points
350 // (y, x) = (0, 0), (0, -1), (1, 0), (0, 1) and (-1, 0) respectively,
351 // the solution for the location of the minima (x0, y0) is given by:
352 // x0 = 1/2 (S1 - S3)/(S1 + S3 - 2*S0),
353 // y0 = 1/2 (S4 - S2)/(S4 + S2 - 2*S0).
354 // The code below is an integerized version of that.
355 static void get_cost_surf_min(int *cost_list, int *ir, int *ic, int bits) {
356 const int64_t x0 = (int64_t)cost_list[1] - cost_list[3];
357 const int64_t y0 = cost_list[1] - 2 * (int64_t)cost_list[0] + cost_list[3];
358 const int64_t x1 = (int64_t)cost_list[4] - cost_list[2];
359 const int64_t y1 = cost_list[4] - 2 * (int64_t)cost_list[0] + cost_list[2];
360 const int b = 1 << (bits - 1);
361 *ic = (int)divide_and_round(x0 * b, y0);
362 *ir = (int)divide_and_round(x1 * b, y1);
363 }
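// Illustrative example for get_cost_surf_min(): with
// cost_list = { 100, 120, 110, 90, 130 } and bits == 3 (1/8-pel output), the
// column offset is divide_and_round((120 - 90) * 4, 120 + 90 - 200) == 12,
// i.e. +1.5 pel toward the cheaper right-hand neighbor, and the row offset is
// divide_and_round((130 - 110) * 4, 130 + 110 - 200) == 2, i.e. +0.25 pel.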
364
365 uint32_t vp9_skip_sub_pixel_tree(
366 const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
367 int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
368 int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
369 uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
370 int h, int use_accurate_subpel_search) {
371 SETUP_SUBPEL_SEARCH;
372 besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
373 src_stride, y, y_stride, second_pred, w, h,
374 offset, mvjcost, mvcost, sse1, distortion);
375 (void)halfiters;
376 (void)quarteriters;
377 (void)eighthiters;
378 (void)whichdir;
379 (void)allow_hp;
380 (void)forced_stop;
381 (void)hstep;
382 (void)rr;
383 (void)rc;
384 (void)minr;
385 (void)minc;
386 (void)maxr;
387 (void)maxc;
388 (void)tr;
389 (void)tc;
390 (void)sse;
391 (void)thismse;
392 (void)cost_list;
393 (void)use_accurate_subpel_search;
394
395 return besterr;
396 }
397
398 uint32_t vp9_find_best_sub_pixel_tree_pruned_evenmore(
399 const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
400 int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
401 int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
402 uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
403 int h, int use_accurate_subpel_search) {
404 SETUP_SUBPEL_SEARCH;
405 besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
406 src_stride, y, y_stride, second_pred, w, h,
407 offset, mvjcost, mvcost, sse1, distortion);
408 (void)halfiters;
409 (void)quarteriters;
410 (void)eighthiters;
411 (void)whichdir;
412 (void)allow_hp;
413 (void)forced_stop;
414 (void)hstep;
415 (void)use_accurate_subpel_search;
416
417 if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
418 cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
419 cost_list[4] != INT_MAX && is_cost_list_wellbehaved(cost_list)) {
420 int ir, ic;
421 unsigned int minpt = INT_MAX;
422 get_cost_surf_min(cost_list, &ir, &ic, 2);
423 if (ir != 0 || ic != 0) {
424 CHECK_BETTER(minpt, tr + 2 * ir, tc + 2 * ic);
425 }
426 } else {
427 FIRST_LEVEL_CHECKS;
428 if (halfiters > 1) {
429 SECOND_LEVEL_CHECKS;
430 }
431
432 tr = br;
433 tc = bc;
434
435 // Each subsequent iteration checks at least one point in common with
436 // the last iteration; it could be two if the diagonal was selected. 1/4 pel.
437 // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only
438 if (forced_stop != 2) {
439 hstep >>= 1;
440 FIRST_LEVEL_CHECKS;
441 if (quarteriters > 1) {
442 SECOND_LEVEL_CHECKS;
443 }
444 }
445 }
446
447 tr = br;
448 tc = bc;
449
450 if (allow_hp && use_mv_hp(ref_mv) && forced_stop == 0) {
451 hstep >>= 1;
452 FIRST_LEVEL_CHECKS;
453 if (eighthiters > 1) {
454 SECOND_LEVEL_CHECKS;
455 }
456 }
457
458 bestmv->row = br;
459 bestmv->col = bc;
460
461 return besterr;
462 }
463
464 uint32_t vp9_find_best_sub_pixel_tree_pruned_more(
465 const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
466 int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
467 int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
468 uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
469 int h, int use_accurate_subpel_search) {
470 SETUP_SUBPEL_SEARCH;
471 (void)use_accurate_subpel_search;
472
473 besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
474 src_stride, y, y_stride, second_pred, w, h,
475 offset, mvjcost, mvcost, sse1, distortion);
476 if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
477 cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
478 cost_list[4] != INT_MAX && is_cost_list_wellbehaved(cost_list)) {
479 unsigned int minpt;
480 int ir, ic;
481 get_cost_surf_min(cost_list, &ir, &ic, 1);
482 if (ir != 0 || ic != 0) {
483 CHECK_BETTER(minpt, tr + ir * hstep, tc + ic * hstep);
484 }
485 } else {
486 FIRST_LEVEL_CHECKS;
487 if (halfiters > 1) {
488 SECOND_LEVEL_CHECKS;
489 }
490 }
491
492 // Each subsequent iteration checks at least one point in common with
493 // the last iteration; it could be two if the diagonal was selected. 1/4 pel.
494
495 // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only
496 if (forced_stop != 2) {
497 tr = br;
498 tc = bc;
499 hstep >>= 1;
500 FIRST_LEVEL_CHECKS;
501 if (quarteriters > 1) {
502 SECOND_LEVEL_CHECKS;
503 }
504 }
505
506 if (allow_hp && use_mv_hp(ref_mv) && forced_stop == 0) {
507 tr = br;
508 tc = bc;
509 hstep >>= 1;
510 FIRST_LEVEL_CHECKS;
511 if (eighthiters > 1) {
512 SECOND_LEVEL_CHECKS;
513 }
514 }
515 // These lines ensure static analysis doesn't warn that
516 // tr and tc aren't used after the above point.
517 (void)tr;
518 (void)tc;
519
520 bestmv->row = br;
521 bestmv->col = bc;
522
523 return besterr;
524 }
525
526 uint32_t vp9_find_best_sub_pixel_tree_pruned(
527 const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
528 int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
529 int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
530 uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
531 int h, int use_accurate_subpel_search) {
532 SETUP_SUBPEL_SEARCH;
533 (void)use_accurate_subpel_search;
534
535 besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
536 src_stride, y, y_stride, second_pred, w, h,
537 offset, mvjcost, mvcost, sse1, distortion);
538 if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
539 cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
540 cost_list[4] != INT_MAX) {
541 unsigned int left, right, up, down, diag;
542 whichdir = (cost_list[1] < cost_list[3] ? 0 : 1) +
543 (cost_list[2] < cost_list[4] ? 0 : 2);
544 switch (whichdir) {
545 case 0:
546 CHECK_BETTER(left, tr, tc - hstep);
547 CHECK_BETTER(down, tr + hstep, tc);
548 CHECK_BETTER(diag, tr + hstep, tc - hstep);
549 break;
550 case 1:
551 CHECK_BETTER(right, tr, tc + hstep);
552 CHECK_BETTER(down, tr + hstep, tc);
553 CHECK_BETTER(diag, tr + hstep, tc + hstep);
554 break;
555 case 2:
556 CHECK_BETTER(left, tr, tc - hstep);
557 CHECK_BETTER(up, tr - hstep, tc);
558 CHECK_BETTER(diag, tr - hstep, tc - hstep);
559 break;
560 case 3:
561 CHECK_BETTER(right, tr, tc + hstep);
562 CHECK_BETTER(up, tr - hstep, tc);
563 CHECK_BETTER(diag, tr - hstep, tc + hstep);
564 break;
565 }
566 } else {
567 FIRST_LEVEL_CHECKS;
568 if (halfiters > 1) {
569 SECOND_LEVEL_CHECKS;
570 }
571 }
572
573 tr = br;
574 tc = bc;
575
576 // Each subsequent iteration checks at least one point in common with
577 // the last iteration; it could be two if the diagonal was selected. 1/4 pel.
578
579 // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only
580 if (forced_stop != 2) {
581 hstep >>= 1;
582 FIRST_LEVEL_CHECKS;
583 if (quarteriters > 1) {
584 SECOND_LEVEL_CHECKS;
585 }
586 tr = br;
587 tc = bc;
588 }
589
590 if (allow_hp && use_mv_hp(ref_mv) && forced_stop == 0) {
591 hstep >>= 1;
592 FIRST_LEVEL_CHECKS;
593 if (eighthiters > 1) {
594 SECOND_LEVEL_CHECKS;
595 }
596 tr = br;
597 tc = bc;
598 }
599 // These lines ensure static analysis doesn't warn that
600 // tr and tc aren't used after the above point.
601 (void)tr;
602 (void)tc;
603
604 bestmv->row = br;
605 bestmv->col = bc;
606
607 return besterr;
608 }
609
610 /* clang-format off */
611 static const MV search_step_table[12] = {
612 // left, right, up, down
613 { 0, -4 }, { 0, 4 }, { -4, 0 }, { 4, 0 },
614 { 0, -2 }, { 0, 2 }, { -2, 0 }, { 2, 0 },
615 { 0, -1 }, { 0, 1 }, { -1, 0 }, { 1, 0 }
616 };
617 /* clang-format on */
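// Rows 0-3 of search_step_table are half-pel steps (+/-4 in 1/8-pel units),
// rows 4-7 quarter-pel steps (+/-2) and rows 8-11 eighth-pel steps (+/-1).
// vp9_find_best_sub_pixel_tree() below advances search_step by 4 and halves
// hstep after each refinement round.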
618
619 static int accurate_sub_pel_search(
620 const MACROBLOCKD *xd, const MV *this_mv, const struct scale_factors *sf,
621 const InterpKernel *kernel, const vp9_variance_fn_ptr_t *vfp,
622 const uint8_t *const src_address, const int src_stride,
623 const uint8_t *const pre_address, int y_stride, const uint8_t *second_pred,
624 int w, int h, uint32_t *sse) {
625 #if CONFIG_VP9_HIGHBITDEPTH
626 uint64_t besterr;
627 assert(sf->x_step_q4 == 16 && sf->y_step_q4 == 16);
628 assert(w != 0 && h != 0);
629 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
630 DECLARE_ALIGNED(16, uint16_t, pred16[64 * 64]);
631 vp9_highbd_build_inter_predictor(CONVERT_TO_SHORTPTR(pre_address), y_stride,
632 pred16, w, this_mv, sf, w, h, 0, kernel,
633 MV_PRECISION_Q3, 0, 0, xd->bd);
634 if (second_pred != NULL) {
635 DECLARE_ALIGNED(16, uint16_t, comp_pred16[64 * 64]);
636 vpx_highbd_comp_avg_pred(comp_pred16, CONVERT_TO_SHORTPTR(second_pred), w,
637 h, pred16, w);
638 besterr = vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src_address,
639 src_stride, sse);
640 } else {
641 besterr =
642 vfp->vf(CONVERT_TO_BYTEPTR(pred16), w, src_address, src_stride, sse);
643 }
644 } else {
645 DECLARE_ALIGNED(16, uint8_t, pred[64 * 64]);
646 vp9_build_inter_predictor(pre_address, y_stride, pred, w, this_mv, sf, w, h,
647 0, kernel, MV_PRECISION_Q3, 0, 0);
648 if (second_pred != NULL) {
649 DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
650 vpx_comp_avg_pred(comp_pred, second_pred, w, h, pred, w);
651 besterr = vfp->vf(comp_pred, w, src_address, src_stride, sse);
652 } else {
653 besterr = vfp->vf(pred, w, src_address, src_stride, sse);
654 }
655 }
656 if (besterr >= UINT_MAX) return UINT_MAX;
657 return (int)besterr;
658 #else
659 int besterr;
660 DECLARE_ALIGNED(16, uint8_t, pred[64 * 64]);
661 assert(sf->x_step_q4 == 16 && sf->y_step_q4 == 16);
662 assert(w != 0 && h != 0);
663 (void)xd;
664
665 vp9_build_inter_predictor(pre_address, y_stride, pred, w, this_mv, sf, w, h,
666 0, kernel, MV_PRECISION_Q3, 0, 0);
667 if (second_pred != NULL) {
668 DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
669 vpx_comp_avg_pred(comp_pred, second_pred, w, h, pred, w);
670 besterr = vfp->vf(comp_pred, w, src_address, src_stride, sse);
671 } else {
672 besterr = vfp->vf(pred, w, src_address, src_stride, sse);
673 }
674 return besterr;
675 #endif // CONFIG_VP9_HIGHBITDEPTH
676 }
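// Unlike the svf/svaf path, accurate_sub_pel_search() builds the actual
// sub-pel predictor with the selected interpolation kernel via
// vp9_build_inter_predictor() (or its high-bitdepth variant), optionally
// averages it with second_pred, and returns the variance against the source
// block. The on-stack prediction buffers limit it to blocks of at most 64x64.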
677
678 // TODO(yunqing): this part can be further refactored.
679 #if CONFIG_VP9_HIGHBITDEPTH
680 /* checks if (r, c) has better score than previous best */
681 #define CHECK_BETTER1(v, r, c) \
682 if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
683 int64_t tmpmse; \
684 const MV mv = { r, c }; \
685 const MV ref_mv = { rr, rc }; \
686 thismse = \
687 accurate_sub_pel_search(xd, &mv, x->me_sf, kernel, vfp, z, src_stride, \
688 y, y_stride, second_pred, w, h, &sse); \
689 tmpmse = thismse; \
690 tmpmse += mv_err_cost(&mv, &ref_mv, mvjcost, mvcost, error_per_bit); \
691 if (tmpmse >= INT_MAX) { \
692 v = INT_MAX; \
693 } else if ((v = (uint32_t)tmpmse) < besterr) { \
694 besterr = v; \
695 br = r; \
696 bc = c; \
697 *distortion = thismse; \
698 *sse1 = sse; \
699 } \
700 } else { \
701 v = INT_MAX; \
702 }
703 #else
704 /* checks if (r, c) has better score than previous best */
705 #define CHECK_BETTER1(v, r, c) \
706 if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
707 const MV mv = { r, c }; \
708 const MV ref_mv = { rr, rc }; \
709 thismse = \
710 accurate_sub_pel_search(xd, &mv, x->me_sf, kernel, vfp, z, src_stride, \
711 y, y_stride, second_pred, w, h, &sse); \
712 if ((v = mv_err_cost(&mv, &ref_mv, mvjcost, mvcost, error_per_bit) + \
713 thismse) < besterr) { \
714 besterr = v; \
715 br = r; \
716 bc = c; \
717 *distortion = thismse; \
718 *sse1 = sse; \
719 } \
720 } else { \
721 v = INT_MAX; \
722 }
723
724 #endif
725
726 uint32_t vp9_find_best_sub_pixel_tree(
727 const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
728 int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
729 int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
730 uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
731 int h, int use_accurate_subpel_search) {
732 const uint8_t *const z = x->plane[0].src.buf;
733 const uint8_t *const src_address = z;
734 const int src_stride = x->plane[0].src.stride;
735 const MACROBLOCKD *xd = &x->e_mbd;
736 unsigned int besterr = UINT_MAX;
737 unsigned int sse;
738 int thismse;
739 const int y_stride = xd->plane[0].pre[0].stride;
740 const int offset = bestmv->row * y_stride + bestmv->col;
741 const uint8_t *const y = xd->plane[0].pre[0].buf;
742
743 int rr = ref_mv->row;
744 int rc = ref_mv->col;
745 int br = bestmv->row * 8;
746 int bc = bestmv->col * 8;
747 int hstep = 4;
748 int iter, round = 3 - forced_stop;
749
750 int minc, maxc, minr, maxr;
751 int tr = br;
752 int tc = bc;
753 const MV *search_step = search_step_table;
754 int idx, best_idx = -1;
755 unsigned int cost_array[5];
756 int kr, kc;
757 MvLimits subpel_mv_limits;
758
759 // TODO(yunqing): need to add 4-tap filter optimization to speed up the
760 // encoder.
761 const InterpKernel *kernel =
762 (use_accurate_subpel_search > 0)
763 ? ((use_accurate_subpel_search == USE_4_TAPS)
764 ? vp9_filter_kernels[FOURTAP]
765 : ((use_accurate_subpel_search == USE_8_TAPS)
766 ? vp9_filter_kernels[EIGHTTAP]
767 : vp9_filter_kernels[EIGHTTAP_SHARP]))
768 : vp9_filter_kernels[BILINEAR];
769
770 vp9_set_subpel_mv_search_range(&subpel_mv_limits, &x->mv_limits, ref_mv);
771 minc = subpel_mv_limits.col_min;
772 maxc = subpel_mv_limits.col_max;
773 minr = subpel_mv_limits.row_min;
774 maxr = subpel_mv_limits.row_max;
775
776 if (!(allow_hp && use_mv_hp(ref_mv)))
777 if (round == 3) round = 2;
778
779 bestmv->row *= 8;
780 bestmv->col *= 8;
781
782 besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
783 src_stride, y, y_stride, second_pred, w, h,
784 offset, mvjcost, mvcost, sse1, distortion);
785
786 (void)cost_list; // to silence compiler warning
787
788 for (iter = 0; iter < round; ++iter) {
789 // Check vertical and horizontal sub-pixel positions.
790 for (idx = 0; idx < 4; ++idx) {
791 tr = br + search_step[idx].row;
792 tc = bc + search_step[idx].col;
793 if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) {
794 MV this_mv;
795 this_mv.row = tr;
796 this_mv.col = tc;
797
798 if (use_accurate_subpel_search) {
799 thismse = accurate_sub_pel_search(xd, &this_mv, x->me_sf, kernel, vfp,
800 src_address, src_stride, y,
801 y_stride, second_pred, w, h, &sse);
802 } else {
803 const uint8_t *const pre_address =
804 y + (tr >> 3) * y_stride + (tc >> 3);
805 if (second_pred == NULL)
806 thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr),
807 src_address, src_stride, &sse);
808 else
809 thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
810 src_address, src_stride, &sse, second_pred);
811 }
812
813 cost_array[idx] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost,
814 mvcost, error_per_bit);
815
816 if (cost_array[idx] < besterr) {
817 best_idx = idx;
818 besterr = cost_array[idx];
819 *distortion = thismse;
820 *sse1 = sse;
821 }
822 } else {
823 cost_array[idx] = UINT_MAX;
824 }
825 }
826
827 // Check diagonal sub-pixel position
828 kc = (cost_array[0] <= cost_array[1] ? -hstep : hstep);
829 kr = (cost_array[2] <= cost_array[3] ? -hstep : hstep);
830
831 tc = bc + kc;
832 tr = br + kr;
833 if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) {
834 MV this_mv = { tr, tc };
835 if (use_accurate_subpel_search) {
836 thismse = accurate_sub_pel_search(xd, &this_mv, x->me_sf, kernel, vfp,
837 src_address, src_stride, y, y_stride,
838 second_pred, w, h, &sse);
839 } else {
840 const uint8_t *const pre_address = y + (tr >> 3) * y_stride + (tc >> 3);
841 if (second_pred == NULL)
842 thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), src_address,
843 src_stride, &sse);
844 else
845 thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
846 src_address, src_stride, &sse, second_pred);
847 }
848
849 cost_array[4] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
850 error_per_bit);
851
852 if (cost_array[4] < besterr) {
853 best_idx = 4;
854 besterr = cost_array[4];
855 *distortion = thismse;
856 *sse1 = sse;
857 }
858 } else {
859 cost_array[idx] = UINT_MAX;
860 }
861
862 if (best_idx < 4 && best_idx >= 0) {
863 br += search_step[best_idx].row;
864 bc += search_step[best_idx].col;
865 } else if (best_idx == 4) {
866 br = tr;
867 bc = tc;
868 }
869
870 if (iters_per_step > 0 && best_idx != -1) {
871 unsigned int second;
872 const int br0 = br;
873 const int bc0 = bc;
874 assert(tr == br || tc == bc);
875
876 if (tr == br && tc != bc) {
877 kc = bc - tc;
878 if (iters_per_step == 1) {
879 if (use_accurate_subpel_search) {
880 CHECK_BETTER1(second, br0, bc0 + kc);
881 } else {
882 CHECK_BETTER(second, br0, bc0 + kc);
883 }
884 }
885 } else if (tr != br && tc == bc) {
886 kr = br - tr;
887 if (iters_per_step == 1) {
888 if (use_accurate_subpel_search) {
889 CHECK_BETTER1(second, br0 + kr, bc0);
890 } else {
891 CHECK_BETTER(second, br0 + kr, bc0);
892 }
893 }
894 }
895
896 if (iters_per_step > 1) {
897 if (use_accurate_subpel_search) {
898 CHECK_BETTER1(second, br0 + kr, bc0);
899 CHECK_BETTER1(second, br0, bc0 + kc);
900 if (br0 != br || bc0 != bc) {
901 CHECK_BETTER1(second, br0 + kr, bc0 + kc);
902 }
903 } else {
904 CHECK_BETTER(second, br0 + kr, bc0);
905 CHECK_BETTER(second, br0, bc0 + kc);
906 if (br0 != br || bc0 != bc) {
907 CHECK_BETTER(second, br0 + kr, bc0 + kc);
908 }
909 }
910 }
911 }
912
913 search_step += 4;
914 hstep >>= 1;
915 best_idx = -1;
916 }
917
918 // Each subsequent iteration checks at least one point in common with
919 // the last iteration; it could be two if the diagonal was selected. 1/4 pel.
920
921 // These lines ensure static analysis doesn't warn that
922 // tr and tc aren't used after the above point.
923 (void)tr;
924 (void)tc;
925
926 bestmv->row = br;
927 bestmv->col = bc;
928
929 return besterr;
930 }
931
932 #undef CHECK_BETTER
933 #undef CHECK_BETTER1
934
935 static INLINE int check_bounds(const MvLimits *mv_limits, int row, int col,
936 int range) {
937 return ((row - range) >= mv_limits->row_min) &
938 ((row + range) <= mv_limits->row_max) &
939 ((col - range) >= mv_limits->col_min) &
940 ((col + range) <= mv_limits->col_max);
941 }
942
943 static INLINE int is_mv_in(const MvLimits *mv_limits, const MV *mv) {
944 return (mv->col >= mv_limits->col_min) && (mv->col <= mv_limits->col_max) &&
945 (mv->row >= mv_limits->row_min) && (mv->row <= mv_limits->row_max);
946 }
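// check_bounds() tests, branchlessly via bitwise &, whether the whole
// +/-range square around (row, col) lies inside the MV limits, letting the
// pattern searches below skip the per-candidate is_mv_in() test on the
// common fast path.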
947
948 #define CHECK_BETTER \
949 { \
950 if (thissad < bestsad) { \
951 if (use_mvcost) \
952 thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit); \
953 if (thissad < bestsad) { \
954 bestsad = thissad; \
955 best_site = i; \
956 } \
957 } \
958 }
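// This SAD flavor of CHECK_BETTER compares the raw SAD first and adds the
// motion-vector cost (when use_mvcost is set) only if the raw SAD already
// beats bestsad, so clearly worse candidates never pay for the cost lookup.
// It expects thissad, bestsad, best_site, i, this_mv, fcenter_mv, sad_per_bit
// and use_mvcost in the enclosing scope.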
959
960 #define MAX_PATTERN_SCALES 11
961 #define MAX_PATTERN_CANDIDATES 8 // max number of candidates per scale
962 #define PATTERN_CANDIDATES_REF 3 // number of refinement candidates
963
964 // Calculate and return a variance + mv-cost list around an integer best pel.
965 static INLINE void calc_int_cost_list(const MACROBLOCK *x, const MV *ref_mv,
966 int sadpb,
967 const vp9_variance_fn_ptr_t *fn_ptr,
968 const MV *best_mv, int *cost_list) {
969 static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } };
970 const struct buf_2d *const what = &x->plane[0].src;
971 const struct buf_2d *const in_what = &x->e_mbd.plane[0].pre[0];
972 const MV fcenter_mv = { ref_mv->row >> 3, ref_mv->col >> 3 };
973 int br = best_mv->row;
974 int bc = best_mv->col;
975 MV this_mv;
976 int i;
977 unsigned int sse;
978
979 this_mv.row = br;
980 this_mv.col = bc;
981 cost_list[0] =
982 fn_ptr->vf(what->buf, what->stride, get_buf_from_mv(in_what, &this_mv),
983 in_what->stride, &sse) +
984 mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb);
985 if (check_bounds(&x->mv_limits, br, bc, 1)) {
986 for (i = 0; i < 4; i++) {
987 const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
988 cost_list[i + 1] = fn_ptr->vf(what->buf, what->stride,
989 get_buf_from_mv(in_what, &this_mv),
990 in_what->stride, &sse) +
991 mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost,
992 x->mvcost, x->errorperbit);
993 }
994 } else {
995 for (i = 0; i < 4; i++) {
996 const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
997 if (!is_mv_in(&x->mv_limits, &this_mv))
998 cost_list[i + 1] = INT_MAX;
999 else
1000 cost_list[i + 1] = fn_ptr->vf(what->buf, what->stride,
1001 get_buf_from_mv(in_what, &this_mv),
1002 in_what->stride, &sse) +
1003 mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost,
1004 x->mvcost, x->errorperbit);
1005 }
1006 }
1007 }
1008
1009 // Generic pattern search function that searches over multiple scales.
1010 // Each scale can have a different number of candidates and shape of
1011 // candidates as indicated in the num_candidates and candidates arrays
1012 // passed into this function
1013 //
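// hex_search() below instantiates this with hexagonal candidate tables;
// bigdia_search() and square_search() supply their own tables to the _sad
// variant further down, forwarding the remaining arguments unchanged.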
1014 static int vp9_pattern_search(
1015 const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit,
1016 int do_init_search, int *cost_list, const vp9_variance_fn_ptr_t *vfp,
1017 int use_mvcost, const MV *center_mv, MV *best_mv,
1018 const int num_candidates[MAX_PATTERN_SCALES],
1019 const MV candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES]) {
1020 const MACROBLOCKD *const xd = &x->e_mbd;
1021 static const int search_param_to_steps[MAX_MVSEARCH_STEPS] = {
1022 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
1023 };
1024 int i, s, t;
1025 const struct buf_2d *const what = &x->plane[0].src;
1026 const struct buf_2d *const in_what = &xd->plane[0].pre[0];
1027 int br, bc;
1028 int bestsad = INT_MAX;
1029 int thissad;
1030 int k = -1;
1031 const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
1032 int best_init_s = search_param_to_steps[search_param];
1033 // adjust ref_mv to make sure it is within MV range
1034 clamp_mv(ref_mv, x->mv_limits.col_min, x->mv_limits.col_max,
1035 x->mv_limits.row_min, x->mv_limits.row_max);
1036 br = ref_mv->row;
1037 bc = ref_mv->col;
1038
1039 // Work out the start point for the search
1040 bestsad = vfp->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
1041 in_what->stride) +
1042 mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
1043
1044 // Search all possible scales up to the search param around the center point
1045 // pick the scale of the point that is best as the starting scale of
1046 // further steps around it.
1047 if (do_init_search) {
1048 s = best_init_s;
1049 best_init_s = -1;
1050 for (t = 0; t <= s; ++t) {
1051 int best_site = -1;
1052 if (check_bounds(&x->mv_limits, br, bc, 1 << t)) {
1053 for (i = 0; i < num_candidates[t]; i++) {
1054 const MV this_mv = { br + candidates[t][i].row,
1055 bc + candidates[t][i].col };
1056 thissad =
1057 vfp->sdf(what->buf, what->stride,
1058 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1059 CHECK_BETTER
1060 }
1061 } else {
1062 for (i = 0; i < num_candidates[t]; i++) {
1063 const MV this_mv = { br + candidates[t][i].row,
1064 bc + candidates[t][i].col };
1065 if (!is_mv_in(&x->mv_limits, &this_mv)) continue;
1066 thissad =
1067 vfp->sdf(what->buf, what->stride,
1068 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1069 CHECK_BETTER
1070 }
1071 }
1072 if (best_site == -1) {
1073 continue;
1074 } else {
1075 best_init_s = t;
1076 k = best_site;
1077 }
1078 }
1079 if (best_init_s != -1) {
1080 br += candidates[best_init_s][k].row;
1081 bc += candidates[best_init_s][k].col;
1082 }
1083 }
1084
1085 // If the center point is still the best, just skip this and move to
1086 // the refinement step.
1087 if (best_init_s != -1) {
1088 int best_site = -1;
1089 s = best_init_s;
1090
1091 do {
1092 // No need to search all points the first time if the initial search was used
1093 if (!do_init_search || s != best_init_s) {
1094 if (check_bounds(&x->mv_limits, br, bc, 1 << s)) {
1095 for (i = 0; i < num_candidates[s]; i++) {
1096 const MV this_mv = { br + candidates[s][i].row,
1097 bc + candidates[s][i].col };
1098 thissad =
1099 vfp->sdf(what->buf, what->stride,
1100 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1101 CHECK_BETTER
1102 }
1103 } else {
1104 for (i = 0; i < num_candidates[s]; i++) {
1105 const MV this_mv = { br + candidates[s][i].row,
1106 bc + candidates[s][i].col };
1107 if (!is_mv_in(&x->mv_limits, &this_mv)) continue;
1108 thissad =
1109 vfp->sdf(what->buf, what->stride,
1110 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1111 CHECK_BETTER
1112 }
1113 }
1114
1115 if (best_site == -1) {
1116 continue;
1117 } else {
1118 br += candidates[s][best_site].row;
1119 bc += candidates[s][best_site].col;
1120 k = best_site;
1121 }
1122 }
1123
1124 do {
1125 int next_chkpts_indices[PATTERN_CANDIDATES_REF];
1126 best_site = -1;
1127 next_chkpts_indices[0] = (k == 0) ? num_candidates[s] - 1 : k - 1;
1128 next_chkpts_indices[1] = k;
1129 next_chkpts_indices[2] = (k == num_candidates[s] - 1) ? 0 : k + 1;
1130
1131 if (check_bounds(&x->mv_limits, br, bc, 1 << s)) {
1132 for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
1133 const MV this_mv = {
1134 br + candidates[s][next_chkpts_indices[i]].row,
1135 bc + candidates[s][next_chkpts_indices[i]].col
1136 };
1137 thissad =
1138 vfp->sdf(what->buf, what->stride,
1139 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1140 CHECK_BETTER
1141 }
1142 } else {
1143 for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
1144 const MV this_mv = {
1145 br + candidates[s][next_chkpts_indices[i]].row,
1146 bc + candidates[s][next_chkpts_indices[i]].col
1147 };
1148 if (!is_mv_in(&x->mv_limits, &this_mv)) continue;
1149 thissad =
1150 vfp->sdf(what->buf, what->stride,
1151 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1152 CHECK_BETTER
1153 }
1154 }
1155
1156 if (best_site != -1) {
1157 k = next_chkpts_indices[best_site];
1158 br += candidates[s][k].row;
1159 bc += candidates[s][k].col;
1160 }
1161 } while (best_site != -1);
1162 } while (s--);
1163 }
1164
1165 // Returns the one-away integer pel cost values around the best as follows:
1166 // cost_list[0]: cost at the best integer pel
1167 // cost_list[1]: cost at delta {0, -1} (left) from the best integer pel
1168 // cost_list[2]: cost at delta { 1, 0} (bottom) from the best integer pel
1169 // cost_list[3]: cost at delta { 0, 1} (right) from the best integer pel
1170 // cost_list[4]: cost at delta {-1, 0} (top) from the best integer pel
1171 if (cost_list) {
1172 const MV best_mv = { br, bc };
1173 calc_int_cost_list(x, &fcenter_mv, sad_per_bit, vfp, &best_mv, cost_list);
1174 }
1175 best_mv->row = br;
1176 best_mv->col = bc;
1177 return bestsad;
1178 }
1179
1180 // A specialized function where the smallest scale search candidates
1181 // are 4 1-away neighbors, and cost_list is non-null
1182 // TODO(debargha): Merge this function with the one above. Also remove
1183 // use_mvcost option since it is always 1, to save unnecessary branches.
1184 static int vp9_pattern_search_sad(
1185 const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit,
1186 int do_init_search, int *cost_list, const vp9_variance_fn_ptr_t *vfp,
1187 int use_mvcost, const MV *center_mv, MV *best_mv,
1188 const int num_candidates[MAX_PATTERN_SCALES],
1189 const MV candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES]) {
1190 const MACROBLOCKD *const xd = &x->e_mbd;
1191 static const int search_param_to_steps[MAX_MVSEARCH_STEPS] = {
1192 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
1193 };
1194 int i, s, t;
1195 const struct buf_2d *const what = &x->plane[0].src;
1196 const struct buf_2d *const in_what = &xd->plane[0].pre[0];
1197 int br, bc;
1198 int bestsad = INT_MAX;
1199 int thissad;
1200 int k = -1;
1201 const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
1202 int best_init_s = search_param_to_steps[search_param];
1203 // adjust ref_mv to make sure it is within MV range
1204 clamp_mv(ref_mv, x->mv_limits.col_min, x->mv_limits.col_max,
1205 x->mv_limits.row_min, x->mv_limits.row_max);
1206 br = ref_mv->row;
1207 bc = ref_mv->col;
1208 if (cost_list != NULL) {
1209 cost_list[0] = cost_list[1] = cost_list[2] = cost_list[3] = cost_list[4] =
1210 INT_MAX;
1211 }
1212
1213 // Work out the start point for the search
1214 bestsad = vfp->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
1215 in_what->stride) +
1216 mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
1217
1218 // Search all possible scales up to the search param around the center point
1219 // pick the scale of the point that is best as the starting scale of
1220 // further steps around it.
1221 if (do_init_search) {
1222 s = best_init_s;
1223 best_init_s = -1;
1224 for (t = 0; t <= s; ++t) {
1225 int best_site = -1;
1226 if (check_bounds(&x->mv_limits, br, bc, 1 << t)) {
1227 for (i = 0; i < num_candidates[t]; i++) {
1228 const MV this_mv = { br + candidates[t][i].row,
1229 bc + candidates[t][i].col };
1230 thissad =
1231 vfp->sdf(what->buf, what->stride,
1232 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1233 CHECK_BETTER
1234 }
1235 } else {
1236 for (i = 0; i < num_candidates[t]; i++) {
1237 const MV this_mv = { br + candidates[t][i].row,
1238 bc + candidates[t][i].col };
1239 if (!is_mv_in(&x->mv_limits, &this_mv)) continue;
1240 thissad =
1241 vfp->sdf(what->buf, what->stride,
1242 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1243 CHECK_BETTER
1244 }
1245 }
1246 if (best_site == -1) {
1247 continue;
1248 } else {
1249 best_init_s = t;
1250 k = best_site;
1251 }
1252 }
1253 if (best_init_s != -1) {
1254 br += candidates[best_init_s][k].row;
1255 bc += candidates[best_init_s][k].col;
1256 }
1257 }
1258
1259 // If the center point is still the best, just skip this and move to
1260 // the refinement step.
1261 if (best_init_s != -1) {
1262 int do_sad = (num_candidates[0] == 4 && cost_list != NULL);
1263 int best_site = -1;
1264 s = best_init_s;
1265
1266 for (; s >= do_sad; s--) {
1267 if (!do_init_search || s != best_init_s) {
1268 if (check_bounds(&x->mv_limits, br, bc, 1 << s)) {
1269 for (i = 0; i < num_candidates[s]; i++) {
1270 const MV this_mv = { br + candidates[s][i].row,
1271 bc + candidates[s][i].col };
1272 thissad =
1273 vfp->sdf(what->buf, what->stride,
1274 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1275 CHECK_BETTER
1276 }
1277 } else {
1278 for (i = 0; i < num_candidates[s]; i++) {
1279 const MV this_mv = { br + candidates[s][i].row,
1280 bc + candidates[s][i].col };
1281 if (!is_mv_in(&x->mv_limits, &this_mv)) continue;
1282 thissad =
1283 vfp->sdf(what->buf, what->stride,
1284 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1285 CHECK_BETTER
1286 }
1287 }
1288
1289 if (best_site == -1) {
1290 continue;
1291 } else {
1292 br += candidates[s][best_site].row;
1293 bc += candidates[s][best_site].col;
1294 k = best_site;
1295 }
1296 }
1297
1298 do {
1299 int next_chkpts_indices[PATTERN_CANDIDATES_REF];
1300 best_site = -1;
1301 next_chkpts_indices[0] = (k == 0) ? num_candidates[s] - 1 : k - 1;
1302 next_chkpts_indices[1] = k;
1303 next_chkpts_indices[2] = (k == num_candidates[s] - 1) ? 0 : k + 1;
1304
1305 if (check_bounds(&x->mv_limits, br, bc, 1 << s)) {
1306 for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
1307 const MV this_mv = {
1308 br + candidates[s][next_chkpts_indices[i]].row,
1309 bc + candidates[s][next_chkpts_indices[i]].col
1310 };
1311 thissad =
1312 vfp->sdf(what->buf, what->stride,
1313 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1314 CHECK_BETTER
1315 }
1316 } else {
1317 for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
1318 const MV this_mv = {
1319 br + candidates[s][next_chkpts_indices[i]].row,
1320 bc + candidates[s][next_chkpts_indices[i]].col
1321 };
1322 if (!is_mv_in(&x->mv_limits, &this_mv)) continue;
1323 thissad =
1324 vfp->sdf(what->buf, what->stride,
1325 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1326 CHECK_BETTER
1327 }
1328 }
1329
1330 if (best_site != -1) {
1331 k = next_chkpts_indices[best_site];
1332 br += candidates[s][k].row;
1333 bc += candidates[s][k].col;
1334 }
1335 } while (best_site != -1);
1336 }
1337
1338 // Note: If we enter the if below, then cost_list must be non-NULL.
1339 if (s == 0) {
1340 cost_list[0] = bestsad;
1341 if (!do_init_search || s != best_init_s) {
1342 if (check_bounds(&x->mv_limits, br, bc, 1 << s)) {
1343 for (i = 0; i < num_candidates[s]; i++) {
1344 const MV this_mv = { br + candidates[s][i].row,
1345 bc + candidates[s][i].col };
1346 cost_list[i + 1] = thissad =
1347 vfp->sdf(what->buf, what->stride,
1348 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1349 CHECK_BETTER
1350 }
1351 } else {
1352 for (i = 0; i < num_candidates[s]; i++) {
1353 const MV this_mv = { br + candidates[s][i].row,
1354 bc + candidates[s][i].col };
1355 if (!is_mv_in(&x->mv_limits, &this_mv)) continue;
1356 cost_list[i + 1] = thissad =
1357 vfp->sdf(what->buf, what->stride,
1358 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1359 CHECK_BETTER
1360 }
1361 }
1362
1363 if (best_site != -1) {
1364 br += candidates[s][best_site].row;
1365 bc += candidates[s][best_site].col;
1366 k = best_site;
1367 }
1368 }
1369 while (best_site != -1) {
1370 int next_chkpts_indices[PATTERN_CANDIDATES_REF];
1371 best_site = -1;
1372 next_chkpts_indices[0] = (k == 0) ? num_candidates[s] - 1 : k - 1;
1373 next_chkpts_indices[1] = k;
1374 next_chkpts_indices[2] = (k == num_candidates[s] - 1) ? 0 : k + 1;
1375 cost_list[1] = cost_list[2] = cost_list[3] = cost_list[4] = INT_MAX;
1376 cost_list[((k + 2) % 4) + 1] = cost_list[0];
1377 cost_list[0] = bestsad;
1378
1379 if (check_bounds(&x->mv_limits, br, bc, 1 << s)) {
1380 for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
1381 const MV this_mv = {
1382 br + candidates[s][next_chkpts_indices[i]].row,
1383 bc + candidates[s][next_chkpts_indices[i]].col
1384 };
1385 cost_list[next_chkpts_indices[i] + 1] = thissad =
1386 vfp->sdf(what->buf, what->stride,
1387 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1388 CHECK_BETTER
1389 }
1390 } else {
1391 for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
1392 const MV this_mv = {
1393 br + candidates[s][next_chkpts_indices[i]].row,
1394 bc + candidates[s][next_chkpts_indices[i]].col
1395 };
1396 if (!is_mv_in(&x->mv_limits, &this_mv)) {
1397 cost_list[next_chkpts_indices[i] + 1] = INT_MAX;
1398 continue;
1399 }
1400 cost_list[next_chkpts_indices[i] + 1] = thissad =
1401 vfp->sdf(what->buf, what->stride,
1402 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1403 CHECK_BETTER
1404 }
1405 }
1406
1407 if (best_site != -1) {
1408 k = next_chkpts_indices[best_site];
1409 br += candidates[s][k].row;
1410 bc += candidates[s][k].col;
1411 }
1412 }
1413 }
1414 }
1415
1416 // Returns the one-away integer pel sad values around the best as follows:
1417 // cost_list[0]: sad at the best integer pel
1418 // cost_list[1]: sad at delta {0, -1} (left) from the best integer pel
1419 // cost_list[2]: sad at delta { 1, 0} (bottom) from the best integer pel
1420 // cost_list[3]: sad at delta { 0, 1} (right) from the best integer pel
1421 // cost_list[4]: sad at delta {-1, 0} (top) from the best integer pel
1422 if (cost_list) {
1423 static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } };
1424 if (cost_list[0] == INT_MAX) {
1425 cost_list[0] = bestsad;
1426 if (check_bounds(&x->mv_limits, br, bc, 1)) {
1427 for (i = 0; i < 4; i++) {
1428 const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
1429 cost_list[i + 1] =
1430 vfp->sdf(what->buf, what->stride,
1431 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1432 }
1433 } else {
1434 for (i = 0; i < 4; i++) {
1435 const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
1436 if (!is_mv_in(&x->mv_limits, &this_mv))
1437 cost_list[i + 1] = INT_MAX;
1438 else
1439 cost_list[i + 1] =
1440 vfp->sdf(what->buf, what->stride,
1441 get_buf_from_mv(in_what, &this_mv), in_what->stride);
1442 }
1443 }
1444 } else {
1445 if (use_mvcost) {
1446 for (i = 0; i < 4; i++) {
1447 const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
1448 if (cost_list[i + 1] != INT_MAX) {
1449 cost_list[i + 1] +=
1450 mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
1451 }
1452 }
1453 }
1454 }
1455 }
1456 best_mv->row = br;
1457 best_mv->col = bc;
1458 return bestsad;
1459 }
1460
1461 int vp9_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
1462 const MV *center_mv, const vp9_variance_fn_ptr_t *vfp,
1463 int use_mvcost) {
1464 const MACROBLOCKD *const xd = &x->e_mbd;
1465 const struct buf_2d *const what = &x->plane[0].src;
1466 const struct buf_2d *const in_what = &xd->plane[0].pre[0];
1467 const MV mv = { best_mv->row * 8, best_mv->col * 8 };
1468 uint32_t unused;
1469 #if CONFIG_VP9_HIGHBITDEPTH
1470 uint64_t err =
1471 vfp->vf(what->buf, what->stride, get_buf_from_mv(in_what, best_mv),
1472 in_what->stride, &unused);
1473 err += (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
1474 x->errorperbit)
1475 : 0);
1476 if (err >= INT_MAX) return INT_MAX;
1477 return (int)err;
1478 #else
1479 return vfp->vf(what->buf, what->stride, get_buf_from_mv(in_what, best_mv),
1480 in_what->stride, &unused) +
1481 (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
1482 x->errorperbit)
1483 : 0);
1484 #endif
1485 }
1486
1487 int vp9_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
1488 const MV *center_mv, const uint8_t *second_pred,
1489 const vp9_variance_fn_ptr_t *vfp, int use_mvcost) {
1490 const MACROBLOCKD *const xd = &x->e_mbd;
1491 const struct buf_2d *const what = &x->plane[0].src;
1492 const struct buf_2d *const in_what = &xd->plane[0].pre[0];
1493 const MV mv = { best_mv->row * 8, best_mv->col * 8 };
1494 unsigned int unused;
1495
1496 return vfp->svaf(get_buf_from_mv(in_what, best_mv), in_what->stride, 0, 0,
1497 what->buf, what->stride, &unused, second_pred) +
1498 (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
1499 x->errorperbit)
1500 : 0);
1501 }
1502
1503 static int hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
1504 int sad_per_bit, int do_init_search, int *cost_list,
1505 const vp9_variance_fn_ptr_t *vfp, int use_mvcost,
1506 const MV *center_mv, MV *best_mv) {
1507 // The first scale has the 8 closest points; the rest have 6 points in a hex
1508 // shape at increasing scales.
1509 static const int hex_num_candidates[MAX_PATTERN_SCALES] = { 8, 6, 6, 6, 6, 6,
1510 6, 6, 6, 6, 6 };
1511 // Note that the largest candidate step at each scale is 2^scale
1512 /* clang-format off */
1513 static const MV hex_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
1514 { { -1, -1 }, { 0, -1 }, { 1, -1 }, { 1, 0 }, { 1, 1 }, { 0, 1 }, { -1, 1 },
1515 { -1, 0 } },
1516 { { -1, -2 }, { 1, -2 }, { 2, 0 }, { 1, 2 }, { -1, 2 }, { -2, 0 } },
1517 { { -2, -4 }, { 2, -4 }, { 4, 0 }, { 2, 4 }, { -2, 4 }, { -4, 0 } },
1518 { { -4, -8 }, { 4, -8 }, { 8, 0 }, { 4, 8 }, { -4, 8 }, { -8, 0 } },
1519 { { -8, -16 }, { 8, -16 }, { 16, 0 }, { 8, 16 }, { -8, 16 }, { -16, 0 } },
1520 { { -16, -32 }, { 16, -32 }, { 32, 0 }, { 16, 32 }, { -16, 32 },
1521 { -32, 0 } },
1522 { { -32, -64 }, { 32, -64 }, { 64, 0 }, { 32, 64 }, { -32, 64 },
1523 { -64, 0 } },
1524 { { -64, -128 }, { 64, -128 }, { 128, 0 }, { 64, 128 }, { -64, 128 },
1525 { -128, 0 } },
1526 { { -128, -256 }, { 128, -256 }, { 256, 0 }, { 128, 256 }, { -128, 256 },
1527 { -256, 0 } },
1528 { { -256, -512 }, { 256, -512 }, { 512, 0 }, { 256, 512 }, { -256, 512 },
1529 { -512, 0 } },
1530 { { -512, -1024 }, { 512, -1024 }, { 1024, 0 }, { 512, 1024 },
1531 { -512, 1024 }, { -1024, 0 } }
1532 };
1533 /* clang-format on */
1534 return vp9_pattern_search(
1535 x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
1536 use_mvcost, center_mv, best_mv, hex_num_candidates, hex_candidates);
1537 }
1538
1539 static int bigdia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
1540 int sad_per_bit, int do_init_search, int *cost_list,
1541 const vp9_variance_fn_ptr_t *vfp, int use_mvcost,
1542 const MV *center_mv, MV *best_mv) {
1543 // First scale has 4-closest points, the rest have 8 points in diamond
1544 // shape at increasing scales
1545 static const int bigdia_num_candidates[MAX_PATTERN_SCALES] = {
1546 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1547 };
1548 // Note that the largest candidate step at each scale is 2^scale
1549 /* clang-format off */
1550 static const MV
1551 bigdia_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
1552 { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } },
1553 { { -1, -1 }, { 0, -2 }, { 1, -1 }, { 2, 0 }, { 1, 1 }, { 0, 2 },
1554 { -1, 1 }, { -2, 0 } },
1555 { { -2, -2 }, { 0, -4 }, { 2, -2 }, { 4, 0 }, { 2, 2 }, { 0, 4 },
1556 { -2, 2 }, { -4, 0 } },
1557 { { -4, -4 }, { 0, -8 }, { 4, -4 }, { 8, 0 }, { 4, 4 }, { 0, 8 },
1558 { -4, 4 }, { -8, 0 } },
1559 { { -8, -8 }, { 0, -16 }, { 8, -8 }, { 16, 0 }, { 8, 8 }, { 0, 16 },
1560 { -8, 8 }, { -16, 0 } },
1561 { { -16, -16 }, { 0, -32 }, { 16, -16 }, { 32, 0 }, { 16, 16 },
1562 { 0, 32 }, { -16, 16 }, { -32, 0 } },
1563 { { -32, -32 }, { 0, -64 }, { 32, -32 }, { 64, 0 }, { 32, 32 },
1564 { 0, 64 }, { -32, 32 }, { -64, 0 } },
1565 { { -64, -64 }, { 0, -128 }, { 64, -64 }, { 128, 0 }, { 64, 64 },
1566 { 0, 128 }, { -64, 64 }, { -128, 0 } },
1567 { { -128, -128 }, { 0, -256 }, { 128, -128 }, { 256, 0 }, { 128, 128 },
1568 { 0, 256 }, { -128, 128 }, { -256, 0 } },
1569 { { -256, -256 }, { 0, -512 }, { 256, -256 }, { 512, 0 }, { 256, 256 },
1570 { 0, 512 }, { -256, 256 }, { -512, 0 } },
1571 { { -512, -512 }, { 0, -1024 }, { 512, -512 }, { 1024, 0 },
1572 { 512, 512 }, { 0, 1024 }, { -512, 512 }, { -1024, 0 } }
1573 };
1574 /* clang-format on */
1575 return vp9_pattern_search_sad(
1576 x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
1577 use_mvcost, center_mv, best_mv, bigdia_num_candidates, bigdia_candidates);
1578 }
1579
1580 static int square_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
1581 int sad_per_bit, int do_init_search, int *cost_list,
1582 const vp9_variance_fn_ptr_t *vfp, int use_mvcost,
1583 const MV *center_mv, MV *best_mv) {
1584 // All scales have 8 closest points in square shape
1585 static const int square_num_candidates[MAX_PATTERN_SCALES] = {
1586 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1587 };
1588 // Note that the largest candidate step at each scale is 2^scale
1589 /* clang-format off */
1590 static const MV
1591 square_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
1592 { { -1, -1 }, { 0, -1 }, { 1, -1 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
1593 { -1, 1 }, { -1, 0 } },
1594 { { -2, -2 }, { 0, -2 }, { 2, -2 }, { 2, 0 }, { 2, 2 }, { 0, 2 },
1595 { -2, 2 }, { -2, 0 } },
1596 { { -4, -4 }, { 0, -4 }, { 4, -4 }, { 4, 0 }, { 4, 4 }, { 0, 4 },
1597 { -4, 4 }, { -4, 0 } },
1598 { { -8, -8 }, { 0, -8 }, { 8, -8 }, { 8, 0 }, { 8, 8 }, { 0, 8 },
1599 { -8, 8 }, { -8, 0 } },
1600 { { -16, -16 }, { 0, -16 }, { 16, -16 }, { 16, 0 }, { 16, 16 },
1601 { 0, 16 }, { -16, 16 }, { -16, 0 } },
1602 { { -32, -32 }, { 0, -32 }, { 32, -32 }, { 32, 0 }, { 32, 32 },
1603 { 0, 32 }, { -32, 32 }, { -32, 0 } },
1604 { { -64, -64 }, { 0, -64 }, { 64, -64 }, { 64, 0 }, { 64, 64 },
1605 { 0, 64 }, { -64, 64 }, { -64, 0 } },
1606 { { -128, -128 }, { 0, -128 }, { 128, -128 }, { 128, 0 }, { 128, 128 },
1607 { 0, 128 }, { -128, 128 }, { -128, 0 } },
1608 { { -256, -256 }, { 0, -256 }, { 256, -256 }, { 256, 0 }, { 256, 256 },
1609 { 0, 256 }, { -256, 256 }, { -256, 0 } },
1610 { { -512, -512 }, { 0, -512 }, { 512, -512 }, { 512, 0 }, { 512, 512 },
1611 { 0, 512 }, { -512, 512 }, { -512, 0 } },
1612 { { -1024, -1024 }, { 0, -1024 }, { 1024, -1024 }, { 1024, 0 },
1613 { 1024, 1024 }, { 0, 1024 }, { -1024, 1024 }, { -1024, 0 } }
1614 };
1615 /* clang-format on */
1616 return vp9_pattern_search(
1617 x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
1618 use_mvcost, center_mv, best_mv, square_num_candidates, square_candidates);
1619 }
1620
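// The fast_* wrappers below raise search_param to at least
// MAX_MVSEARCH_STEPS - 2, which restricts the pattern search to its finest
// scales and so trades search range for speed.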
1621 static int fast_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
1622 int sad_per_bit,
1623 int do_init_search, // must be zero for fast_hex
1624 int *cost_list, const vp9_variance_fn_ptr_t *vfp,
1625 int use_mvcost, const MV *center_mv, MV *best_mv) {
1626 return hex_search(x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param),
1627 sad_per_bit, do_init_search, cost_list, vfp, use_mvcost,
1628 center_mv, best_mv);
1629 }
1630
1631 static int fast_dia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
1632 int sad_per_bit, int do_init_search, int *cost_list,
1633 const vp9_variance_fn_ptr_t *vfp, int use_mvcost,
1634 const MV *center_mv, MV *best_mv) {
1635 return bigdia_search(x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param),
1636 sad_per_bit, do_init_search, cost_list, vfp, use_mvcost,
1637 center_mv, best_mv);
1638 }
1639
1640 #undef CHECK_BETTER
1641
1642 // Exhaustive motion search around a given centre position with a given
1643 // step size.
1644 static int exhaustive_mesh_search(const MACROBLOCK *x, MV *ref_mv, MV *best_mv,
1645 int range, int step, int sad_per_bit,
1646 const vp9_variance_fn_ptr_t *fn_ptr,
1647 const MV *center_mv) {
1648 const MACROBLOCKD *const xd = &x->e_mbd;
1649 const struct buf_2d *const what = &x->plane[0].src;
1650 const struct buf_2d *const in_what = &xd->plane[0].pre[0];
1651 MV fcenter_mv = { center_mv->row, center_mv->col };
1652 unsigned int best_sad = INT_MAX;
1653 int r, c, i;
1654 int start_col, end_col, start_row, end_row;
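  // When step == 1 the column loop advances four positions at a time so that
  // sdx4df() can evaluate four SADs per call; any leftover columns are handled
  // individually below.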
1655 int col_step = (step > 1) ? step : 4;
1656
1657 assert(step >= 1);
1658
1659 clamp_mv(&fcenter_mv, x->mv_limits.col_min, x->mv_limits.col_max,
1660 x->mv_limits.row_min, x->mv_limits.row_max);
1661 *best_mv = fcenter_mv;
1662 best_sad =
1663 fn_ptr->sdf(what->buf, what->stride,
1664 get_buf_from_mv(in_what, &fcenter_mv), in_what->stride) +
1665 mvsad_err_cost(x, &fcenter_mv, ref_mv, sad_per_bit);
1666 start_row = VPXMAX(-range, x->mv_limits.row_min - fcenter_mv.row);
1667 start_col = VPXMAX(-range, x->mv_limits.col_min - fcenter_mv.col);
1668 end_row = VPXMIN(range, x->mv_limits.row_max - fcenter_mv.row);
1669 end_col = VPXMIN(range, x->mv_limits.col_max - fcenter_mv.col);
1670
1671 for (r = start_row; r <= end_row; r += step) {
1672 for (c = start_col; c <= end_col; c += col_step) {
1673 // Step > 1 means we are not checking every location in this pass.
1674 if (step > 1) {
1675 const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c };
1676 unsigned int sad =
1677 fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, &mv),
1678 in_what->stride);
1679 if (sad < best_sad) {
1680 sad += mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
1681 if (sad < best_sad) {
1682 best_sad = sad;
1683 *best_mv = mv;
1684 }
1685 }
1686 } else {
1687 // 4 sads in a single call if we are checking every location
1688 if (c + 3 <= end_col) {
1689 unsigned int sads[4];
1690 const uint8_t *addrs[4];
1691 for (i = 0; i < 4; ++i) {
1692 const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i };
1693 addrs[i] = get_buf_from_mv(in_what, &mv);
1694 }
1695 fn_ptr->sdx4df(what->buf, what->stride, addrs, in_what->stride, sads);
1696
1697 for (i = 0; i < 4; ++i) {
1698 if (sads[i] < best_sad) {
1699 const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i };
1700 const unsigned int sad =
1701 sads[i] + mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
1702 if (sad < best_sad) {
1703 best_sad = sad;
1704 *best_mv = mv;
1705 }
1706 }
1707 }
1708 } else {
1709 for (i = 0; i < end_col - c; ++i) {
1710 const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i };
1711 unsigned int sad =
1712 fn_ptr->sdf(what->buf, what->stride,
1713 get_buf_from_mv(in_what, &mv), in_what->stride);
1714 if (sad < best_sad) {
1715 sad += mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
1716 if (sad < best_sad) {
1717 best_sad = sad;
1718 *best_mv = mv;
1719 }
1720 }
1721 }
1722 }
1723 }
1724 }
1725 }
1726
1727 return best_sad;
1728 }
1729
1730 #define MIN_RANGE 7
1731 #define MAX_RANGE 256
1732 #define MIN_INTERVAL 1
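// The mesh search expects 'range' in [MIN_RANGE, MAX_RANGE] and 'interval' in
// [MIN_INTERVAL, range]; see the validation in the full_pixel_exhaustive*()
// functions below.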
1733 #if CONFIG_NON_GREEDY_MV
1734 static int64_t exhaustive_mesh_search_multi_step(
1735 MV *best_mv, const MV *center_mv, int range, int step,
1736 const struct buf_2d *src, const struct buf_2d *pre, int lambda,
1737 const int_mv *nb_full_mvs, int full_mv_num, const MvLimits *mv_limits,
1738 const vp9_variance_fn_ptr_t *fn_ptr) {
1739 int64_t best_sad;
1740 int r, c;
1741 int start_col, end_col, start_row, end_row;
1742 *best_mv = *center_mv;
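  // SADs are shifted up by LOG2_PRECISION so they share a fixed-point scale
  // with the lambda-weighted neighbour-MV inconsistency term.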
1743 best_sad =
1744 ((int64_t)fn_ptr->sdf(src->buf, src->stride,
1745 get_buf_from_mv(pre, center_mv), pre->stride)
1746 << LOG2_PRECISION) +
1747 lambda * vp9_nb_mvs_inconsistency(best_mv, nb_full_mvs, full_mv_num);
1748 start_row = VPXMAX(center_mv->row - range, mv_limits->row_min);
1749 start_col = VPXMAX(center_mv->col - range, mv_limits->col_min);
1750 end_row = VPXMIN(center_mv->row + range, mv_limits->row_max);
1751 end_col = VPXMIN(center_mv->col + range, mv_limits->col_max);
1752 for (r = start_row; r <= end_row; r += step) {
1753 for (c = start_col; c <= end_col; c += step) {
1754 const MV mv = { r, c };
1755 int64_t sad = (int64_t)fn_ptr->sdf(src->buf, src->stride,
1756 get_buf_from_mv(pre, &mv), pre->stride)
1757 << LOG2_PRECISION;
1758 if (sad < best_sad) {
1759 sad += lambda * vp9_nb_mvs_inconsistency(&mv, nb_full_mvs, full_mv_num);
1760 if (sad < best_sad) {
1761 best_sad = sad;
1762 *best_mv = mv;
1763 }
1764 }
1765 }
1766 }
1767 return best_sad;
1768 }
1769
1770 static int64_t exhaustive_mesh_search_single_step(
1771 MV *best_mv, const MV *center_mv, int range, const struct buf_2d *src,
1772 const struct buf_2d *pre, int lambda, const int_mv *nb_full_mvs,
1773 int full_mv_num, const MvLimits *mv_limits,
1774 const vp9_variance_fn_ptr_t *fn_ptr) {
1775 int64_t best_sad;
1776 int r, c, i;
1777 int start_col, end_col, start_row, end_row;
1778
1779 *best_mv = *center_mv;
1780 best_sad =
1781 ((int64_t)fn_ptr->sdf(src->buf, src->stride,
1782 get_buf_from_mv(pre, center_mv), pre->stride)
1783 << LOG2_PRECISION) +
1784 lambda * vp9_nb_mvs_inconsistency(best_mv, nb_full_mvs, full_mv_num);
1785 start_row = VPXMAX(center_mv->row - range, mv_limits->row_min);
1786 start_col = VPXMAX(center_mv->col - range, mv_limits->col_min);
1787 end_row = VPXMIN(center_mv->row + range, mv_limits->row_max);
1788 end_col = VPXMIN(center_mv->col + range, mv_limits->col_max);
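  // The column sweep uses sdx8f() in groups of 8 where available, then
  // sdx4df() in groups of 4, and finally falls back to single sdf() calls for
  // the remainder.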
1789 for (r = start_row; r <= end_row; r += 1) {
1790 c = start_col;
1791 // sdx8f may not be available for some block sizes.
1792 if (fn_ptr->sdx8f) {
1793 while (c + 7 <= end_col) {
1794 unsigned int sads[8];
1795 const MV mv = { r, c };
1796 const uint8_t *buf = get_buf_from_mv(pre, &mv);
1797 fn_ptr->sdx8f(src->buf, src->stride, buf, pre->stride, sads);
1798
1799 for (i = 0; i < 8; ++i) {
1800 int64_t sad = (int64_t)sads[i] << LOG2_PRECISION;
1801 if (sad < best_sad) {
1802 const MV mv = { r, c + i };
1803 sad += lambda *
1804 vp9_nb_mvs_inconsistency(&mv, nb_full_mvs, full_mv_num);
1805 if (sad < best_sad) {
1806 best_sad = sad;
1807 *best_mv = mv;
1808 }
1809 }
1810 }
1811 c += 8;
1812 }
1813 }
1814 while (c + 3 <= end_col) {
1815 unsigned int sads[4];
1816 const uint8_t *addrs[4];
1817 for (i = 0; i < 4; ++i) {
1818 const MV mv = { r, c + i };
1819 addrs[i] = get_buf_from_mv(pre, &mv);
1820 }
1821 fn_ptr->sdx4df(src->buf, src->stride, addrs, pre->stride, sads);
1822
1823 for (i = 0; i < 4; ++i) {
1824 int64_t sad = (int64_t)sads[i] << LOG2_PRECISION;
1825 if (sad < best_sad) {
1826 const MV mv = { r, c + i };
1827 sad +=
1828 lambda * vp9_nb_mvs_inconsistency(&mv, nb_full_mvs, full_mv_num);
1829 if (sad < best_sad) {
1830 best_sad = sad;
1831 *best_mv = mv;
1832 }
1833 }
1834 }
1835 c += 4;
1836 }
1837 while (c <= end_col) {
1838 const MV mv = { r, c };
1839 int64_t sad = (int64_t)fn_ptr->sdf(src->buf, src->stride,
1840 get_buf_from_mv(pre, &mv), pre->stride)
1841 << LOG2_PRECISION;
1842 if (sad < best_sad) {
1843 sad += lambda * vp9_nb_mvs_inconsistency(&mv, nb_full_mvs, full_mv_num);
1844 if (sad < best_sad) {
1845 best_sad = sad;
1846 *best_mv = mv;
1847 }
1848 }
1849 c += 1;
1850 }
1851 }
1852 return best_sad;
1853 }
1854
1855 static int64_t exhaustive_mesh_search_new(const MACROBLOCK *x, MV *best_mv,
1856 int range, int step,
1857 const vp9_variance_fn_ptr_t *fn_ptr,
1858 const MV *center_mv, int lambda,
1859 const int_mv *nb_full_mvs,
1860 int full_mv_num) {
1861 const MACROBLOCKD *const xd = &x->e_mbd;
1862 const struct buf_2d *src = &x->plane[0].src;
1863 const struct buf_2d *pre = &xd->plane[0].pre[0];
1864 assert(step >= 1);
1865 assert(is_mv_in(&x->mv_limits, center_mv));
1866 if (step == 1) {
1867 return exhaustive_mesh_search_single_step(
1868 best_mv, center_mv, range, src, pre, lambda, nb_full_mvs, full_mv_num,
1869 &x->mv_limits, fn_ptr);
1870 }
1871 return exhaustive_mesh_search_multi_step(best_mv, center_mv, range, step, src,
1872 pre, lambda, nb_full_mvs,
1873 full_mv_num, &x->mv_limits, fn_ptr);
1874 }
1875
1876 static int64_t full_pixel_exhaustive_new(const VP9_COMP *cpi, MACROBLOCK *x,
1877 MV *centre_mv_full,
1878 const vp9_variance_fn_ptr_t *fn_ptr,
1879 MV *dst_mv, int lambda,
1880 const int_mv *nb_full_mvs,
1881 int full_mv_num) {
1882 const SPEED_FEATURES *const sf = &cpi->sf;
1883 MV temp_mv = { centre_mv_full->row, centre_mv_full->col };
1884 int64_t bestsme;
1885 int i;
1886 int interval = sf->mesh_patterns[0].interval;
1887 int range = sf->mesh_patterns[0].range;
1888 int baseline_interval_divisor;
1889
1890 // Trap illegal values for interval and range for this function.
1891 if ((range < MIN_RANGE) || (range > MAX_RANGE) || (interval < MIN_INTERVAL) ||
1892 (interval > range)) {
1893 printf("ERROR: invalid range\n");
1894 assert(0);
1895 }
1896
1897 baseline_interval_divisor = range / interval;
1898
1899 // Check size of proposed first range against magnitude of the centre
1900 // value used as a starting point.
1901 range = VPXMAX(range, (5 * VPXMAX(abs(temp_mv.row), abs(temp_mv.col))) / 4);
1902 range = VPXMIN(range, MAX_RANGE);
1903 interval = VPXMAX(interval, range / baseline_interval_divisor);
1904
1905 // initial search
1906 bestsme =
1907 exhaustive_mesh_search_new(x, &temp_mv, range, interval, fn_ptr, &temp_mv,
1908 lambda, nb_full_mvs, full_mv_num);
1909
1910 if ((interval > MIN_INTERVAL) && (range > MIN_RANGE)) {
1911 // Progressive searches with range and step size decreasing each time
1912 // till we reach a step size of 1. Then break out.
1913 for (i = 1; i < MAX_MESH_STEP; ++i) {
1914 // First pass with coarser step and longer range
1915 bestsme = exhaustive_mesh_search_new(
1916 x, &temp_mv, sf->mesh_patterns[i].range,
1917 sf->mesh_patterns[i].interval, fn_ptr, &temp_mv, lambda, nb_full_mvs,
1918 full_mv_num);
1919
1920 if (sf->mesh_patterns[i].interval == 1) break;
1921 }
1922 }
1923
1924 *dst_mv = temp_mv;
1925
1926 return bestsme;
1927 }
1928
1929 static int64_t diamond_search_sad_new(const MACROBLOCK *x,
1930 const search_site_config *cfg,
1931 const MV *init_full_mv, MV *best_full_mv,
1932 int search_param, int lambda, int *num00,
1933 const vp9_variance_fn_ptr_t *fn_ptr,
1934 const int_mv *nb_full_mvs,
1935 int full_mv_num) {
1936 int i, j, step;
1937
1938 const MACROBLOCKD *const xd = &x->e_mbd;
1939 uint8_t *what = x->plane[0].src.buf;
1940 const int what_stride = x->plane[0].src.stride;
1941 const uint8_t *in_what;
1942 const int in_what_stride = xd->plane[0].pre[0].stride;
1943 const uint8_t *best_address;
1944
1945 int64_t bestsad;
1946 int best_site = -1;
1947 int last_site = -1;
1948
1949 // search_param determines the length of the initial step and hence the number
1950 // of iterations.
1951 // 0 = initial step (MAX_FIRST_STEP) pel
1952 // 1 = (MAX_FIRST_STEP/2) pel,
1953 // 2 = (MAX_FIRST_STEP/4) pel...
1954 // const search_site *ss = &cfg->ss[search_param * cfg->searches_per_step];
1955 const MV *ss_mv = &cfg->ss_mv[search_param * cfg->searches_per_step];
1956 const intptr_t *ss_os = &cfg->ss_os[search_param * cfg->searches_per_step];
1957 const int tot_steps = cfg->total_steps - search_param;
1958 vpx_clear_system_state();
1959
1960 *best_full_mv = *init_full_mv;
1961 clamp_mv(best_full_mv, x->mv_limits.col_min, x->mv_limits.col_max,
1962 x->mv_limits.row_min, x->mv_limits.row_max);
1963 *num00 = 0;
1964
1965 // Work out the start point for the search
1966 in_what = xd->plane[0].pre[0].buf + best_full_mv->row * in_what_stride +
1967 best_full_mv->col;
1968 best_address = in_what;
1969
1970 // Check the starting position
1971 {
1972 const int64_t mv_dist =
1973 (int64_t)fn_ptr->sdf(what, what_stride, in_what, in_what_stride)
1974 << LOG2_PRECISION;
1975 const int64_t mv_cost =
1976 vp9_nb_mvs_inconsistency(best_full_mv, nb_full_mvs, full_mv_num);
1977 bestsad = mv_dist + lambda * mv_cost;
1978 }
1979
1980 i = 0;
1981
1982 for (step = 0; step < tot_steps; step++) {
1983 int all_in = 1, t;
1984
1985 // All_in is true if every one of the points we are checking is within
1986 // the bounds of the image.
1987 all_in &= ((best_full_mv->row + ss_mv[i].row) > x->mv_limits.row_min);
1988 all_in &= ((best_full_mv->row + ss_mv[i + 1].row) < x->mv_limits.row_max);
1989 all_in &= ((best_full_mv->col + ss_mv[i + 2].col) > x->mv_limits.col_min);
1990 all_in &= ((best_full_mv->col + ss_mv[i + 3].col) < x->mv_limits.col_max);
1991
1992 // If all the pixels are within the bounds, we don't check whether the
1993 // search point is valid in this loop; otherwise we check each point
1994 // for validity.
1995 if (all_in) {
1996 unsigned int sad_array[4];
1997
1998 for (j = 0; j < cfg->searches_per_step; j += 4) {
1999 unsigned char const *block_offset[4];
2000
2001 for (t = 0; t < 4; t++) block_offset[t] = ss_os[i + t] + best_address;
2002
2003 fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride,
2004 sad_array);
2005
2006 for (t = 0; t < 4; t++, i++) {
2007 const int64_t mv_dist = (int64_t)sad_array[t] << LOG2_PRECISION;
2008 if (mv_dist < bestsad) {
2009 const MV this_mv = { best_full_mv->row + ss_mv[i].row,
2010 best_full_mv->col + ss_mv[i].col };
2011 const int64_t mv_cost =
2012 vp9_nb_mvs_inconsistency(&this_mv, nb_full_mvs, full_mv_num);
2013 const int64_t thissad = mv_dist + lambda * mv_cost;
2014 if (thissad < bestsad) {
2015 bestsad = thissad;
2016 best_site = i;
2017 }
2018 }
2019 }
2020 }
2021 } else {
2022 for (j = 0; j < cfg->searches_per_step; j++) {
2023 // Trap illegal vectors
2024 const MV this_mv = { best_full_mv->row + ss_mv[i].row,
2025 best_full_mv->col + ss_mv[i].col };
2026
2027 if (is_mv_in(&x->mv_limits, &this_mv)) {
2028 const uint8_t *const check_here = ss_os[i] + best_address;
2029 const int64_t mv_dist =
2030 (int64_t)fn_ptr->sdf(what, what_stride, check_here,
2031 in_what_stride)
2032 << LOG2_PRECISION;
2033 if (mv_dist < bestsad) {
2034 const int64_t mv_cost =
2035 vp9_nb_mvs_inconsistency(&this_mv, nb_full_mvs, full_mv_num);
2036 const int64_t thissad = mv_dist + lambda * mv_cost;
2037 if (thissad < bestsad) {
2038 bestsad = thissad;
2039 best_site = i;
2040 }
2041 }
2042 }
2043 i++;
2044 }
2045 }
2046 if (best_site != last_site) {
2047 best_full_mv->row += ss_mv[best_site].row;
2048 best_full_mv->col += ss_mv[best_site].col;
2049 best_address += ss_os[best_site];
2050 last_site = best_site;
2051 } else if (best_address == in_what) {
2052 (*num00)++;
2053 }
2054 }
2055 return bestsad;
2056 }
2057
2058 int vp9_prepare_nb_full_mvs(const MotionField *motion_field, int mi_row,
2059 int mi_col, int_mv *nb_full_mvs) {
2060 const int mi_width = num_8x8_blocks_wide_lookup[motion_field->bsize];
2061 const int mi_height = num_8x8_blocks_high_lookup[motion_field->bsize];
2062 const int dirs[NB_MVS_NUM][2] = { { -1, 0 }, { 0, -1 }, { 1, 0 }, { 0, 1 } };
2063 int nb_full_mv_num = 0;
2064 int i;
2065 assert(mi_row % mi_height == 0);
2066 assert(mi_col % mi_width == 0);
2067 for (i = 0; i < NB_MVS_NUM; ++i) {
2068 int r = dirs[i][0];
2069 int c = dirs[i][1];
2070 int brow = mi_row / mi_height + r;
2071 int bcol = mi_col / mi_width + c;
2072 if (brow >= 0 && brow < motion_field->block_rows && bcol >= 0 &&
2073 bcol < motion_field->block_cols) {
2074 if (vp9_motion_field_is_mv_set(motion_field, brow, bcol)) {
2075 int_mv mv = vp9_motion_field_get_mv(motion_field, brow, bcol);
2076 nb_full_mvs[nb_full_mv_num].as_mv = get_full_mv(&mv.as_mv);
2077 ++nb_full_mv_num;
2078 }
2079 }
2080 }
2081 return nb_full_mv_num;
2082 }
2083 #endif // CONFIG_NON_GREEDY_MV
2084
2085 int vp9_diamond_search_sad_c(const MACROBLOCK *x, const search_site_config *cfg,
2086 MV *ref_mv, MV *best_mv, int search_param,
2087 int sad_per_bit, int *num00,
2088 const vp9_variance_fn_ptr_t *fn_ptr,
2089 const MV *center_mv) {
2090 int i, j, step;
2091
2092 const MACROBLOCKD *const xd = &x->e_mbd;
2093 uint8_t *what = x->plane[0].src.buf;
2094 const int what_stride = x->plane[0].src.stride;
2095 const uint8_t *in_what;
2096 const int in_what_stride = xd->plane[0].pre[0].stride;
2097 const uint8_t *best_address;
2098
2099 unsigned int bestsad = INT_MAX;
2100 int best_site = -1;
2101 int last_site = -1;
2102
2103 int ref_row;
2104 int ref_col;
2105
2106 // search_param determines the length of the initial step and hence the number
2107 // of iterations.
2108 // 0 = initial step (MAX_FIRST_STEP) pel
2109 // 1 = (MAX_FIRST_STEP/2) pel,
2110 // 2 = (MAX_FIRST_STEP/4) pel...
2111 // const search_site *ss = &cfg->ss[search_param * cfg->searches_per_step];
2112 const MV *ss_mv = &cfg->ss_mv[search_param * cfg->searches_per_step];
2113 const intptr_t *ss_os = &cfg->ss_os[search_param * cfg->searches_per_step];
2114 const int tot_steps = cfg->total_steps - search_param;
2115
2116 const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
2117 clamp_mv(ref_mv, x->mv_limits.col_min, x->mv_limits.col_max,
2118 x->mv_limits.row_min, x->mv_limits.row_max);
2119 ref_row = ref_mv->row;
2120 ref_col = ref_mv->col;
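  // num00 counts search steps during which the best point never moves off the
  // start position; callers use it to skip that many follow-up searches at
  // finer step sizes.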
2121 *num00 = 0;
2122 best_mv->row = ref_row;
2123 best_mv->col = ref_col;
2124
2125 // Work out the start point for the search
2126 in_what = xd->plane[0].pre[0].buf + ref_row * in_what_stride + ref_col;
2127 best_address = in_what;
2128
2129 // Check the starting position
2130 bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) +
2131 mvsad_err_cost(x, best_mv, &fcenter_mv, sad_per_bit);
2132
2133 i = 0;
2134
2135 for (step = 0; step < tot_steps; step++) {
2136 int all_in = 1, t;
2137
2138 // All_in is true if every one of the points we are checking is within
2139 // the bounds of the image.
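    // (The search-site configuration appears to place each step's extreme
    // row/col offsets in its first four entries, which is why checking
    // ss_mv[i]..ss_mv[i + 3] is enough to bound the whole step.)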
2140 all_in &= ((best_mv->row + ss_mv[i].row) > x->mv_limits.row_min);
2141 all_in &= ((best_mv->row + ss_mv[i + 1].row) < x->mv_limits.row_max);
2142 all_in &= ((best_mv->col + ss_mv[i + 2].col) > x->mv_limits.col_min);
2143 all_in &= ((best_mv->col + ss_mv[i + 3].col) < x->mv_limits.col_max);
2144
2145 // If all the pixels are within the bounds, we don't check whether the
2146 // search point is valid in this loop; otherwise we check each point
2147 // for validity.
2148 if (all_in) {
2149 unsigned int sad_array[4];
2150
2151 for (j = 0; j < cfg->searches_per_step; j += 4) {
2152 unsigned char const *block_offset[4];
2153
2154 for (t = 0; t < 4; t++) block_offset[t] = ss_os[i + t] + best_address;
2155
2156 fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride,
2157 sad_array);
2158
2159 for (t = 0; t < 4; t++, i++) {
2160 if (sad_array[t] < bestsad) {
2161 const MV this_mv = { best_mv->row + ss_mv[i].row,
2162 best_mv->col + ss_mv[i].col };
2163 sad_array[t] +=
2164 mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
2165 if (sad_array[t] < bestsad) {
2166 bestsad = sad_array[t];
2167 best_site = i;
2168 }
2169 }
2170 }
2171 }
2172 } else {
2173 for (j = 0; j < cfg->searches_per_step; j++) {
2174 // Trap illegal vectors
2175 const MV this_mv = { best_mv->row + ss_mv[i].row,
2176 best_mv->col + ss_mv[i].col };
2177
2178 if (is_mv_in(&x->mv_limits, &this_mv)) {
2179 const uint8_t *const check_here = ss_os[i] + best_address;
2180 unsigned int thissad =
2181 fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
2182
2183 if (thissad < bestsad) {
2184 thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
2185 if (thissad < bestsad) {
2186 bestsad = thissad;
2187 best_site = i;
2188 }
2189 }
2190 }
2191 i++;
2192 }
2193 }
2194 if (best_site != last_site) {
2195 best_mv->row += ss_mv[best_site].row;
2196 best_mv->col += ss_mv[best_site].col;
2197 best_address += ss_os[best_site];
2198 last_site = best_site;
2199 #if defined(NEW_DIAMOND_SEARCH)
2200 while (1) {
2201 const MV this_mv = { best_mv->row + ss_mv[best_site].row,
2202 best_mv->col + ss_mv[best_site].col };
2203 if (is_mv_in(&x->mv_limits, &this_mv)) {
2204 const uint8_t *const check_here = ss_os[best_site] + best_address;
2205 unsigned int thissad =
2206 fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
2207 if (thissad < bestsad) {
2208 thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
2209 if (thissad < bestsad) {
2210 bestsad = thissad;
2211 best_mv->row += ss_mv[best_site].row;
2212 best_mv->col += ss_mv[best_site].col;
2213 best_address += ss_os[best_site];
2214 continue;
2215 }
2216 }
2217 }
2218 break;
2219 }
2220 #endif
2221 } else if (best_address == in_what) {
2222 (*num00)++;
2223 }
2224 }
2225 return bestsad;
2226 }
2227
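// Aligns two 1-D integral-projection vectors: a coarse scan every 16 positions
// is followed by successive +/-8, +/-4, +/-2 and +/-1 refinements around the
// best offset found so far. Returns the displacement relative to the centre of
// the reference vector (bw >> 1).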
2228 static int vector_match(int16_t *ref, int16_t *src, int bwl) {
2229 int best_sad = INT_MAX;
2230 int this_sad;
2231 int d;
2232 int center, offset = 0;
2233 int bw = 4 << bwl; // redundant variable, to be changed in the experiments.
2234 for (d = 0; d <= bw; d += 16) {
2235 this_sad = vpx_vector_var(&ref[d], src, bwl);
2236 if (this_sad < best_sad) {
2237 best_sad = this_sad;
2238 offset = d;
2239 }
2240 }
2241 center = offset;
2242
2243 for (d = -8; d <= 8; d += 16) {
2244 int this_pos = offset + d;
2245 // check limit
2246 if (this_pos < 0 || this_pos > bw) continue;
2247 this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
2248 if (this_sad < best_sad) {
2249 best_sad = this_sad;
2250 center = this_pos;
2251 }
2252 }
2253 offset = center;
2254
2255 for (d = -4; d <= 4; d += 8) {
2256 int this_pos = offset + d;
2257 // check limit
2258 if (this_pos < 0 || this_pos > bw) continue;
2259 this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
2260 if (this_sad < best_sad) {
2261 best_sad = this_sad;
2262 center = this_pos;
2263 }
2264 }
2265 offset = center;
2266
2267 for (d = -2; d <= 2; d += 4) {
2268 int this_pos = offset + d;
2269 // check limit
2270 if (this_pos < 0 || this_pos > bw) continue;
2271 this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
2272 if (this_sad < best_sad) {
2273 best_sad = this_sad;
2274 center = this_pos;
2275 }
2276 }
2277 offset = center;
2278
2279 for (d = -1; d <= 1; d += 2) {
2280 int this_pos = offset + d;
2281 // check limit
2282 if (this_pos < 0 || this_pos > bw) continue;
2283 this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
2284 if (this_sad < best_sad) {
2285 best_sad = this_sad;
2286 center = this_pos;
2287 }
2288 }
2289
2290 return (center - (bw >> 1));
2291 }
2292
2293 static const MV search_pos[4] = {
2294 { -1, 0 },
2295 { 0, -1 },
2296 { 0, 1 },
2297 { 1, 0 },
2298 };
2299
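// Integral-projection motion estimation: project the source block and a
// double-sized reference window onto 1-D row/column sums, align the
// projections independently with vector_match() to get a full-pel candidate,
// then refine it with a small SAD-based neighbourhood check. The resulting MV
// is written back in 1/8-pel units.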
2300 unsigned int vp9_int_pro_motion_estimation(const VP9_COMP *cpi, MACROBLOCK *x,
2301 BLOCK_SIZE bsize, int mi_row,
2302 int mi_col, const MV *ref_mv) {
2303 MACROBLOCKD *xd = &x->e_mbd;
2304 MODE_INFO *mi = xd->mi[0];
2305 struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
2306 DECLARE_ALIGNED(16, int16_t, hbuf[128]);
2307 DECLARE_ALIGNED(16, int16_t, vbuf[128]);
2308 DECLARE_ALIGNED(16, int16_t, src_hbuf[64]);
2309 DECLARE_ALIGNED(16, int16_t, src_vbuf[64]);
2310 int idx;
2311 const int bw = 4 << b_width_log2_lookup[bsize];
2312 const int bh = 4 << b_height_log2_lookup[bsize];
2313 const int search_width = bw << 1;
2314 const int search_height = bh << 1;
2315 const int src_stride = x->plane[0].src.stride;
2316 const int ref_stride = xd->plane[0].pre[0].stride;
2317 uint8_t const *ref_buf, *src_buf;
2318 MV *tmp_mv = &xd->mi[0]->mv[0].as_mv;
2319 unsigned int best_sad, tmp_sad, this_sad[4];
2320 MV this_mv;
2321 const int norm_factor = 3 + (bw >> 5);
2322 const YV12_BUFFER_CONFIG *scaled_ref_frame =
2323 vp9_get_scaled_ref_frame(cpi, mi->ref_frame[0]);
2324 MvLimits subpel_mv_limits;
2325
2326 if (scaled_ref_frame) {
2327 int i;
2328 // Swap out the reference frame for a version that's been scaled to
2329 // match the resolution of the current frame, allowing the existing
2330 // motion search code to be used without additional modifications.
2331 for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
2332 vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
2333 }
2334
2335 #if CONFIG_VP9_HIGHBITDEPTH
2336 // TODO(jingning): Implement integral projection functions for high bit-depth
2337 // setting and remove this part of code.
2338 if (xd->bd != 8) {
2339 unsigned int this_sad;
2340 tmp_mv->row = 0;
2341 tmp_mv->col = 0;
2342 this_sad = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf, src_stride,
2343 xd->plane[0].pre[0].buf, ref_stride);
2344
2345 if (scaled_ref_frame) {
2346 int i;
2347 for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
2348 }
2349 return this_sad;
2350 }
2351 #endif
2352
2353 // Set up prediction 1-D reference set
2354 ref_buf = xd->plane[0].pre[0].buf - (bw >> 1);
2355 for (idx = 0; idx < search_width; idx += 16) {
2356 vpx_int_pro_row(&hbuf[idx], ref_buf, ref_stride, bh);
2357 ref_buf += 16;
2358 }
2359
2360 ref_buf = xd->plane[0].pre[0].buf - (bh >> 1) * ref_stride;
2361 for (idx = 0; idx < search_height; ++idx) {
2362 vbuf[idx] = vpx_int_pro_col(ref_buf, bw) >> norm_factor;
2363 ref_buf += ref_stride;
2364 }
2365
2366 // Set up src 1-D reference set
2367 for (idx = 0; idx < bw; idx += 16) {
2368 src_buf = x->plane[0].src.buf + idx;
2369 vpx_int_pro_row(&src_hbuf[idx], src_buf, src_stride, bh);
2370 }
2371
2372 src_buf = x->plane[0].src.buf;
2373 for (idx = 0; idx < bh; ++idx) {
2374 src_vbuf[idx] = vpx_int_pro_col(src_buf, bw) >> norm_factor;
2375 src_buf += src_stride;
2376 }
2377
2378 // Find the best match per 1-D search
2379 tmp_mv->col = vector_match(hbuf, src_hbuf, b_width_log2_lookup[bsize]);
2380 tmp_mv->row = vector_match(vbuf, src_vbuf, b_height_log2_lookup[bsize]);
2381
2382 this_mv = *tmp_mv;
2383 src_buf = x->plane[0].src.buf;
2384 ref_buf = xd->plane[0].pre[0].buf + this_mv.row * ref_stride + this_mv.col;
2385 best_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride, ref_buf, ref_stride);
2386
2387 {
2388 const uint8_t *const pos[4] = {
2389 ref_buf - ref_stride,
2390 ref_buf - 1,
2391 ref_buf + 1,
2392 ref_buf + ref_stride,
2393 };
2394
2395 cpi->fn_ptr[bsize].sdx4df(src_buf, src_stride, pos, ref_stride, this_sad);
2396 }
2397
2398 for (idx = 0; idx < 4; ++idx) {
2399 if (this_sad[idx] < best_sad) {
2400 best_sad = this_sad[idx];
2401 tmp_mv->row = search_pos[idx].row + this_mv.row;
2402 tmp_mv->col = search_pos[idx].col + this_mv.col;
2403 }
2404 }
2405
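  // Use the relative SADs of the vertical and horizontal neighbours to pick a
  // diagonal candidate and test it as one more refinement.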
2406 if (this_sad[0] < this_sad[3])
2407 this_mv.row -= 1;
2408 else
2409 this_mv.row += 1;
2410
2411 if (this_sad[1] < this_sad[2])
2412 this_mv.col -= 1;
2413 else
2414 this_mv.col += 1;
2415
2416 ref_buf = xd->plane[0].pre[0].buf + this_mv.row * ref_stride + this_mv.col;
2417
2418 tmp_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride, ref_buf, ref_stride);
2419 if (best_sad > tmp_sad) {
2420 *tmp_mv = this_mv;
2421 best_sad = tmp_sad;
2422 }
2423
2424 tmp_mv->row *= 8;
2425 tmp_mv->col *= 8;
2426
2427 vp9_set_subpel_mv_search_range(&subpel_mv_limits, &x->mv_limits, ref_mv);
2428 clamp_mv(tmp_mv, subpel_mv_limits.col_min, subpel_mv_limits.col_max,
2429 subpel_mv_limits.row_min, subpel_mv_limits.row_max);
2430
2431 if (scaled_ref_frame) {
2432 int i;
2433 for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
2434 }
2435
2436 return best_sad;
2437 }
2438
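// Scales the exhaustive-search trigger threshold by block size: a 64x64 block
// (width/height log2 of 4 each) uses the full threshold, and smaller blocks
// shift it down. For example, a 16x16 block (log2 of 2 each) uses
// exhaustive_searches_thresh >> 4.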
2439 static int get_exhaustive_threshold(int exhaustive_searches_thresh,
2440 BLOCK_SIZE bsize) {
2441 return exhaustive_searches_thresh >>
2442 (8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]));
2443 }
2444
2445 #if CONFIG_NON_GREEDY_MV
2446 // Runs a sequence of diamond searches in smaller steps for RD.
2447 /* do_refine: If last step (1-away) of n-step search doesn't pick the center
2448 point as the best match, we will do a final 1-away diamond
2449 refining search */
2450 int vp9_full_pixel_diamond_new(const VP9_COMP *cpi, MACROBLOCK *x,
2451 BLOCK_SIZE bsize, MV *mvp_full, int step_param,
2452 int lambda, int do_refine,
2453 const int_mv *nb_full_mvs, int full_mv_num,
2454 MV *best_mv) {
2455 const vp9_variance_fn_ptr_t *fn_ptr = &cpi->fn_ptr[bsize];
2456 const SPEED_FEATURES *const sf = &cpi->sf;
2457 int n, num00 = 0;
2458 int thissme;
2459 int bestsme;
2460 const int further_steps = MAX_MVSEARCH_STEPS - 1 - step_param;
2461 const MV center_mv = { 0, 0 };
2462 vpx_clear_system_state();
2463 diamond_search_sad_new(x, &cpi->ss_cfg, mvp_full, best_mv, step_param, lambda,
2464 &n, fn_ptr, nb_full_mvs, full_mv_num);
2465
2466 bestsme = vp9_get_mvpred_var(x, best_mv, &center_mv, fn_ptr, 0);
2467
2468 // If there won't be more n-step search, check to see if refining search is
2469 // needed.
2470 if (n > further_steps) do_refine = 0;
2471
2472 while (n < further_steps) {
2473 ++n;
2474 if (num00) {
2475 num00--;
2476 } else {
2477 MV temp_mv;
2478 diamond_search_sad_new(x, &cpi->ss_cfg, mvp_full, &temp_mv,
2479 step_param + n, lambda, &num00, fn_ptr,
2480 nb_full_mvs, full_mv_num);
2481 thissme = vp9_get_mvpred_var(x, &temp_mv, &center_mv, fn_ptr, 0);
2482 // check to see if refining search is needed.
2483 if (num00 > further_steps - n) do_refine = 0;
2484
2485 if (thissme < bestsme) {
2486 bestsme = thissme;
2487 *best_mv = temp_mv;
2488 }
2489 }
2490 }
2491
2492 // final 1-away diamond refining search
2493 if (do_refine) {
2494 const int search_range = 8;
2495 MV temp_mv = *best_mv;
2496 vp9_refining_search_sad_new(x, &temp_mv, lambda, search_range, fn_ptr,
2497 nb_full_mvs, full_mv_num);
2498 thissme = vp9_get_mvpred_var(x, &temp_mv, &center_mv, fn_ptr, 0);
2499 if (thissme < bestsme) {
2500 bestsme = thissme;
2501 *best_mv = temp_mv;
2502 }
2503 }
2504
2505 if (sf->exhaustive_searches_thresh < INT_MAX &&
2506 !cpi->rc.is_src_frame_alt_ref) {
2507 const int64_t exhaustive_thr =
2508 get_exhaustive_threshold(sf->exhaustive_searches_thresh, bsize);
2509 if (bestsme > exhaustive_thr) {
2510 full_pixel_exhaustive_new(cpi, x, best_mv, fn_ptr, best_mv, lambda,
2511 nb_full_mvs, full_mv_num);
2512 bestsme = vp9_get_mvpred_var(x, best_mv, &center_mv, fn_ptr, 0);
2513 }
2514 }
2515 return bestsme;
2516 }
2517 #endif // CONFIG_NON_GREEDY_MV
2518
2519 // Runs a sequence of diamond searches in smaller steps for RD.
2520 /* do_refine: If last step (1-away) of n-step search doesn't pick the center
2521 point as the best match, we will do a final 1-away diamond
2522 refining search */
2523 static int full_pixel_diamond(const VP9_COMP *const cpi,
2524 const MACROBLOCK *const x, MV *mvp_full,
2525 int step_param, int sadpb, int further_steps,
2526 int do_refine, int *cost_list,
2527 const vp9_variance_fn_ptr_t *fn_ptr,
2528 const MV *ref_mv, MV *dst_mv) {
2529 MV temp_mv;
2530 int thissme, n, num00 = 0;
2531 int bestsme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv,
2532 step_param, sadpb, &n, fn_ptr, ref_mv);
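  // The diamond stage minimises SAD; re-score its winner with variance plus MV
  // cost (vp9_get_mvpred_var) so that comparisons across stages use the same
  // metric.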
2533 if (bestsme < INT_MAX)
2534 bestsme = vp9_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
2535 *dst_mv = temp_mv;
2536
2537 // If there won't be more n-step search, check to see if refining search is
2538 // needed.
2539 if (n > further_steps) do_refine = 0;
2540
2541 while (n < further_steps) {
2542 ++n;
2543
2544 if (num00) {
2545 num00--;
2546 } else {
2547 thissme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv,
2548 step_param + n, sadpb, &num00, fn_ptr,
2549 ref_mv);
2550 if (thissme < INT_MAX)
2551 thissme = vp9_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
2552
2553 // check to see if refining search is needed.
2554 if (num00 > further_steps - n) do_refine = 0;
2555
2556 if (thissme < bestsme) {
2557 bestsme = thissme;
2558 *dst_mv = temp_mv;
2559 }
2560 }
2561 }
2562
2563 // final 1-away diamond refining search
2564 if (do_refine) {
2565 const int search_range = 8;
2566 MV best_mv = *dst_mv;
2567 thissme = vp9_refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr,
2568 ref_mv);
2569 if (thissme < INT_MAX)
2570 thissme = vp9_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1);
2571 if (thissme < bestsme) {
2572 bestsme = thissme;
2573 *dst_mv = best_mv;
2574 }
2575 }
2576
2577 // Return cost list.
2578 if (cost_list) {
2579 calc_int_cost_list(x, ref_mv, sadpb, fn_ptr, dst_mv, cost_list);
2580 }
2581 return bestsme;
2582 }
2583
2584 // Runs a limited-range exhaustive mesh search using a pattern set
2585 // according to the encode speed profile.
2586 static int full_pixel_exhaustive(const VP9_COMP *const cpi,
2587 const MACROBLOCK *const x, MV *centre_mv_full,
2588 int sadpb, int *cost_list,
2589 const vp9_variance_fn_ptr_t *fn_ptr,
2590 const MV *ref_mv, MV *dst_mv) {
2591 const SPEED_FEATURES *const sf = &cpi->sf;
2592 MV temp_mv = { centre_mv_full->row, centre_mv_full->col };
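  // ref_mv is in 1/8-pel units; f_ref_mv is its full-pel equivalent, used for
  // the SAD-based MV cost inside the mesh search.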
2593 MV f_ref_mv = { ref_mv->row >> 3, ref_mv->col >> 3 };
2594 int bestsme;
2595 int i;
2596 int interval = sf->mesh_patterns[0].interval;
2597 int range = sf->mesh_patterns[0].range;
2598 int baseline_interval_divisor;
2599
2600 // Trap illegal values for interval and range for this function.
2601 if ((range < MIN_RANGE) || (range > MAX_RANGE) || (interval < MIN_INTERVAL) ||
2602 (interval > range))
2603 return INT_MAX;
2604
2605 baseline_interval_divisor = range / interval;
2606
2607 // Check size of proposed first range against magnitude of the centre
2608 // value used as a starting point.
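  // Widening 'range' to at least 5/4 of the larger centre-MV component keeps
  // the starting point well inside the mesh, and 'interval' is raised in
  // proportion so the number of points per pass stays roughly constant.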
2609 range = VPXMAX(range, (5 * VPXMAX(abs(temp_mv.row), abs(temp_mv.col))) / 4);
2610 range = VPXMIN(range, MAX_RANGE);
2611 interval = VPXMAX(interval, range / baseline_interval_divisor);
2612
2613 // initial search
2614 bestsme = exhaustive_mesh_search(x, &f_ref_mv, &temp_mv, range, interval,
2615 sadpb, fn_ptr, &temp_mv);
2616
2617 if ((interval > MIN_INTERVAL) && (range > MIN_RANGE)) {
2618 // Progressive searches with range and step size decreasing each time
2619 // till we reach a step size of 1. Then break out.
2620 for (i = 1; i < MAX_MESH_STEP; ++i) {
2621 // First pass with coarser step and longer range
2622 bestsme = exhaustive_mesh_search(
2623 x, &f_ref_mv, &temp_mv, sf->mesh_patterns[i].range,
2624 sf->mesh_patterns[i].interval, sadpb, fn_ptr, &temp_mv);
2625
2626 if (sf->mesh_patterns[i].interval == 1) break;
2627 }
2628 }
2629
2630 if (bestsme < INT_MAX)
2631 bestsme = vp9_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
2632 *dst_mv = temp_mv;
2633
2634 // Return cost list.
2635 if (cost_list) {
2636 calc_int_cost_list(x, ref_mv, sadpb, fn_ptr, dst_mv, cost_list);
2637 }
2638 return bestsme;
2639 }
2640
2641 #if CONFIG_NON_GREEDY_MV
2642 int64_t vp9_refining_search_sad_new(const MACROBLOCK *x, MV *best_full_mv,
2643 int lambda, int search_range,
2644 const vp9_variance_fn_ptr_t *fn_ptr,
2645 const int_mv *nb_full_mvs,
2646 int full_mv_num) {
2647 const MACROBLOCKD *const xd = &x->e_mbd;
2648 const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
2649 const struct buf_2d *const what = &x->plane[0].src;
2650 const struct buf_2d *const in_what = &xd->plane[0].pre[0];
2651 const uint8_t *best_address = get_buf_from_mv(in_what, best_full_mv);
2652 int64_t best_sad;
2653 int i, j;
2654 vpx_clear_system_state();
2655 {
2656 const int64_t mv_dist = (int64_t)fn_ptr->sdf(what->buf, what->stride,
2657 best_address, in_what->stride)
2658 << LOG2_PRECISION;
2659 const int64_t mv_cost =
2660 vp9_nb_mvs_inconsistency(best_full_mv, nb_full_mvs, full_mv_num);
2661 best_sad = mv_dist + lambda * mv_cost;
2662 }
2663
2664 for (i = 0; i < search_range; i++) {
2665 int best_site = -1;
2666 const int all_in = ((best_full_mv->row - 1) > x->mv_limits.row_min) &
2667 ((best_full_mv->row + 1) < x->mv_limits.row_max) &
2668 ((best_full_mv->col - 1) > x->mv_limits.col_min) &
2669 ((best_full_mv->col + 1) < x->mv_limits.col_max);
2670
2671 if (all_in) {
2672 unsigned int sads[4];
2673 const uint8_t *const positions[4] = { best_address - in_what->stride,
2674 best_address - 1, best_address + 1,
2675 best_address + in_what->stride };
2676
2677 fn_ptr->sdx4df(what->buf, what->stride, positions, in_what->stride, sads);
2678
2679 for (j = 0; j < 4; ++j) {
2680 const MV mv = { best_full_mv->row + neighbors[j].row,
2681 best_full_mv->col + neighbors[j].col };
2682 const int64_t mv_dist = (int64_t)sads[j] << LOG2_PRECISION;
2683 const int64_t mv_cost =
2684 vp9_nb_mvs_inconsistency(&mv, nb_full_mvs, full_mv_num);
2685 const int64_t thissad = mv_dist + lambda * mv_cost;
2686 if (thissad < best_sad) {
2687 best_sad = thissad;
2688 best_site = j;
2689 }
2690 }
2691 } else {
2692 for (j = 0; j < 4; ++j) {
2693 const MV mv = { best_full_mv->row + neighbors[j].row,
2694 best_full_mv->col + neighbors[j].col };
2695
2696 if (is_mv_in(&x->mv_limits, &mv)) {
2697 const int64_t mv_dist =
2698 (int64_t)fn_ptr->sdf(what->buf, what->stride,
2699 get_buf_from_mv(in_what, &mv),
2700 in_what->stride)
2701 << LOG2_PRECISION;
2702 const int64_t mv_cost =
2703 vp9_nb_mvs_inconsistency(&mv, nb_full_mvs, full_mv_num);
2704 const int64_t thissad = mv_dist + lambda * mv_cost;
2705 if (thissad < best_sad) {
2706 best_sad = thissad;
2707 best_site = j;
2708 }
2709 }
2710 }
2711 }
2712
2713 if (best_site == -1) {
2714 break;
2715 } else {
2716 best_full_mv->row += neighbors[best_site].row;
2717 best_full_mv->col += neighbors[best_site].col;
2718 best_address = get_buf_from_mv(in_what, best_full_mv);
2719 }
2720 }
2721
2722 return best_sad;
2723 }
2724 #endif // CONFIG_NON_GREEDY_MV
2725
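// Iterative 1-away refinement: repeatedly evaluate the four cross neighbours
// of the current best full-pel MV and move to the best one, stopping after
// search_range iterations or as soon as no neighbour improves the cost.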
2726 int vp9_refining_search_sad(const MACROBLOCK *x, MV *ref_mv, int error_per_bit,
2727 int search_range,
2728 const vp9_variance_fn_ptr_t *fn_ptr,
2729 const MV *center_mv) {
2730 const MACROBLOCKD *const xd = &x->e_mbd;
2731 const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
2732 const struct buf_2d *const what = &x->plane[0].src;
2733 const struct buf_2d *const in_what = &xd->plane[0].pre[0];
2734 const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
2735 const uint8_t *best_address = get_buf_from_mv(in_what, ref_mv);
2736 unsigned int best_sad =
2737 fn_ptr->sdf(what->buf, what->stride, best_address, in_what->stride) +
2738 mvsad_err_cost(x, ref_mv, &fcenter_mv, error_per_bit);
2739 int i, j;
2740
2741 for (i = 0; i < search_range; i++) {
2742 int best_site = -1;
2743 const int all_in = ((ref_mv->row - 1) > x->mv_limits.row_min) &
2744 ((ref_mv->row + 1) < x->mv_limits.row_max) &
2745 ((ref_mv->col - 1) > x->mv_limits.col_min) &
2746 ((ref_mv->col + 1) < x->mv_limits.col_max);
2747
2748 if (all_in) {
2749 unsigned int sads[4];
2750 const uint8_t *const positions[4] = { best_address - in_what->stride,
2751 best_address - 1, best_address + 1,
2752 best_address + in_what->stride };
2753
2754 fn_ptr->sdx4df(what->buf, what->stride, positions, in_what->stride, sads);
2755
2756 for (j = 0; j < 4; ++j) {
2757 if (sads[j] < best_sad) {
2758 const MV mv = { ref_mv->row + neighbors[j].row,
2759 ref_mv->col + neighbors[j].col };
2760 sads[j] += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
2761 if (sads[j] < best_sad) {
2762 best_sad = sads[j];
2763 best_site = j;
2764 }
2765 }
2766 }
2767 } else {
2768 for (j = 0; j < 4; ++j) {
2769 const MV mv = { ref_mv->row + neighbors[j].row,
2770 ref_mv->col + neighbors[j].col };
2771
2772 if (is_mv_in(&x->mv_limits, &mv)) {
2773 unsigned int sad =
2774 fn_ptr->sdf(what->buf, what->stride,
2775 get_buf_from_mv(in_what, &mv), in_what->stride);
2776 if (sad < best_sad) {
2777 sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
2778 if (sad < best_sad) {
2779 best_sad = sad;
2780 best_site = j;
2781 }
2782 }
2783 }
2784 }
2785 }
2786
2787 if (best_site == -1) {
2788 break;
2789 } else {
2790 ref_mv->row += neighbors[best_site].row;
2791 ref_mv->col += neighbors[best_site].col;
2792 best_address = get_buf_from_mv(in_what, ref_mv);
2793 }
2794 }
2795
2796 return best_sad;
2797 }
2798
2799 // This function is called when we do joint motion search in comp_inter_inter
2800 // mode.
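// It behaves like vp9_refining_search_sad(), but checks all 8 neighbours and
// uses sdaf() so the SAD is taken against the average of the reference block
// and second_pred.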
2801 int vp9_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv, int error_per_bit,
2802 int search_range,
2803 const vp9_variance_fn_ptr_t *fn_ptr,
2804 const MV *center_mv, const uint8_t *second_pred) {
2805 const MV neighbors[8] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 },
2806 { -1, -1 }, { 1, -1 }, { -1, 1 }, { 1, 1 } };
2807 const MACROBLOCKD *const xd = &x->e_mbd;
2808 const struct buf_2d *const what = &x->plane[0].src;
2809 const struct buf_2d *const in_what = &xd->plane[0].pre[0];
2810 const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
2811 unsigned int best_sad = INT_MAX;
2812 int i, j;
2813 clamp_mv(ref_mv, x->mv_limits.col_min, x->mv_limits.col_max,
2814 x->mv_limits.row_min, x->mv_limits.row_max);
2815 best_sad =
2816 fn_ptr->sdaf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
2817 in_what->stride, second_pred) +
2818 mvsad_err_cost(x, ref_mv, &fcenter_mv, error_per_bit);
2819
2820 for (i = 0; i < search_range; ++i) {
2821 int best_site = -1;
2822
2823 for (j = 0; j < 8; ++j) {
2824 const MV mv = { ref_mv->row + neighbors[j].row,
2825 ref_mv->col + neighbors[j].col };
2826
2827 if (is_mv_in(&x->mv_limits, &mv)) {
2828 unsigned int sad =
2829 fn_ptr->sdaf(what->buf, what->stride, get_buf_from_mv(in_what, &mv),
2830 in_what->stride, second_pred);
2831 if (sad < best_sad) {
2832 sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
2833 if (sad < best_sad) {
2834 best_sad = sad;
2835 best_site = j;
2836 }
2837 }
2838 }
2839 }
2840
2841 if (best_site == -1) {
2842 break;
2843 } else {
2844 ref_mv->row += neighbors[best_site].row;
2845 ref_mv->col += neighbors[best_site].col;
2846 }
2847 }
2848 return best_sad;
2849 }
2850
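// Top-level full-pel search dispatcher: runs the pattern or diamond search
// selected by 'method', optionally follows NSTEP/MESH with an exhaustive mesh
// refinement, and for RD re-scores the winner with variance plus MV cost.
//
// Usage sketch (hypothetical caller; the variable names and the
// sf->mv.search_method field are illustrative assumptions, not taken from
// this file):
//   MV best_full_mv;
//   int cost_list[5];
//   int sme = vp9_full_pixel_search(cpi, x, bsize, &mvp_full, step_param,
//                                   sf->mv.search_method, sadpb, cost_list,
//                                   &ref_mv, &best_full_mv, INT_MAX, 1);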
2851 int vp9_full_pixel_search(const VP9_COMP *const cpi, const MACROBLOCK *const x,
2852 BLOCK_SIZE bsize, MV *mvp_full, int step_param,
2853 int search_method, int error_per_bit, int *cost_list,
2854 const MV *ref_mv, MV *tmp_mv, int var_max, int rd) {
2855 const SPEED_FEATURES *const sf = &cpi->sf;
2856 const SEARCH_METHODS method = (SEARCH_METHODS)search_method;
2857 const vp9_variance_fn_ptr_t *fn_ptr = &cpi->fn_ptr[bsize];
2858 int var = 0;
2859 int run_exhaustive_search = 0;
2860
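  // cost_list is filled (e.g. by calc_int_cost_list()) with the centre and
  // neighbouring full-pel costs used later by the sub-pel search; seeding it
  // with INT_MAX lets callers detect when a method did not populate it.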
2861 if (cost_list) {
2862 cost_list[0] = INT_MAX;
2863 cost_list[1] = INT_MAX;
2864 cost_list[2] = INT_MAX;
2865 cost_list[3] = INT_MAX;
2866 cost_list[4] = INT_MAX;
2867 }
2868
2869 switch (method) {
2870 case FAST_DIAMOND:
2871 var = fast_dia_search(x, mvp_full, step_param, error_per_bit, 0,
2872 cost_list, fn_ptr, 1, ref_mv, tmp_mv);
2873 break;
2874 case FAST_HEX:
2875 var = fast_hex_search(x, mvp_full, step_param, error_per_bit, 0,
2876 cost_list, fn_ptr, 1, ref_mv, tmp_mv);
2877 break;
2878 case HEX:
2879 var = hex_search(x, mvp_full, step_param, error_per_bit, 1, cost_list,
2880 fn_ptr, 1, ref_mv, tmp_mv);
2881 break;
2882 case SQUARE:
2883 var = square_search(x, mvp_full, step_param, error_per_bit, 1, cost_list,
2884 fn_ptr, 1, ref_mv, tmp_mv);
2885 break;
2886 case BIGDIA:
2887 var = bigdia_search(x, mvp_full, step_param, error_per_bit, 1, cost_list,
2888 fn_ptr, 1, ref_mv, tmp_mv);
2889 break;
2890 case NSTEP:
2891 case MESH:
2892 var = full_pixel_diamond(cpi, x, mvp_full, step_param, error_per_bit,
2893 MAX_MVSEARCH_STEPS - 1 - step_param, 1,
2894 cost_list, fn_ptr, ref_mv, tmp_mv);
2895 break;
2896 default: assert(0 && "Unknown search method");
2897 }
2898
2899 if (method == NSTEP) {
2900 if (sf->exhaustive_searches_thresh < INT_MAX &&
2901 !cpi->rc.is_src_frame_alt_ref) {
2902 const int64_t exhaustive_thr =
2903 get_exhaustive_threshold(sf->exhaustive_searches_thresh, bsize);
2904 if (var > exhaustive_thr) {
2905 run_exhaustive_search = 1;
2906 }
2907 }
2908 } else if (method == MESH) {
2909 run_exhaustive_search = 1;
2910 }
2911
2912 if (run_exhaustive_search) {
2913 int var_ex;
2914 MV tmp_mv_ex;
2915 var_ex = full_pixel_exhaustive(cpi, x, tmp_mv, error_per_bit, cost_list,
2916 fn_ptr, ref_mv, &tmp_mv_ex);
2917 if (var_ex < var) {
2918 var = var_ex;
2919 *tmp_mv = tmp_mv_ex;
2920 }
2921 }
2922
2923 if (method != NSTEP && method != MESH && rd && var < var_max)
2924 var = vp9_get_mvpred_var(x, tmp_mv, ref_mv, fn_ptr, 1);
2925
2926 return var;
2927 }
2928
2929 // Note(yunqingwang): The following 2 functions are only used in the motion
2930 // vector unit test; they return the extreme motion vectors allowed by the MV
2931 // limits.
2932 #define COMMON_MV_TEST \
2933 SETUP_SUBPEL_SEARCH; \
2934 \
2935 (void)error_per_bit; \
2936 (void)vfp; \
2937 (void)z; \
2938 (void)src_stride; \
2939 (void)y; \
2940 (void)y_stride; \
2941 (void)second_pred; \
2942 (void)w; \
2943 (void)h; \
2944 (void)offset; \
2945 (void)mvjcost; \
2946 (void)mvcost; \
2947 (void)sse1; \
2948 (void)distortion; \
2949 \
2950 (void)halfiters; \
2951 (void)quarteriters; \
2952 (void)eighthiters; \
2953 (void)whichdir; \
2954 (void)allow_hp; \
2955 (void)forced_stop; \
2956 (void)hstep; \
2957 (void)rr; \
2958 (void)rc; \
2959 \
2960 (void)tr; \
2961 (void)tc; \
2962 (void)sse; \
2963 (void)thismse; \
2964 (void)cost_list; \
2965 (void)use_accurate_subpel_search;
2966
2967 // Return the maximum MV.
2968 uint32_t vp9_return_max_sub_pixel_mv(
2969 const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
2970 int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
2971 int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
2972 uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
2973 int h, int use_accurate_subpel_search) {
2974 COMMON_MV_TEST;
2975
2976 (void)minr;
2977 (void)minc;
2978
2979 bestmv->row = maxr;
2980 bestmv->col = maxc;
2981 besterr = 0;
2982
2983 // In the sub-pel motion search, if hp is not used, then the last bit of mv
2984 // has to be 0.
2985 lower_mv_precision(bestmv, allow_hp && use_mv_hp(ref_mv));
2986
2987 return besterr;
2988 }
2989 // Return the minimum MV.
2990 uint32_t vp9_return_min_sub_pixel_mv(
2991 const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
2992 int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
2993 int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
2994 uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
2995 int h, int use_accurate_subpel_search) {
2996 COMMON_MV_TEST;
2997
2998 (void)maxr;
2999 (void)maxc;
3000
3001 bestmv->row = minr;
3002 bestmv->col = minc;
3003 besterr = 0;
3004
3005 // In the sub-pel motion search, if hp is not used, then the last bit of mv
3006 // has to be 0.
3007 lower_mv_precision(bestmv, allow_hp && use_mv_hp(ref_mv));
3008
3009 return besterr;
3010 }
3011