/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <math.h>
#include <assert.h>

#include "config/aom_dsp_rtcd.h"

#include "av1/encoder/global_motion.h"

#include "av1/common/convolve.h"
#include "av1/common/resize.h"
#include "av1/common/warped_motion.h"

#include "av1/encoder/segmentation.h"
#include "av1/encoder/corner_detect.h"
#include "av1/encoder/corner_match.h"
#include "av1/encoder/ransac.h"

#define MIN_INLIER_PROB 0.1

#define MIN_TRANS_THRESH (1 * GM_TRANS_DECODE_FACTOR)

// Border over which to compute the global motion
#define ERRORADV_BORDER 0

// Number of pyramid levels in disflow computation
#define N_LEVELS 2
// Size of square patches in the disflow dense grid
#define PATCH_SIZE 8
// Center point of square patch
#define PATCH_CENTER ((PATCH_SIZE + 1) >> 1)
// Step size between patches, lower value means greater patch overlap
#define PATCH_STEP 1
// Minimum size of border padding for disflow
#define MIN_PAD 7
// Warp error convergence threshold for disflow
#define DISFLOW_ERROR_TR 0.01
// Max number of iterations if warp convergence is not found
#define DISFLOW_MAX_ITR 10

// Struct for an image pyramid
typedef struct {
  int n_levels;
  int pad_size;
  int has_gradient;
  int widths[N_LEVELS];
  int heights[N_LEVELS];
  int strides[N_LEVELS];
  int level_loc[N_LEVELS];
  unsigned char *level_buffer;
  double *level_dx_buffer;
  double *level_dy_buffer;
} ImagePyramid;
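
// All levels share a single allocation: level_loc[l] is the offset of the
// first in-bounds pixel of level l inside level_buffer, past the padded
// border. For example, with a 64x64 frame, pad_size == 8 and two levels,
// level 0 has stride 64 + 2 * 8 = 80 and level_loc[0] = 80 * 8 + 8 = 648,
// while level 1 has stride 32 + 2 * 8 = 48 and
// level_loc[1] = 648 + 80 * (2 * 8 + 64) = 7048 (see update_level_dims()).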

int av1_is_enough_erroradvantage(double best_erroradvantage, int params_cost) {
  return best_erroradvantage < erroradv_tr &&
         best_erroradvantage * params_cost < erroradv_prod_tr;
}

static void convert_to_params(const double *params, int32_t *model) {
  int i;
  int alpha_present = 0;
  model[0] = (int32_t)floor(params[0] * (1 << GM_TRANS_PREC_BITS) + 0.5);
  model[1] = (int32_t)floor(params[1] * (1 << GM_TRANS_PREC_BITS) + 0.5);
  model[0] = (int32_t)clamp(model[0], GM_TRANS_MIN, GM_TRANS_MAX) *
             GM_TRANS_DECODE_FACTOR;
  model[1] = (int32_t)clamp(model[1], GM_TRANS_MIN, GM_TRANS_MAX) *
             GM_TRANS_DECODE_FACTOR;

  for (i = 2; i < 6; ++i) {
    const int diag_value = ((i == 2 || i == 5) ? (1 << GM_ALPHA_PREC_BITS) : 0);
    model[i] = (int32_t)floor(params[i] * (1 << GM_ALPHA_PREC_BITS) + 0.5);
    model[i] =
        (int32_t)clamp(model[i] - diag_value, GM_ALPHA_MIN, GM_ALPHA_MAX);
    alpha_present |= (model[i] != 0);
    model[i] = (model[i] + diag_value) * GM_ALPHA_DECODE_FACTOR;
  }
  for (; i < 8; ++i) {
    model[i] = (int32_t)floor(params[i] * (1 << GM_ROW3HOMO_PREC_BITS) + 0.5);
    model[i] = (int32_t)clamp(model[i], GM_ROW3HOMO_MIN, GM_ROW3HOMO_MAX) *
               GM_ROW3HOMO_DECODE_FACTOR;
    alpha_present |= (model[i] != 0);
  }

  if (!alpha_present) {
    if (abs(model[0]) < MIN_TRANS_THRESH && abs(model[1]) < MIN_TRANS_THRESH) {
      model[0] = 0;
      model[1] = 0;
    }
  }
}
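
// A worked example of the quantization above, assuming the usual precision
// constants from av1/common/mv.h (GM_TRANS_PREC_BITS == 6,
// WARPEDMODEL_PREC_BITS == 16, so GM_TRANS_DECODE_FACTOR == 1 << 10):
// a translation params[0] == 0.5 is first rounded to
// floor(0.5 * 64 + 0.5) == 32, then (after clamping) rescaled to
// 32 * 1024 == 32768 == 0.5 * (1 << WARPEDMODEL_PREC_BITS), i.e. exactly the
// value the warped motion library expects.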

void av1_convert_model_to_params(const double *params,
                                 WarpedMotionParams *model) {
  convert_to_params(params, model->wmmat);
  model->wmtype = get_wmtype(model);
  model->invalid = 0;
}

// Adds some offset to a global motion parameter and handles
// all of the necessary precision shifts, clamping, and
// zero-centering.
static int32_t add_param_offset(int param_index, int32_t param_value,
                                int32_t offset) {
  const int scale_vals[3] = { GM_TRANS_PREC_DIFF, GM_ALPHA_PREC_DIFF,
                              GM_ROW3HOMO_PREC_DIFF };
  const int clamp_vals[3] = { GM_TRANS_MAX, GM_ALPHA_MAX, GM_ROW3HOMO_MAX };
  // type of param: 0 - translation, 1 - affine, 2 - homography
  const int param_type = (param_index < 2 ? 0 : (param_index < 6 ? 1 : 2));
  const int is_one_centered = (param_index == 2 || param_index == 5);

  // Make parameter zero-centered and offset the shift that was done to make
  // it compatible with the warped model
  param_value = (param_value - (is_one_centered << WARPEDMODEL_PREC_BITS)) >>
                scale_vals[param_type];
  // Add desired offset to the rescaled/zero-centered parameter
  param_value += offset;
  // Clamp the parameter so it does not overflow the number of bits allotted
  // to it in the bitstream
  param_value = (int32_t)clamp(param_value, -clamp_vals[param_type],
                               clamp_vals[param_type]);
  // Rescale the parameter to WARPEDMODEL_PREC_BITS precision so it is
  // compatible with the warped motion library
  param_value *= (1 << scale_vals[param_type]);

  // Undo the zero-centering step if necessary
  return param_value + (is_one_centered << WARPEDMODEL_PREC_BITS);
}
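
// For example, stepping the diagonal affine parameter wmmat[2] by offset 1:
// starting from the identity value 1 << WARPEDMODEL_PREC_BITS, the value is
// zero-centered to 0, shifted down by GM_ALPHA_PREC_DIFF, offset to 1,
// clamped, shifted back up, and re-centered, yielding
// (1 << WARPEDMODEL_PREC_BITS) + (1 << GM_ALPHA_PREC_DIFF) -- i.e. one
// quantization step of the bitstream's alpha precision, not one full unit
// of warp precision.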

static void force_wmtype(WarpedMotionParams *wm, TransformationType wmtype) {
  switch (wmtype) {
    case IDENTITY:
      wm->wmmat[0] = 0;
      wm->wmmat[1] = 0;
      AOM_FALLTHROUGH_INTENDED;
    case TRANSLATION:
      wm->wmmat[2] = 1 << WARPEDMODEL_PREC_BITS;
      wm->wmmat[3] = 0;
      AOM_FALLTHROUGH_INTENDED;
    case ROTZOOM:
      wm->wmmat[4] = -wm->wmmat[3];
      wm->wmmat[5] = wm->wmmat[2];
      AOM_FALLTHROUGH_INTENDED;
    case AFFINE: wm->wmmat[6] = wm->wmmat[7] = 0; break;
    default: assert(0);
  }
  wm->wmtype = wmtype;
}

#if CONFIG_AV1_HIGHBITDEPTH
static int64_t highbd_warp_error(
    WarpedMotionParams *wm, const uint16_t *const ref, int width, int height,
    int stride, const uint16_t *const dst, int p_col, int p_row, int p_width,
    int p_height, int p_stride, int subsampling_x, int subsampling_y, int bd,
    int64_t best_error, uint8_t *segment_map, int segment_map_stride) {
  int64_t gm_sumerr = 0;
  const int error_bsize_w = AOMMIN(p_width, WARP_ERROR_BLOCK);
  const int error_bsize_h = AOMMIN(p_height, WARP_ERROR_BLOCK);
  uint16_t tmp[WARP_ERROR_BLOCK * WARP_ERROR_BLOCK];

  ConvolveParams conv_params = get_conv_params(0, 0, bd);
  conv_params.use_dist_wtd_comp_avg = 0;
  for (int i = p_row; i < p_row + p_height; i += WARP_ERROR_BLOCK) {
    for (int j = p_col; j < p_col + p_width; j += WARP_ERROR_BLOCK) {
      int seg_x = j >> WARP_ERROR_BLOCK_LOG;
      int seg_y = i >> WARP_ERROR_BLOCK_LOG;
      // Only compute the error if this block contains inliers from the motion
      // model
      if (!segment_map[seg_y * segment_map_stride + seg_x]) continue;
      // Avoid warping extra WARP_ERROR_BLOCK-sized blocks in the padded
      // region of the frame when p_width and p_height are not multiples of
      // WARP_ERROR_BLOCK
      const int warp_w = AOMMIN(error_bsize_w, p_col + p_width - j);
      const int warp_h = AOMMIN(error_bsize_h, p_row + p_height - i);
      highbd_warp_plane(wm, ref, width, height, stride, tmp, j, i, warp_w,
                        warp_h, WARP_ERROR_BLOCK, subsampling_x, subsampling_y,
                        bd, &conv_params);
      gm_sumerr += av1_calc_highbd_frame_error(tmp, WARP_ERROR_BLOCK,
                                               dst + j + i * p_stride, warp_w,
                                               warp_h, p_stride, bd);
      if (gm_sumerr > best_error) return INT64_MAX;
    }
  }
  return gm_sumerr;
}
#endif

static int64_t warp_error(WarpedMotionParams *wm, const uint8_t *const ref,
                          int width, int height, int stride,
                          const uint8_t *const dst, int p_col, int p_row,
                          int p_width, int p_height, int p_stride,
                          int subsampling_x, int subsampling_y,
                          int64_t best_error, uint8_t *segment_map,
                          int segment_map_stride) {
  int64_t gm_sumerr = 0;
  int warp_w, warp_h;
  const int error_bsize_w = AOMMIN(p_width, WARP_ERROR_BLOCK);
  const int error_bsize_h = AOMMIN(p_height, WARP_ERROR_BLOCK);
  DECLARE_ALIGNED(16, uint8_t, tmp[WARP_ERROR_BLOCK * WARP_ERROR_BLOCK]);
  ConvolveParams conv_params = get_conv_params(0, 0, 8);
  conv_params.use_dist_wtd_comp_avg = 0;

  for (int i = p_row; i < p_row + p_height; i += WARP_ERROR_BLOCK) {
    for (int j = p_col; j < p_col + p_width; j += WARP_ERROR_BLOCK) {
      int seg_x = j >> WARP_ERROR_BLOCK_LOG;
      int seg_y = i >> WARP_ERROR_BLOCK_LOG;
      // Only compute the error if this block contains inliers from the motion
      // model
      if (!segment_map[seg_y * segment_map_stride + seg_x]) continue;
      // Avoid warping extra WARP_ERROR_BLOCK-sized blocks in the padded
      // region of the frame when p_width and p_height are not multiples of
      // WARP_ERROR_BLOCK
      warp_w = AOMMIN(error_bsize_w, p_col + p_width - j);
      warp_h = AOMMIN(error_bsize_h, p_row + p_height - i);
      warp_plane(wm, ref, width, height, stride, tmp, j, i, warp_w, warp_h,
                 WARP_ERROR_BLOCK, subsampling_x, subsampling_y, &conv_params);

      gm_sumerr +=
          av1_calc_frame_error(tmp, WARP_ERROR_BLOCK, dst + j + i * p_stride,
                               warp_w, warp_h, p_stride);
      if (gm_sumerr > best_error) return INT64_MAX;
    }
  }
  return gm_sumerr;
}

int64_t av1_warp_error(WarpedMotionParams *wm, int use_hbd, int bd,
                       const uint8_t *ref, int width, int height, int stride,
                       uint8_t *dst, int p_col, int p_row, int p_width,
                       int p_height, int p_stride, int subsampling_x,
                       int subsampling_y, int64_t best_error,
                       uint8_t *segment_map, int segment_map_stride) {
  if (wm->wmtype <= AFFINE)
    if (!av1_get_shear_params(wm)) return INT64_MAX;
#if CONFIG_AV1_HIGHBITDEPTH
  if (use_hbd)
    return highbd_warp_error(wm, CONVERT_TO_SHORTPTR(ref), width, height,
                             stride, CONVERT_TO_SHORTPTR(dst), p_col, p_row,
                             p_width, p_height, p_stride, subsampling_x,
                             subsampling_y, bd, best_error, segment_map,
                             segment_map_stride);
#endif
  (void)use_hbd;
  (void)bd;
  return warp_error(wm, ref, width, height, stride, dst, p_col, p_row, p_width,
                    p_height, p_stride, subsampling_x, subsampling_y,
                    best_error, segment_map, segment_map_stride);
}

// Factors used to calculate the thresholds for av1_warp_error
static double thresh_factors[GM_REFINEMENT_COUNT] = { 1.25, 1.20, 1.15, 1.10,
                                                      1.05 };

static INLINE int64_t calc_approx_erroradv_threshold(
    double scaling_factor, int64_t erroradv_threshold) {
  return erroradv_threshold <
                 (int64_t)(((double)INT64_MAX / scaling_factor) + 0.5)
             ? (int64_t)(scaling_factor * erroradv_threshold + 0.5)
             : INT64_MAX;
}

int64_t av1_refine_integerized_param(
    WarpedMotionParams *wm, TransformationType wmtype, int use_hbd, int bd,
    uint8_t *ref, int r_width, int r_height, int r_stride, uint8_t *dst,
    int d_width, int d_height, int d_stride, int n_refinements,
    int64_t best_frame_error, uint8_t *segment_map, int segment_map_stride,
    int64_t erroradv_threshold) {
  static const int max_trans_model_params[TRANS_TYPES] = { 0, 2, 4, 6 };
  const int border = ERRORADV_BORDER;
  int i = 0, p;
  int n_params = max_trans_model_params[wmtype];
  int32_t *param_mat = wm->wmmat;
  int64_t step_error, best_error;
  int32_t step;
  int32_t *param;
  int32_t curr_param;
  int32_t best_param;

  force_wmtype(wm, wmtype);
  best_error =
      av1_warp_error(wm, use_hbd, bd, ref, r_width, r_height, r_stride,
                     dst + border * d_stride + border, border, border,
                     d_width - 2 * border, d_height - 2 * border, d_stride, 0,
                     0, best_frame_error, segment_map, segment_map_stride);
  best_error = AOMMIN(best_error, best_frame_error);
  step = 1 << (n_refinements - 1);
  for (i = 0; i < n_refinements; i++, step >>= 1) {
    int64_t error_adv_thresh =
        calc_approx_erroradv_threshold(thresh_factors[i], erroradv_threshold);
    for (p = 0; p < n_params; ++p) {
      int step_dir = 0;
      // Skip searches for parameters that are forced to be 0
      param = param_mat + p;
      curr_param = *param;
      best_param = curr_param;
      // Look to the left
      *param = add_param_offset(p, curr_param, -step);
      step_error =
          av1_warp_error(wm, use_hbd, bd, ref, r_width, r_height, r_stride,
                         dst + border * d_stride + border, border, border,
                         d_width - 2 * border, d_height - 2 * border, d_stride,
                         0, 0, AOMMIN(best_error, error_adv_thresh),
                         segment_map, segment_map_stride);
      if (step_error < best_error) {
        best_error = step_error;
        best_param = *param;
        step_dir = -1;
      }

      // Look to the right
      *param = add_param_offset(p, curr_param, step);
      step_error =
          av1_warp_error(wm, use_hbd, bd, ref, r_width, r_height, r_stride,
                         dst + border * d_stride + border, border, border,
                         d_width - 2 * border, d_height - 2 * border, d_stride,
                         0, 0, AOMMIN(best_error, error_adv_thresh),
                         segment_map, segment_map_stride);
      if (step_error < best_error) {
        best_error = step_error;
        best_param = *param;
        step_dir = 1;
      }
      *param = best_param;

      // Keep stepping in the direction chosen above until the error stops
      // decreasing for the current step size
      while (step_dir) {
        *param = add_param_offset(p, best_param, step * step_dir);
        step_error =
            av1_warp_error(wm, use_hbd, bd, ref, r_width, r_height, r_stride,
                           dst + border * d_stride + border, border, border,
                           d_width - 2 * border, d_height - 2 * border,
                           d_stride, 0, 0, AOMMIN(best_error, error_adv_thresh),
                           segment_map, segment_map_stride);
        if (step_error < best_error) {
          best_error = step_error;
          best_param = *param;
        } else {
          *param = best_param;
          step_dir = 0;
        }
      }
    }
  }
  force_wmtype(wm, wmtype);
  wm->wmtype = get_wmtype(wm);
  return best_error;
}

unsigned char *av1_downconvert_frame(YV12_BUFFER_CONFIG *frm, int bit_depth) {
  int i, j;
  uint16_t *orig_buf = CONVERT_TO_SHORTPTR(frm->y_buffer);
  uint8_t *buf_8bit = frm->y_buffer_8bit;
  assert(buf_8bit);
  if (!frm->buf_8bit_valid) {
    for (i = 0; i < frm->y_height; ++i) {
      for (j = 0; j < frm->y_width; ++j) {
        buf_8bit[i * frm->y_stride + j] =
            orig_buf[i * frm->y_stride + j] >> (bit_depth - 8);
      }
    }
    frm->buf_8bit_valid = 1;
  }
  return buf_8bit;
}

static void get_inliers_from_indices(MotionModel *params,
                                     int *correspondences) {
  int *inliers_tmp = (int *)aom_malloc(2 * MAX_CORNERS * sizeof(*inliers_tmp));
  memset(inliers_tmp, 0, 2 * MAX_CORNERS * sizeof(*inliers_tmp));

  for (int i = 0; i < params->num_inliers; i++) {
    int index = params->inliers[i];
    inliers_tmp[2 * i] = correspondences[4 * index];
    inliers_tmp[2 * i + 1] = correspondences[4 * index + 1];
  }
  memcpy(params->inliers, inliers_tmp, sizeof(*inliers_tmp) * 2 * MAX_CORNERS);
  aom_free(inliers_tmp);
}

#define FEAT_COUNT_TR 3
#define SEG_COUNT_TR 0.40
void av1_compute_feature_segmentation_map(uint8_t *segment_map, int width,
                                          int height, int *inliers,
                                          int num_inliers) {
  int seg_count = 0;
  memset(segment_map, 0, sizeof(*segment_map) * width * height);

  for (int i = 0; i < num_inliers; i++) {
    int x = inliers[i * 2];
    int y = inliers[i * 2 + 1];
    int seg_x = x >> WARP_ERROR_BLOCK_LOG;
    int seg_y = y >> WARP_ERROR_BLOCK_LOG;
    segment_map[seg_y * width + seg_x] += 1;
  }

  for (int i = 0; i < height; i++) {
    for (int j = 0; j < width; j++) {
      uint8_t feat_count = segment_map[i * width + j];
      segment_map[i * width + j] = (feat_count >= FEAT_COUNT_TR);
      seg_count += (segment_map[i * width + j]);
    }
  }

  // If this motion does not make up a large enough portion of the frame,
  // use the unsegmented version of the error metric
  if (seg_count < (width * height * SEG_COUNT_TR))
    memset(segment_map, 1, width * height * sizeof(*segment_map));
}
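
// Here width and height are the segment map's dimensions, i.e. the frame
// dimensions in units of WARP_ERROR_BLOCK. For example, with
// WARP_ERROR_BLOCK_LOG == 5 (32x32 blocks), an inlier at pixel (100, 70)
// increments cell (3, 2), and a cell is marked usable once it collects at
// least FEAT_COUNT_TR inliers.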

static int compute_global_motion_feature_based(
    TransformationType type, unsigned char *src_buffer, int src_width,
    int src_height, int src_stride, int *src_corners, int num_src_corners,
    YV12_BUFFER_CONFIG *ref, int bit_depth, int *num_inliers_by_motion,
    MotionModel *params_by_motion, int num_motions) {
  int i;
  int num_ref_corners;
  int num_correspondences;
  int *correspondences;
  int ref_corners[2 * MAX_CORNERS];
  unsigned char *ref_buffer = ref->y_buffer;
  RansacFunc ransac = av1_get_ransac_type(type);

  if (ref->flags & YV12_FLAG_HIGHBITDEPTH) {
    ref_buffer = av1_downconvert_frame(ref, bit_depth);
  }

  num_ref_corners =
      av1_fast_corner_detect(ref_buffer, ref->y_width, ref->y_height,
                             ref->y_stride, ref_corners, MAX_CORNERS);

  // Find correspondences between the two images
  correspondences =
      (int *)malloc(num_src_corners * 4 * sizeof(*correspondences));
  num_correspondences = av1_determine_correspondence(
      src_buffer, (int *)src_corners, num_src_corners, ref_buffer,
      (int *)ref_corners, num_ref_corners, src_width, src_height, src_stride,
      ref->y_stride, correspondences);

  ransac(correspondences, num_correspondences, num_inliers_by_motion,
         params_by_motion, num_motions);

  // Set num_inliers = 0 for motions with too few inliers so they are ignored.
  for (i = 0; i < num_motions; ++i) {
    if (num_inliers_by_motion[i] < MIN_INLIER_PROB * num_correspondences ||
        num_correspondences == 0) {
      num_inliers_by_motion[i] = 0;
    } else {
      get_inliers_from_indices(&params_by_motion[i], correspondences);
    }
  }

  free(correspondences);

  // Return true if any one of the motions has inliers.
  for (i = 0; i < num_motions; ++i) {
    if (num_inliers_by_motion[i] > 0) return 1;
  }
  return 0;
}

// Don't use points around the frame border since they are less reliable
static INLINE int valid_point(int x, int y, int width, int height) {
  return (x > (PATCH_SIZE + PATCH_CENTER)) &&
         (x < (width - PATCH_SIZE - PATCH_CENTER)) &&
         (y > (PATCH_SIZE + PATCH_CENTER)) &&
         (y < (height - PATCH_SIZE - PATCH_CENTER));
}

static int determine_disflow_correspondence(int *frm_corners,
                                            int num_frm_corners, double *flow_u,
                                            double *flow_v, int width,
                                            int height, int stride,
                                            double *correspondences) {
  int num_correspondences = 0;
  int x, y;
  for (int i = 0; i < num_frm_corners; ++i) {
    x = frm_corners[2 * i];
    y = frm_corners[2 * i + 1];
    if (valid_point(x, y, width, height)) {
      correspondences[4 * num_correspondences] = x;
      correspondences[4 * num_correspondences + 1] = y;
      correspondences[4 * num_correspondences + 2] = x + flow_u[y * stride + x];
      correspondences[4 * num_correspondences + 3] = y + flow_v[y * stride + x];
      num_correspondences++;
    }
  }
  return num_correspondences;
}

static double getCubicValue(double p[4], double x) {
  return p[1] + 0.5 * x *
                    (p[2] - p[0] +
                     x * (2.0 * p[0] - 5.0 * p[1] + 4.0 * p[2] - p[3] +
                          x * (3.0 * (p[1] - p[2]) + p[3] - p[0])));
}
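
// getCubicValue() is the standard Catmull-Rom cubic, interpolating between
// p[1] and p[2] for x in [0, 1]: getCubicValue(p, 0.0) == p[1] and
// getCubicValue(p, 1.0) == p[2], with p[0] and p[3] only shaping the slope
// at the endpoints.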

static void get_subcolumn(unsigned char *ref, double col[4], int stride, int x,
                          int y_start) {
  int i;
  for (i = 0; i < 4; ++i) {
    col[i] = ref[(i + y_start) * stride + x];
  }
}

static double bicubic(unsigned char *ref, double x, double y, int stride) {
  double arr[4];
  int k;
  int i = (int)x;
  int j = (int)y;
  for (k = 0; k < 4; ++k) {
    double arr_temp[4];
    get_subcolumn(ref, arr_temp, stride, i + k - 1, j - 1);
    arr[k] = getCubicValue(arr_temp, y - j);
  }
  return getCubicValue(arr, x - i);
}

// Interpolate a warped sample, using bicubic interpolation wherever the full
// 4x4 neighborhood is available and falling back to cubic/bilinear edge
// handling near the frame borders
static unsigned char interpolate(unsigned char *ref, double x, double y,
                                 int width, int height, int stride) {
  if (x < 0 && y < 0)
    return ref[0];
  else if (x < 0 && y > height - 1)
    return ref[(height - 1) * stride];
  else if (x > width - 1 && y < 0)
    return ref[width - 1];
  else if (x > width - 1 && y > height - 1)
    return ref[(height - 1) * stride + (width - 1)];
  else if (x < 0) {
    int v;
    int i = (int)y;
    double a = y - i;
    if (y > 1 && y < height - 2) {
      double arr[4];
      get_subcolumn(ref, arr, stride, 0, i - 1);
      return clamp((int)(getCubicValue(arr, a) + 0.5), 0, 255);
    }
    v = (int)(ref[i * stride] * (1 - a) + ref[(i + 1) * stride] * a + 0.5);
    return clamp(v, 0, 255);
  } else if (y < 0) {
    int v;
    int j = (int)x;
    double b = x - j;
    if (x > 1 && x < width - 2) {
      double arr[4] = { ref[j - 1], ref[j], ref[j + 1], ref[j + 2] };
      return clamp((int)(getCubicValue(arr, b) + 0.5), 0, 255);
    }
    v = (int)(ref[j] * (1 - b) + ref[j + 1] * b + 0.5);
    return clamp(v, 0, 255);
  } else if (x > width - 1) {
    int v;
    int i = (int)y;
    double a = y - i;
    if (y > 1 && y < height - 2) {
      double arr[4];
      get_subcolumn(ref, arr, stride, width - 1, i - 1);
      return clamp((int)(getCubicValue(arr, a) + 0.5), 0, 255);
    }
    v = (int)(ref[i * stride + width - 1] * (1 - a) +
              ref[(i + 1) * stride + width - 1] * a + 0.5);
    return clamp(v, 0, 255);
  } else if (y > height - 1) {
    int v;
    int j = (int)x;
    double b = x - j;
    if (x > 1 && x < width - 2) {
      int row = (height - 1) * stride;
      double arr[4] = { ref[row + j - 1], ref[row + j], ref[row + j + 1],
                        ref[row + j + 2] };
      return clamp((int)(getCubicValue(arr, b) + 0.5), 0, 255);
    }
    v = (int)(ref[(height - 1) * stride + j] * (1 - b) +
              ref[(height - 1) * stride + j + 1] * b + 0.5);
    return clamp(v, 0, 255);
  } else if (x > 1 && y > 1 && x < width - 2 && y < height - 2) {
    return clamp((int)(bicubic(ref, x, y, stride) + 0.5), 0, 255);
  } else {
    int i = (int)y;
    int j = (int)x;
    double a = y - i;
    double b = x - j;
    int v = (int)(ref[i * stride + j] * (1 - a) * (1 - b) +
                  ref[i * stride + j + 1] * (1 - a) * b +
                  ref[(i + 1) * stride + j] * a * (1 - b) +
                  ref[(i + 1) * stride + j + 1] * a * b);
    return clamp(v, 0, 255);
  }
}

// Warps a block using flow vector [u, v] and computes the MSE; also writes
// the per-pixel differences into dt for use in compute_flow_system()
static double compute_warp_and_error(unsigned char *ref, unsigned char *frm,
                                     int width, int height, int stride, int x,
                                     int y, double u, double v, int16_t *dt) {
  int i, j;
  unsigned char warped;
  double x_w, y_w;
  double mse = 0;
  int16_t err = 0;
  for (i = y; i < y + PATCH_SIZE; ++i)
    for (j = x; j < x + PATCH_SIZE; ++j) {
      x_w = (double)j + u;
      y_w = (double)i + v;
      warped = interpolate(ref, x_w, y_w, width, height, stride);
      err = warped - frm[j + i * stride];
      mse += err * err;
      dt[(i - y) * PATCH_SIZE + (j - x)] = err;
    }

  mse /= (PATCH_SIZE * PATCH_SIZE);
  return mse;
}

// Computes the components of the system of equations used to solve for
// a flow vector. This includes:
// 1.) The Hessian matrix for optical flow. This matrix is in the
// form of:
//
//       M = |sum(dx * dx)  sum(dx * dy)|
//           |sum(dx * dy)  sum(dy * dy)|
//
// 2.) b = |sum(dx * dt)|
//         |sum(dy * dt)|
// where the sums are computed over a square window of PATCH_SIZE.
static INLINE void compute_flow_system(const double *dx, int dx_stride,
                                       const double *dy, int dy_stride,
                                       const int16_t *dt, int dt_stride,
                                       double *M, double *b) {
  for (int i = 0; i < PATCH_SIZE; i++) {
    for (int j = 0; j < PATCH_SIZE; j++) {
      M[0] += dx[i * dx_stride + j] * dx[i * dx_stride + j];
      M[1] += dx[i * dx_stride + j] * dy[i * dy_stride + j];
      M[3] += dy[i * dy_stride + j] * dy[i * dy_stride + j];

      b[0] += dx[i * dx_stride + j] * dt[i * dt_stride + j];
      b[1] += dy[i * dy_stride + j] * dt[i * dt_stride + j];
    }
  }

  M[2] = M[1];
}
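
// The 2x2 system M * [du; dv] = b built above is the classical Lucas-Kanade
// normal-equation step: its solution is the least-squares flow increment for
// the patch, and compute_flow_at_point() below adds it to the running (u, v)
// estimate on each iteration.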

// Solves a general Mx = b where M is a 2x2 matrix and b is a 2x1 matrix
static INLINE void solve_2x2_system(const double *M, const double *b,
                                    double *output_vec) {
  double M_0 = M[0];
  double M_3 = M[3];
  double det = (M_0 * M_3) - (M[1] * M[2]);
  if (det < 1e-5) {
    // Handle singular matrix
    // TODO(sarahparker) compare results using pseudo inverse instead
    M_0 += 1e-10;
    M_3 += 1e-10;
    det = (M_0 * M_3) - (M[1] * M[2]);
  }
  const double det_inv = 1 / det;
  const double mult_b0 = det_inv * b[0];
  const double mult_b1 = det_inv * b[1];
  output_vec[0] = M_3 * mult_b0 - M[1] * mult_b1;
  output_vec[1] = -M[2] * mult_b0 + M_0 * mult_b1;
}
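
// Sanity-check example: M = { 2, 0, 0, 2 } and b = { 2, 4 } gives det == 4,
// so output_vec == { 1, 2 }, i.e. the solution of [2 0; 0 2] * x = [2; 4].
// (M built by compute_flow_system() is a sum of outer products and hence
// symmetric positive semi-definite, which is why det < 1e-5 suffices as the
// singularity test.)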

/*
static INLINE void image_difference(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    int16_t *dst, int dst_stride, int height,
                                    int width) {
  const int block_unit = 8;
  // Take difference in 8x8 blocks to make use of optimized diff function
  for (int i = 0; i < height; i += block_unit) {
    for (int j = 0; j < width; j += block_unit) {
      aom_subtract_block(block_unit, block_unit, dst + i * dst_stride + j,
                         dst_stride, src + i * src_stride + j, src_stride,
                         ref + i * ref_stride + j, ref_stride);
    }
  }
}
*/

// Compute an image gradient using a Sobel filter.
// If dir == 1, compute the x gradient. If dir == 0, compute y. This function
// assumes the images have been padded so that they can be processed in units
// of 8.
static INLINE void sobel_xy_image_gradient(const uint8_t *src, int src_stride,
                                           double *dst, int dst_stride,
                                           int height, int width, int dir) {
  double norm = 1.0;
  // TODO(sarahparker) experiment with doing this over larger block sizes
  const int block_unit = 8;
  // Filter in 8x8 blocks to eventually make use of optimized convolve function
  for (int i = 0; i < height; i += block_unit) {
    for (int j = 0; j < width; j += block_unit) {
      av1_convolve_2d_sobel_y_c(src + i * src_stride + j, src_stride,
                                dst + i * dst_stride + j, dst_stride,
                                block_unit, block_unit, dir, norm);
    }
  }
}
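
// For reference, the standard Sobel kernels being applied are
//
//       |-1  0  1|            |-1 -2 -1|
//   Gx: |-2  0  2|   and  Gy: | 0  0  0|
//       |-1  0  1|            | 1  2  1|
//
// (up to the sign and normalization conventions of
// av1_convolve_2d_sobel_y_c).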

static ImagePyramid *alloc_pyramid(int width, int height, int pad_size,
                                   int compute_gradient) {
  ImagePyramid *pyr = aom_malloc(sizeof(*pyr));
  pyr->has_gradient = compute_gradient;
  // 2 * width * height is an upper bound for a buffer that fits all pyramid
  // levels: each level halves both dimensions, so the unpadded pixel data
  // totals at most width * height * (1 + 1/4 + 1/16 + ...) < (4/3) * width *
  // height, and the second term covers each level's padded border rows.
  const int buffer_size =
      sizeof(*pyr->level_buffer) *
      (2 * width * height + (width + 2 * pad_size) * 2 * pad_size * N_LEVELS);
  pyr->level_buffer = aom_malloc(buffer_size);
  memset(pyr->level_buffer, 0, buffer_size);

  if (compute_gradient) {
    const int gradient_size =
        sizeof(*pyr->level_dx_buffer) *
        (2 * width * height +
         (width + 2 * pad_size) * 2 * pad_size * N_LEVELS);
    pyr->level_dx_buffer = aom_malloc(gradient_size);
    pyr->level_dy_buffer = aom_malloc(gradient_size);
    memset(pyr->level_dx_buffer, 0, gradient_size);
    memset(pyr->level_dy_buffer, 0, gradient_size);
  }
  return pyr;
}

static void free_pyramid(ImagePyramid *pyr) {
  aom_free(pyr->level_buffer);
  if (pyr->has_gradient) {
    aom_free(pyr->level_dx_buffer);
    aom_free(pyr->level_dy_buffer);
  }
  aom_free(pyr);
}

static INLINE void update_level_dims(ImagePyramid *frm_pyr, int level) {
  frm_pyr->widths[level] = frm_pyr->widths[level - 1] >> 1;
  frm_pyr->heights[level] = frm_pyr->heights[level - 1] >> 1;
  frm_pyr->strides[level] = frm_pyr->widths[level] + 2 * frm_pyr->pad_size;
  // Point the beginning of the next level buffer to the correct location
  // inside the padded border
  frm_pyr->level_loc[level] =
      frm_pyr->level_loc[level - 1] +
      frm_pyr->strides[level - 1] *
          (2 * frm_pyr->pad_size + frm_pyr->heights[level - 1]);
}

// Compute coarse to fine pyramids for a frame
static void compute_flow_pyramids(unsigned char *frm, const int frm_width,
                                  const int frm_height, const int frm_stride,
                                  int n_levels, int pad_size, int compute_grad,
                                  ImagePyramid *frm_pyr) {
  int cur_width, cur_height, cur_stride, cur_loc;
  assert((frm_width >> n_levels) > 0);
  assert((frm_height >> n_levels) > 0);

  // Initialize first level
  frm_pyr->n_levels = n_levels;
  frm_pyr->pad_size = pad_size;
  frm_pyr->widths[0] = frm_width;
  frm_pyr->heights[0] = frm_height;
  frm_pyr->strides[0] = frm_width + 2 * frm_pyr->pad_size;
  // Point the beginning of the level buffer to the location inside
  // the padded border
  frm_pyr->level_loc[0] =
      frm_pyr->strides[0] * frm_pyr->pad_size + frm_pyr->pad_size;
  // This essentially copies the original buffer into the pyramid buffer
  // without the original padding
  av1_resize_plane(frm, frm_height, frm_width, frm_stride,
                   frm_pyr->level_buffer + frm_pyr->level_loc[0],
                   frm_pyr->heights[0], frm_pyr->widths[0],
                   frm_pyr->strides[0]);

  if (compute_grad) {
    cur_width = frm_pyr->widths[0];
    cur_height = frm_pyr->heights[0];
    cur_stride = frm_pyr->strides[0];
    cur_loc = frm_pyr->level_loc[0];
    assert(frm_pyr->has_gradient && frm_pyr->level_dx_buffer != NULL &&
           frm_pyr->level_dy_buffer != NULL);
    // Compute the x gradient
    sobel_xy_image_gradient(frm_pyr->level_buffer + cur_loc, cur_stride,
                            frm_pyr->level_dx_buffer + cur_loc, cur_stride,
                            cur_height, cur_width, 1);

    // Compute the y gradient
    sobel_xy_image_gradient(frm_pyr->level_buffer + cur_loc, cur_stride,
                            frm_pyr->level_dy_buffer + cur_loc, cur_stride,
                            cur_height, cur_width, 0);
  }

  // Start at the finest level and resize down to the coarsest level
  for (int level = 1; level < n_levels; ++level) {
    update_level_dims(frm_pyr, level);
    cur_width = frm_pyr->widths[level];
    cur_height = frm_pyr->heights[level];
    cur_stride = frm_pyr->strides[level];
    cur_loc = frm_pyr->level_loc[level];

    av1_resize_plane(frm_pyr->level_buffer + frm_pyr->level_loc[level - 1],
                     frm_pyr->heights[level - 1], frm_pyr->widths[level - 1],
                     frm_pyr->strides[level - 1],
                     frm_pyr->level_buffer + cur_loc, cur_height, cur_width,
                     cur_stride);

    if (compute_grad) {
      assert(frm_pyr->has_gradient && frm_pyr->level_dx_buffer != NULL &&
             frm_pyr->level_dy_buffer != NULL);
      // Compute the x gradient
      sobel_xy_image_gradient(frm_pyr->level_buffer + cur_loc, cur_stride,
                              frm_pyr->level_dx_buffer + cur_loc, cur_stride,
                              cur_height, cur_width, 1);

      // Compute the y gradient
      sobel_xy_image_gradient(frm_pyr->level_buffer + cur_loc, cur_stride,
                              frm_pyr->level_dy_buffer + cur_loc, cur_stride,
                              cur_height, cur_width, 0);
    }
  }
}

static INLINE void compute_flow_at_point(unsigned char *frm, unsigned char *ref,
                                         double *dx, double *dy, int x, int y,
                                         int width, int height, int stride,
                                         double *u, double *v) {
  double M[4] = { 0 };
  double b[2] = { 0 };
  double tmp_output_vec[2] = { 0 };
  double error = 0;
  int16_t dt[PATCH_SIZE * PATCH_SIZE];
  double o_u = *u;
  double o_v = *v;

  for (int itr = 0; itr < DISFLOW_MAX_ITR; itr++) {
    error = compute_warp_and_error(ref, frm, width, height, stride, x, y, *u,
                                   *v, dt);
    if (error <= DISFLOW_ERROR_TR) break;
    compute_flow_system(dx, stride, dy, stride, dt, PATCH_SIZE, M, b);
    solve_2x2_system(M, b, tmp_output_vec);
    *u += tmp_output_vec[0];
    *v += tmp_output_vec[1];
  }
  if (fabs(*u - o_u) > PATCH_SIZE || fabs(*v - o_v) > PATCH_SIZE) {
    *u = o_u;
    *v = o_v;
  }
}
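
// Each iteration above is one Gauss-Newton step: warp the reference patch by
// the current (u, v), rebuild the normal equations from the gradients and the
// residual dt, and add the solved increment. The final check discards any
// result that drifted more than PATCH_SIZE pixels from its starting point,
// which in practice indicates a divergent solve.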

// Note: flow_u and flow_v must be zero-initialized by the caller
static void compute_flow_field(ImagePyramid *frm_pyr, ImagePyramid *ref_pyr,
                               double *flow_u, double *flow_v) {
  int cur_width, cur_height, cur_stride, cur_loc, patch_loc, patch_center;
  double *u_upscale =
      aom_malloc(frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_u));
  double *v_upscale =
      aom_malloc(frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_v));

  assert(frm_pyr->n_levels == ref_pyr->n_levels);

  // Compute flow field from coarsest to finest level of the pyramid
  for (int level = frm_pyr->n_levels - 1; level >= 0; --level) {
    cur_width = frm_pyr->widths[level];
    cur_height = frm_pyr->heights[level];
    cur_stride = frm_pyr->strides[level];
    cur_loc = frm_pyr->level_loc[level];

    for (int i = PATCH_SIZE; i < cur_height - PATCH_SIZE; i += PATCH_STEP) {
      for (int j = PATCH_SIZE; j < cur_width - PATCH_SIZE; j += PATCH_STEP) {
        patch_loc = i * cur_stride + j;
        patch_center = patch_loc + PATCH_CENTER * cur_stride + PATCH_CENTER;
        compute_flow_at_point(frm_pyr->level_buffer + cur_loc,
                              ref_pyr->level_buffer + cur_loc,
                              frm_pyr->level_dx_buffer + cur_loc + patch_loc,
                              frm_pyr->level_dy_buffer + cur_loc + patch_loc, j,
                              i, cur_width, cur_height, cur_stride,
                              flow_u + patch_center, flow_v + patch_center);
      }
    }
    // TODO(sarahparker) Replace this with upscale function in resize.c
    if (level > 0) {
      int h_upscale = frm_pyr->heights[level - 1];
      int w_upscale = frm_pyr->widths[level - 1];
      int s_upscale = frm_pyr->strides[level - 1];
      for (int i = 0; i < h_upscale; ++i) {
        for (int j = 0; j < w_upscale; ++j) {
          u_upscale[j + i * s_upscale] =
              flow_u[(int)(j >> 1) + (int)(i >> 1) * cur_stride];
          v_upscale[j + i * s_upscale] =
              flow_v[(int)(j >> 1) + (int)(i >> 1) * cur_stride];
        }
      }
      memcpy(flow_u, u_upscale,
             frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_u));
      memcpy(flow_v, v_upscale,
             frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_v));
    }
  }
  aom_free(u_upscale);
  aom_free(v_upscale);
}

static int compute_global_motion_disflow_based(
    TransformationType type, unsigned char *frm_buffer, int frm_width,
    int frm_height, int frm_stride, int *frm_corners, int num_frm_corners,
    YV12_BUFFER_CONFIG *ref, int bit_depth, int *num_inliers_by_motion,
    MotionModel *params_by_motion, int num_motions) {
  unsigned char *ref_buffer = ref->y_buffer;
  const int ref_width = ref->y_width;
  const int ref_height = ref->y_height;
  const int pad_size = AOMMAX(PATCH_SIZE, MIN_PAD);
  int num_correspondences;
  double *correspondences;
  RansacFuncDouble ransac = av1_get_ransac_double_prec_type(type);
  assert(frm_width == ref_width);
  assert(frm_height == ref_height);

  // Ensure the number of pyramid levels will work with the frame resolution
  const int msb =
      frm_width < frm_height ? get_msb(frm_width) : get_msb(frm_height);
  const int n_levels = AOMMIN(msb, N_LEVELS);

  if (ref->flags & YV12_FLAG_HIGHBITDEPTH) {
    ref_buffer = av1_downconvert_frame(ref, bit_depth);
  }

  // TODO(sarahparker) We will want to do the source pyramid computation
  // outside of this function so it doesn't get recomputed for every
  // reference. We also don't need to compute every pyramid level for the
  // reference in advance, since lower levels can be overwritten once their
  // flow field is computed and upscaled. I'll add these optimizations
  // once the full implementation is working.
  // Allocate frm image pyramids
  int compute_gradient = 1;
  ImagePyramid *frm_pyr =
      alloc_pyramid(frm_width, frm_height, pad_size, compute_gradient);
  compute_flow_pyramids(frm_buffer, frm_width, frm_height, frm_stride, n_levels,
                        pad_size, compute_gradient, frm_pyr);
  // Allocate ref image pyramids
  compute_gradient = 0;
  ImagePyramid *ref_pyr =
      alloc_pyramid(ref_width, ref_height, pad_size, compute_gradient);
  compute_flow_pyramids(ref_buffer, ref_width, ref_height, ref->y_stride,
                        n_levels, pad_size, compute_gradient, ref_pyr);

  double *flow_u =
      aom_malloc(frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_u));
  double *flow_v =
      aom_malloc(frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_v));

  memset(flow_u, 0,
         frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_u));
  memset(flow_v, 0,
         frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_v));

  compute_flow_field(frm_pyr, ref_pyr, flow_u, flow_v);

  // Find correspondences between the two images using the flow field
  correspondences = aom_malloc(num_frm_corners * 4 * sizeof(*correspondences));
  num_correspondences = determine_disflow_correspondence(
      frm_corners, num_frm_corners, flow_u, flow_v, frm_width, frm_height,
      frm_pyr->strides[0], correspondences);
  ransac(correspondences, num_correspondences, num_inliers_by_motion,
         params_by_motion, num_motions);

  free_pyramid(frm_pyr);
  free_pyramid(ref_pyr);
  aom_free(correspondences);
  aom_free(flow_u);
  aom_free(flow_v);
  // Set num_inliers = 0 for motions with too few inliers so they are ignored.
  for (int i = 0; i < num_motions; ++i) {
    if (num_inliers_by_motion[i] < MIN_INLIER_PROB * num_correspondences) {
      num_inliers_by_motion[i] = 0;
    }
  }

  // Return true if any one of the motions has inliers.
  for (int i = 0; i < num_motions; ++i) {
    if (num_inliers_by_motion[i] > 0) return 1;
  }
  return 0;
}

int av1_compute_global_motion(TransformationType type,
                              unsigned char *src_buffer, int src_width,
                              int src_height, int src_stride, int *src_corners,
                              int num_src_corners, YV12_BUFFER_CONFIG *ref,
                              int bit_depth,
                              GlobalMotionEstimationType gm_estimation_type,
                              int *num_inliers_by_motion,
                              MotionModel *params_by_motion, int num_motions) {
  switch (gm_estimation_type) {
    case GLOBAL_MOTION_FEATURE_BASED:
      return compute_global_motion_feature_based(
          type, src_buffer, src_width, src_height, src_stride, src_corners,
          num_src_corners, ref, bit_depth, num_inliers_by_motion,
          params_by_motion, num_motions);
    case GLOBAL_MOTION_DISFLOW_BASED:
      return compute_global_motion_disflow_based(
          type, src_buffer, src_width, src_height, src_stride, src_corners,
          num_src_corners, ref, bit_depth, num_inliers_by_motion,
          params_by_motion, num_motions);
    default: assert(0 && "Unknown global motion estimation type");
  }
  return 0;
}
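
// A minimal caller sketch, kept disabled like the image_difference() block
// above. The hypothetical example_estimate_rotzoom() assumes an 8-bit source
// frame, that RANSAC_NUM_MOTIONS and the MotionModel params/inliers/
// num_inliers fields match their declarations in the encoder headers, and
// that the caller owns the inlier storage; the encoder's real setup lives in
// its global motion search loop.
/*
static void example_estimate_rotzoom(unsigned char *src, int width, int height,
                                     int stride, YV12_BUFFER_CONFIG *ref,
                                     int bit_depth) {
  // Detect corners in the source frame, exactly as the feature-based path
  // does for the reference frame.
  int corners[2 * MAX_CORNERS];
  const int num_corners =
      av1_fast_corner_detect(src, width, height, stride, corners, MAX_CORNERS);

  int num_inliers_by_motion[RANSAC_NUM_MOTIONS];
  MotionModel params_by_motion[RANSAC_NUM_MOTIONS];
  for (int i = 0; i < RANSAC_NUM_MOTIONS; ++i) {
    memset(&params_by_motion[i], 0, sizeof(params_by_motion[i]));
    params_by_motion[i].inliers =
        aom_malloc(sizeof(*params_by_motion[i].inliers) * 2 * MAX_CORNERS);
  }

  if (av1_compute_global_motion(ROTZOOM, src, width, height, stride, corners,
                                num_corners, ref, bit_depth,
                                GLOBAL_MOTION_FEATURE_BASED,
                                num_inliers_by_motion, params_by_motion,
                                RANSAC_NUM_MOTIONS)) {
    // Convert the first surviving motion into integerized warp parameters;
    // a real caller would then refine them with av1_refine_integerized_param()
    // and gate them with av1_is_enough_erroradvantage().
    for (int i = 0; i < RANSAC_NUM_MOTIONS; ++i) {
      if (num_inliers_by_motion[i] == 0) continue;
      WarpedMotionParams wm;
      av1_convert_model_to_params(params_by_motion[i].params, &wm);
      break;
    }
  }

  for (int i = 0; i < RANSAC_NUM_MOTIONS; ++i)
    aom_free(params_by_motion[i].inliers);
}
*/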