/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include "aom_dsp/flow_estimation/disflow.h"
#include "aom_dsp/flow_estimation/flow_estimation.h"
#include "aom_dsp/flow_estimation/ransac.h"

#include "aom_scale/yv12config.h"

#include "av1/common/resize.h"

// Number of pyramid levels in disflow computation
#define N_LEVELS 2
// Size of square patches in the disflow dense grid
#define PATCH_SIZE 8
// Center point of square patch
#define PATCH_CENTER ((PATCH_SIZE + 1) >> 1)
// Step size between patches, lower value means greater patch overlap
#define PATCH_STEP 1
// Minimum size of border padding for disflow
#define MIN_PAD 7
// Warp error convergence threshold for disflow
#define DISFLOW_ERROR_TR 0.01
// Max number of iterations if warp convergence is not found
#define DISFLOW_MAX_ITR 10

// Struct for an image pyramid
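// All pyramid levels are stored back to back (with border padding) in the
// single level_buffer; level_loc[] holds the offset of each level's top-left
// pixel within that buffer. When has_gradient is set, level_dx_buffer and
// level_dy_buffer mirror the same layout for the x and y gradients.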
typedef struct {
  int n_levels;
  int pad_size;
  int has_gradient;
  int widths[N_LEVELS];
  int heights[N_LEVELS];
  int strides[N_LEVELS];
  int level_loc[N_LEVELS];
  unsigned char *level_buffer;
  double *level_dx_buffer;
  double *level_dy_buffer;
} ImagePyramid;

// Don't use points around the frame border since they are less reliable
static INLINE int valid_point(int x, int y, int width, int height) {
  return (x > (PATCH_SIZE + PATCH_CENTER)) &&
         (x < (width - PATCH_SIZE - PATCH_CENTER)) &&
         (y > (PATCH_SIZE + PATCH_CENTER)) &&
         (y < (height - PATCH_SIZE - PATCH_CENTER));
}

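// Build the correspondence list used by RANSAC: for every detected corner
// that lies far enough from the frame border, record (x, y) in the source
// frame and (x + u, y + v) in the reference, where (u, v) is read from the
// dense flow field at that pixel.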
static int determine_disflow_correspondence(int *frm_corners,
                                            int num_frm_corners, double *flow_u,
                                            double *flow_v, int width,
                                            int height, int stride,
                                            double *correspondences) {
  int num_correspondences = 0;
  int x, y;
  for (int i = 0; i < num_frm_corners; ++i) {
    x = frm_corners[2 * i];
    y = frm_corners[2 * i + 1];
    if (valid_point(x, y, width, height)) {
      correspondences[4 * num_correspondences] = x;
      correspondences[4 * num_correspondences + 1] = y;
      correspondences[4 * num_correspondences + 2] = x + flow_u[y * stride + x];
      correspondences[4 * num_correspondences + 3] = y + flow_v[y * stride + x];
      num_correspondences++;
    }
  }
  return num_correspondences;
}

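// Evaluate a Catmull-Rom cubic through the four samples p[0..3] at fractional
// offset x (0 <= x < 1) measured from p[1].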
static double getCubicValue(double p[4], double x) {
  return p[1] + 0.5 * x *
                    (p[2] - p[0] +
                     x * (2.0 * p[0] - 5.0 * p[1] + 4.0 * p[2] - p[3] +
                          x * (3.0 * (p[1] - p[2]) + p[3] - p[0])));
}

static void get_subcolumn(unsigned char *ref, double col[4], int stride, int x,
                          int y_start) {
  int i;
  for (i = 0; i < 4; ++i) {
    col[i] = ref[(i + y_start) * stride + x];
  }
}

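// Bicubic interpolation at fractional position (x, y): apply the cubic kernel
// down each of the four neighbouring columns, then once more across the four
// column results.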
static double bicubic(unsigned char *ref, double x, double y, int stride) {
  double arr[4];
  int k;
  int i = (int)x;
  int j = (int)y;
  for (k = 0; k < 4; ++k) {
    double arr_temp[4];
    get_subcolumn(ref, arr_temp, stride, i + k - 1, j - 1);
    arr[k] = getCubicValue(arr_temp, y - j);
  }
  return getCubicValue(arr, x - i);
}

// Interpolate a warped block using bicubic interpolation when possible
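// Positions too close to (or outside) the frame border fall back to clamped
// corner samples, 1-D cubic or linear interpolation along the border, or
// plain bilinear interpolation, depending on which neighbours are available.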
static unsigned char interpolate(unsigned char *ref, double x, double y,
                                 int width, int height, int stride) {
  if (x < 0 && y < 0)
    return ref[0];
  else if (x < 0 && y > height - 1)
    return ref[(height - 1) * stride];
  else if (x > width - 1 && y < 0)
    return ref[width - 1];
  else if (x > width - 1 && y > height - 1)
    return ref[(height - 1) * stride + (width - 1)];
  else if (x < 0) {
    int v;
    int i = (int)y;
    double a = y - i;
    if (y > 1 && y < height - 2) {
      double arr[4];
      get_subcolumn(ref, arr, stride, 0, i - 1);
      return clamp((int)(getCubicValue(arr, a) + 0.5), 0, 255);
    }
    v = (int)(ref[i * stride] * (1 - a) + ref[(i + 1) * stride] * a + 0.5);
    return clamp(v, 0, 255);
  } else if (y < 0) {
    int v;
    int j = (int)x;
    double b = x - j;
    if (x > 1 && x < width - 2) {
      double arr[4] = { ref[j - 1], ref[j], ref[j + 1], ref[j + 2] };
      return clamp((int)(getCubicValue(arr, b) + 0.5), 0, 255);
    }
    v = (int)(ref[j] * (1 - b) + ref[j + 1] * b + 0.5);
    return clamp(v, 0, 255);
  } else if (x > width - 1) {
    int v;
    int i = (int)y;
    double a = y - i;
    if (y > 1 && y < height - 2) {
      double arr[4];
      get_subcolumn(ref, arr, stride, width - 1, i - 1);
      return clamp((int)(getCubicValue(arr, a) + 0.5), 0, 255);
    }
    v = (int)(ref[i * stride + width - 1] * (1 - a) +
              ref[(i + 1) * stride + width - 1] * a + 0.5);
    return clamp(v, 0, 255);
  } else if (y > height - 1) {
    int v;
    int j = (int)x;
    double b = x - j;
    if (x > 1 && x < width - 2) {
      int row = (height - 1) * stride;
      double arr[4] = { ref[row + j - 1], ref[row + j], ref[row + j + 1],
                        ref[row + j + 2] };
      return clamp((int)(getCubicValue(arr, b) + 0.5), 0, 255);
    }
    v = (int)(ref[(height - 1) * stride + j] * (1 - b) +
              ref[(height - 1) * stride + j + 1] * b + 0.5);
    return clamp(v, 0, 255);
  } else if (x > 1 && y > 1 && x < width - 2 && y < height - 2) {
    return clamp((int)(bicubic(ref, x, y, stride) + 0.5), 0, 255);
  } else {
    int i = (int)y;
    int j = (int)x;
    double a = y - i;
    double b = x - j;
    int v = (int)(ref[i * stride + j] * (1 - a) * (1 - b) +
                  ref[i * stride + j + 1] * (1 - a) * b +
                  ref[(i + 1) * stride + j] * a * (1 - b) +
                  ref[(i + 1) * stride + j + 1] * a * b);
    return clamp(v, 0, 255);
  }
}

// Warps a block using flow vector [u, v] and computes the mse
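// The per-pixel signed error (warped reference minus source) is also written
// to dt, where it serves as the temporal-gradient term when the flow system
// is assembled in compute_flow_system().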
static double compute_warp_and_error(unsigned char *ref, unsigned char *frm,
                                     int width, int height, int stride, int x,
                                     int y, double u, double v, int16_t *dt) {
  int i, j;
  unsigned char warped;
  double x_w, y_w;
  double mse = 0;
  int16_t err = 0;
  for (i = y; i < y + PATCH_SIZE; ++i)
    for (j = x; j < x + PATCH_SIZE; ++j) {
      x_w = (double)j + u;
      y_w = (double)i + v;
      warped = interpolate(ref, x_w, y_w, width, height, stride);
      err = warped - frm[j + i * stride];
      mse += err * err;
      dt[(i - y) * PATCH_SIZE + (j - x)] = err;
    }

  mse /= (PATCH_SIZE * PATCH_SIZE);
  return mse;
}

// Computes the components of the system of equations used to solve for
// a flow vector. This includes:
// 1.) The Hessian matrix for optical flow. This matrix is in the
// form of:
//
//       M = |sum(dx * dx)  sum(dx * dy)|
//           |sum(dx * dy)  sum(dy * dy)|
//
// 2.)   b = |sum(dx * dt)|
//           |sum(dy * dt)|
// Where the sums are computed over a PATCH_SIZE x PATCH_SIZE window.
static INLINE void compute_flow_system(const double *dx, int dx_stride,
                                       const double *dy, int dy_stride,
                                       const int16_t *dt, int dt_stride,
                                       double *M, double *b) {
  for (int i = 0; i < PATCH_SIZE; i++) {
    for (int j = 0; j < PATCH_SIZE; j++) {
      M[0] += dx[i * dx_stride + j] * dx[i * dx_stride + j];
      M[1] += dx[i * dx_stride + j] * dy[i * dy_stride + j];
      M[3] += dy[i * dy_stride + j] * dy[i * dy_stride + j];

      b[0] += dx[i * dx_stride + j] * dt[i * dt_stride + j];
      b[1] += dy[i * dy_stride + j] * dt[i * dt_stride + j];
    }
  }

  M[2] = M[1];
}

// Solves a general Mx = b where M is a 2x2 matrix and b is a 2x1 matrix
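// Solved with Cramer's rule. A near-singular M is handled by adding a tiny
// value to the diagonal before inverting rather than by a pseudo-inverse.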
static INLINE void solve_2x2_system(const double *M, const double *b,
                                    double *output_vec) {
  double M_0 = M[0];
  double M_3 = M[3];
  double det = (M_0 * M_3) - (M[1] * M[2]);
  if (det < 1e-5) {
    // Handle singular matrix
    // TODO(sarahparker) compare results using pseudo inverse instead
    M_0 += 1e-10;
    M_3 += 1e-10;
    det = (M_0 * M_3) - (M[1] * M[2]);
  }
  const double det_inv = 1 / det;
  const double mult_b0 = det_inv * b[0];
  const double mult_b1 = det_inv * b[1];
  output_vec[0] = M_3 * mult_b0 - M[1] * mult_b1;
  output_vec[1] = -M[2] * mult_b0 + M_0 * mult_b1;
}

/*
static INLINE void image_difference(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    int16_t *dst, int dst_stride, int height,
                                    int width) {
  const int block_unit = 8;
  // Take difference in 8x8 blocks to make use of optimized diff function
  for (int i = 0; i < height; i += block_unit) {
    for (int j = 0; j < width; j += block_unit) {
      aom_subtract_block(block_unit, block_unit, dst + i * dst_stride + j,
                         dst_stride, src + i * src_stride + j, src_stride,
                         ref + i * ref_stride + j, ref_stride);
    }
  }
}
*/

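// 3-tap Sobel filter applied separably to one block: a horizontal pass into
// the intermediate buffer followed by a vertical pass. dir selects which axis
// gets the derivative kernel {1, 0, -1} (dir == 1 -> x gradient) and which
// gets the smoothing kernel {1, 2, 1}; norm scales the final output.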
static INLINE void convolve_2d_sobel_y(const uint8_t *src, int src_stride,
                                       double *dst, int dst_stride, int w,
                                       int h, int dir, double norm) {
  int16_t im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE];
  DECLARE_ALIGNED(256, static const int16_t, sobel_a[3]) = { 1, 0, -1 };
  DECLARE_ALIGNED(256, static const int16_t, sobel_b[3]) = { 1, 2, 1 };
  const int taps = 3;
  int im_h = h + taps - 1;
  int im_stride = w;
  const int fo_vert = 1;
  const int fo_horiz = 1;

  // horizontal filter
  const uint8_t *src_horiz = src - fo_vert * src_stride;
  const int16_t *x_filter = dir ? sobel_a : sobel_b;
  for (int y = 0; y < im_h; ++y) {
    for (int x = 0; x < w; ++x) {
      int16_t sum = 0;
      for (int k = 0; k < taps; ++k) {
        sum += x_filter[k] * src_horiz[y * src_stride + x - fo_horiz + k];
      }
      im_block[y * im_stride + x] = sum;
    }
  }

  // vertical filter
  int16_t *src_vert = im_block + fo_vert * im_stride;
  const int16_t *y_filter = dir ? sobel_b : sobel_a;
  for (int y = 0; y < h; ++y) {
    for (int x = 0; x < w; ++x) {
      int16_t sum = 0;
      for (int k = 0; k < taps; ++k) {
        sum += y_filter[k] * src_vert[(y - fo_vert + k) * im_stride + x];
      }
      dst[y * dst_stride + x] = sum * norm;
    }
  }
}

// Compute an image gradient using a sobel filter.
// If dir == 1, compute the x gradient. If dir == 0, compute y. This function
// assumes the images have been padded so that they can be processed in units
// of 8.
static INLINE void sobel_xy_image_gradient(const uint8_t *src, int src_stride,
                                           double *dst, int dst_stride,
                                           int height, int width, int dir) {
  double norm = 1.0;
  // TODO(sarahparker) experiment with doing this over larger block sizes
  const int block_unit = 8;
  // Filter in 8x8 blocks to eventually make use of optimized convolve function
  for (int i = 0; i < height; i += block_unit) {
    for (int j = 0; j < width; j += block_unit) {
      convolve_2d_sobel_y(src + i * src_stride + j, src_stride,
                          dst + i * dst_stride + j, dst_stride, block_unit,
                          block_unit, dir, norm);
    }
  }
}

static void free_pyramid(ImagePyramid *pyr) {
  aom_free(pyr->level_buffer);
  if (pyr->has_gradient) {
    aom_free(pyr->level_dx_buffer);
    aom_free(pyr->level_dy_buffer);
  }
  aom_free(pyr);
}

static ImagePyramid *alloc_pyramid(int width, int height, int pad_size,
                                   int compute_gradient) {
  ImagePyramid *pyr = aom_calloc(1, sizeof(*pyr));
  if (!pyr) return NULL;
  pyr->has_gradient = compute_gradient;
  // 2 * width * height is the upper bound for a buffer that fits
  // all pyramid levels + padding for each level
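  // (level areas halve in each dimension, so they sum to less than
  // width * height * (1 + 1/4 + 1/16 + ...) = 4/3 * width * height)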
  const int buffer_size = sizeof(*pyr->level_buffer) * 2 * width * height +
                          (width + 2 * pad_size) * 2 * pad_size * N_LEVELS;
  pyr->level_buffer = aom_malloc(buffer_size);
  if (!pyr->level_buffer) {
    free_pyramid(pyr);
    return NULL;
  }
  memset(pyr->level_buffer, 0, buffer_size);

  if (compute_gradient) {
    const int gradient_size =
        sizeof(*pyr->level_dx_buffer) * 2 * width * height +
        (width + 2 * pad_size) * 2 * pad_size * N_LEVELS;
    pyr->level_dx_buffer = aom_calloc(1, gradient_size);
    pyr->level_dy_buffer = aom_calloc(1, gradient_size);
    if (!(pyr->level_dx_buffer && pyr->level_dy_buffer)) {
      free_pyramid(pyr);
      return NULL;
    }
  }
  return pyr;
}

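// Each pyramid level is half the width and height of the level below it;
// level_loc[level] is placed immediately after the previous level's rows
// (including their padding) within the shared level buffer.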
static INLINE void update_level_dims(ImagePyramid *frm_pyr, int level) {
  frm_pyr->widths[level] = frm_pyr->widths[level - 1] >> 1;
  frm_pyr->heights[level] = frm_pyr->heights[level - 1] >> 1;
  frm_pyr->strides[level] = frm_pyr->widths[level] + 2 * frm_pyr->pad_size;
  // Point the beginning of the next level buffer to the correct location inside
  // the padded border
  frm_pyr->level_loc[level] =
      frm_pyr->level_loc[level - 1] +
      frm_pyr->strides[level - 1] *
          (2 * frm_pyr->pad_size + frm_pyr->heights[level - 1]);
}

// Compute coarse to fine pyramids for a frame
static void compute_flow_pyramids(unsigned char *frm, const int frm_width,
                                  const int frm_height, const int frm_stride,
                                  int n_levels, int pad_size, int compute_grad,
                                  ImagePyramid *frm_pyr) {
  int cur_width, cur_height, cur_stride, cur_loc;
  assert((frm_width >> n_levels) > 0);
  assert((frm_height >> n_levels) > 0);

  // Initialize first level
  frm_pyr->n_levels = n_levels;
  frm_pyr->pad_size = pad_size;
  frm_pyr->widths[0] = frm_width;
  frm_pyr->heights[0] = frm_height;
  frm_pyr->strides[0] = frm_width + 2 * frm_pyr->pad_size;
  // Point the beginning of the level buffer to the location inside
  // the padded border
  frm_pyr->level_loc[0] =
      frm_pyr->strides[0] * frm_pyr->pad_size + frm_pyr->pad_size;
  // This essentially copies the original buffer into the pyramid buffer
  // without the original padding
  av1_resize_plane(frm, frm_height, frm_width, frm_stride,
                   frm_pyr->level_buffer + frm_pyr->level_loc[0],
                   frm_pyr->heights[0], frm_pyr->widths[0],
                   frm_pyr->strides[0]);

  if (compute_grad) {
    cur_width = frm_pyr->widths[0];
    cur_height = frm_pyr->heights[0];
    cur_stride = frm_pyr->strides[0];
    cur_loc = frm_pyr->level_loc[0];
    assert(frm_pyr->has_gradient && frm_pyr->level_dx_buffer != NULL &&
           frm_pyr->level_dy_buffer != NULL);
    // Compute the x gradient
    sobel_xy_image_gradient(frm_pyr->level_buffer + cur_loc, cur_stride,
                            frm_pyr->level_dx_buffer + cur_loc, cur_stride,
                            cur_height, cur_width, 1);

    // Compute the y gradient
    sobel_xy_image_gradient(frm_pyr->level_buffer + cur_loc, cur_stride,
                            frm_pyr->level_dy_buffer + cur_loc, cur_stride,
                            cur_height, cur_width, 0);
  }

  // Start at the finest level and resize down to the coarsest level
  for (int level = 1; level < n_levels; ++level) {
    update_level_dims(frm_pyr, level);
    cur_width = frm_pyr->widths[level];
    cur_height = frm_pyr->heights[level];
    cur_stride = frm_pyr->strides[level];
    cur_loc = frm_pyr->level_loc[level];

    av1_resize_plane(frm_pyr->level_buffer + frm_pyr->level_loc[level - 1],
                     frm_pyr->heights[level - 1], frm_pyr->widths[level - 1],
                     frm_pyr->strides[level - 1],
                     frm_pyr->level_buffer + cur_loc, cur_height, cur_width,
                     cur_stride);

    if (compute_grad) {
      assert(frm_pyr->has_gradient && frm_pyr->level_dx_buffer != NULL &&
             frm_pyr->level_dy_buffer != NULL);
      // Compute the x gradient
      sobel_xy_image_gradient(frm_pyr->level_buffer + cur_loc, cur_stride,
                              frm_pyr->level_dx_buffer + cur_loc, cur_stride,
                              cur_height, cur_width, 1);

      // Compute the y gradient
      sobel_xy_image_gradient(frm_pyr->level_buffer + cur_loc, cur_stride,
                              frm_pyr->level_dy_buffer + cur_loc, cur_stride,
                              cur_height, cur_width, 0);
    }
  }
}

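// Refine the flow vector (*u, *v) for the PATCH_SIZE x PATCH_SIZE patch whose
// top-left corner is (x, y): repeatedly warp the reference by the current
// flow, measure the error against the source patch, and solve the 2x2 flow
// system for an incremental update until the error drops below
// DISFLOW_ERROR_TR or DISFLOW_MAX_ITR iterations have run. If the refined
// vector drifts more than a patch away from its starting value, it is reset.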
static INLINE void compute_flow_at_point(unsigned char *frm, unsigned char *ref,
                                         double *dx, double *dy, int x, int y,
                                         int width, int height, int stride,
                                         double *u, double *v) {
  double M[4] = { 0 };
  double b[2] = { 0 };
  double tmp_output_vec[2] = { 0 };
  double error = 0;
  int16_t dt[PATCH_SIZE * PATCH_SIZE];
  double o_u = *u;
  double o_v = *v;

  for (int itr = 0; itr < DISFLOW_MAX_ITR; itr++) {
    error = compute_warp_and_error(ref, frm, width, height, stride, x, y, *u,
                                   *v, dt);
    if (error <= DISFLOW_ERROR_TR) break;
    compute_flow_system(dx, stride, dy, stride, dt, PATCH_SIZE, M, b);
    solve_2x2_system(M, b, tmp_output_vec);
    *u += tmp_output_vec[0];
    *v += tmp_output_vec[1];
  }
  if (fabs(*u - o_u) > PATCH_SIZE || fabs(*v - o_v) > PATCH_SIZE) {
    *u = o_u;
    *v = o_v;
  }
}

// Note: flow_u and flow_v must be zero-initialized by the caller
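// The field is computed coarse-to-fine: flow is estimated at every patch
// position of the current pyramid level, then upsampled onto the next finer
// level's grid to seed its estimate.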
static bool compute_flow_field(ImagePyramid *frm_pyr, ImagePyramid *ref_pyr,
                               double *flow_u, double *flow_v) {
  int cur_width, cur_height, cur_stride, cur_loc, patch_loc, patch_center;
  double *u_upscale =
      aom_malloc(frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_u));
  double *v_upscale =
      aom_malloc(frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_v));
  if (!(u_upscale && v_upscale)) {
    aom_free(u_upscale);
    aom_free(v_upscale);
    return false;
  }

  assert(frm_pyr->n_levels == ref_pyr->n_levels);

  // Compute flow field from coarsest to finest level of the pyramid
  for (int level = frm_pyr->n_levels - 1; level >= 0; --level) {
    cur_width = frm_pyr->widths[level];
    cur_height = frm_pyr->heights[level];
    cur_stride = frm_pyr->strides[level];
    cur_loc = frm_pyr->level_loc[level];

    for (int i = PATCH_SIZE; i < cur_height - PATCH_SIZE; i += PATCH_STEP) {
      for (int j = PATCH_SIZE; j < cur_width - PATCH_SIZE; j += PATCH_STEP) {
        patch_loc = i * cur_stride + j;
        patch_center = patch_loc + PATCH_CENTER * cur_stride + PATCH_CENTER;
        compute_flow_at_point(frm_pyr->level_buffer + cur_loc,
                              ref_pyr->level_buffer + cur_loc,
                              frm_pyr->level_dx_buffer + cur_loc + patch_loc,
                              frm_pyr->level_dy_buffer + cur_loc + patch_loc, j,
                              i, cur_width, cur_height, cur_stride,
                              flow_u + patch_center, flow_v + patch_center);
      }
    }
    // TODO(sarahparker) Replace this with upscale function in resize.c
    if (level > 0) {
      int h_upscale = frm_pyr->heights[level - 1];
      int w_upscale = frm_pyr->widths[level - 1];
      int s_upscale = frm_pyr->strides[level - 1];
      for (int i = 0; i < h_upscale; ++i) {
        for (int j = 0; j < w_upscale; ++j) {
          u_upscale[j + i * s_upscale] =
              flow_u[(int)(j >> 1) + (int)(i >> 1) * cur_stride];
          v_upscale[j + i * s_upscale] =
              flow_v[(int)(j >> 1) + (int)(i >> 1) * cur_stride];
        }
      }
      memcpy(flow_u, u_upscale,
             frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_u));
      memcpy(flow_v, v_upscale,
             frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_v));
    }
  }
  aom_free(u_upscale);
  aom_free(v_upscale);
  return true;
}

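// Top-level entry point for disflow-based global motion estimation: build
// image (and gradient) pyramids for the source frame and the reference,
// compute a dense flow field, convert the flow at detected corners into
// point correspondences, and fit the requested motion model with RANSAC.
// Returns 1 if at least one candidate motion has enough inliers, 0 otherwise.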
int av1_compute_global_motion_disflow_based(
    TransformationType type, unsigned char *frm_buffer, int frm_width,
    int frm_height, int frm_stride, int *frm_corners, int num_frm_corners,
    YV12_BUFFER_CONFIG *ref, int bit_depth, int *num_inliers_by_motion,
    MotionModel *params_by_motion, int num_motions) {
  unsigned char *ref_buffer = ref->y_buffer;
  const int ref_width = ref->y_width;
  const int ref_height = ref->y_height;
  const int pad_size = AOMMAX(PATCH_SIZE, MIN_PAD);
  int num_correspondences;
  double *correspondences;
  RansacFuncDouble ransac = av1_get_ransac_double_prec_type(type);
  assert(frm_width == ref_width);
  assert(frm_height == ref_height);

  // Ensure the number of pyramid levels will work with the frame resolution
  const int msb =
      frm_width < frm_height ? get_msb(frm_width) : get_msb(frm_height);
  const int n_levels = AOMMIN(msb, N_LEVELS);

  if (ref->flags & YV12_FLAG_HIGHBITDEPTH) {
    ref_buffer = av1_downconvert_frame(ref, bit_depth);
  }

  // TODO(sarahparker) We will want to do the source pyramid computation
  // outside of this function so it doesn't get recomputed for every
  // reference. We also don't need to compute every pyramid level for the
  // reference in advance, since lower levels can be overwritten once their
  // flow field is computed and upscaled. I'll add these optimizations
  // once the full implementation is working.
  // Allocate frm image pyramids
  int compute_gradient = 1;
  ImagePyramid *frm_pyr =
      alloc_pyramid(frm_width, frm_height, pad_size, compute_gradient);
  if (!frm_pyr) return 0;
  compute_flow_pyramids(frm_buffer, frm_width, frm_height, frm_stride, n_levels,
                        pad_size, compute_gradient, frm_pyr);
  // Allocate ref image pyramids
  compute_gradient = 0;
  ImagePyramid *ref_pyr =
      alloc_pyramid(ref_width, ref_height, pad_size, compute_gradient);
  if (!ref_pyr) {
    free_pyramid(frm_pyr);
    return 0;
  }
  compute_flow_pyramids(ref_buffer, ref_width, ref_height, ref->y_stride,
                        n_levels, pad_size, compute_gradient, ref_pyr);

  int ret = 0;
  double *flow_u =
      aom_malloc(frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_u));
  double *flow_v =
      aom_malloc(frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_v));
  if (!(flow_u && flow_v)) goto Error;

  memset(flow_u, 0,
         frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_u));
  memset(flow_v, 0,
         frm_pyr->strides[0] * frm_pyr->heights[0] * sizeof(*flow_v));

  if (!compute_flow_field(frm_pyr, ref_pyr, flow_u, flow_v)) goto Error;

  // find correspondences between the two images using the flow field
  correspondences = aom_malloc(num_frm_corners * 4 * sizeof(*correspondences));
  if (!correspondences) goto Error;
  num_correspondences = determine_disflow_correspondence(
      frm_corners, num_frm_corners, flow_u, flow_v, frm_width, frm_height,
      frm_pyr->strides[0], correspondences);
  ransac(correspondences, num_correspondences, num_inliers_by_motion,
         params_by_motion, num_motions);

  // Set num_inliers = 0 for motions with too few inliers so they are ignored.
  for (int i = 0; i < num_motions; ++i) {
    if (num_inliers_by_motion[i] < MIN_INLIER_PROB * num_correspondences) {
      num_inliers_by_motion[i] = 0;
    }
  }

  // Return true if any one of the motions has inliers.
  for (int i = 0; i < num_motions; ++i) {
    if (num_inliers_by_motion[i] > 0) {
      ret = 1;
      break;
    }
  }

  aom_free(correspondences);
Error:
  free_pyramid(frm_pyr);
  free_pyramid(ref_pyr);
  aom_free(flow_u);
  aom_free(flow_v);
  return ret;
}