/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_AV1_COMMON_RECONINTER_H_
#define AOM_AV1_COMMON_RECONINTER_H_

#include "av1/common/av1_common_int.h"
#include "av1/common/convolve.h"
#include "av1/common/filter.h"
#include "av1/common/warped_motion.h"
#include "aom/aom_integer.h"

// Work out how many pixels off the edge of a reference frame we're allowed
// to go when forming an inter prediction.
// The outermost row/col of each reference frame is extended by
// (AOM_BORDER_IN_PIXELS >> subsampling) pixels, but we need to keep
// at least AOM_INTERP_EXTEND pixels within that to account for filtering.
//
// We have to break this up into two macros to keep both clang-format and
// tools/lint-hunks.py happy.
#define AOM_LEFT_TOP_MARGIN_PX(subsampling) \
  ((AOM_BORDER_IN_PIXELS >> subsampling) - AOM_INTERP_EXTEND)
#define AOM_LEFT_TOP_MARGIN_SCALED(subsampling) \
  (AOM_LEFT_TOP_MARGIN_PX(subsampling) << SCALE_SUBPEL_BITS)
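
// For illustration only: a sketch assuming the values this library usually
// defines elsewhere (AOM_BORDER_IN_PIXELS == 288, AOM_INTERP_EXTEND == 4 and
// SCALE_SUBPEL_BITS == 10); check the real definitions before relying on the
// numbers:
//   AOM_LEFT_TOP_MARGIN_PX(1)     == (288 >> 1) - 4 == 140
//   AOM_LEFT_TOP_MARGIN_SCALED(1) == 140 << 10      == 143360
// i.e. for a subsampled (4:2:0) plane, a prediction may reach at most
// 140 pixels (143360 in scaled 1/1024-pel units) past the top/left edge of
// the reference frame.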

#ifdef __cplusplus
extern "C" {
#endif

#define MAX_WEDGE_TYPES 16

#define MAX_WEDGE_SIZE_LOG2 5  // 32x32
#define MAX_WEDGE_SIZE (1 << MAX_WEDGE_SIZE_LOG2)
#define MAX_WEDGE_SQUARE (MAX_WEDGE_SIZE * MAX_WEDGE_SIZE)

#define WEDGE_WEIGHT_BITS 6

#define WEDGE_NONE -1

// Angles are measured anti-clockwise from the horizontal.
enum {
  WEDGE_HORIZONTAL = 0,
  WEDGE_VERTICAL = 1,
  WEDGE_OBLIQUE27 = 2,
  WEDGE_OBLIQUE63 = 3,
  WEDGE_OBLIQUE117 = 4,
  WEDGE_OBLIQUE153 = 5,
  WEDGE_DIRECTIONS
} UENUM1BYTE(WedgeDirectionType);

// 3-tuple: {direction, x_offset, y_offset}
typedef struct {
  WedgeDirectionType direction;
  int x_offset;
  int y_offset;
} wedge_code_type;

typedef uint8_t *wedge_masks_type[MAX_WEDGE_TYPES];

typedef struct {
  int wedge_types;
  const wedge_code_type *codebook;
  uint8_t *signflip;
  wedge_masks_type *masks;
} wedge_params_type;

extern const wedge_params_type av1_wedge_params_lookup[BLOCK_SIZES_ALL];

typedef struct SubpelParams {
  int xs;
  int ys;
  int subpel_x;
  int subpel_y;
} SubpelParams;

struct build_prediction_ctxt {
  const AV1_COMMON *cm;
  uint8_t **tmp_buf;
  int *tmp_width;
  int *tmp_height;
  int *tmp_stride;
  int mb_to_far_edge;
};

typedef enum InterPredMode {
  TRANSLATION_PRED,
  WARP_PRED,
} InterPredMode;

typedef enum InterCompMode {
  UNIFORM_SINGLE,
  UNIFORM_COMP,
  MASK_COMP,
} InterCompMode;

typedef struct InterPredParams {
  InterPredMode mode;
  InterCompMode comp_mode;
  WarpedMotionParams warp_params;
  ConvolveParams conv_params;
  const InterpFilterParams *interp_filter_params[2];
  int block_width;
  int block_height;
  int pix_row;
  int pix_col;
  struct buf_2d ref_frame_buf;
  int subsampling_x;
  int subsampling_y;
  const struct scale_factors *scale_factors;
  int bit_depth;
  int use_hbd_buf;
  INTERINTER_COMPOUND_DATA mask_comp;
  BLOCK_SIZE sb_type;
  int is_intrabc;
} InterPredParams;

void av1_init_inter_params(InterPredParams *inter_pred_params, int block_width,
                           int block_height, int pix_row, int pix_col,
                           int subsampling_x, int subsampling_y, int bit_depth,
                           int use_hbd_buf, int is_intrabc,
                           const struct scale_factors *sf,
                           const struct buf_2d *ref_buf,
                           int_interpfilters interp_filters);

void av1_init_comp_mode(InterPredParams *inter_pred_params);

void av1_init_warp_params(InterPredParams *inter_pred_params,
                          const WarpTypesAllowed *warp_types, int ref,
                          const MACROBLOCKD *xd, const MB_MODE_INFO *mi);

void av1_init_mask_comp(InterPredParams *inter_pred_params, BLOCK_SIZE bsize,
                        const INTERINTER_COMPOUND_DATA *mask_comp);
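
// A minimal sketch of how these initializers are typically sequenced before a
// prediction is built (illustrative only: the caller-side variables below are
// placeholders, and the authoritative flow lives in reconinter.c and its
// callers):
//
//   InterPredParams params;
//   av1_init_inter_params(&params, bw, bh, pix_row, pix_col, ss_x, ss_y,
//                         bit_depth, use_hbd_buf, is_intrabc, sf, ref_buf,
//                         filters);
//   if (is_compound) av1_init_comp_mode(&params);
//   if (is_masked_compound) av1_init_mask_comp(&params, bsize, &mask_comp);
//   av1_init_warp_params(&params, &warp_types, ref, xd, mi);
//   // ... then pass &params to av1_make_inter_predictor() or
//   // av1_build_one_inter_predictor().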

static INLINE int has_scale(int xs, int ys) {
  return xs != SCALE_SUBPEL_SHIFTS || ys != SCALE_SUBPEL_SHIFTS;
}

static INLINE void revert_scale_extra_bits(SubpelParams *sp) {
  sp->subpel_x >>= SCALE_EXTRA_BITS;
  sp->subpel_y >>= SCALE_EXTRA_BITS;
  sp->xs >>= SCALE_EXTRA_BITS;
  sp->ys >>= SCALE_EXTRA_BITS;
  assert(sp->subpel_x < SUBPEL_SHIFTS);
  assert(sp->subpel_y < SUBPEL_SHIFTS);
  assert(sp->xs <= SUBPEL_SHIFTS);
  assert(sp->ys <= SUBPEL_SHIFTS);
}
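
// Worked example (a sketch, assuming SCALE_EXTRA_BITS == 6, i.e. subpel
// positions carry 1/1024-pel precision on input and 1/16-pel afterwards):
// an unscaled half-pel offset arrives as sp->subpel_x == 512 and becomes
// 512 >> 6 == 8 (8/16 pel), while xs/ys drop from SCALE_SUBPEL_SHIFTS (1024)
// to SUBPEL_SHIFTS (16), which satisfies the asserts above.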

static INLINE void inter_predictor(
    const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
    const SubpelParams *subpel_params, const struct scale_factors *sf, int w,
    int h, ConvolveParams *conv_params,
    const InterpFilterParams *interp_filters[2]) {
  assert(conv_params->do_average == 0 || conv_params->do_average == 1);
  assert(sf);
  const int is_scaled = has_scale(subpel_params->xs, subpel_params->ys);
  if (is_scaled) {
    av1_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
                           interp_filters, subpel_params->subpel_x,
                           subpel_params->xs, subpel_params->subpel_y,
                           subpel_params->ys, 1, conv_params, sf);
  } else {
    SubpelParams sp = *subpel_params;
    revert_scale_extra_bits(&sp);
    av1_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
                           interp_filters, sp.subpel_x, sp.xs, sp.subpel_y,
                           sp.ys, 0, conv_params, sf);
  }
}

static INLINE void highbd_inter_predictor(
    const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
    const SubpelParams *subpel_params, const struct scale_factors *sf, int w,
    int h, ConvolveParams *conv_params,
    const InterpFilterParams *interp_filters[2], int bd) {
  assert(conv_params->do_average == 0 || conv_params->do_average == 1);
  assert(sf);
  const int is_scaled = has_scale(subpel_params->xs, subpel_params->ys);
  if (is_scaled) {
    av1_highbd_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
                                  interp_filters, subpel_params->subpel_x,
                                  subpel_params->xs, subpel_params->subpel_y,
                                  subpel_params->ys, 1, conv_params, sf, bd);
  } else {
    SubpelParams sp = *subpel_params;
    revert_scale_extra_bits(&sp);
    av1_highbd_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
                                  interp_filters, sp.subpel_x, sp.xs,
                                  sp.subpel_y, sp.ys, 0, conv_params, sf, bd);
  }
}

void av1_modify_neighbor_predictor_for_obmc(MB_MODE_INFO *mbmi);
int av1_skip_u4x4_pred_in_obmc(BLOCK_SIZE bsize,
                               const struct macroblockd_plane *pd, int dir);

static INLINE int is_interinter_compound_used(COMPOUND_TYPE type,
                                              BLOCK_SIZE sb_type) {
  const int comp_allowed = is_comp_ref_allowed(sb_type);
  switch (type) {
    case COMPOUND_AVERAGE:
    case COMPOUND_DISTWTD:
    case COMPOUND_DIFFWTD: return comp_allowed;
    case COMPOUND_WEDGE:
      return comp_allowed && av1_wedge_params_lookup[sb_type].wedge_types > 0;
    default: assert(0); return 0;
  }
}

static INLINE int is_any_masked_compound_used(BLOCK_SIZE sb_type) {
  COMPOUND_TYPE comp_type;
  int i;
  if (!is_comp_ref_allowed(sb_type)) return 0;
  for (i = 0; i < COMPOUND_TYPES; i++) {
    comp_type = (COMPOUND_TYPE)i;
    if (is_masked_compound_type(comp_type) &&
        is_interinter_compound_used(comp_type, sb_type))
      return 1;
  }
  return 0;
}

static INLINE int get_wedge_types_lookup(BLOCK_SIZE sb_type) {
  return av1_wedge_params_lookup[sb_type].wedge_types;
}

static INLINE int av1_is_wedge_used(BLOCK_SIZE sb_type) {
  return av1_wedge_params_lookup[sb_type].wedge_types > 0;
}

void av1_make_inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst,
                              int dst_stride,
                              InterPredParams *inter_pred_params,
                              const SubpelParams *subpel_params);

void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
                                     uint8_t *dst, int dst_stride,
                                     InterPredParams *inter_pred_params,
                                     const SubpelParams *subpel_params);

typedef void (*CalcSubpelParamsFunc)(const MV *const src_mv,
                                     InterPredParams *const inter_pred_params,
                                     MACROBLOCKD *xd, int mi_x, int mi_y,
                                     int ref, uint8_t **pre,
                                     SubpelParams *subpel_params,
                                     int *src_stride);

void av1_build_one_inter_predictor(
    uint8_t *dst, int dst_stride, const MV *const src_mv,
    InterPredParams *inter_pred_params, MACROBLOCKD *xd, int mi_x, int mi_y,
    int ref, CalcSubpelParamsFunc calc_subpel_params_func);

void av1_build_inter_predictors(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                int plane, const MB_MODE_INFO *mi,
                                int build_for_obmc, int bw, int bh, int mi_x,
                                int mi_y,
                                CalcSubpelParamsFunc calc_subpel_params_func);

// TODO(jkoleszar): yet another mv clamping function :-(
static INLINE MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd,
                                           const MV *src_mv, int bw, int bh,
                                           int ss_x, int ss_y) {
  // If the MV points so far into the UMV border that no visible pixels
  // are used for reconstruction, the subpel part of the MV can be
  // discarded and the MV limited to 16 pixels with equivalent results.
  const int spel_left = (AOM_INTERP_EXTEND + bw) << SUBPEL_BITS;
  const int spel_right = spel_left - SUBPEL_SHIFTS;
  const int spel_top = (AOM_INTERP_EXTEND + bh) << SUBPEL_BITS;
  const int spel_bottom = spel_top - SUBPEL_SHIFTS;
  MV clamped_mv = { (int16_t)(src_mv->row * (1 << (1 - ss_y))),
                    (int16_t)(src_mv->col * (1 << (1 - ss_x))) };
  assert(ss_x <= 1);
  assert(ss_y <= 1);
  const SubpelMvLimits mv_limits = {
    xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left,
    xd->mb_to_right_edge * (1 << (1 - ss_x)) + spel_right,
    xd->mb_to_top_edge * (1 << (1 - ss_y)) - spel_top,
    xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom
  };

  clamp_mv(&clamped_mv, &mv_limits);

  return clamped_mv;
}

static INLINE int64_t scaled_buffer_offset(int x_offset, int y_offset,
                                           int stride,
                                           const struct scale_factors *sf) {
  const int x =
      sf ? sf->scale_value_x(x_offset, sf) >> SCALE_EXTRA_BITS : x_offset;
  const int y =
      sf ? sf->scale_value_y(y_offset, sf) >> SCALE_EXTRA_BITS : y_offset;
  return (int64_t)y * stride + x;
}

static INLINE void setup_pred_plane(struct buf_2d *dst, BLOCK_SIZE bsize,
                                    uint8_t *src, int width, int height,
                                    int stride, int mi_row, int mi_col,
                                    const struct scale_factors *scale,
                                    int subsampling_x, int subsampling_y) {
  // Offset the buffer pointer
  if (subsampling_y && (mi_row & 0x01) && (mi_size_high[bsize] == 1))
    mi_row -= 1;
  if (subsampling_x && (mi_col & 0x01) && (mi_size_wide[bsize] == 1))
    mi_col -= 1;

  const int x = (MI_SIZE * mi_col) >> subsampling_x;
  const int y = (MI_SIZE * mi_row) >> subsampling_y;
  dst->buf = src + scaled_buffer_offset(x, y, stride, scale);
  dst->buf0 = src;
  dst->width = width;
  dst->height = height;
  dst->stride = stride;
}

void av1_setup_dst_planes(struct macroblockd_plane *planes, BLOCK_SIZE bsize,
                          const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
                          const int plane_start, const int plane_end);

void av1_setup_pre_planes(MACROBLOCKD *xd, int idx,
                          const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
                          const struct scale_factors *sf, const int num_planes);

static INLINE void set_default_interp_filters(
    MB_MODE_INFO *const mbmi, InterpFilter frame_interp_filter) {
  mbmi->interp_filters =
      av1_broadcast_interp_filter(av1_unswitchable_filter(frame_interp_filter));
}

static INLINE int av1_is_interp_needed(const MACROBLOCKD *const xd) {
  const MB_MODE_INFO *const mbmi = xd->mi[0];
  if (mbmi->skip_mode) return 0;
  if (mbmi->motion_mode == WARPED_CAUSAL) return 0;
  if (is_nontrans_global_motion(xd, xd->mi[0])) return 0;
  return 1;
}

void av1_setup_address_for_obmc(MACROBLOCKD *xd, int mi_row_offset,
                                int mi_col_offset, MB_MODE_INFO *ref_mbmi,
                                struct build_prediction_ctxt *ctxt,
                                const int num_planes);

void av1_setup_build_prediction_by_above_pred(
    MACROBLOCKD *xd, int rel_mi_col, uint8_t above_mi_width,
    MB_MODE_INFO *above_mbmi, struct build_prediction_ctxt *ctxt,
    const int num_planes);
void av1_setup_build_prediction_by_left_pred(MACROBLOCKD *xd, int rel_mi_row,
                                             uint8_t left_mi_height,
                                             MB_MODE_INFO *left_mbmi,
                                             struct build_prediction_ctxt *ctxt,
                                             const int num_planes);
void av1_build_obmc_inter_prediction(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                     uint8_t *above[MAX_MB_PLANE],
                                     int above_stride[MAX_MB_PLANE],
                                     uint8_t *left[MAX_MB_PLANE],
                                     int left_stride[MAX_MB_PLANE]);

const uint8_t *av1_get_obmc_mask(int length);
void av1_count_overlappable_neighbors(const AV1_COMMON *cm, MACROBLOCKD *xd);

#define MASK_MASTER_SIZE ((MAX_WEDGE_SIZE) << 1)
#define MASK_MASTER_STRIDE (MASK_MASTER_SIZE)

void av1_init_wedge_masks(void);

static INLINE const uint8_t *av1_get_contiguous_soft_mask(int8_t wedge_index,
                                                          int8_t wedge_sign,
                                                          BLOCK_SIZE sb_type) {
  return av1_wedge_params_lookup[sb_type].masks[wedge_sign][wedge_index];
}
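
// Illustrative use (a sketch: bsize, wedge_index and wedge_sign are
// placeholders that would come from the block's coded compound data):
//   if (av1_is_wedge_used(bsize)) {
//     const uint8_t *mask =
//         av1_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
//     // Each entry is a weight in [0, 1 << WEDGE_WEIGHT_BITS] applied to the
//     // first predictor; the complementary weight is applied to the second.
//   }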

const uint8_t *av1_get_compound_type_mask(
    const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type);

// build interintra_predictors for one plane
void av1_build_interintra_predictor(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                    uint8_t *pred, int stride,
                                    const BUFFER_SET *ctx, int plane,
                                    BLOCK_SIZE bsize);

void av1_build_intra_predictors_for_interintra(const AV1_COMMON *cm,
                                               MACROBLOCKD *xd,
                                               BLOCK_SIZE bsize, int plane,
                                               const BUFFER_SET *ctx,
                                               uint8_t *dst, int dst_stride);

void av1_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
                            const uint8_t *inter_pred, int inter_stride,
                            const uint8_t *intra_pred, int intra_stride);

void av1_dist_wtd_comp_weight_assign(const AV1_COMMON *cm,
                                     const MB_MODE_INFO *mbmi, int order_idx,
                                     int *fwd_offset, int *bck_offset,
                                     int *use_dist_wtd_comp_avg,
                                     int is_compound);
int av1_allow_warp(const MB_MODE_INFO *const mbmi,
                   const WarpTypesAllowed *const warp_types,
                   const WarpedMotionParams *const gm_params,
                   int build_for_obmc, const struct scale_factors *const sf,
                   WarpedMotionParams *final_warp_params);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // AOM_AV1_COMMON_RECONINTER_H_