1 /*
2 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 */
11
12 #ifndef AOM_AV1_COMMON_RECONINTER_H_
13 #define AOM_AV1_COMMON_RECONINTER_H_
14
15 #include "av1/common/av1_common_int.h"
16 #include "av1/common/convolve.h"
17 #include "av1/common/filter.h"
18 #include "av1/common/warped_motion.h"
19 #include "aom/aom_integer.h"
20
21 // Work out how many pixels off the edge of a reference frame we're allowed
22 // to go when forming an inter prediction.
 * The outermost row/col of each reference frame is extended by
24 // (AOM_BORDER_IN_PIXELS >> subsampling) pixels, but we need to keep
25 // at least AOM_INTERP_EXTEND pixels within that to account for filtering.
26 //
27 // We have to break this up into two macros to keep both clang-format and
28 // tools/lint-hunks.py happy.
29 #define AOM_LEFT_TOP_MARGIN_PX(subsampling) \
30 ((AOM_BORDER_IN_PIXELS >> subsampling) - AOM_INTERP_EXTEND)
31 #define AOM_LEFT_TOP_MARGIN_SCALED(subsampling) \
32 (AOM_LEFT_TOP_MARGIN_PX(subsampling) << SCALE_SUBPEL_BITS)
33
34 #ifdef __cplusplus
35 extern "C" {
36 #endif
37
// Maximum number of wedge partition codewords per block size.
#define MAX_WEDGE_TYPES 16

#define MAX_WEDGE_SIZE_LOG2 5  // 32x32
#define MAX_WEDGE_SIZE (1 << MAX_WEDGE_SIZE_LOG2)
// Pixel count of the largest wedge mask (MAX_WEDGE_SIZE * MAX_WEDGE_SIZE).
#define MAX_WEDGE_SQUARE (MAX_WEDGE_SIZE * MAX_WEDGE_SIZE)

// Precision, in bits, of the wedge mask weights.
#define WEDGE_WEIGHT_BITS 6

// Sentinel wedge index meaning "no wedge selected".
#define WEDGE_NONE -1
47
// Angles are with respect to horizontal anti-clockwise
enum {
  WEDGE_HORIZONTAL = 0,
  WEDGE_VERTICAL = 1,
  WEDGE_OBLIQUE27 = 2,
  WEDGE_OBLIQUE63 = 3,
  WEDGE_OBLIQUE117 = 4,
  WEDGE_OBLIQUE153 = 5,
  WEDGE_DIRECTIONS  // Number of distinct wedge edge directions.
} UENUM1BYTE(WedgeDirectionType);
58
// 3-tuple: {direction, x_offset, y_offset}
// One wedge codeword: the edge direction plus the x/y placement of the wedge
// edge within the block.
typedef struct {
  WedgeDirectionType direction;
  int x_offset;
  int y_offset;
} wedge_code_type;
65
// One mask pointer per wedge codeword.
typedef uint8_t *wedge_masks_type[MAX_WEDGE_TYPES];

// Per-block-size wedge configuration.
typedef struct {
  int wedge_types;                  // Number of valid codewords (0 if none).
  const wedge_code_type *codebook;  // Geometry of each codeword.
  uint8_t *signflip;                // Per-codeword sign-flip flags.
  wedge_masks_type *masks;          // Mask tables, indexed [sign][index].
} wedge_params_type;

// Wedge configuration for every block size.
extern const wedge_params_type av1_wedge_params_lookup[BLOCK_SIZES_ALL];
76
// Subpel interpolation parameters for one prediction block.  xs/ys carry the
// per-output-pixel step; subpel_x/subpel_y the starting subpel offset.  All
// four may carry SCALE_EXTRA_BITS of extra precision until
// revert_scale_extra_bits() strips it.
typedef struct SubpelParams {
  int xs;
  int ys;
  int subpel_x;
  int subpel_y;
} SubpelParams;
83
// Context passed to the OBMC "build prediction by above/left" helpers.
struct build_prediction_ctxt {
  const AV1_COMMON *cm;
  uint8_t **tmp_buf;   // Per-plane temporary prediction buffers.
  int *tmp_width;      // Dimensions/strides matching tmp_buf entries.
  int *tmp_height;
  int *tmp_stride;
  int mb_to_far_edge;  // Distance (subpel units) to the far frame edge.
  void *dcb;  // Decoder-only coding block.
};
93
// Motion model used when forming an inter prediction.
typedef enum InterPredMode {
  TRANSLATION_PRED,  // Plain translational MV.
  WARP_PRED,         // Warped motion (see WarpedMotionParams).
} InterPredMode;

// How reference predictions are combined.
typedef enum InterCompMode {
  UNIFORM_SINGLE,  // Single reference, no blending.
  UNIFORM_COMP,    // Compound average with uniform weights.
  MASK_COMP,       // Compound blended through a per-pixel mask.
} InterCompMode;
104
// Everything needed to form one inter prediction for a single plane.
typedef struct InterPredParams {
  InterPredMode mode;       // Translational or warped prediction.
  InterCompMode comp_mode;  // How (or whether) references are blended.
  WarpedMotionParams warp_params;  // Warp model; used with WARP_PRED.
  ConvolveParams conv_params;
  const InterpFilterParams *interp_filter_params[2];
  int block_width;
  int block_height;
  int pix_row;  // Block position in pixels at this plane's resolution.
  int pix_col;
  struct buf_2d ref_frame_buf;  // Reference pixels to predict from.
  int subsampling_x;
  int subsampling_y;
  const struct scale_factors *scale_factors;
  int bit_depth;
  int use_hbd_buf;  // Nonzero when buffers hold high-bit-depth samples.
  INTERINTER_COMPOUND_DATA mask_comp;  // Mask data for MASK_COMP blending.
  BLOCK_SIZE sb_type;
  int is_intrabc;  // Nonzero for intra block copy prediction.
} InterPredParams;
125
// Initializes the geometry, subsampling, bit-depth, buffer and filter fields
// of *inter_pred_params for one prediction block.
void av1_init_inter_params(InterPredParams *inter_pred_params, int block_width,
                           int block_height, int pix_row, int pix_col,
                           int subsampling_x, int subsampling_y, int bit_depth,
                           int use_hbd_buf, int is_intrabc,
                           const struct scale_factors *sf,
                           const struct buf_2d *ref_buf,
                           int_interpfilters interp_filters);

// Initializes the compound-mode fields of *inter_pred_params.
void av1_init_comp_mode(InterPredParams *inter_pred_params);

// Initializes the warp_params of *inter_pred_params for reference 'ref'.
void av1_init_warp_params(InterPredParams *inter_pred_params,
                          const WarpTypesAllowed *warp_types, int ref,
                          const MACROBLOCKD *xd, const MB_MODE_INFO *mi);
139
has_scale(int xs,int ys)140 static INLINE int has_scale(int xs, int ys) {
141 return xs != SCALE_SUBPEL_SHIFTS || ys != SCALE_SUBPEL_SHIFTS;
142 }
143
revert_scale_extra_bits(SubpelParams * sp)144 static INLINE void revert_scale_extra_bits(SubpelParams *sp) {
145 sp->subpel_x >>= SCALE_EXTRA_BITS;
146 sp->subpel_y >>= SCALE_EXTRA_BITS;
147 sp->xs >>= SCALE_EXTRA_BITS;
148 sp->ys >>= SCALE_EXTRA_BITS;
149 assert(sp->subpel_x < SUBPEL_SHIFTS);
150 assert(sp->subpel_y < SUBPEL_SHIFTS);
151 assert(sp->xs <= SUBPEL_SHIFTS);
152 assert(sp->ys <= SUBPEL_SHIFTS);
153 }
154
inter_predictor(const uint8_t * src,int src_stride,uint8_t * dst,int dst_stride,const SubpelParams * subpel_params,int w,int h,ConvolveParams * conv_params,const InterpFilterParams * interp_filters[2])155 static INLINE void inter_predictor(
156 const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
157 const SubpelParams *subpel_params, int w, int h,
158 ConvolveParams *conv_params, const InterpFilterParams *interp_filters[2]) {
159 assert(conv_params->do_average == 0 || conv_params->do_average == 1);
160 const int is_scaled = has_scale(subpel_params->xs, subpel_params->ys);
161 if (is_scaled) {
162 av1_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
163 interp_filters, subpel_params->subpel_x,
164 subpel_params->xs, subpel_params->subpel_y,
165 subpel_params->ys, 1, conv_params);
166 } else {
167 SubpelParams sp = *subpel_params;
168 revert_scale_extra_bits(&sp);
169 av1_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
170 interp_filters, sp.subpel_x, sp.xs, sp.subpel_y,
171 sp.ys, 0, conv_params);
172 }
173 }
174
highbd_inter_predictor(const uint8_t * src,int src_stride,uint8_t * dst,int dst_stride,const SubpelParams * subpel_params,int w,int h,ConvolveParams * conv_params,const InterpFilterParams * interp_filters[2],int bd)175 static INLINE void highbd_inter_predictor(
176 const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
177 const SubpelParams *subpel_params, int w, int h,
178 ConvolveParams *conv_params, const InterpFilterParams *interp_filters[2],
179 int bd) {
180 assert(conv_params->do_average == 0 || conv_params->do_average == 1);
181 const int is_scaled = has_scale(subpel_params->xs, subpel_params->ys);
182 if (is_scaled) {
183 av1_highbd_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
184 interp_filters, subpel_params->subpel_x,
185 subpel_params->xs, subpel_params->subpel_y,
186 subpel_params->ys, 1, conv_params, bd);
187 } else {
188 SubpelParams sp = *subpel_params;
189 revert_scale_extra_bits(&sp);
190 av1_highbd_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
191 interp_filters, sp.subpel_x, sp.xs,
192 sp.subpel_y, sp.ys, 0, conv_params, bd);
193 }
194 }
195
// Adjusts *mbmi for use as an OBMC neighbour predictor.
void av1_modify_neighbor_predictor_for_obmc(MB_MODE_INFO *mbmi);
// Returns nonzero when the OBMC prediction in direction 'dir' (0 = from
// above, 1 = from left — TODO confirm mapping) can be skipped for this plane.
int av1_skip_u4x4_pred_in_obmc(BLOCK_SIZE bsize,
                               const struct macroblockd_plane *pd, int dir);
199
is_interinter_compound_used(COMPOUND_TYPE type,BLOCK_SIZE sb_type)200 static INLINE int is_interinter_compound_used(COMPOUND_TYPE type,
201 BLOCK_SIZE sb_type) {
202 const int comp_allowed = is_comp_ref_allowed(sb_type);
203 switch (type) {
204 case COMPOUND_AVERAGE:
205 case COMPOUND_DISTWTD:
206 case COMPOUND_DIFFWTD: return comp_allowed;
207 case COMPOUND_WEDGE:
208 return comp_allowed && av1_wedge_params_lookup[sb_type].wedge_types > 0;
209 default: assert(0); return 0;
210 }
211 }
212
is_any_masked_compound_used(BLOCK_SIZE sb_type)213 static INLINE int is_any_masked_compound_used(BLOCK_SIZE sb_type) {
214 COMPOUND_TYPE comp_type;
215 int i;
216 if (!is_comp_ref_allowed(sb_type)) return 0;
217 for (i = 0; i < COMPOUND_TYPES; i++) {
218 comp_type = (COMPOUND_TYPE)i;
219 if (is_masked_compound_type(comp_type) &&
220 is_interinter_compound_used(comp_type, sb_type))
221 return 1;
222 }
223 return 0;
224 }
225
get_wedge_types_lookup(BLOCK_SIZE sb_type)226 static INLINE int get_wedge_types_lookup(BLOCK_SIZE sb_type) {
227 return av1_wedge_params_lookup[sb_type].wedge_types;
228 }
229
av1_is_wedge_used(BLOCK_SIZE sb_type)230 static INLINE int av1_is_wedge_used(BLOCK_SIZE sb_type) {
231 return av1_wedge_params_lookup[sb_type].wedge_types > 0;
232 }
233
// Forms one inter prediction into dst using the already-initialized
// *inter_pred_params and the given subpel parameters.
void av1_make_inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst,
                              int dst_stride,
                              InterPredParams *inter_pred_params,
                              const SubpelParams *subpel_params);

// Callback that computes the subpel parameters, source pointer (*pre) and
// *src_stride for motion vector 'src_mv'.
typedef void (*CalcSubpelParamsFunc)(const MV *const src_mv,
                                     InterPredParams *const inter_pred_params,
                                     MACROBLOCKD *xd, int mi_x, int mi_y,
                                     int ref, uint8_t **mc_buf, uint8_t **pre,
                                     SubpelParams *subpel_params,
                                     int *src_stride);

// Builds one inter predictor for motion vector 'src_mv' into dst.
void av1_build_one_inter_predictor(
    uint8_t *dst, int dst_stride, const MV *const src_mv,
    InterPredParams *inter_pred_params, MACROBLOCKD *xd, int mi_x, int mi_y,
    int ref, uint8_t **mc_buf, CalcSubpelParamsFunc calc_subpel_params_func);

// Builds the inter predictors for one plane of the block described by 'mi'.
void av1_build_inter_predictors(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                int plane, const MB_MODE_INFO *mi,
                                int build_for_obmc, int bw, int bh, int mi_x,
                                int mi_y, uint8_t **mc_buf,
                                CalcSubpelParamsFunc calc_subpel_params_func);
256
// TODO(jkoleszar): yet another mv clamping function :-(
static INLINE MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd,
                                           const MV *src_mv, int bw, int bh,
                                           int ss_x, int ss_y) {
  // If the MV points so far into the UMV border that no visible pixels
  // are used for reconstruction, the subpel part of the MV can be
  // discarded and the MV limited to 16 pixels with equivalent results.
  const int spel_left = (AOM_INTERP_EXTEND + bw) << SUBPEL_BITS;
  const int spel_right = spel_left - SUBPEL_SHIFTS;
  const int spel_top = (AOM_INTERP_EXTEND + bh) << SUBPEL_BITS;
  const int spel_bottom = spel_top - SUBPEL_SHIFTS;
  // Scale the MV to this plane's precision: an unsubsampled axis (ss == 0)
  // is doubled by (1 << (1 - ss)); a subsampled axis keeps its value.
  MV clamped_mv = { (int16_t)(src_mv->row * (1 << (1 - ss_y))),
                    (int16_t)(src_mv->col * (1 << (1 - ss_x))) };
  assert(ss_x <= 1);
  assert(ss_y <= 1);
  // Edge distances are scaled the same way, then widened by the margins
  // computed above so the interpolation footprint stays inside the border.
  const SubpelMvLimits mv_limits = {
    xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left,
    xd->mb_to_right_edge * (1 << (1 - ss_x)) + spel_right,
    xd->mb_to_top_edge * (1 << (1 - ss_y)) - spel_top,
    xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom
  };

  clamp_mv(&clamped_mv, &mv_limits);

  return clamped_mv;
}
283
scaled_buffer_offset(int x_offset,int y_offset,int stride,const struct scale_factors * sf)284 static INLINE int64_t scaled_buffer_offset(int x_offset, int y_offset,
285 int stride,
286 const struct scale_factors *sf) {
287 const int x =
288 sf ? sf->scale_value_x(x_offset, sf) >> SCALE_EXTRA_BITS : x_offset;
289 const int y =
290 sf ? sf->scale_value_y(y_offset, sf) >> SCALE_EXTRA_BITS : y_offset;
291 return (int64_t)y * stride + x;
292 }
293
// Points *dst at the (mi_row, mi_col) position within 'src', recording the
// buffer geometry alongside the offset pointer.
static INLINE void setup_pred_plane(struct buf_2d *dst, BLOCK_SIZE bsize,
                                    uint8_t *src, int width, int height,
                                    int stride, int mi_row, int mi_col,
                                    const struct scale_factors *scale,
                                    int subsampling_x, int subsampling_y) {
  // Offset the buffer pointer
  // NOTE(review): for a subsampled plane, an odd mi position of a block that
  // is only one mi unit tall/wide is stepped back to the even position —
  // presumably so it addresses the chroma samples shared with the even
  // neighbour; confirm against callers.
  if (subsampling_y && (mi_row & 0x01) && (mi_size_high[bsize] == 1))
    mi_row -= 1;
  if (subsampling_x && (mi_col & 0x01) && (mi_size_wide[bsize] == 1))
    mi_col -= 1;

  // Pixel position of the mi unit at this plane's resolution.
  const int x = (MI_SIZE * mi_col) >> subsampling_x;
  const int y = (MI_SIZE * mi_row) >> subsampling_y;
  dst->buf = src + scaled_buffer_offset(x, y, stride, scale);
  dst->buf0 = src;
  dst->width = width;
  dst->height = height;
  dst->stride = stride;
}
313
// Sets up the destination buffers in 'planes' for the plane range
// plane_start to plane_end at position (mi_row, mi_col) of 'src'.
void av1_setup_dst_planes(struct macroblockd_plane *planes, BLOCK_SIZE bsize,
                          const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
                          const int plane_start, const int plane_end);

// Sets up the prediction source planes of xd for reference slot 'idx'.
void av1_setup_pre_planes(MACROBLOCKD *xd, int idx,
                          const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
                          const struct scale_factors *sf, const int num_planes);
321
set_default_interp_filters(MB_MODE_INFO * const mbmi,InterpFilter frame_interp_filter)322 static INLINE void set_default_interp_filters(
323 MB_MODE_INFO *const mbmi, InterpFilter frame_interp_filter) {
324 mbmi->interp_filters =
325 av1_broadcast_interp_filter(av1_unswitchable_filter(frame_interp_filter));
326 }
327
av1_is_interp_needed(const MACROBLOCKD * const xd)328 static INLINE int av1_is_interp_needed(const MACROBLOCKD *const xd) {
329 const MB_MODE_INFO *const mbmi = xd->mi[0];
330 if (mbmi->skip_mode) return 0;
331 if (mbmi->motion_mode == WARPED_CAUSAL) return 0;
332 if (is_nontrans_global_motion(xd, xd->mi[0])) return 0;
333 return 1;
334 }
335
// Sets up buffers 'dst_buf1' and 'dst_buf2' from relevant buffers in 'xd' for
// subsequent use in OBMC prediction.
void av1_setup_obmc_dst_bufs(MACROBLOCKD *xd, uint8_t **dst_buf1,
                             uint8_t **dst_buf2);

// Prepares xd and *ctxt for building the OBMC prediction from the above
// neighbour described by 'above_mbmi'.
void av1_setup_build_prediction_by_above_pred(
    MACROBLOCKD *xd, int rel_mi_col, uint8_t above_mi_width,
    MB_MODE_INFO *above_mbmi, struct build_prediction_ctxt *ctxt,
    const int num_planes);
// Left-neighbour counterpart of av1_setup_build_prediction_by_above_pred().
void av1_setup_build_prediction_by_left_pred(MACROBLOCKD *xd, int rel_mi_row,
                                             uint8_t left_mi_height,
                                             MB_MODE_INFO *left_mbmi,
                                             struct build_prediction_ctxt *ctxt,
                                             const int num_planes);
// Blends the per-plane 'above' and 'left' neighbour predictions into the
// current block's prediction to form the final OBMC output.
void av1_build_obmc_inter_prediction(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                     uint8_t *above[MAX_MB_PLANE],
                                     int above_stride[MAX_MB_PLANE],
                                     uint8_t *left[MAX_MB_PLANE],
                                     int left_stride[MAX_MB_PLANE]);

// Returns the OBMC blending mask for an overlap region of the given length.
const uint8_t *av1_get_obmc_mask(int length);
// Counts the overlappable neighbours of the current block in 'xd'.
void av1_count_overlappable_neighbors(const AV1_COMMON *cm, MACROBLOCKD *xd);

// Size/stride of the oversized "master" wedge mask (2x MAX_WEDGE_SIZE).
#define MASK_MASTER_SIZE ((MAX_WEDGE_SIZE) << 1)
#define MASK_MASTER_STRIDE (MASK_MASTER_SIZE)
361
// Initializes the wedge mask tables (presumably those reached through
// av1_wedge_params_lookup — confirm against the definition).
// Declared with an explicit (void): an empty parameter list is an obsolescent
// "unspecified arguments" declaration in C and defeats argument checking.
void av1_init_wedge_masks(void);
363
// Returns the precomputed contiguous wedge mask for block size 'sb_type',
// codeword 'wedge_index' and sign 'wedge_sign'.
static INLINE const uint8_t *av1_get_contiguous_soft_mask(int8_t wedge_index,
                                                          int8_t wedge_sign,
                                                          BLOCK_SIZE sb_type) {
  return av1_wedge_params_lookup[sb_type].masks[wedge_sign][wedge_index];
}
369
// Computes the forward/backward weights (*fwd_offset, *bck_offset) for
// distance-weighted compound averaging and whether it is used.
void av1_dist_wtd_comp_weight_assign(const AV1_COMMON *cm,
                                     const MB_MODE_INFO *mbmi, int *fwd_offset,
                                     int *bck_offset,
                                     int *use_dist_wtd_comp_avg,
                                     int is_compound);

// Returns the blending mask for the compound type described by *comp_data.
const uint8_t *av1_get_compound_type_mask(
    const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type);

// build interintra_predictors for one plane
void av1_build_interintra_predictor(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                    uint8_t *pred, int stride,
                                    const BUFFER_SET *ctx, int plane,
                                    BLOCK_SIZE bsize);

// Builds the intra part of an inter-intra prediction into dst.
void av1_build_intra_predictors_for_interintra(const AV1_COMMON *cm,
                                               MACROBLOCKD *xd,
                                               BLOCK_SIZE bsize, int plane,
                                               const BUFFER_SET *ctx,
                                               uint8_t *dst, int dst_stride);

// Blends the inter and intra predictions for one plane of an inter-intra
// block.
void av1_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
                            const uint8_t *inter_pred, int inter_stride,
                            const uint8_t *intra_pred, int intra_stride);

// Decides whether warped motion may be used for this block; when allowed and
// final_warp_params is non-NULL, the warp model to apply is written there.
int av1_allow_warp(const MB_MODE_INFO *const mbmi,
                   const WarpTypesAllowed *const warp_types,
                   const WarpedMotionParams *const gm_params,
                   int build_for_obmc, const struct scale_factors *const sf,
                   WarpedMotionParams *final_warp_params);
400
401 #ifdef __cplusplus
402 } // extern "C"
403 #endif
404
405 #endif // AOM_AV1_COMMON_RECONINTER_H_
406