1 /*
2  * Copyright (c) 2016, Alliance for Open Media. All rights reserved
3  *
4  * This source code is subject to the terms of the BSD 2 Clause License and
5  * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6  * was not distributed with this source code in the LICENSE file, you can
7  * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8  * Media Patent License 1.0 was not distributed with this source code in the
9  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10  */
11 
12 #ifndef AOM_AV1_COMMON_BLOCKD_H_
13 #define AOM_AV1_COMMON_BLOCKD_H_
14 
15 #include "config/aom_config.h"
16 
17 #include "aom_dsp/aom_dsp_common.h"
18 #include "aom_ports/mem.h"
19 #include "aom_scale/yv12config.h"
20 
21 #include "av1/common/common_data.h"
22 #include "av1/common/quant_common.h"
23 #include "av1/common/entropy.h"
24 #include "av1/common/entropymode.h"
25 #include "av1/common/mv.h"
26 #include "av1/common/scale.h"
27 #include "av1/common/seg_common.h"
28 #include "av1/common/tile_common.h"
29 
30 #ifdef __cplusplus
31 extern "C" {
32 #endif
33 
34 #define USE_B_QUANT_NO_TRELLIS 1
35 
36 #define MAX_MB_PLANE 3
37 
38 #define MAX_DIFFWTD_MASK_BITS 1
39 
40 #define INTERINTRA_WEDGE_SIGN 0
41 
42 #define DEFAULT_INTER_TX_TYPE DCT_DCT
43 
44 #define MAX_PALETTE_BLOCK_WIDTH 64
45 
46 #define MAX_PALETTE_BLOCK_HEIGHT 64
47 
48 /*!\cond */
49 
50 // DIFFWTD_MASK_TYPES should not surpass 1 << MAX_DIFFWTD_MASK_BITS
51 enum {
52   DIFFWTD_38 = 0,
53   DIFFWTD_38_INV,
54   DIFFWTD_MASK_TYPES,
55 } UENUM1BYTE(DIFFWTD_MASK_TYPE);
56 
57 enum {
58   KEY_FRAME = 0,
59   INTER_FRAME = 1,
60   INTRA_ONLY_FRAME = 2,  // replaces intra-only
61   S_FRAME = 3,
62   FRAME_TYPES,
63 } UENUM1BYTE(FRAME_TYPE);
64 
65 static INLINE int is_comp_ref_allowed(BLOCK_SIZE bsize) {
66   return AOMMIN(block_size_wide[bsize], block_size_high[bsize]) >= 8;
67 }
68 
69 static INLINE int is_inter_mode(PREDICTION_MODE mode) {
70   return mode >= INTER_MODE_START && mode < INTER_MODE_END;
71 }
72 
73 typedef struct {
74   uint8_t *plane[MAX_MB_PLANE];
75   int stride[MAX_MB_PLANE];
76 } BUFFER_SET;
77 
78 static INLINE int is_inter_singleref_mode(PREDICTION_MODE mode) {
79   return mode >= SINGLE_INTER_MODE_START && mode < SINGLE_INTER_MODE_END;
80 }
81 static INLINE int is_inter_compound_mode(PREDICTION_MODE mode) {
82   return mode >= COMP_INTER_MODE_START && mode < COMP_INTER_MODE_END;
83 }
84 
85 static INLINE PREDICTION_MODE compound_ref0_mode(PREDICTION_MODE mode) {
86   static const PREDICTION_MODE lut[] = {
87     DC_PRED,        // DC_PRED
88     V_PRED,         // V_PRED
89     H_PRED,         // H_PRED
90     D45_PRED,       // D45_PRED
91     D135_PRED,      // D135_PRED
92     D113_PRED,      // D113_PRED
93     D157_PRED,      // D157_PRED
94     D203_PRED,      // D203_PRED
95     D67_PRED,       // D67_PRED
96     SMOOTH_PRED,    // SMOOTH_PRED
97     SMOOTH_V_PRED,  // SMOOTH_V_PRED
98     SMOOTH_H_PRED,  // SMOOTH_H_PRED
99     PAETH_PRED,     // PAETH_PRED
100     NEARESTMV,      // NEARESTMV
101     NEARMV,         // NEARMV
102     GLOBALMV,       // GLOBALMV
103     NEWMV,          // NEWMV
104     NEARESTMV,      // NEAREST_NEARESTMV
105     NEARMV,         // NEAR_NEARMV
106     NEARESTMV,      // NEAREST_NEWMV
107     NEWMV,          // NEW_NEARESTMV
108     NEARMV,         // NEAR_NEWMV
109     NEWMV,          // NEW_NEARMV
110     GLOBALMV,       // GLOBAL_GLOBALMV
111     NEWMV,          // NEW_NEWMV
112   };
113   assert(NELEMENTS(lut) == MB_MODE_COUNT);
114   assert(is_inter_compound_mode(mode) || is_inter_singleref_mode(mode));
115   return lut[mode];
116 }
117 
118 static INLINE PREDICTION_MODE compound_ref1_mode(PREDICTION_MODE mode) {
119   static const PREDICTION_MODE lut[] = {
120     MB_MODE_COUNT,  // DC_PRED
121     MB_MODE_COUNT,  // V_PRED
122     MB_MODE_COUNT,  // H_PRED
123     MB_MODE_COUNT,  // D45_PRED
124     MB_MODE_COUNT,  // D135_PRED
125     MB_MODE_COUNT,  // D113_PRED
126     MB_MODE_COUNT,  // D157_PRED
127     MB_MODE_COUNT,  // D203_PRED
128     MB_MODE_COUNT,  // D67_PRED
129     MB_MODE_COUNT,  // SMOOTH_PRED
130     MB_MODE_COUNT,  // SMOOTH_V_PRED
131     MB_MODE_COUNT,  // SMOOTH_H_PRED
132     MB_MODE_COUNT,  // PAETH_PRED
133     MB_MODE_COUNT,  // NEARESTMV
134     MB_MODE_COUNT,  // NEARMV
135     MB_MODE_COUNT,  // GLOBALMV
136     MB_MODE_COUNT,  // NEWMV
137     NEARESTMV,      // NEAREST_NEARESTMV
138     NEARMV,         // NEAR_NEARMV
139     NEWMV,          // NEAREST_NEWMV
140     NEARESTMV,      // NEW_NEARESTMV
141     NEWMV,          // NEAR_NEWMV
142     NEARMV,         // NEW_NEARMV
143     GLOBALMV,       // GLOBAL_GLOBALMV
144     NEWMV,          // NEW_NEWMV
145   };
146   assert(NELEMENTS(lut) == MB_MODE_COUNT);
147   assert(is_inter_compound_mode(mode));
148   return lut[mode];
149 }
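// Illustrative example: a compound inter mode decomposes into two
// single-reference modes via the lookup tables above, e.g. for NEAR_NEWMV:
//   const PREDICTION_MODE m0 = compound_ref0_mode(NEAR_NEWMV);  // NEARMV
//   const PREDICTION_MODE m1 = compound_ref1_mode(NEAR_NEWMV);  // NEWMV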
150 
151 static INLINE int have_nearmv_in_inter_mode(PREDICTION_MODE mode) {
152   return (mode == NEARMV || mode == NEAR_NEARMV || mode == NEAR_NEWMV ||
153           mode == NEW_NEARMV);
154 }
155 
156 static INLINE int have_newmv_in_inter_mode(PREDICTION_MODE mode) {
157   return (mode == NEWMV || mode == NEW_NEWMV || mode == NEAREST_NEWMV ||
158           mode == NEW_NEARESTMV || mode == NEAR_NEWMV || mode == NEW_NEARMV);
159 }
160 
161 static INLINE int is_masked_compound_type(COMPOUND_TYPE type) {
162   return (type == COMPOUND_WEDGE || type == COMPOUND_DIFFWTD);
163 }
164 
165 /* For keyframes, intra block modes are predicted by the (already decoded)
166    modes for the Y blocks to the left and above us; for interframes, there
167    is a single probability table. */
168 
169 typedef struct {
170   // Value of base colors for Y, U, and V
171   uint16_t palette_colors[3 * PALETTE_MAX_SIZE];
172   // Number of base colors for Y (0) and UV (1)
173   uint8_t palette_size[2];
174 } PALETTE_MODE_INFO;
175 
176 typedef struct {
177   FILTER_INTRA_MODE filter_intra_mode;
178   uint8_t use_filter_intra;
179 } FILTER_INTRA_MODE_INFO;
180 
181 static const PREDICTION_MODE fimode_to_intradir[FILTER_INTRA_MODES] = {
182   DC_PRED, V_PRED, H_PRED, D157_PRED, DC_PRED
183 };
184 
185 #if CONFIG_RD_DEBUG
186 #define TXB_COEFF_COST_MAP_SIZE (MAX_MIB_SIZE)
187 #endif
188 
189 typedef struct RD_STATS {
190   int rate;
191   int64_t dist;
192   // Be careful when using rdcost; it is not guaranteed to be set all the
193   // time.
194   // TODO(angiebird): Create a set of functions to manipulate the RD_STATS. In
195   // these functions, make sure rdcost is always up-to-date according to
196   // rate/dist.
197   int64_t rdcost;
198   int64_t sse;
199   int skip_txfm;  // sse should equal dist when skip_txfm == 1
200   int zero_rate;
201 #if CONFIG_RD_DEBUG
202   int txb_coeff_cost[MAX_MB_PLANE];
203 #endif  // CONFIG_RD_DEBUG
204 } RD_STATS;
205 
206 // This struct is used to group function args that are commonly
207 // sent together in functions related to interinter compound modes
208 typedef struct {
209   uint8_t *seg_mask;
210   int8_t wedge_index;
211   int8_t wedge_sign;
212   DIFFWTD_MASK_TYPE mask_type;
213   COMPOUND_TYPE type;
214 } INTERINTER_COMPOUND_DATA;
215 
216 #define INTER_TX_SIZE_BUF_LEN 16
217 #define TXK_TYPE_BUF_LEN 64
218 /*!\endcond */
219 
220 /*! \brief Stores the prediction/txfm mode of the current coding block
221  */
222 typedef struct MB_MODE_INFO {
223   /*****************************************************************************
224    * \name General Info of the Coding Block
225    ****************************************************************************/
226   /**@{*/
227   /*! \brief The block size of the current coding block */
228   BLOCK_SIZE bsize;
229   /*! \brief The partition type of the current coding block. */
230   PARTITION_TYPE partition;
231   /*! \brief The prediction mode used */
232   PREDICTION_MODE mode;
233   /*! \brief The UV mode when intra is used */
234   UV_PREDICTION_MODE uv_mode;
235   /*! \brief The q index for the current coding block. */
236   int current_qindex;
237   /**@}*/
238 
239   /*****************************************************************************
240    * \name Inter Mode Info
241    ****************************************************************************/
242   /**@{*/
243   /*! \brief The motion vectors used by the current inter mode */
244   int_mv mv[2];
245   /*! \brief The reference frames for the MV */
246   MV_REFERENCE_FRAME ref_frame[2];
247   /*! \brief Filter used in subpel interpolation. */
248   int_interpfilters interp_filters;
249   /*! \brief The motion mode used by the inter prediction. */
250   MOTION_MODE motion_mode;
251   /*! \brief Number of samples used by warp causal */
252   uint8_t num_proj_ref;
253   /*! \brief The number of overlapped neighbors above/left for obmc/warp motion
254    * mode. */
255   uint8_t overlappable_neighbors;
256   /*! \brief The parameters used in warp motion mode. */
257   WarpedMotionParams wm_params;
258   /*! \brief The type of intra mode used by inter-intra */
259   INTERINTRA_MODE interintra_mode;
260   /*! \brief The type of wedge used in interintra mode. */
261   int8_t interintra_wedge_index;
262   /*! \brief Struct that stores the data used in interinter compound mode. */
263   INTERINTER_COMPOUND_DATA interinter_comp;
264   /**@}*/
265 
266   /*****************************************************************************
267    * \name Intra Mode Info
268    ****************************************************************************/
269   /**@{*/
270   /*! \brief Directional mode delta: the angle is base angle + (angle_delta *
271    * step). */
272   int8_t angle_delta[PLANE_TYPES];
273   /*! \brief The type of filter intra mode used (if applicable). */
274   FILTER_INTRA_MODE_INFO filter_intra_mode_info;
275   /*! \brief Chroma from Luma: Joint sign of alpha Cb and alpha Cr */
276   int8_t cfl_alpha_signs;
277   /*! \brief Chroma from Luma: Index of the alpha Cb and alpha Cr combination */
278   uint8_t cfl_alpha_idx;
279   /*! \brief Stores the size and colors of palette mode */
280   PALETTE_MODE_INFO palette_mode_info;
281   /**@}*/
282 
283   /*****************************************************************************
284    * \name Transform Info
285    ****************************************************************************/
286   /**@{*/
287   /*! \brief Whether to skip transforming and sending. */
288   int8_t skip_txfm;
289   /*! \brief Transform size when fixed size txfm is used (e.g. intra modes). */
290   TX_SIZE tx_size;
291   /*! \brief Transform size when recursive txfm tree is on. */
292   TX_SIZE inter_tx_size[INTER_TX_SIZE_BUF_LEN];
293   /**@}*/
294 
295   /*****************************************************************************
296    * \name Loop Filter Info
297    ****************************************************************************/
298   /**@{*/
299   /*! \copydoc MACROBLOCKD::delta_lf_from_base */
300   int8_t delta_lf_from_base;
301   /*! \copydoc MACROBLOCKD::delta_lf */
302   int8_t delta_lf[FRAME_LF_COUNT];
303   /**@}*/
304 
305   /*****************************************************************************
306    * \name Bitfield for Memory Reduction
307    ****************************************************************************/
308   /**@{*/
309   /*! \brief The segment id */
310   uint8_t segment_id : 3;
311   /*! \brief Only valid when temporal update is off. */
312   uint8_t seg_id_predicted : 1;
313   /*! \brief Which ref_mv to use */
314   uint8_t ref_mv_idx : 2;
315   /*! \brief Inter skip mode */
316   uint8_t skip_mode : 1;
317   /*! \brief Whether intrabc is used. */
318   uint8_t use_intrabc : 1;
319   /*! \brief Indicates if masked compound is used (1) or not (0). */
320   uint8_t comp_group_idx : 1;
321   /*! \brief Indicates whether dist_wtd_comp (0) or simple average (1) is used. */
322   uint8_t compound_idx : 1;
323   /*! \brief Whether to use interintra wedge */
324   uint8_t use_wedge_interintra : 1;
325   /*! \brief CDEF strength per BLOCK_64X64 */
326   int8_t cdef_strength : 4;
327   /**@}*/
328 
329   /*! \brief Skip CDEF for this superblock */
330   uint8_t skip_cdef_curr_sb;
331 
332 #if CONFIG_RD_DEBUG
333   /*! \brief RD info used for debugging */
334   RD_STATS rd_stats;
335   /*! \brief The current row in units of 4x4 blocks, for debugging */
336   int mi_row;
337   /*! \brief The current col in units of 4x4 blocks, for debugging */
338   int mi_col;
339 #endif
340 #if CONFIG_INSPECTION
341   /*! \brief Whether we are skipping the current rows or columns. */
342   int16_t tx_skip[TXK_TYPE_BUF_LEN];
343 #endif
344 } MB_MODE_INFO;
345 
346 /*!\cond */
347 
348 static INLINE int is_intrabc_block(const MB_MODE_INFO *mbmi) {
349   return mbmi->use_intrabc;
350 }
351 
352 static INLINE PREDICTION_MODE get_uv_mode(UV_PREDICTION_MODE mode) {
353   assert(mode < UV_INTRA_MODES);
354   static const PREDICTION_MODE uv2y[] = {
355     DC_PRED,        // UV_DC_PRED
356     V_PRED,         // UV_V_PRED
357     H_PRED,         // UV_H_PRED
358     D45_PRED,       // UV_D45_PRED
359     D135_PRED,      // UV_D135_PRED
360     D113_PRED,      // UV_D113_PRED
361     D157_PRED,      // UV_D157_PRED
362     D203_PRED,      // UV_D203_PRED
363     D67_PRED,       // UV_D67_PRED
364     SMOOTH_PRED,    // UV_SMOOTH_PRED
365     SMOOTH_V_PRED,  // UV_SMOOTH_V_PRED
366     SMOOTH_H_PRED,  // UV_SMOOTH_H_PRED
367     PAETH_PRED,     // UV_PAETH_PRED
368     DC_PRED,        // UV_CFL_PRED
369     INTRA_INVALID,  // UV_INTRA_MODES
370     INTRA_INVALID,  // UV_MODE_INVALID
371   };
372   return uv2y[mode];
373 }
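// Illustrative example: UV_CFL_PRED has no direct luma counterpart, so the
// table above maps it to DC_PRED:
//   const PREDICTION_MODE y_mode = get_uv_mode(UV_CFL_PRED);  // DC_PRED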
374 
375 static INLINE int is_inter_block(const MB_MODE_INFO *mbmi) {
376   return is_intrabc_block(mbmi) || mbmi->ref_frame[0] > INTRA_FRAME;
377 }
378 
379 static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) {
380   return mbmi->ref_frame[1] > INTRA_FRAME;
381 }
382 
383 static INLINE int has_uni_comp_refs(const MB_MODE_INFO *mbmi) {
384   return has_second_ref(mbmi) && (!((mbmi->ref_frame[0] >= BWDREF_FRAME) ^
385                                     (mbmi->ref_frame[1] >= BWDREF_FRAME)));
386 }
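// Illustrative example (assuming the usual reference ordering, with the LAST*
// and GOLDEN frames below BWDREF_FRAME and the BWDREF/ALTREF frames at or
// above it): a unidirectional pair must lie entirely on one side of
// BWDREF_FRAME, so
//   ref_frame = { LAST_FRAME, LAST2_FRAME }   -> has_uni_comp_refs() == 1
//   ref_frame = { LAST_FRAME, ALTREF_FRAME }  -> has_uni_comp_refs() == 0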
387 
388 static INLINE MV_REFERENCE_FRAME comp_ref0(int ref_idx) {
389   static const MV_REFERENCE_FRAME lut[] = {
390     LAST_FRAME,     // LAST_LAST2_FRAMES,
391     LAST_FRAME,     // LAST_LAST3_FRAMES,
392     LAST_FRAME,     // LAST_GOLDEN_FRAMES,
393     BWDREF_FRAME,   // BWDREF_ALTREF_FRAMES,
394     LAST2_FRAME,    // LAST2_LAST3_FRAMES
395     LAST2_FRAME,    // LAST2_GOLDEN_FRAMES,
396     LAST3_FRAME,    // LAST3_GOLDEN_FRAMES,
397     BWDREF_FRAME,   // BWDREF_ALTREF2_FRAMES,
398     ALTREF2_FRAME,  // ALTREF2_ALTREF_FRAMES,
399   };
400   assert(NELEMENTS(lut) == TOTAL_UNIDIR_COMP_REFS);
401   return lut[ref_idx];
402 }
403 
404 static INLINE MV_REFERENCE_FRAME comp_ref1(int ref_idx) {
405   static const MV_REFERENCE_FRAME lut[] = {
406     LAST2_FRAME,    // LAST_LAST2_FRAMES,
407     LAST3_FRAME,    // LAST_LAST3_FRAMES,
408     GOLDEN_FRAME,   // LAST_GOLDEN_FRAMES,
409     ALTREF_FRAME,   // BWDREF_ALTREF_FRAMES,
410     LAST3_FRAME,    // LAST2_LAST3_FRAMES
411     GOLDEN_FRAME,   // LAST2_GOLDEN_FRAMES,
412     GOLDEN_FRAME,   // LAST3_GOLDEN_FRAMES,
413     ALTREF2_FRAME,  // BWDREF_ALTREF2_FRAMES,
414     ALTREF_FRAME,   // ALTREF2_ALTREF_FRAMES,
415   };
416   assert(NELEMENTS(lut) == TOTAL_UNIDIR_COMP_REFS);
417   return lut[ref_idx];
418 }
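// Illustrative example (assuming LAST_LAST2_FRAMES is the first entry of the
// unidirectional-compound enum, as the table comments suggest):
//   comp_ref0(0);  // LAST_FRAME
//   comp_ref1(0);  // LAST2_FRAME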
419 
420 PREDICTION_MODE av1_left_block_mode(const MB_MODE_INFO *left_mi);
421 
422 PREDICTION_MODE av1_above_block_mode(const MB_MODE_INFO *above_mi);
423 
424 static INLINE int is_global_mv_block(const MB_MODE_INFO *const mbmi,
425                                      TransformationType type) {
426   const PREDICTION_MODE mode = mbmi->mode;
427   const BLOCK_SIZE bsize = mbmi->bsize;
428   const int block_size_allowed =
429       AOMMIN(block_size_wide[bsize], block_size_high[bsize]) >= 8;
430   return (mode == GLOBALMV || mode == GLOBAL_GLOBALMV) && type > TRANSLATION &&
431          block_size_allowed;
432 }
433 
434 #if CONFIG_MISMATCH_DEBUG
435 static INLINE void mi_to_pixel_loc(int *pixel_c, int *pixel_r, int mi_col,
436                                    int mi_row, int tx_blk_col, int tx_blk_row,
437                                    int subsampling_x, int subsampling_y) {
438   *pixel_c = ((mi_col >> subsampling_x) << MI_SIZE_LOG2) +
439              (tx_blk_col << MI_SIZE_LOG2);
440   *pixel_r = ((mi_row >> subsampling_y) << MI_SIZE_LOG2) +
441              (tx_blk_row << MI_SIZE_LOG2);
442 }
443 #endif
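// Worked example (informal): with MI_SIZE_LOG2 == 2 (an mi unit is 4 pixels),
// mi_col = 10, subsampling_x = 1 and tx_blk_col = 2:
//   *pixel_c = ((10 >> 1) << 2) + (2 << 2) = 20 + 8 = 28
// i.e. chroma mi column 5 starts at pixel 20 and the transform block adds a
// further 8-pixel offset.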
444 
445 enum { MV_PRECISION_Q3, MV_PRECISION_Q4 } UENUM1BYTE(mv_precision);
446 
447 struct buf_2d {
448   uint8_t *buf;
449   uint8_t *buf0;
450   int width;
451   int height;
452   int stride;
453 };
454 
455 typedef struct eob_info {
456   uint16_t eob;
457   uint16_t max_scan_line;
458 } eob_info;
459 
460 typedef struct {
461   DECLARE_ALIGNED(32, tran_low_t, dqcoeff[MAX_MB_PLANE][MAX_SB_SQUARE]);
462   eob_info eob_data[MAX_MB_PLANE]
463                    [MAX_SB_SQUARE / (TX_SIZE_W_MIN * TX_SIZE_H_MIN)];
464   DECLARE_ALIGNED(16, uint8_t, color_index_map[2][MAX_SB_SQUARE]);
465 } CB_BUFFER;
466 
467 typedef struct macroblockd_plane {
468   PLANE_TYPE plane_type;
469   int subsampling_x;
470   int subsampling_y;
471   struct buf_2d dst;
472   struct buf_2d pre[2];
473   ENTROPY_CONTEXT *above_entropy_context;
474   ENTROPY_CONTEXT *left_entropy_context;
475 
476   // The dequantizers below are true dequantizers used only in the
477   // dequantization process.  They have the same coefficient
478   // shift/scale as TX.
479   int16_t seg_dequant_QTX[MAX_SEGMENTS][2];
480   // Pointer to color index map of:
481   // - Current coding block, on encoder side.
482   // - Current superblock, on decoder side.
483   uint8_t *color_index_map;
484 
485   // block size in pixels
486   uint8_t width, height;
487 
488   qm_val_t *seg_iqmatrix[MAX_SEGMENTS][TX_SIZES_ALL];
489   qm_val_t *seg_qmatrix[MAX_SEGMENTS][TX_SIZES_ALL];
490 } MACROBLOCKD_PLANE;
491 
492 #define BLOCK_OFFSET(i) ((i) << 4)
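// Illustrative example: BLOCK_OFFSET(i) is simply i * 16, the offset of the
// i-th 4x4 block's 16 coefficients within a dqcoeff buffer such as
// CB_BUFFER::dqcoeff, e.g. BLOCK_OFFSET(3) == 48.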
493 
494 /*!\endcond */
495 
496 /*!\brief Parameters related to Wiener Filter */
497 typedef struct {
498   /*!
499    * Vertical filter kernel.
500    */
501   DECLARE_ALIGNED(16, InterpKernel, vfilter);
502 
503   /*!
504    * Horizontal filter kernel.
505    */
506   DECLARE_ALIGNED(16, InterpKernel, hfilter);
507 } WienerInfo;
508 
509 /*!\brief Parameters related to Sgrproj Filter */
510 typedef struct {
511   /*!
512    * Parameter index.
513    */
514   int ep;
515 
516   /*!
517    * Weights for linear combination of filtered versions
518    */
519   int xqd[2];
520 } SgrprojInfo;
521 
522 /*!\cond */
523 
524 #if CONFIG_DEBUG
525 #define CFL_SUB8X8_VAL_MI_SIZE (4)
526 #define CFL_SUB8X8_VAL_MI_SQUARE \
527   (CFL_SUB8X8_VAL_MI_SIZE * CFL_SUB8X8_VAL_MI_SIZE)
528 #endif  // CONFIG_DEBUG
529 #define CFL_MAX_BLOCK_SIZE (BLOCK_32X32)
530 #define CFL_BUF_LINE (32)
531 #define CFL_BUF_LINE_I128 (CFL_BUF_LINE >> 3)
532 #define CFL_BUF_LINE_I256 (CFL_BUF_LINE >> 4)
533 #define CFL_BUF_SQUARE (CFL_BUF_LINE * CFL_BUF_LINE)
534 typedef struct cfl_ctx {
535   // Q3 reconstructed luma pixels (only Q2 is required, but Q3 is used to avoid
536   // shifts)
537   uint16_t recon_buf_q3[CFL_BUF_SQUARE];
538   // Q3 AC contributions (reconstructed luma pixels - tx block avg)
539   int16_t ac_buf_q3[CFL_BUF_SQUARE];
540 
541   // Cache the DC_PRED when performing RDO, so it does not have to be recomputed
542   // for every scaling parameter
543   int dc_pred_is_cached[CFL_PRED_PLANES];
544   // The DC_PRED cache is disabled when decoding
545   int use_dc_pred_cache;
546   // Only cache the first row of the DC_PRED
547   int16_t dc_pred_cache[CFL_PRED_PLANES][CFL_BUF_LINE];
548 
549   // Height and width currently used in the CfL prediction buffer.
550   int buf_height, buf_width;
551 
552   int are_parameters_computed;
553 
554   // Chroma subsampling
555   int subsampling_x, subsampling_y;
556 
557   // Whether the reconstructed luma pixels need to be stored
558   int store_y;
559 } CFL_CTX;
560 
561 typedef struct dist_wtd_comp_params {
562   int use_dist_wtd_comp_avg;
563   int fwd_offset;
564   int bck_offset;
565 } DIST_WTD_COMP_PARAMS;
566 
567 struct scale_factors;
568 
569 /*!\endcond */
570 
571 /*! \brief Variables related to current coding block.
572  *
573  * This is a common set of variables used by both encoder and decoder.
574  * Most/all of the pointers are mere pointers to actual arrays that are
575  * allocated elsewhere. This is mostly for coding convenience.
576  */
577 typedef struct macroblockd {
578   /**
579    * \name Position of current macroblock in mi units
580    */
581   /**@{*/
582   int mi_row; /*!< Row position in mi units. */
583   int mi_col; /*!< Column position in mi units. */
584   /**@}*/
585 
586   /*!
587    * Same as cm->mi_params.mi_stride, copied here for convenience.
588    */
589   int mi_stride;
590 
591   /*!
592    * True if current block transmits chroma information.
593    * More detail:
594    * Smallest supported block size for both luma and chroma plane is 4x4. Hence,
595    * in case of subsampled chroma plane (YUV 4:2:0 or YUV 4:2:2), multiple luma
596  * blocks smaller than 8x8 may be combined into one chroma block.
597    * For example, for YUV 4:2:0, let's say an 8x8 area is split into four 4x4
598    * luma blocks. Then, a single chroma block of size 4x4 will cover the area of
599    * these four luma blocks. This is implemented in bitstream as follows:
600    * - There are four MB_MODE_INFO structs for the four luma blocks.
601    * - First 3 MB_MODE_INFO have is_chroma_ref = false, and so do not transmit
602    * any information for chroma planes.
603    * - Last block will have is_chroma_ref = true and transmits chroma
604    * information for the 4x4 chroma block that covers the whole 8x8 area covered by
605    * four luma blocks.
606    * Similar logic applies for chroma blocks that cover 2 or 3 luma blocks.
607    */
608   bool is_chroma_ref;
609 
610   /*!
611    * Info specific to each plane.
612    */
613   struct macroblockd_plane plane[MAX_MB_PLANE];
614 
615   /*!
616    * Tile related info.
617    */
618   TileInfo tile;
619 
620   /*!
621    * Appropriate offset inside cm->mi_params.mi_grid_base based on current
622    * mi_row and mi_col.
623    */
624   MB_MODE_INFO **mi;
625 
626   /*!
627    * True if 4x4 block above the current block is available.
628    */
629   bool up_available;
630   /*!
631    * True if 4x4 block to the left of the current block is available.
632    */
633   bool left_available;
634   /*!
635    * True if the above chroma reference block is available.
636    */
637   bool chroma_up_available;
638   /*!
639    * True if the left chroma reference block is available.
640    */
641   bool chroma_left_available;
642 
643   /*!
644    * MB_MODE_INFO for 4x4 block to the left of the current block, if
645    * left_available == true; otherwise NULL.
646    */
647   MB_MODE_INFO *left_mbmi;
648   /*!
649    * MB_MODE_INFO for 4x4 block above the current block, if
650    * up_available == true; otherwise NULL.
651    */
652   MB_MODE_INFO *above_mbmi;
653   /*!
654    * Left chroma reference block if is_chroma_ref == true for the current block
655    * and chroma_left_available == true; otherwise NULL.
656    * See also: the special case logic when the current chroma block covers more
657    * than one luma block in set_mi_row_col().
658    */
659   MB_MODE_INFO *chroma_left_mbmi;
660   /*!
661    * Above chroma reference block if is_chroma_ref == true for the current block
662    * and chroma_up_available == true; otherwise NULL.
663    * See also: the special case logic when the current chroma block covers more
664    * than one luma block in set_mi_row_col().
665    */
666   MB_MODE_INFO *chroma_above_mbmi;
667 
668   /*!
669    * Appropriate offset based on current 'mi_row' and 'mi_col', inside
670    * 'tx_type_map' in one of 'CommonModeInfoParams', 'PICK_MODE_CONTEXT' or
671    * 'MACROBLOCK' structs.
672    */
673   uint8_t *tx_type_map;
674   /*!
675    * Stride for 'tx_type_map'. Note that this may / may not be same as
676    * 'mi_stride', depending on which actual array 'tx_type_map' points to.
677    */
678   int tx_type_map_stride;
679 
680   /**
681    * \name Distance of this macroblock from frame edges in 1/8th pixel units.
682    */
683   /**@{*/
684   int mb_to_left_edge;   /*!< Distance from left edge */
685   int mb_to_right_edge;  /*!< Distance from right edge */
686   int mb_to_top_edge;    /*!< Distance from top edge */
687   int mb_to_bottom_edge; /*!< Distance from bottom edge */
688   /**@}*/
689 
690   /*!
691    * Scale factors for reference frames of the current block.
692    * These are pointers into 'cm->ref_scale_factors'.
693    */
694   const struct scale_factors *block_ref_scale_factors[2];
695 
696   /*!
697    * - On encoder side: points to cpi->source, which is the buffer containing
698    * the current *source* frame (maybe filtered).
699    * - On decoder side: points to cm->cur_frame->buf, which is the buffer into
700    * which current frame is being *decoded*.
701    */
702   const YV12_BUFFER_CONFIG *cur_buf;
703 
704   /*!
705    * Entropy contexts for the above blocks.
706    * above_entropy_context[i][j] corresponds to above entropy context for ith
707    * plane and jth mi column of this *frame*, wrt current 'mi_row'.
708    * These are pointers into 'cm->above_contexts.entropy'.
709    */
710   ENTROPY_CONTEXT *above_entropy_context[MAX_MB_PLANE];
711   /*!
712    * Entropy contexts for the left blocks.
713    * left_entropy_context[i][j] corresponds to left entropy context for ith
714    * plane and jth mi row of this *superblock*, wrt current 'mi_col'.
715    * Note: These contain actual data, NOT pointers.
716    */
717   ENTROPY_CONTEXT left_entropy_context[MAX_MB_PLANE][MAX_MIB_SIZE];
718 
719   /*!
720    * Partition contexts for the above blocks.
721    * above_partition_context[i] corresponds to above partition context for ith
722    * mi column of this *frame*, wrt current 'mi_row'.
723    * This is a pointer into 'cm->above_contexts.partition'.
724    */
725   PARTITION_CONTEXT *above_partition_context;
726   /*!
727    * Partition contexts for the left blocks.
728    * left_partition_context[i] corresponds to left partition context for ith
729    * mi row of this *superblock*, wrt current 'mi_col'.
730    * Note: These contain actual data, NOT pointers.
731    */
732   PARTITION_CONTEXT left_partition_context[MAX_MIB_SIZE];
733 
734   /*!
735    * Transform contexts for the above blocks.
736    * above_txfm_context[i] corresponds to above transform context for ith mi col
737    * from the current position (mi row and mi column) for this *frame*.
738    * This is a pointer into 'cm->above_contexts.txfm'.
739    */
740   TXFM_CONTEXT *above_txfm_context;
741   /*!
742    * Transform contexts for the left blocks.
743    * left_txfm_context[i] corresponds to left transform context for ith mi row
744    * from the current position (mi_row and mi_col) for this *superblock*.
745    * This is a pointer into 'left_txfm_context_buffer'.
746    */
747   TXFM_CONTEXT *left_txfm_context;
748   /*!
749    * left_txfm_context_buffer[i] is the left transform context for ith mi_row
750    * in this *superblock*.
751    * This is the actual buffer that 'left_txfm_context' points to, and it is
752    * never accessed directly except to fill in initial default values.
753    */
754   TXFM_CONTEXT left_txfm_context_buffer[MAX_MIB_SIZE];
755 
756   /**
757    * \name Default values for the two restoration filters for each plane.
758    * Default values for the two restoration filters for each plane.
759    * These values are used as reference values when writing the bitstream. That
760    * is, we transmit the delta between the actual values in
761    * cm->rst_info[plane].unit_info[unit_idx] and these reference values.
762    */
763   /**@{*/
764   WienerInfo wiener_info[MAX_MB_PLANE];   /*!< Defaults for Wiener filter*/
765   SgrprojInfo sgrproj_info[MAX_MB_PLANE]; /*!< Defaults for SGR filter */
766   /**@}*/
767 
768   /**
769    * \name Block dimensions in MB_MODE_INFO units.
770    */
771   /**@{*/
772   uint8_t width;  /*!< Block width in MB_MODE_INFO units */
773   uint8_t height; /*!< Block height in MB_MODE_INFO units */
774   /**@}*/
775 
776   /*!
777    * Contains the motion vector candidates found during motion vector prediction
778    * process. ref_mv_stack[i] contains the candidates for ith type of
779    * reference frame (single/compound). The actual number of candidates found in
780    * ref_mv_stack[i] is stored in either dcb->ref_mv_count[i] (decoder side)
781    * or mbmi_ext->ref_mv_count[i] (encoder side).
782    */
783   CANDIDATE_MV ref_mv_stack[MODE_CTX_REF_FRAMES][MAX_REF_MV_STACK_SIZE];
784   /*!
785    * weight[i][j] is the weight for ref_mv_stack[i][j] and used to compute the
786    * DRL (dynamic reference list) mode contexts.
787    */
788   uint16_t weight[MODE_CTX_REF_FRAMES][MAX_REF_MV_STACK_SIZE];
789 
790   /*!
791    * True if this is the last vertical rectangular block in a VERTICAL or
792    * VERTICAL_4 partition.
793    */
794   bool is_last_vertical_rect;
795   /*!
796    * True if this is the 1st horizontal rectangular block in a HORIZONTAL or
797    * HORIZONTAL_4 partition.
798    */
799   bool is_first_horizontal_rect;
800 
801   /*!
802    * Counts of each reference frame in the above and left neighboring blocks.
803    * NOTE: Take into account both single and comp references.
804    */
805   uint8_t neighbors_ref_counts[REF_FRAMES];
806 
807   /*!
808    * Current CDFs of all the symbols for the current tile.
809    */
810   FRAME_CONTEXT *tile_ctx;
811 
812   /*!
813    * Bit depth: copied from cm->seq_params->bit_depth for convenience.
814    */
815   int bd;
816 
817   /*!
818    * Quantizer index for each segment (base qindex + delta for each segment).
819    */
820   int qindex[MAX_SEGMENTS];
821   /*!
822    * lossless[s] is true if segment 's' is coded losslessly.
823    */
824   int lossless[MAX_SEGMENTS];
825   /*!
826    * Q index for the coding blocks in this superblock will be stored in
827    * mbmi->current_qindex. Now, when cm->delta_q_info.delta_q_present_flag is
828    * true, mbmi->current_qindex is computed by taking 'current_base_qindex' as
829    * the base, and adding any transmitted delta qindex on top of it.
830    * Precisely, this is the latest qindex used by the first coding block of a
831    * non-skip superblock in the current tile; OR
832    * same as cm->quant_params.base_qindex (if not explicitly set yet).
833    * Note: This is 'CurrentQIndex' in the AV1 spec.
834    */
835   int current_base_qindex;
836 
837   /*!
838    * Same as cm->features.cur_frame_force_integer_mv.
839    */
840   int cur_frame_force_integer_mv;
841 
842   /*!
843    * Pointer to cm->error.
844    */
845   struct aom_internal_error_info *error_info;
846 
847   /*!
848    * Same as cm->global_motion.
849    */
850   const WarpedMotionParams *global_motion;
851 
852   /*!
853    * Since the actual frame-level loop filter level is not available at the
854    * beginning of the tile (it only becomes available during actual filtering)
855    * on the encoder side, we record the delta_lf (against the frame-level loop
856    * filter level) and code the delta between the previous superblock's delta
857    * lf and the current delta lf. This is equivalent to the delta between the
858    * previous superblock's actual lf and the current lf.
859    */
860   int8_t delta_lf_from_base;
861   /*!
862    * We have four frame filter levels for different plane and direction. So, to
863    * support the per superblock update, we need to add a few more params:
864    * 0. delta loop filter level for y plane vertical
865    * 1. delta loop filter level for y plane horizontal
866    * 2. delta loop filter level for u plane
867    * 3. delta loop filter level for v plane
868    * To make it consistent with the reference to each filter level in segment,
869    * we need to subtract 1, since
870    * - SEG_LVL_ALT_LF_Y_V = 1;
871    * - SEG_LVL_ALT_LF_Y_H = 2;
872    * - SEG_LVL_ALT_LF_U   = 3;
873    * - SEG_LVL_ALT_LF_V   = 4;
874    */
875   int8_t delta_lf[FRAME_LF_COUNT];
876   /*!
877    * cdef_transmitted[i] is true if CDEF strength for ith CDEF unit in the
878    * current superblock has already been read from (decoder) / written to
879    * (encoder) the bitstream; and false otherwise.
880    * More detail:
881    * 1. CDEF strength is transmitted only once per CDEF unit, in the 1st
882    * non-skip coding block. So, we need this array to keep track of whether CDEF
883    * strengths for the given CDEF units have been transmitted yet or not.
884    * 2. Superblock size can be either 128x128 or 64x64, but CDEF unit size is
885    * fixed to be 64x64. So, there may be 4 CDEF units within a superblock (if
886    * superblock size is 128x128). Hence the array size is 4.
887    * 3. In the current implementation, CDEF strength for this CDEF unit is
888    * stored in the MB_MODE_INFO of the 1st block in this CDEF unit (inside
889    * cm->mi_params.mi_grid_base).
890    */
891   bool cdef_transmitted[4];
892 
893   /*!
894    * Mask for this block used for compound prediction.
895    */
896   uint8_t *seg_mask;
897 
898   /*!
899    * CFL (chroma from luma) related parameters.
900    */
901   CFL_CTX cfl;
902 
903   /*!
904    * Offset to plane[p].color_index_map.
905    * Currently:
906    * - On encoder side, this is always 0 as 'color_index_map' is allocated per
907    * *coding block* there.
908    * - On decoder side, this may be non-zero, as 'color_index_map' is a (static)
909    * memory pointing to the base of a *superblock* there, and we need an offset
910    * to it to get the color index map for current coding block.
911    */
912   uint16_t color_index_map_offset[2];
913 
914   /*!
915    * Temporary buffer used for convolution in case of compound reference only
916    * for (weighted or uniform) averaging operation.
917    * There are pointers to actual buffers allocated elsewhere: e.g.
918    * - In decoder, 'pbi->td.tmp_conv_dst' or
919    * 'pbi->thread_data[t].td->xd.tmp_conv_dst' and
920    * - In encoder, 'x->tmp_conv_dst' or
921    * 'cpi->tile_thr_data[t].td->mb.tmp_conv_dst'.
922    */
923   CONV_BUF_TYPE *tmp_conv_dst;
924   /*!
925    * Temporary buffers used to build OBMC prediction by above (index 0) and left
926    * (index 1) predictors respectively.
927    * tmp_obmc_bufs[i][p * MAX_SB_SQUARE] is the buffer used for plane 'p'.
928    * There are pointers to actual buffers allocated elsewhere: e.g.
929    * - In decoder, 'pbi->td.tmp_obmc_bufs' or
930    * 'pbi->thread_data[t].td->xd.tmp_obmc_bufs' and
931    * - In encoder, 'x->tmp_pred_bufs' or
932    * 'cpi->tile_thr_data[t].td->mb.tmp_pred_bufs'.
933    */
934   uint8_t *tmp_obmc_bufs[2];
935 } MACROBLOCKD;
936 
937 /*!\cond */
938 
939 static INLINE int is_cur_buf_hbd(const MACROBLOCKD *xd) {
940 #if CONFIG_AV1_HIGHBITDEPTH
941   return xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH ? 1 : 0;
942 #else
943   (void)xd;
944   return 0;
945 #endif
946 }
947 
948 static INLINE uint8_t *get_buf_by_bd(const MACROBLOCKD *xd, uint8_t *buf16) {
949 #if CONFIG_AV1_HIGHBITDEPTH
950   return (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
951              ? CONVERT_TO_BYTEPTR(buf16)
952              : buf16;
953 #else
954   (void)xd;
955   return buf16;
956 #endif
957 }
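// Illustrative sketch (informal, assuming a valid xd): a 16-bit scratch buffer
// is passed as a byte pointer and get_buf_by_bd() selects the representation
// the pipeline expects:
//   DECLARE_ALIGNED(32, uint16_t, tmp16[MAX_SB_SQUARE]);
//   uint8_t *dst = get_buf_by_bd(xd, (uint8_t *)tmp16);
// With a high bitdepth cur_buf this yields CONVERT_TO_BYTEPTR(tmp16);
// otherwise the pointer is returned unchanged.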
958 
959 typedef struct BitDepthInfo {
960   int bit_depth;
961   /*! Is the image buffer high bit depth?
962    * Low bit depth buffer uses uint8_t.
963    * High bit depth buffer uses uint16_t.
964    * Equivalent to cm->seq_params->use_highbitdepth
965    */
966   int use_highbitdepth_buf;
967 } BitDepthInfo;
968 
969 static INLINE BitDepthInfo get_bit_depth_info(const MACROBLOCKD *xd) {
970   BitDepthInfo bit_depth_info;
971   bit_depth_info.bit_depth = xd->bd;
972   bit_depth_info.use_highbitdepth_buf = is_cur_buf_hbd(xd);
973   assert(IMPLIES(!bit_depth_info.use_highbitdepth_buf,
974                  bit_depth_info.bit_depth == 8));
975   return bit_depth_info;
976 }
977 
978 static INLINE int get_sqr_bsize_idx(BLOCK_SIZE bsize) {
979   switch (bsize) {
980     case BLOCK_4X4: return 0;
981     case BLOCK_8X8: return 1;
982     case BLOCK_16X16: return 2;
983     case BLOCK_32X32: return 3;
984     case BLOCK_64X64: return 4;
985     case BLOCK_128X128: return 5;
986     default: return SQR_BLOCK_SIZES;
987   }
988 }
989 
990 // For a square block size 'bsize', returns the size of the sub-blocks used by
991 // the given partition type. If the partition produces sub-blocks of different
992 // sizes, then the function returns the largest sub-block size.
993 // Implements the Partition_Subsize lookup table in the spec (Section 9.3.
994 // Conversion tables).
995 // Note: the input block size should be square.
996 // Otherwise it's considered invalid.
997 static INLINE BLOCK_SIZE get_partition_subsize(BLOCK_SIZE bsize,
998                                                PARTITION_TYPE partition) {
999   if (partition == PARTITION_INVALID) {
1000     return BLOCK_INVALID;
1001   } else {
1002     const int sqr_bsize_idx = get_sqr_bsize_idx(bsize);
1003     return sqr_bsize_idx >= SQR_BLOCK_SIZES
1004                ? BLOCK_INVALID
1005                : subsize_lookup[partition][sqr_bsize_idx];
1006   }
1007 }
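// Illustrative example, following the Partition_Subsize table in the spec:
//   get_partition_subsize(BLOCK_64X64, PARTITION_NONE);   // BLOCK_64X64
//   get_partition_subsize(BLOCK_64X64, PARTITION_SPLIT);  // BLOCK_32X32
//   get_partition_subsize(BLOCK_64X64, PARTITION_HORZ);   // BLOCK_64X32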
1008 
1009 static TX_TYPE intra_mode_to_tx_type(const MB_MODE_INFO *mbmi,
1010                                      PLANE_TYPE plane_type) {
1011   static const TX_TYPE _intra_mode_to_tx_type[INTRA_MODES] = {
1012     DCT_DCT,    // DC_PRED
1013     ADST_DCT,   // V_PRED
1014     DCT_ADST,   // H_PRED
1015     DCT_DCT,    // D45_PRED
1016     ADST_ADST,  // D135_PRED
1017     ADST_DCT,   // D113_PRED
1018     DCT_ADST,   // D157_PRED
1019     DCT_ADST,   // D203_PRED
1020     ADST_DCT,   // D67_PRED
1021     ADST_ADST,  // SMOOTH_PRED
1022     ADST_DCT,   // SMOOTH_V_PRED
1023     DCT_ADST,   // SMOOTH_H_PRED
1024     ADST_ADST,  // PAETH_PRED
1025   };
1026   const PREDICTION_MODE mode =
1027       (plane_type == PLANE_TYPE_Y) ? mbmi->mode : get_uv_mode(mbmi->uv_mode);
1028   assert(mode < INTRA_MODES);
1029   return _intra_mode_to_tx_type[mode];
1030 }
1031 
1032 static INLINE int is_rect_tx(TX_SIZE tx_size) { return tx_size >= TX_SIZES; }
1033 
1034 static INLINE int block_signals_txsize(BLOCK_SIZE bsize) {
1035   return bsize > BLOCK_4X4;
1036 }
1037 
1038 // Number of transform types in each set type
1039 static const int av1_num_ext_tx_set[EXT_TX_SET_TYPES] = {
1040   1, 2, 5, 7, 12, 16,
1041 };
1042 
1043 static const int av1_ext_tx_used[EXT_TX_SET_TYPES][TX_TYPES] = {
1044   { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1045   { 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0 },
1046   { 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0 },
1047   { 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0 },
1048   { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 },
1049   { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
1050 };
1051 
1052 // The bitmask corresponds to the transform types as defined in
1053 // enums.h TX_TYPE enumeration type. Setting a bit to 0 disables the use of
1054 // the corresponding transform type in that table.
1055 // The av1_derived_intra_tx_used_flag table is used when
1056 // use_reduced_intra_txset is set to 2, where one only searches
1057 // the transform types derived from residual statistics.
1058 static const uint16_t av1_derived_intra_tx_used_flag[INTRA_MODES] = {
1059   0x0209,  // DC_PRED:       0000 0010 0000 1001
1060   0x0403,  // V_PRED:        0000 0100 0000 0011
1061   0x0805,  // H_PRED:        0000 1000 0000 0101
1062   0x020F,  // D45_PRED:      0000 0010 0000 1111
1063   0x0009,  // D135_PRED:     0000 0000 0000 1001
1064   0x0009,  // D113_PRED:     0000 0000 0000 1001
1065   0x0009,  // D157_PRED:     0000 0000 0000 1001
1066   0x0805,  // D203_PRED:     0000 1000 0000 0101
1067   0x0403,  // D67_PRED:      0000 0100 0000 0011
1068   0x0205,  // SMOOTH_PRED:   0000 0010 0000 0101
1069   0x0403,  // SMOOTH_V_PRED: 0000 0100 0000 0011
1070   0x0805,  // SMOOTH_H_PRED: 0000 1000 0000 0101
1071   0x0209,  // PAETH_PRED:    0000 0010 0000 1001
1072 };
1073 
1074 static const uint16_t av1_reduced_intra_tx_used_flag[INTRA_MODES] = {
1075   0x080F,  // DC_PRED:       0000 1000 0000 1111
1076   0x040F,  // V_PRED:        0000 0100 0000 1111
1077   0x080F,  // H_PRED:        0000 1000 0000 1111
1078   0x020F,  // D45_PRED:      0000 0010 0000 1111
1079   0x080F,  // D135_PRED:     0000 1000 0000 1111
1080   0x040F,  // D113_PRED:     0000 0100 0000 1111
1081   0x080F,  // D157_PRED:     0000 1000 0000 1111
1082   0x080F,  // D203_PRED:     0000 1000 0000 1111
1083   0x040F,  // D67_PRED:      0000 0100 0000 1111
1084   0x080F,  // SMOOTH_PRED:   0000 1000 0000 1111
1085   0x040F,  // SMOOTH_V_PRED: 0000 0100 0000 1111
1086   0x080F,  // SMOOTH_H_PRED: 0000 1000 0000 1111
1087   0x0C0E,  // PAETH_PRED:    0000 1100 0000 1110
1088 };
1089 
1090 static const uint16_t av1_ext_tx_used_flag[EXT_TX_SET_TYPES] = {
1091   0x0001,  // 0000 0000 0000 0001
1092   0x0201,  // 0000 0010 0000 0001
1093   0x020F,  // 0000 0010 0000 1111
1094   0x0E0F,  // 0000 1110 0000 1111
1095   0x0FFF,  // 0000 1111 1111 1111
1096   0xFFFF,  // 1111 1111 1111 1111
1097 };
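// Illustrative example: the flag tables mirror av1_ext_tx_used, so membership
// of a transform type in a set can be tested with a shift and mask (DCT_DCT is
// transform type 0):
//   const int allowed =
//       (av1_ext_tx_used_flag[EXT_TX_SET_DCTONLY] >> DCT_DCT) & 1;  // 1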
1098 
1099 static const TxSetType av1_ext_tx_set_lookup[2][2] = {
1100   { EXT_TX_SET_DTT4_IDTX_1DDCT, EXT_TX_SET_DTT4_IDTX },
1101   { EXT_TX_SET_ALL16, EXT_TX_SET_DTT9_IDTX_1DDCT },
1102 };
1103 
1104 static INLINE TxSetType av1_get_ext_tx_set_type(TX_SIZE tx_size, int is_inter,
1105                                                 int use_reduced_set) {
1106   const TX_SIZE tx_size_sqr_up = txsize_sqr_up_map[tx_size];
1107   if (tx_size_sqr_up > TX_32X32) return EXT_TX_SET_DCTONLY;
1108   if (tx_size_sqr_up == TX_32X32)
1109     return is_inter ? EXT_TX_SET_DCT_IDTX : EXT_TX_SET_DCTONLY;
1110   if (use_reduced_set)
1111     return is_inter ? EXT_TX_SET_DCT_IDTX : EXT_TX_SET_DTT4_IDTX;
1112   const TX_SIZE tx_size_sqr = txsize_sqr_map[tx_size];
1113   return av1_ext_tx_set_lookup[is_inter][tx_size_sqr == TX_16X16];
1114 }
1115 
1116 // Maps tx set types to the indices.
1117 static const int ext_tx_set_index[2][EXT_TX_SET_TYPES] = {
1118   { // Intra
1119     0, -1, 2, 1, -1, -1 },
1120   { // Inter
1121     0, 3, -1, -1, 2, 1 },
1122 };
1123 
1124 static INLINE int get_ext_tx_set(TX_SIZE tx_size, int is_inter,
1125                                  int use_reduced_set) {
1126   const TxSetType set_type =
1127       av1_get_ext_tx_set_type(tx_size, is_inter, use_reduced_set);
1128   return ext_tx_set_index[is_inter][set_type];
1129 }
1130 
1131 static INLINE int get_ext_tx_types(TX_SIZE tx_size, int is_inter,
1132                                    int use_reduced_set) {
1133   const int set_type =
1134       av1_get_ext_tx_set_type(tx_size, is_inter, use_reduced_set);
1135   return av1_num_ext_tx_set[set_type];
1136 }
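// Illustrative example: per the tables above, an 8x8 inter transform with the
// full (non-reduced) set selects EXT_TX_SET_ALL16, so all 16 transform types
// are searchable:
//   const TxSetType set = av1_get_ext_tx_set_type(TX_8X8, /*is_inter=*/1, 0);
//   const int n = get_ext_tx_types(TX_8X8, /*is_inter=*/1, 0);  // 16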
1137 
1138 #define TXSIZEMAX(t1, t2) (tx_size_2d[(t1)] >= tx_size_2d[(t2)] ? (t1) : (t2))
1139 #define TXSIZEMIN(t1, t2) (tx_size_2d[(t1)] <= tx_size_2d[(t2)] ? (t1) : (t2))
1140 
1141 static INLINE TX_SIZE tx_size_from_tx_mode(BLOCK_SIZE bsize, TX_MODE tx_mode) {
1142   const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
1143   const TX_SIZE max_rect_tx_size = max_txsize_rect_lookup[bsize];
1144   if (bsize == BLOCK_4X4)
1145     return AOMMIN(max_txsize_lookup[bsize], largest_tx_size);
1146   if (txsize_sqr_map[max_rect_tx_size] <= largest_tx_size)
1147     return max_rect_tx_size;
1148   else
1149     return largest_tx_size;
1150 }
1151 
1152 static const uint8_t mode_to_angle_map[] = {
1153   0, 90, 180, 45, 135, 113, 157, 203, 67, 0, 0, 0, 0,
1154 };
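// Illustrative example: for directional intra modes the prediction angle is
// the base angle above plus angle_delta steps; assuming the 3-degree step
// (ANGLE_STEP) defined elsewhere in the codec, V_PRED with
// mbmi->angle_delta[PLANE_TYPE_Y] == 2 gives:
//   const int angle = mode_to_angle_map[V_PRED] + 2 * 3;  // 96 degrees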
1155 
1156 // Converts block_index for given transform size to index of the block in raster
1157 // order.
1158 static INLINE int av1_block_index_to_raster_order(TX_SIZE tx_size,
1159                                                   int block_idx) {
1160   // For transform size 4x8, the possible block_idx values are 0 & 2, because
1161   // block_idx values are incremented in steps of size 'tx_width_unit x
1162   // tx_height_unit'. But, for this transform size, block_idx = 2 corresponds to
1163   // block number 1 in raster order, inside an 8x8 MI block.
1164   // For any other transform size, the two indices are equivalent.
1165   return (tx_size == TX_4X8 && block_idx == 2) ? 1 : block_idx;
1166 }
1167 
1168 // Inverse of above function.
1169 // Note: only implemented for transform sizes 4x4, 4x8 and 8x4 right now.
1170 static INLINE int av1_raster_order_to_block_index(TX_SIZE tx_size,
1171                                                   int raster_order) {
1172   assert(tx_size == TX_4X4 || tx_size == TX_4X8 || tx_size == TX_8X4);
1173   // We ensure that block indices are 0 & 2 if tx size is 4x8 or 8x4.
1174   return (tx_size == TX_4X4) ? raster_order : (raster_order > 0) ? 2 : 0;
1175 }
1176 
1177 static INLINE TX_TYPE get_default_tx_type(PLANE_TYPE plane_type,
1178                                           const MACROBLOCKD *xd,
1179                                           TX_SIZE tx_size,
1180                                           int use_screen_content_tools) {
1181   const MB_MODE_INFO *const mbmi = xd->mi[0];
1182 
1183   if (is_inter_block(mbmi) || plane_type != PLANE_TYPE_Y ||
1184       xd->lossless[mbmi->segment_id] || tx_size >= TX_32X32 ||
1185       use_screen_content_tools)
1186     return DEFAULT_INTER_TX_TYPE;
1187 
1188   return intra_mode_to_tx_type(mbmi, plane_type);
1189 }
1190 
1191 // Implements the get_plane_residual_size() function in the spec (Section
1192 // 5.11.38. Get plane residual size function).
1193 static INLINE BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize,
1194                                               int subsampling_x,
1195                                               int subsampling_y) {
1196   assert(bsize < BLOCK_SIZES_ALL);
1197   assert(subsampling_x >= 0 && subsampling_x < 2);
1198   assert(subsampling_y >= 0 && subsampling_y < 2);
1199   return ss_size_lookup[bsize][subsampling_x][subsampling_y];
1200 }
1201 
1202 /*
1203  * Logic to generate the lookup tables:
1204  *
1205  * TX_SIZE txs = max_txsize_rect_lookup[bsize];
1206  * for (int level = 0; level < MAX_VARTX_DEPTH - 1; ++level)
1207  *   txs = sub_tx_size_map[txs];
1208  * const int tx_w_log2 = tx_size_wide_log2[txs] - MI_SIZE_LOG2;
1209  * const int tx_h_log2 = tx_size_high_log2[txs] - MI_SIZE_LOG2;
1210  * const int bw_uint_log2 = mi_size_wide_log2[bsize];
1211  * const int stride_log2 = bw_uint_log2 - tx_w_log2;
1212  */
1213 static INLINE int av1_get_txb_size_index(BLOCK_SIZE bsize, int blk_row,
1214                                          int blk_col) {
1215   static const uint8_t tw_w_log2_table[BLOCK_SIZES_ALL] = {
1216     0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 0, 1, 1, 2, 2, 3,
1217   };
1218   static const uint8_t tw_h_log2_table[BLOCK_SIZES_ALL] = {
1219     0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 1, 0, 2, 1, 3, 2,
1220   };
1221   static const uint8_t stride_log2_table[BLOCK_SIZES_ALL] = {
1222     0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 2, 2, 0, 1, 0, 1, 0, 1,
1223   };
1224   const int index =
1225       ((blk_row >> tw_h_log2_table[bsize]) << stride_log2_table[bsize]) +
1226       (blk_col >> tw_w_log2_table[bsize]);
1227   assert(index < INTER_TX_SIZE_BUF_LEN);
1228   return index;
1229 }
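// Worked example (informal): for a 32x32 block the tables above give a 2-mi
// shift in each dimension and a stride of 2 sub-blocks, so the sub-block at
// blk_row = 4, blk_col = 4 (in mi units) maps to
//   ((4 >> 2) << 1) + (4 >> 2) == 3,
// the last entry of the 2x2 grid of 16x16 transform-size units.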
1230 
1231 #if CONFIG_INSPECTION
1232 /*
1233  * Here is the logic to generate the lookup tables:
1234  *
1235  * TX_SIZE txs = max_txsize_rect_lookup[bsize];
1236  * for (int level = 0; level < MAX_VARTX_DEPTH; ++level)
1237  *   txs = sub_tx_size_map[txs];
1238  * const int tx_w_log2 = tx_size_wide_log2[txs] - MI_SIZE_LOG2;
1239  * const int tx_h_log2 = tx_size_high_log2[txs] - MI_SIZE_LOG2;
1240  * const int bw_uint_log2 = mi_size_wide_log2[bsize];
1241  * const int stride_log2 = bw_uint_log2 - tx_w_log2;
1242  */
1243 static INLINE int av1_get_txk_type_index(BLOCK_SIZE bsize, int blk_row,
1244                                          int blk_col) {
1245   static const uint8_t tw_w_log2_table[BLOCK_SIZES_ALL] = {
1246     0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 0, 0, 1, 1, 2, 2,
1247   };
1248   static const uint8_t tw_h_log2_table[BLOCK_SIZES_ALL] = {
1249     0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 0, 0, 1, 1, 2, 2,
1250   };
1251   static const uint8_t stride_log2_table[BLOCK_SIZES_ALL] = {
1252     0, 0, 1, 1, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 3, 3, 0, 2, 0, 2, 0, 2,
1253   };
1254   const int index =
1255       ((blk_row >> tw_h_log2_table[bsize]) << stride_log2_table[bsize]) +
1256       (blk_col >> tw_w_log2_table[bsize]);
1257   assert(index < TXK_TYPE_BUF_LEN);
1258   return index;
1259 }
1260 #endif  // CONFIG_INSPECTION
1261 
1262 static INLINE void update_txk_array(MACROBLOCKD *const xd, int blk_row,
1263                                     int blk_col, TX_SIZE tx_size,
1264                                     TX_TYPE tx_type) {
1265   const int stride = xd->tx_type_map_stride;
1266   xd->tx_type_map[blk_row * stride + blk_col] = tx_type;
1267 
1268   const int txw = tx_size_wide_unit[tx_size];
1269   const int txh = tx_size_high_unit[tx_size];
1270   // The 16x16 unit is due to the constraint from tx_64x64 which sets the
1271   // maximum tx size for chroma as 32x32. Coupled with 4x1 transform block
1272   // size, the constraint takes effect in 32x16 / 16x32 size too. To solve
1273   // the intricacy, cover all the 16x16 units inside a 64 level transform.
1274   if (txw == tx_size_wide_unit[TX_64X64] ||
1275       txh == tx_size_high_unit[TX_64X64]) {
1276     const int tx_unit = tx_size_wide_unit[TX_16X16];
1277     for (int idy = 0; idy < txh; idy += tx_unit) {
1278       for (int idx = 0; idx < txw; idx += tx_unit) {
1279         xd->tx_type_map[(blk_row + idy) * stride + blk_col + idx] = tx_type;
1280       }
1281     }
1282   }
1283 }
1284 
1285 static INLINE TX_TYPE av1_get_tx_type(const MACROBLOCKD *xd,
1286                                       PLANE_TYPE plane_type, int blk_row,
1287                                       int blk_col, TX_SIZE tx_size,
1288                                       int reduced_tx_set) {
1289   const MB_MODE_INFO *const mbmi = xd->mi[0];
1290   if (xd->lossless[mbmi->segment_id] || txsize_sqr_up_map[tx_size] > TX_32X32) {
1291     return DCT_DCT;
1292   }
1293 
1294   TX_TYPE tx_type;
1295   if (plane_type == PLANE_TYPE_Y) {
1296     tx_type = xd->tx_type_map[blk_row * xd->tx_type_map_stride + blk_col];
1297   } else {
1298     if (is_inter_block(mbmi)) {
1299       // scale back to y plane's coordinate
1300       const struct macroblockd_plane *const pd = &xd->plane[plane_type];
1301       blk_row <<= pd->subsampling_y;
1302       blk_col <<= pd->subsampling_x;
1303       tx_type = xd->tx_type_map[blk_row * xd->tx_type_map_stride + blk_col];
1304     } else {
1305       // In intra mode, uv planes don't share the same prediction mode as y
1306       // plane, so the tx_type should not be shared
1307       tx_type = intra_mode_to_tx_type(mbmi, PLANE_TYPE_UV);
1308     }
1309     const TxSetType tx_set_type =
1310         av1_get_ext_tx_set_type(tx_size, is_inter_block(mbmi), reduced_tx_set);
1311     if (!av1_ext_tx_used[tx_set_type][tx_type]) tx_type = DCT_DCT;
1312   }
1313   assert(tx_type < TX_TYPES);
1314   assert(av1_ext_tx_used[av1_get_ext_tx_set_type(tx_size, is_inter_block(mbmi),
1315                                                  reduced_tx_set)][tx_type]);
1316   return tx_type;
1317 }
1318 
1319 void av1_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y,
1320                             const int num_planes);
1321 
1322 /*
1323  * Logic to generate the lookup table:
1324  *
1325  * TX_SIZE tx_size = max_txsize_rect_lookup[bsize];
1326  * int depth = 0;
1327  * while (depth < MAX_TX_DEPTH && tx_size != TX_4X4) {
1328  *   depth++;
1329  *   tx_size = sub_tx_size_map[tx_size];
1330  * }
1331  */
1332 static INLINE int bsize_to_max_depth(BLOCK_SIZE bsize) {
1333   static const uint8_t bsize_to_max_depth_table[BLOCK_SIZES_ALL] = {
1334     0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1335   };
1336   return bsize_to_max_depth_table[bsize];
1337 }
1338 
1339 /*
1340  * Logic to generate the lookup table:
1341  *
1342  * TX_SIZE tx_size = max_txsize_rect_lookup[bsize];
1343  * assert(tx_size != TX_4X4);
1344  * int depth = 0;
1345  * while (tx_size != TX_4X4) {
1346  *   depth++;
1347  *   tx_size = sub_tx_size_map[tx_size];
1348  * }
1349  * assert(depth < 10);
1350  */
1351 static INLINE int bsize_to_tx_size_cat(BLOCK_SIZE bsize) {
1352   assert(bsize < BLOCK_SIZES_ALL);
1353   static const uint8_t bsize_to_tx_size_depth_table[BLOCK_SIZES_ALL] = {
1354     0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 2, 2, 3, 3, 4, 4,
1355   };
1356   const int depth = bsize_to_tx_size_depth_table[bsize];
1357   assert(depth <= MAX_TX_CATS);
1358   return depth - 1;
1359 }
1360 
1361 static INLINE TX_SIZE depth_to_tx_size(int depth, BLOCK_SIZE bsize) {
1362   TX_SIZE max_tx_size = max_txsize_rect_lookup[bsize];
1363   TX_SIZE tx_size = max_tx_size;
1364   for (int d = 0; d < depth; ++d) tx_size = sub_tx_size_map[tx_size];
1365   return tx_size;
1366 }
1367 
1368 static INLINE TX_SIZE av1_get_adjusted_tx_size(TX_SIZE tx_size) {
1369   switch (tx_size) {
1370     case TX_64X64:
1371     case TX_64X32:
1372     case TX_32X64: return TX_32X32;
1373     case TX_64X16: return TX_32X16;
1374     case TX_16X64: return TX_16X32;
1375     default: return tx_size;
1376   }
1377 }
1378 
1379 static INLINE TX_SIZE av1_get_max_uv_txsize(BLOCK_SIZE bsize, int subsampling_x,
1380                                             int subsampling_y) {
1381   const BLOCK_SIZE plane_bsize =
1382       get_plane_block_size(bsize, subsampling_x, subsampling_y);
1383   assert(plane_bsize < BLOCK_SIZES_ALL);
1384   const TX_SIZE uv_tx = max_txsize_rect_lookup[plane_bsize];
1385   return av1_get_adjusted_tx_size(uv_tx);
1386 }
1387 
1388 static INLINE TX_SIZE av1_get_tx_size(int plane, const MACROBLOCKD *xd) {
1389   const MB_MODE_INFO *mbmi = xd->mi[0];
1390   if (xd->lossless[mbmi->segment_id]) return TX_4X4;
1391   if (plane == 0) return mbmi->tx_size;
1392   const MACROBLOCKD_PLANE *pd = &xd->plane[plane];
1393   return av1_get_max_uv_txsize(mbmi->bsize, pd->subsampling_x,
1394                                pd->subsampling_y);
1395 }
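
/*
 * Usage sketch (illustrative; assumes the caller has already set up xd and
 * num_planes):
 *
 * for (int plane = 0; plane < num_planes; ++plane) {
 *   const TX_SIZE tx_size = av1_get_tx_size(plane, xd);
 *   // Plane 0 returns mbmi->tx_size directly; chroma planes derive their
 *   // size from the block size and subsampling, and lossless segments
 *   // always return TX_4X4.
 * }
 */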

void av1_reset_entropy_context(MACROBLOCKD *xd, BLOCK_SIZE bsize,
                               const int num_planes);

void av1_reset_loop_filter_delta(MACROBLOCKD *xd, int num_planes);

void av1_reset_loop_restoration(MACROBLOCKD *xd, const int num_planes);

typedef void (*foreach_transformed_block_visitor)(int plane, int block,
                                                  int blk_row, int blk_col,
                                                  BLOCK_SIZE plane_bsize,
                                                  TX_SIZE tx_size, void *arg);

void av1_set_entropy_contexts(const MACROBLOCKD *xd,
                              struct macroblockd_plane *pd, int plane,
                              BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                              int has_eob, int aoff, int loff);

#define MAX_INTERINTRA_SB_SQUARE 32 * 32
static INLINE int is_interintra_mode(const MB_MODE_INFO *mbmi) {
  return (mbmi->ref_frame[0] > INTRA_FRAME &&
          mbmi->ref_frame[1] == INTRA_FRAME);
}

static INLINE int is_interintra_allowed_bsize(const BLOCK_SIZE bsize) {
  return (bsize >= BLOCK_8X8) && (bsize <= BLOCK_32X32);
}

static INLINE int is_interintra_allowed_mode(const PREDICTION_MODE mode) {
  return (mode >= SINGLE_INTER_MODE_START) && (mode < SINGLE_INTER_MODE_END);
}

static INLINE int is_interintra_allowed_ref(const MV_REFERENCE_FRAME rf[2]) {
  return (rf[0] > INTRA_FRAME) && (rf[1] <= INTRA_FRAME);
}

static INLINE int is_interintra_allowed(const MB_MODE_INFO *mbmi) {
  return is_interintra_allowed_bsize(mbmi->bsize) &&
         is_interintra_allowed_mode(mbmi->mode) &&
         is_interintra_allowed_ref(mbmi->ref_frame);
}

static INLINE int is_interintra_allowed_bsize_group(int group) {
  int i;
  for (i = 0; i < BLOCK_SIZES_ALL; i++) {
    if (size_group_lookup[i] == group &&
        is_interintra_allowed_bsize((BLOCK_SIZE)i)) {
      return 1;
    }
  }
  return 0;
}

static INLINE int is_interintra_pred(const MB_MODE_INFO *mbmi) {
  return mbmi->ref_frame[0] > INTRA_FRAME &&
         mbmi->ref_frame[1] == INTRA_FRAME && is_interintra_allowed(mbmi);
}

static INLINE int get_vartx_max_txsize(const MACROBLOCKD *xd, BLOCK_SIZE bsize,
                                       int plane) {
  if (xd->lossless[xd->mi[0]->segment_id]) return TX_4X4;
  const TX_SIZE max_txsize = max_txsize_rect_lookup[bsize];
  if (plane == 0) return max_txsize;            // luma
  return av1_get_adjusted_tx_size(max_txsize);  // chroma
}

static INLINE int is_motion_variation_allowed_bsize(BLOCK_SIZE bsize) {
  assert(bsize < BLOCK_SIZES_ALL);
  return AOMMIN(block_size_wide[bsize], block_size_high[bsize]) >= 8;
}

static INLINE int is_motion_variation_allowed_compound(
    const MB_MODE_INFO *mbmi) {
  return !has_second_ref(mbmi);
}

// input: log2 of length, 0(4), 1(8), ...
static const int max_neighbor_obmc[6] = { 0, 1, 2, 3, 4, 4 };
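// Reading the table: an edge of 4, 8, 16, 32, 64 or 128 samples (indices 0-5)
// is allowed at most 0, 1, 2, 3, 4 and 4 overlappable neighbors respectively,
// i.e. log2(length / 4) capped at 4.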

static INLINE int check_num_overlappable_neighbors(const MB_MODE_INFO *mbmi) {
  return mbmi->overlappable_neighbors != 0;
}

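// Summary of the decision logic below (descriptive only; the code is
// authoritative): a block falls back to SIMPLE_TRANSLATION when it has no
// overlappable neighbors, or when is_global_mv_block() reports that it
// follows a non-translational global model (this check is skipped when
// integer-MV is forced for the frame). Otherwise, a single-reference,
// non-inter-intra inter block whose smaller dimension is at least 8 picks
// WARPED_CAUSAL if warped motion is allowed, at least one projection sample
// is available, integer-MV is not forced and the reference is not scaled;
// failing that it picks OBMC_CAUSAL. All remaining blocks use
// SIMPLE_TRANSLATION.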
static INLINE MOTION_MODE
motion_mode_allowed(const WarpedMotionParams *gm_params, const MACROBLOCKD *xd,
                    const MB_MODE_INFO *mbmi, int allow_warped_motion) {
  if (!check_num_overlappable_neighbors(mbmi)) return SIMPLE_TRANSLATION;
  if (xd->cur_frame_force_integer_mv == 0) {
    const TransformationType gm_type = gm_params[mbmi->ref_frame[0]].wmtype;
    if (is_global_mv_block(mbmi, gm_type)) return SIMPLE_TRANSLATION;
  }
  if (is_motion_variation_allowed_bsize(mbmi->bsize) &&
      is_inter_mode(mbmi->mode) && mbmi->ref_frame[1] != INTRA_FRAME &&
      is_motion_variation_allowed_compound(mbmi)) {
    assert(!has_second_ref(mbmi));
    if (mbmi->num_proj_ref >= 1 && allow_warped_motion &&
        !xd->cur_frame_force_integer_mv &&
        !av1_is_scaled(xd->block_ref_scale_factors[0])) {
      return WARPED_CAUSAL;
    }
    return OBMC_CAUSAL;
  }
  return SIMPLE_TRANSLATION;
}

static INLINE int is_neighbor_overlappable(const MB_MODE_INFO *mbmi) {
  return (is_inter_block(mbmi));
}

static INLINE int av1_allow_palette(int allow_screen_content_tools,
                                    BLOCK_SIZE sb_type) {
  assert(sb_type < BLOCK_SIZES_ALL);
  return allow_screen_content_tools &&
         block_size_wide[sb_type] <= MAX_PALETTE_BLOCK_WIDTH &&
         block_size_high[sb_type] <= MAX_PALETTE_BLOCK_HEIGHT &&
         sb_type >= BLOCK_8X8;
}

// Returns sub-sampled dimensions of the given block.
// The output values for 'rows_within_bounds' and 'cols_within_bounds' will
// differ from 'height' and 'width' when part of the block is outside the
// right and/or bottom image boundary.
static INLINE void av1_get_block_dimensions(BLOCK_SIZE bsize, int plane,
                                            const MACROBLOCKD *xd, int *width,
                                            int *height,
                                            int *rows_within_bounds,
                                            int *cols_within_bounds) {
  const int block_height = block_size_high[bsize];
  const int block_width = block_size_wide[bsize];
  const int block_rows = (xd->mb_to_bottom_edge >= 0)
                             ? block_height
                             : (xd->mb_to_bottom_edge >> 3) + block_height;
  const int block_cols = (xd->mb_to_right_edge >= 0)
                             ? block_width
                             : (xd->mb_to_right_edge >> 3) + block_width;
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  assert(IMPLIES(plane == PLANE_TYPE_Y, pd->subsampling_x == 0));
  assert(IMPLIES(plane == PLANE_TYPE_Y, pd->subsampling_y == 0));
  assert(block_width >= block_cols);
  assert(block_height >= block_rows);
  const int plane_block_width = block_width >> pd->subsampling_x;
  const int plane_block_height = block_height >> pd->subsampling_y;
  // Special handling for chroma sub8x8.
  const int is_chroma_sub8_x = plane > 0 && plane_block_width < 4;
  const int is_chroma_sub8_y = plane > 0 && plane_block_height < 4;
  if (width) {
    *width = plane_block_width + 2 * is_chroma_sub8_x;
    assert(*width >= 0);
  }
  if (height) {
    *height = plane_block_height + 2 * is_chroma_sub8_y;
    assert(*height >= 0);
  }
  if (rows_within_bounds) {
    *rows_within_bounds =
        (block_rows >> pd->subsampling_y) + 2 * is_chroma_sub8_y;
    assert(*rows_within_bounds >= 0);
  }
  if (cols_within_bounds) {
    *cols_within_bounds =
        (block_cols >> pd->subsampling_x) + 2 * is_chroma_sub8_x;
    assert(*cols_within_bounds >= 0);
  }
}
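
/*
 * Usage sketch (illustrative): any of the four output pointers may be NULL
 * when that value is not needed, since the guards above skip the store, e.g.
 *
 * int rows, cols;
 * av1_get_block_dimensions(bsize, plane, xd, NULL, NULL, &rows, &cols);
 */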

/* clang-format off */
// Pointer to a three-dimensional array whose first dimension is PALETTE_SIZES.
typedef aom_cdf_prob (*MapCdf)[PALETTE_COLOR_INDEX_CONTEXTS]
                              [CDF_SIZE(PALETTE_COLORS)];
// Pointer to a const three-dimensional array whose first dimension is
// PALETTE_SIZES.
typedef const int (*ColorCost)[PALETTE_COLOR_INDEX_CONTEXTS][PALETTE_COLORS];
/* clang-format on */

typedef struct {
  int rows;
  int cols;
  int n_colors;
  int plane_width;
  int plane_height;
  uint8_t *color_map;
  MapCdf map_cdf;
  ColorCost color_cost;
} Av1ColorMapParam;

static INLINE int is_nontrans_global_motion(const MACROBLOCKD *xd,
                                            const MB_MODE_INFO *mbmi) {
  int ref;

  // First, check that the block is coded with a global-motion mode
  // (GLOBALMV or GLOBAL_GLOBALMV).
  if (mbmi->mode != GLOBALMV && mbmi->mode != GLOBAL_GLOBALMV) return 0;

  if (AOMMIN(mi_size_wide[mbmi->bsize], mi_size_high[mbmi->bsize]) < 2)
    return 0;

  // Now check that the global motion of every reference is non-translational.
  for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
    if (xd->global_motion[mbmi->ref_frame[ref]].wmtype == TRANSLATION) return 0;
  }
  return 1;
}

static INLINE PLANE_TYPE get_plane_type(int plane) {
  return (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
}

static INLINE int av1_get_max_eob(TX_SIZE tx_size) {
  if (tx_size == TX_64X64 || tx_size == TX_64X32 || tx_size == TX_32X64) {
    return 1024;
  }
  if (tx_size == TX_16X64 || tx_size == TX_64X16) {
    return 512;
  }
  return tx_size_2d[tx_size];
}
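
// Note: the special cases above mirror av1_get_adjusted_tx_size(); for
// transforms with a 64-sample dimension only the top-left 32x32 (or
// 32x16 / 16x32) region of coefficients can be coded, so the maximum
// end-of-block value is capped at 1024 or 512 rather than
// tx_size_2d[tx_size].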

/*!\endcond */

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // AOM_AV1_COMMON_BLOCKD_H_