1 /*
2 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 */
11
12 #ifndef AOM_AV1_COMMON_AV1_COMMON_INT_H_
13 #define AOM_AV1_COMMON_AV1_COMMON_INT_H_
14
15 #include "config/aom_config.h"
16 #include "config/av1_rtcd.h"
17
18 #include "aom/internal/aom_codec_internal.h"
19 #include "aom_util/aom_thread.h"
20 #include "av1/common/alloccommon.h"
21 #include "av1/common/av1_loopfilter.h"
22 #include "av1/common/entropy.h"
23 #include "av1/common/entropymode.h"
24 #include "av1/common/entropymv.h"
25 #include "av1/common/enums.h"
26 #include "av1/common/frame_buffers.h"
27 #include "av1/common/mv.h"
28 #include "av1/common/quant_common.h"
29 #include "av1/common/restoration.h"
30 #include "av1/common/tile_common.h"
31 #include "av1/common/timing.h"
32 #include "aom_dsp/grain_params.h"
33 #include "aom_dsp/grain_table.h"
34 #include "aom_dsp/odintrin.h"
35 #ifdef __cplusplus
36 extern "C" {
37 #endif
38
39 #if defined(__clang__) && defined(__has_warning)
40 #if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
41 #define AOM_FALLTHROUGH_INTENDED [[clang::fallthrough]] // NOLINT
42 #endif
43 #elif defined(__GNUC__) && __GNUC__ >= 7
44 #define AOM_FALLTHROUGH_INTENDED __attribute__((fallthrough)) // NOLINT
45 #endif
46
47 #ifndef AOM_FALLTHROUGH_INTENDED
48 #define AOM_FALLTHROUGH_INTENDED \
49 do { \
50 } while (0)
51 #endif
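// Illustrative sketch (not part of the original header): how
// AOM_FALLTHROUGH_INTENDED is typically placed at the end of a case that
// deliberately falls through, so that -Wimplicit-fallthrough stays quiet.
// The function and its logic are hypothetical examples only.
static INLINE int aom_fallthrough_example(int v) {
  int acc = 0;
  switch (v) {
    case 2:
      acc += 2;
      // Deliberately fall through and also add the 'case 1' contribution.
      AOM_FALLTHROUGH_INTENDED;
    case 1: acc += 1; break;
    default: break;
  }
  return acc;
}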
52
53 #define CDEF_MAX_STRENGTHS 16
54
55 /* Constant values while waiting for the sequence header */
56 #define FRAME_ID_LENGTH 15
57 #define DELTA_FRAME_ID_LENGTH 14
58
59 #define FRAME_CONTEXTS (FRAME_BUFFERS + 1)
60 // Extra frame context which is always kept at default values
61 #define FRAME_CONTEXT_DEFAULTS (FRAME_CONTEXTS - 1)
62 #define PRIMARY_REF_BITS 3
63 #define PRIMARY_REF_NONE 7
64
65 #define NUM_PING_PONG_BUFFERS 2
66
67 #define MAX_NUM_TEMPORAL_LAYERS 8
68 #define MAX_NUM_SPATIAL_LAYERS 4
69 /* clang-format off */
70 // clang-format seems to think this is a pointer dereference and not a
71 // multiplication.
72 #define MAX_NUM_OPERATING_POINTS \
73 (MAX_NUM_TEMPORAL_LAYERS * MAX_NUM_SPATIAL_LAYERS)
74 /* clang-format on */
75
76 // TODO(jingning): Turn this on to set up the transform coefficient
77 // processing timer.
78 #define TXCOEFF_TIMER 0
79 #define TXCOEFF_COST_TIMER 0
80
81 /*!\cond */
82
83 enum {
84 SINGLE_REFERENCE = 0,
85 COMPOUND_REFERENCE = 1,
86 REFERENCE_MODE_SELECT = 2,
87 REFERENCE_MODES = 3,
88 } UENUM1BYTE(REFERENCE_MODE);
89
90 enum {
91 /**
92 * Frame context updates are disabled
93 */
94 REFRESH_FRAME_CONTEXT_DISABLED,
95 /**
96 * Update frame context to values resulting from backward probability
97 * updates based on entropy/counts in the decoded frame
98 */
99 REFRESH_FRAME_CONTEXT_BACKWARD,
100 } UENUM1BYTE(REFRESH_FRAME_CONTEXT_MODE);
101
102 #define MFMV_STACK_SIZE 3
103 typedef struct {
104 int_mv mfmv0;
105 uint8_t ref_frame_offset;
106 } TPL_MV_REF;
107
108 typedef struct {
109 int_mv mv;
110 MV_REFERENCE_FRAME ref_frame;
111 } MV_REF;
112
113 typedef struct RefCntBuffer {
114 // For a RefCntBuffer, the following are reference-holding variables:
115 // - cm->ref_frame_map[]
116 // - cm->cur_frame
117 // - cm->scaled_ref_buf[] (encoder only)
118 // - pbi->output_frame_index[] (decoder only)
119 // With that definition, 'ref_count' is the number of reference-holding
120 // variables that are currently referencing this buffer.
121 // For example:
122 // - suppose this buffer is at index 'k' in the buffer pool, and
123 // - Total 'n' of the variables / array elements above have value 'k' (that
124 // is, they are pointing to buffer at index 'k').
125 // Then, pool->frame_bufs[k].ref_count = n.
126 int ref_count;
127
128 unsigned int order_hint;
129 unsigned int ref_order_hints[INTER_REFS_PER_FRAME];
130
131 // These variables are used only in the encoder; they compare absolute
132 // display order hints to compute relative distances, overcoming the
133 // limitation of get_relative_dist(), which returns an incorrect distance
134 // when a very old frame is used as a reference.
135 unsigned int display_order_hint;
136 unsigned int ref_display_order_hint[INTER_REFS_PER_FRAME];
137 // Frame's level within the hierarchical structure.
138 unsigned int pyramid_level;
139 MV_REF *mvs;
140 uint8_t *seg_map;
141 struct segmentation seg;
142 int mi_rows;
143 int mi_cols;
144 // Width and height give the size of the buffer (before any upscaling, unlike
145 // the sizes that can be derived from the buf structure)
146 int width;
147 int height;
148 WarpedMotionParams global_motion[REF_FRAMES];
149 int showable_frame; // frame can be used as show existing frame in future
150 uint8_t film_grain_params_present;
151 aom_film_grain_t film_grain_params;
152 aom_codec_frame_buffer_t raw_frame_buffer;
153 YV12_BUFFER_CONFIG buf;
154 int temporal_id; // Temporal layer ID of the frame
155 int spatial_id; // Spatial layer ID of the frame
156 FRAME_TYPE frame_type;
157
158 // This is only used in the encoder but needs to be indexed per ref frame
159 // so it's extremely convenient to keep it here.
160 int interp_filter_selected[SWITCHABLE];
161
162 // Inter frame reference frame delta for loop filter
163 int8_t ref_deltas[REF_FRAMES];
164
165 // Loop filter mode deltas: index 0 = ZERO_MV, index 1 = other inter (MV) modes
166 int8_t mode_deltas[MAX_MODE_LF_DELTAS];
167
168 FRAME_CONTEXT frame_context;
169 } RefCntBuffer;
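// Illustrative sketch (not part of libaom): how a reference-holding variable
// (see the list above RefCntBuffer::ref_count) takes and releases a reference.
// The real helpers, e.g. assign_frame_buffer_p(), appear further down in this
// header; the function name here is hypothetical.
static INLINE void example_swap_reference(RefCntBuffer **holder,
                                          RefCntBuffer *new_buf) {
  if (*holder != NULL) --(*holder)->ref_count;  // drop the old reference
  *holder = new_buf;
  if (new_buf != NULL) ++new_buf->ref_count;  // the holder now counts as a ref
}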
170
171 typedef struct BufferPool {
172 // Protect BufferPool from being accessed by several FrameWorkers at
173 // the same time during frame parallel decode.
174 // TODO(hkuang): Try to use atomic variable instead of locking the whole pool.
175 // TODO(wtc): Remove this. See
176 // https://chromium-review.googlesource.com/c/webm/libvpx/+/560630.
177 #if CONFIG_MULTITHREAD
178 pthread_mutex_t pool_mutex;
179 #endif
180
181 // Private data associated with the frame buffer callbacks.
182 void *cb_priv;
183
184 aom_get_frame_buffer_cb_fn_t get_fb_cb;
185 aom_release_frame_buffer_cb_fn_t release_fb_cb;
186
187 RefCntBuffer frame_bufs[FRAME_BUFFERS];
188
189 // Frame buffers allocated internally by the codec.
190 InternalFrameBufferList int_frame_buffers;
191 } BufferPool;
192
193 /*!\endcond */
194
195 /*!\brief Parameters related to CDEF */
196 typedef struct {
197 //! CDEF column line buffer
198 uint16_t *colbuf[MAX_MB_PLANE];
199 //! CDEF top & bottom line buffer
200 uint16_t *linebuf[MAX_MB_PLANE];
201 //! CDEF intermediate buffer
202 uint16_t *srcbuf;
203 //! CDEF column line buffer sizes
204 size_t allocated_colbuf_size[MAX_MB_PLANE];
205 //! CDEF top and bottom line buffer sizes
206 size_t allocated_linebuf_size[MAX_MB_PLANE];
207 //! CDEF intermediate buffer size
208 size_t allocated_srcbuf_size;
209 //! CDEF damping factor
210 int cdef_damping;
211 //! Number of CDEF strength values
212 int nb_cdef_strengths;
213 //! CDEF strength values for luma
214 int cdef_strengths[CDEF_MAX_STRENGTHS];
215 //! CDEF strength values for chroma
216 int cdef_uv_strengths[CDEF_MAX_STRENGTHS];
217 //! Number of CDEF strength values in bits
218 int cdef_bits;
219 //! Number of rows in the frame in 4-pixel (MI) units
220 int allocated_mi_rows;
221 //! Number of CDEF workers
222 int allocated_num_workers;
223 } CdefInfo;
224
225 /*!\cond */
226
227 typedef struct {
228 int delta_q_present_flag;
229 // Resolution of delta quant
230 int delta_q_res;
231 int delta_lf_present_flag;
232 // Resolution of delta lf level
233 int delta_lf_res;
234 // Flag controlling the number of loop filter level deltas:
235 // 0: use a single delta, shared by y_vertical, y_horizontal, u, and v
236 // 1: use a separate delta for each of those filter levels
237 int delta_lf_multi;
238 } DeltaQInfo;
239
240 typedef struct {
241 int enable_order_hint; // 0 - disable order hint, and related tools
242 int order_hint_bits_minus_1; // dist_wtd_comp, ref_frame_mvs,
243 // frame_sign_bias
244 // if 0, enable_dist_wtd_comp and
245 // enable_ref_frame_mvs must be set as 0.
246 int enable_dist_wtd_comp; // 0 - disable dist-wtd compound modes
247 // 1 - enable it
248 int enable_ref_frame_mvs; // 0 - disable ref frame mvs
249 // 1 - enable it
250 } OrderHintInfo;
251
252 // Sequence header structure.
253 // Note: All syntax elements of sequence_header_obu that need to be
254 // bit-identical across multiple sequence headers must be part of this struct,
255 // so that consistency is checked by the are_seq_headers_consistent() function.
256 // The one exception is the last member, 'op_params', which is ignored by
257 // are_seq_headers_consistent().
258 typedef struct SequenceHeader {
259 int num_bits_width;
260 int num_bits_height;
261 int max_frame_width;
262 int max_frame_height;
263 // Whether current and reference frame IDs are signaled in the bitstream.
264 // Frame id numbers are additional information that do not affect the
265 // decoding process, but provide decoders with a way of detecting missing
266 // reference frames so that appropriate action can be taken.
267 uint8_t frame_id_numbers_present_flag;
268 int frame_id_length;
269 int delta_frame_id_length;
270 BLOCK_SIZE sb_size; // Size of the superblock used for this frame
271 int mib_size; // Size of the superblock in units of MI blocks
272 int mib_size_log2; // Log 2 of above.
273
274 OrderHintInfo order_hint_info;
275
276 uint8_t force_screen_content_tools; // 0 - force off
277 // 1 - force on
278 // 2 - adaptive
279 uint8_t still_picture; // Video is a single frame still picture
280 uint8_t reduced_still_picture_hdr; // Use reduced header for still picture
281 uint8_t force_integer_mv; // 0 - Don't force. MV can use subpel
282 // 1 - force to integer
283 // 2 - adaptive
284 uint8_t enable_filter_intra; // enables/disables filterintra
285 uint8_t enable_intra_edge_filter; // enables/disables edge upsampling
286 uint8_t enable_interintra_compound; // enables/disables interintra_compound
287 uint8_t enable_masked_compound; // enables/disables masked compound
288 uint8_t enable_dual_filter; // 0 - disable dual interpolation filter
289 // 1 - enable vert/horz filter selection
290 uint8_t enable_warped_motion; // 0 - disable warp for the sequence
291 // 1 - enable warp for the sequence
292 uint8_t enable_superres; // 0 - Disable superres for the sequence
293 // and no frame level superres flag
294 // 1 - Enable superres for the sequence
295 // enable per-frame superres flag
296 uint8_t enable_cdef; // To turn on/off CDEF
297 uint8_t enable_restoration; // To turn on/off loop restoration
298 BITSTREAM_PROFILE profile;
299
300 // Color config.
301 aom_bit_depth_t bit_depth; // AOM_BITS_8 in profile 0 or 1,
302 // AOM_BITS_10 or AOM_BITS_12 in profile 2 or 3.
303 uint8_t use_highbitdepth; // If true, we need to use 16bit frame buffers.
304 uint8_t monochrome; // Monochrome video
305 aom_color_primaries_t color_primaries;
306 aom_transfer_characteristics_t transfer_characteristics;
307 aom_matrix_coefficients_t matrix_coefficients;
308 int color_range;
309 int subsampling_x; // Chroma subsampling for x
310 int subsampling_y; // Chroma subsampling for y
311 aom_chroma_sample_position_t chroma_sample_position;
312 uint8_t separate_uv_delta_q;
313 uint8_t film_grain_params_present;
314
315 // Operating point info.
316 int operating_points_cnt_minus_1;
317 int operating_point_idc[MAX_NUM_OPERATING_POINTS];
318 int timing_info_present;
319 aom_timing_info_t timing_info;
320 uint8_t decoder_model_info_present_flag;
321 aom_dec_model_info_t decoder_model_info;
322 uint8_t display_model_info_present_flag;
323 AV1_LEVEL seq_level_idx[MAX_NUM_OPERATING_POINTS];
324 uint8_t tier[MAX_NUM_OPERATING_POINTS]; // seq_tier in spec. One bit: 0 or 1.
325
326 // IMPORTANT: the op_params member must be at the end of the struct so that
327 // are_seq_headers_consistent() can be implemented with a memcmp() call.
328 // TODO(urvang): We probably don't need the +1 here.
329 aom_dec_model_op_parameters_t op_params[MAX_NUM_OPERATING_POINTS + 1];
330 } SequenceHeader;
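// Illustrative sketch (not part of libaom): the comment above requires
// 'op_params' to stay the last member precisely so that two sequence headers
// can be compared with a single memcmp() over everything before it. Assumes
// <string.h> and <stddef.h> are visible; the function name is hypothetical.
static INLINE int example_seq_headers_equal(const SequenceHeader *a,
                                            const SequenceHeader *b) {
  return memcmp(a, b, offsetof(SequenceHeader, op_params)) == 0;
}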
331
332 typedef struct {
333 int skip_mode_allowed;
334 int skip_mode_flag;
335 int ref_frame_idx_0;
336 int ref_frame_idx_1;
337 } SkipModeInfo;
338
339 typedef struct {
340 FRAME_TYPE frame_type;
341 REFERENCE_MODE reference_mode;
342
343 unsigned int order_hint;
344 unsigned int display_order_hint;
345 // Frame's level within the hierarchical structure.
346 unsigned int pyramid_level;
347 unsigned int frame_number;
348 SkipModeInfo skip_mode_info;
349 int refresh_frame_flags; // Which ref frames are overwritten by this frame
350 int frame_refs_short_signaling;
351 } CurrentFrame;
352
353 /*!\endcond */
354
355 /*!
356 * \brief Frame level features.
357 */
358 typedef struct {
359 /*!
360 * If true, CDF update in the symbol encoding/decoding process is disabled.
361 */
362 bool disable_cdf_update;
363 /*!
364 * If true, motion vectors are specified to eighth pel precision; and
365 * if false, motion vectors are specified to quarter pel precision.
366 */
367 bool allow_high_precision_mv;
368 /*!
369 * If true, force integer motion vectors; if false, use the default.
370 */
371 bool cur_frame_force_integer_mv;
372 /*!
373 * If true, palette tool and/or intra block copy tools may be used.
374 */
375 bool allow_screen_content_tools;
376 bool allow_intrabc; /*!< If true, intra block copy tool may be used. */
377 bool allow_warped_motion; /*!< If true, frame may use warped motion mode. */
378 /*!
379 * If true, using previous frames' motion vectors for prediction is allowed.
380 */
381 bool allow_ref_frame_mvs;
382 /*!
383 * If true, frame is fully lossless at coded resolution.
384 */
385 bool coded_lossless;
386 /*!
387 * If true, frame is fully lossless at upscaled resolution.
388 */
389 bool all_lossless;
390 /*!
391 * If true, the frame is restricted to a reduced subset of the full set of
392 * transform types.
393 */
394 bool reduced_tx_set_used;
395 /*!
396 * If true, error resilient mode is enabled.
397 * Note: Error resilient mode allows the syntax of a frame to be parsed
398 * independently of previously decoded frames.
399 */
400 bool error_resilient_mode;
401 /*!
402 * If false, the only MOTION_MODE that may be used is SIMPLE_TRANSLATION;
403 * if true, all motion modes may be used.
404 */
405 bool switchable_motion_mode;
406 TX_MODE tx_mode; /*!< Transform mode at frame level. */
407 InterpFilter interp_filter; /*!< Interpolation filter at frame level. */
408 /*!
409 * The reference frame that contains the CDF values and other state that
410 * should be loaded at the start of the frame.
411 */
412 int primary_ref_frame;
413 /*!
414 * Byte alignment of the planes in the reference buffers.
415 */
416 int byte_alignment;
417 /*!
418 * Flag signaling how frame contexts should be updated at the end of
419 * a frame decode.
420 */
421 REFRESH_FRAME_CONTEXT_MODE refresh_frame_context;
422 } FeatureFlags;
423
424 /*!
425 * \brief Params related to tiles.
426 */
427 typedef struct CommonTileParams {
428 int cols; /*!< number of tile columns that frame is divided into */
429 int rows; /*!< number of tile rows that frame is divided into */
430 int max_width_sb; /*!< maximum tile width in superblock units. */
431 int max_height_sb; /*!< maximum tile height in superblock units. */
432
433 /*!
434 * Min width of non-rightmost tile in MI units. Only valid if cols > 1.
435 */
436 int min_inner_width;
437
438 /*!
439 * If true, tiles are uniformly spaced with power-of-two number of rows and
440 * columns.
441 * If false, tiles have explicitly configured widths and heights.
442 */
443 int uniform_spacing;
444
445 /**
446 * \name Members only valid when uniform_spacing == 1
447 */
448 /**@{*/
449 int log2_cols; /*!< log2 of 'cols'. */
450 int log2_rows; /*!< log2 of 'rows'. */
451 int width; /*!< tile width in MI units */
452 int height; /*!< tile height in MI units */
453 /**@}*/
454
455 /*!
456 * Min num of tile columns possible based on 'max_width_sb' and frame width.
457 */
458 int min_log2_cols;
459 /*!
460 * Min num of tile rows possible based on 'max_height_sb' and frame height.
461 */
462 int min_log2_rows;
463 /*!
464 * Max num of tile columns possible based on frame width.
465 */
466 int max_log2_cols;
467 /*!
468 * Max num of tile rows possible based on frame height.
469 */
470 int max_log2_rows;
471 /*!
472 * log2 of min number of tiles (same as min_log2_cols + min_log2_rows).
473 */
474 int min_log2;
475 /*!
476 * col_start_sb[i] is the start position of tile column i in superblock units.
477 * valid for 0 <= i <= cols
478 */
479 int col_start_sb[MAX_TILE_COLS + 1];
480 /*!
481 * row_start_sb[i] is the start position of tile row i in superblock units.
482 * valid for 0 <= i <= rows
483 */
484 int row_start_sb[MAX_TILE_ROWS + 1];
485 /*!
486 * If true, we are using large scale tile mode.
487 */
488 unsigned int large_scale;
489 /*!
490 * Only relevant when large_scale == 1.
491 * If true, the independent decoding of a single tile or a section of a frame
492 * is allowed.
493 */
494 unsigned int single_tile_decoding;
495 } CommonTileParams;
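// Illustrative sketch (not part of libaom): given the col_start_sb[] /
// row_start_sb[] arrays documented above (valid for 0 <= i <= cols/rows), the
// size of an individual tile in superblock units is just the difference of
// consecutive entries. The function name is hypothetical.
static INLINE int example_tile_col_width_sb(const CommonTileParams *tiles,
                                            int col) {
  // Valid for 0 <= col < tiles->cols.
  return tiles->col_start_sb[col + 1] - tiles->col_start_sb[col];
}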
496
497 typedef struct CommonModeInfoParams CommonModeInfoParams;
498 /*!
499 * \brief Params related to MB_MODE_INFO arrays and related info.
500 */
501 struct CommonModeInfoParams {
502 /*!
503 * Number of rows in the frame in 16 pixel units.
504 * This is computed from frame height aligned to a multiple of 8.
505 */
506 int mb_rows;
507 /*!
508 * Number of cols in the frame in 16 pixel units.
509 * This is computed from frame width aligned to a multiple of 8.
510 */
511 int mb_cols;
512
513 /*!
514 * Total MBs = mb_rows * mb_cols.
515 */
516 int MBs;
517
518 /*!
519 * Number of rows in the frame in 4 pixel (MB_MODE_INFO) units.
520 * This is computed from frame height aligned to a multiple of 8.
521 */
522 int mi_rows;
523 /*!
524 * Number of cols in the frame in 4 pixel (MB_MODE_INFO) units.
525 * This is computed from frame width aligned to a multiple of 8.
526 */
527 int mi_cols;
528
529 /*!
530 * An array of MB_MODE_INFO structs for every 'mi_alloc_bsize' sized block
531 * in the frame.
532 * Note: This array should be treated as scratch memory and, in most cases,
533 * should NOT be accessed directly. Please use the 'mi_grid_base' array instead.
534 */
535 MB_MODE_INFO *mi_alloc;
536 /*!
537 * Number of allocated elements in 'mi_alloc'.
538 */
539 int mi_alloc_size;
540 /*!
541 * Stride for 'mi_alloc' array.
542 */
543 int mi_alloc_stride;
544 /*!
545 * The minimum block size that each element in 'mi_alloc' can correspond to.
546 * For decoder, this is always BLOCK_4X4.
547 * For encoder, this is BLOCK_8X8 for resolution >= 4k case or REALTIME mode
548 * case. Otherwise, this is BLOCK_4X4.
549 */
550 BLOCK_SIZE mi_alloc_bsize;
551
552 /*!
553 * Grid of pointers to 4x4 MB_MODE_INFO structs allocated in 'mi_alloc'.
554 * It's possible that:
555 * - Multiple pointers in the grid point to the same element in 'mi_alloc'
556 * (for example, for all 4x4 blocks that belong to the same partition block).
557 * - Some pointers can be NULL (for example, for blocks outside visible area).
558 */
559 MB_MODE_INFO **mi_grid_base;
560 /*!
561 * Number of allocated elements in 'mi_grid_base' (and 'tx_type_map' also).
562 */
563 int mi_grid_size;
564 /*!
565 * Stride for 'mi_grid_base' (and 'tx_type_map' also).
566 */
567 int mi_stride;
568
569 /*!
570 * An array of tx types for each 4x4 block in the frame.
571 * The number of allocated elements is the same as 'mi_grid_size', and the
572 * stride is the same as 'mi_stride'. So indexing into 'tx_type_map' is the
573 * same as for 'mi_grid_base'.
574 */
575 TX_TYPE *tx_type_map;
576
577 /**
578 * \name Function pointers to allow separate logic for encoder and decoder.
579 */
580 /**@{*/
581 /*!
582 * Free the memory allocated to arrays in 'mi_params'.
583 * \param[in,out] mi_params object containing common mode info parameters
584 */
585 void (*free_mi)(struct CommonModeInfoParams *mi_params);
586 /*!
587 * Initialize / reset appropriate arrays in 'mi_params'.
588 * \param[in,out] mi_params object containing common mode info parameters
589 */
590 void (*setup_mi)(struct CommonModeInfoParams *mi_params);
591 /*!
592 * Allocate required memory for arrays in 'mi_params'.
593 * \param[in,out] mi_params object containing common mode info
594 * parameters
595 * \param width frame width
596 * \param height frame height
597 * \param min_partition_size minimum partition size allowed while
598 * encoding
599 */
600 void (*set_mb_mi)(struct CommonModeInfoParams *mi_params, int width,
601 int height, BLOCK_SIZE min_partition_size);
602 /**@}*/
603 };
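// Illustrative sketch (not part of libaom): 'mi_grid_base' and 'tx_type_map'
// share the same layout, so a (mi_row, mi_col) position is looked up with the
// 'mi_stride' documented above. Entries may be NULL for blocks outside the
// visible area. The function name is hypothetical.
static INLINE MB_MODE_INFO *example_get_mi(
    const CommonModeInfoParams *mi_params, int mi_row, int mi_col) {
  return mi_params->mi_grid_base[mi_row * mi_params->mi_stride + mi_col];
}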
604
605 typedef struct CommonQuantParams CommonQuantParams;
606 /*!
607 * \brief Parameters related to quantization at the frame level.
608 */
609 struct CommonQuantParams {
610 /*!
611 * Base qindex of the frame in the range 0 to 255.
612 */
613 int base_qindex;
614
615 /*!
616 * Delta of qindex (from base_qindex) for Y plane DC coefficient.
617 * Note: y_ac_delta_q is implicitly 0.
618 */
619 int y_dc_delta_q;
620
621 /*!
622 * Delta of qindex (from base_qindex) for U plane DC coefficients.
623 */
624 int u_dc_delta_q;
625 /*!
626 * Delta of qindex (from base_qindex) for U plane AC coefficients.
627 */
628 int u_ac_delta_q;
629
630 /*!
631 * Delta of qindex (from base_qindex) for V plane DC coefficients.
632 * Same as that for the U plane if cm->seq_params->separate_uv_delta_q == 0.
633 */
634 int v_dc_delta_q;
635 /*!
636 * Delta of qindex (from base_qindex) for V plane AC coefficients.
637 * Same as that for the U plane if cm->seq_params->separate_uv_delta_q == 0.
638 */
639 int v_ac_delta_q;
640
641 /*
642 * Note: The qindex per superblock may have a delta from the qindex obtained
643 * at frame level from parameters above, based on 'cm->delta_q_info'.
644 */
645
646 /**
647 * \name True dequantizers.
648 * The dequantizers below are true dequantizers used only in the
649 * dequantization process. They have the same coefficient
650 * shift/scale as TX.
651 */
652 /**@{*/
653 int16_t y_dequant_QTX[MAX_SEGMENTS][2]; /*!< Dequant for Y plane */
654 int16_t u_dequant_QTX[MAX_SEGMENTS][2]; /*!< Dequant for U plane */
655 int16_t v_dequant_QTX[MAX_SEGMENTS][2]; /*!< Dequant for V plane */
656 /**@}*/
657
658 /**
659 * \name Global quantization matrix tables.
660 */
661 /**@{*/
662 /*!
663 * Global dequantization matrix table.
664 */
665 const qm_val_t *giqmatrix[NUM_QM_LEVELS][3][TX_SIZES_ALL];
666 /*!
667 * Global quantization matrix table.
668 */
669 const qm_val_t *gqmatrix[NUM_QM_LEVELS][3][TX_SIZES_ALL];
670 /**@}*/
671
672 /**
673 * \name Local dequantization matrix tables for each frame.
674 */
675 /**@{*/
676 /*!
677 * Local dequant matrix for Y plane.
678 */
679 const qm_val_t *y_iqmatrix[MAX_SEGMENTS][TX_SIZES_ALL];
680 /*!
681 * Local dequant matrix for U plane.
682 */
683 const qm_val_t *u_iqmatrix[MAX_SEGMENTS][TX_SIZES_ALL];
684 /*!
685 * Local dequant matrix for V plane.
686 */
687 const qm_val_t *v_iqmatrix[MAX_SEGMENTS][TX_SIZES_ALL];
688 /**@}*/
689
690 /*!
691 * Flag indicating whether quantization matrices are being used:
692 * - If true, qm_level_y, qm_level_u and qm_level_v indicate the level
693 * indices to be used to access appropriate global quant matrix tables.
694 * - If false, we implicitly use level index 'NUM_QM_LEVELS - 1'.
695 */
696 bool using_qmatrix;
697 /**
698 * \name Valid only when using_qmatrix == true
699 * Indicate the level indices to be used to access appropriate global quant
700 * matrix tables.
701 */
702 /**@{*/
703 int qmatrix_level_y; /*!< Level index for Y plane */
704 int qmatrix_level_u; /*!< Level index for U plane */
705 int qmatrix_level_v; /*!< Level index for V plane */
706 /**@}*/
707 };
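// Illustrative sketch (not part of libaom): how the per-plane DC deltas above
// combine with 'base_qindex'. The real code goes through the dequant tables
// (y/u/v_dequant_QTX); this only shows the qindex arithmetic, clamped to
// [0, MAXQ]. The function name is hypothetical.
static INLINE int example_plane_dc_qindex(
    const CommonQuantParams *quant_params, int plane) {
  const int delta = (plane == 0)   ? quant_params->y_dc_delta_q
                    : (plane == 1) ? quant_params->u_dc_delta_q
                                   : quant_params->v_dc_delta_q;
  const int qindex = quant_params->base_qindex + delta;
  return qindex < 0 ? 0 : (qindex > MAXQ ? MAXQ : qindex);
}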
708
709 typedef struct CommonContexts CommonContexts;
710 /*!
711 * \brief Contexts used for transmitting various symbols in the bitstream.
712 */
713 struct CommonContexts {
714 /*!
715 * Context used by 'FRAME_CONTEXT.partition_cdf' to transmit partition type.
716 * partition[i][j] is the context for ith tile row, jth mi_col.
717 */
718 PARTITION_CONTEXT **partition;
719
720 /*!
721 * Context used to derive context for multiple symbols:
722 * - 'TXB_CTX.txb_skip_ctx' used by 'FRAME_CONTEXT.txb_skip_cdf' to transmit
723 * the skip_txfm flag.
724 * - 'TXB_CTX.dc_sign_ctx' used by 'FRAME_CONTEXT.dc_sign_cdf' to transmit
725 * sign.
726 * entropy[i][j][k] is the context for ith plane, jth tile row, kth mi_col.
727 */
728 ENTROPY_CONTEXT **entropy[MAX_MB_PLANE];
729
730 /*!
731 * Context used to derive context for 'FRAME_CONTEXT.txfm_partition_cdf' to
732 * transmit 'is_split' flag to indicate if this transform block should be
733 * split into smaller sub-blocks.
734 * txfm[i][j] is the context for ith tile row, jth mi_col.
735 */
736 TXFM_CONTEXT **txfm;
737
738 /*!
739 * Dimensions that were used to allocate the arrays above.
740 * If these dimensions change, the arrays may have to be re-allocated.
741 */
742 int num_planes; /*!< Corresponds to av1_num_planes(cm) */
743 int num_tile_rows; /*!< Corresponds to cm->tiles.row */
744 int num_mi_cols; /*!< Corresponds to cm->mi_params.mi_cols */
745 };
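// Illustrative sketch (not part of libaom): the "above" context rows above are
// indexed by plane, tile row and mi column. This shows the raw lookup; note
// that for chroma planes the real code (see set_entropy_context() later in
// this header) shifts the column index by the plane's subsampling first. The
// function name is hypothetical.
static INLINE ENTROPY_CONTEXT *example_above_entropy_ptr(
    CommonContexts *above_contexts, int plane, int tile_row, int mi_col) {
  return &above_contexts->entropy[plane][tile_row][mi_col];
}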
746
747 /*!
748 * \brief Top level common structure used by both encoder and decoder.
749 */
750 typedef struct AV1Common {
751 /*!
752 * Information about the current frame that is being coded.
753 */
754 CurrentFrame current_frame;
755 /*!
756 * Code and details about current error status.
757 */
758 struct aom_internal_error_info *error;
759
760 /*!
761 * AV1 allows two types of frame scaling operations:
762 * 1. Frame super-resolution: allows coding a frame at a lower resolution;
763 * after decoding, the frame is normatively scaled and restored --
764 * inside the coding loop.
765 * 2. Frame resize: allows coding a frame at a lower/higher resolution, which
766 * is then non-normatively upscaled at rendering time -- outside
767 * the coding loop.
768 * Hence, the need for 3 types of dimensions.
769 */
770
771 /**
772 * \name Coded frame dimensions.
773 */
774 /**@{*/
775 int width; /*!< Coded frame width */
776 int height; /*!< Coded frame height */
777 /**@}*/
778
779 /**
780 * \name Rendered frame dimensions.
781 * Dimensions after applying both super-resolution and resize to the coded
782 * frame. Different from coded dimensions if super-resolution and/or resize
783 * are being used for this frame.
784 */
785 /**@{*/
786 int render_width; /*!< Rendered frame width */
787 int render_height; /*!< Rendered frame height */
788 /**@}*/
789
790 /**
791 * \name Super-resolved frame dimensions.
792 * Frame dimensions after applying super-resolution to the coded frame (if
793 * present), but before applying resize.
794 * Larger than the coded dimensions if super-resolution is being used for
795 * this frame.
796 * Different from rendered dimensions if resize is being used for this frame.
797 */
798 /**@{*/
799 int superres_upscaled_width; /*!< Super-resolved frame width */
800 int superres_upscaled_height; /*!< Super-resolved frame height */
801 /**@}*/
802
803 /*!
804 * The denominator of the superres scale used by this frame.
805 * Note: The numerator is fixed to be SCALE_NUMERATOR.
806 */
807 uint8_t superres_scale_denominator;
808
809 /*!
810 * buffer_removal_times[op_num] specifies the frame removal time in units of
811 * DecCT clock ticks counted from the removal time of the last random access
812 * point for operating point op_num.
813 * TODO(urvang): We probably don't need the +1 here.
814 */
815 uint32_t buffer_removal_times[MAX_NUM_OPERATING_POINTS + 1];
816 /*!
817 * Presentation time of the frame in clock ticks DispCT counted from the
818 * removal time of the last random access point for the operating point that
819 * is being decoded.
820 */
821 uint32_t frame_presentation_time;
822
823 /*!
824 * Buffer where previous frame is stored.
825 */
826 RefCntBuffer *prev_frame;
827
828 /*!
829 * Buffer into which the current frame will be stored and other related info.
830 * TODO(hkuang): Combine this with cur_buf in macroblockd.
831 */
832 RefCntBuffer *cur_frame;
833
834 /*!
835 * For encoder, we have a two-level mapping from reference frame type to the
836 * corresponding buffer in the buffer pool:
837 * * 'remapped_ref_idx[i - 1]' maps reference type 'i' (range: LAST_FRAME ...
838 * EXTREF_FRAME) to a remapped index 'j' (in range: 0 ... REF_FRAMES - 1)
839 * * Later, 'cm->ref_frame_map[j]' maps the remapped index 'j' to a pointer to
840 * the reference counted buffer structure RefCntBuffer, taken from the buffer
841 * pool cm->buffer_pool->frame_bufs.
842 *
843 * LAST_FRAME, ..., EXTREF_FRAME
844 * | |
845 * v v
846 * remapped_ref_idx[LAST_FRAME - 1], ..., remapped_ref_idx[EXTREF_FRAME - 1]
847 * | |
848 * v v
849 * ref_frame_map[], ..., ref_frame_map[]
850 *
851 * Note: INTRA_FRAME always refers to the current frame, so there is no need
852 * to have a remapped index for it.
853 */
854 int remapped_ref_idx[REF_FRAMES];
855
856 /*!
857 * Scale of the current frame with respect to itself.
858 * This is currently used for intra block copy, which behaves like an inter
859 * prediction mode, where the reference frame is the current frame itself.
860 */
861 struct scale_factors sf_identity;
862
863 /*!
864 * Scale factors of the reference frame with respect to the current frame.
865 * This is required for generating inter prediction and will be non-identity
866 * for a reference frame, if it has different dimensions than the coded
867 * dimensions of the current frame.
868 */
869 struct scale_factors ref_scale_factors[REF_FRAMES];
870
871 /*!
872 * For decoder, ref_frame_map[i] maps reference type 'i' to a pointer to
873 * the buffer in the buffer pool 'cm->buffer_pool.frame_bufs'.
874 * For encoder, ref_frame_map[j] (where j = remapped_ref_idx[i]) maps
875 * remapped reference index 'j' (that is, original reference type 'i') to
876 * a pointer to the buffer in the buffer pool 'cm->buffer_pool.frame_bufs'.
877 */
878 RefCntBuffer *ref_frame_map[REF_FRAMES];
879
880 /*!
881 * If true, this frame is actually shown after decoding.
882 * If false, this frame is coded in the bitstream, but not shown. It is only
883 * used as a reference for other frames coded later.
884 */
885 int show_frame;
886
887 /*!
888 * If true, this frame can be used as a show-existing frame for other frames
889 * coded later.
890 * When 'show_frame' is true, this is always true for all non-keyframes.
891 * When 'show_frame' is false, this value is transmitted in the bitstream.
892 */
893 int showable_frame;
894
895 /*!
896 * If true, show an existing frame coded before, instead of actually coding a
897 * frame. The existing frame comes from one of the existing reference buffers,
898 * as signaled in the bitstream.
899 */
900 int show_existing_frame;
901
902 /*!
903 * Whether some features are allowed or not.
904 */
905 FeatureFlags features;
906
907 /*!
908 * Params related to MB_MODE_INFO arrays and related info.
909 */
910 CommonModeInfoParams mi_params;
911
912 #if CONFIG_ENTROPY_STATS
913 /*!
914 * Context type used by token CDFs, in the range 0 .. (TOKEN_CDF_Q_CTXS - 1).
915 */
916 int coef_cdf_category;
917 #endif // CONFIG_ENTROPY_STATS
918
919 /*!
920 * Quantization params.
921 */
922 CommonQuantParams quant_params;
923
924 /*!
925 * Segmentation info for current frame.
926 */
927 struct segmentation seg;
928
929 /*!
930 * Segmentation map for previous frame.
931 */
932 uint8_t *last_frame_seg_map;
933
934 /**
935 * \name Deblocking filter parameters.
936 */
937 /**@{*/
938 loop_filter_info_n lf_info; /*!< Loop filter info */
939 struct loopfilter lf; /*!< Loop filter parameters */
940 /**@}*/
941
942 /**
943 * \name Loop Restoration filter parameters.
944 */
945 /**@{*/
946 RestorationInfo rst_info[MAX_MB_PLANE]; /*!< Loop Restoration filter info */
947 int32_t *rst_tmpbuf; /*!< Scratch buffer for self-guided restoration */
948 RestorationLineBuffers *rlbs; /*!< Line buffers needed by loop restoration */
949 YV12_BUFFER_CONFIG rst_frame; /*!< Stores the output of loop restoration */
950 /**@}*/
951
952 /*!
953 * CDEF (Constrained Directional Enhancement Filter) parameters.
954 */
955 CdefInfo cdef_info;
956
957 /*!
958 * Parameters for film grain synthesis.
959 */
960 aom_film_grain_t film_grain_params;
961
962 /*!
963 * Parameters for delta quantization and delta loop filter level.
964 */
965 DeltaQInfo delta_q_info;
966
967 /*!
968 * Global motion parameters for each reference frame.
969 */
970 WarpedMotionParams global_motion[REF_FRAMES];
971
972 /*!
973 * Elements part of the sequence header, that are applicable for all the
974 * frames in the video.
975 */
976 SequenceHeader *seq_params;
977
978 /*!
979 * Current CDFs of all the symbols for the current frame.
980 */
981 FRAME_CONTEXT *fc;
982 /*!
983 * Default CDFs used when features.primary_ref_frame = PRIMARY_REF_NONE
984 * (e.g. for a keyframe). These default CDFs are defined by the bitstream and
985 * copied from default CDF tables for each symbol.
986 */
987 FRAME_CONTEXT *default_frame_context;
988
989 /*!
990 * Parameters related to tiling.
991 */
992 CommonTileParams tiles;
993
994 /*!
995 * External BufferPool passed from outside.
996 */
997 BufferPool *buffer_pool;
998
999 /*!
1000 * Above context buffers and their sizes.
1001 * Note: above contexts are allocated in this struct, as their size is
1002 * dependent on frame width, while left contexts are declared and allocated in
1003 * MACROBLOCKD struct, as they have a fixed size.
1004 */
1005 CommonContexts above_contexts;
1006
1007 /**
1008 * \name Signaled when cm->seq_params->frame_id_numbers_present_flag == 1
1009 */
1010 /**@{*/
1011 int current_frame_id; /*!< frame ID for the current frame. */
1012 int ref_frame_id[REF_FRAMES]; /*!< frame IDs for the reference frames. */
1013 /**@}*/
1014
1015 /*!
1016 * Motion vectors provided by motion field estimation.
1017 * tpl_mvs[row * stride + col] stores MV for block at [mi_row, mi_col] where:
1018 * mi_row = 2 * row,
1019 * mi_col = 2 * col, and
1020 * stride = cm->mi_params.mi_stride / 2
1021 */
1022 TPL_MV_REF *tpl_mvs;
1023 /*!
1024 * Allocated size of 'tpl_mvs' array. Refer to 'ensure_mv_buffer()' function.
1025 */
1026 int tpl_mvs_mem_size;
1027 /*!
1028 * ref_frame_sign_bias[k] is 1 if relative distance between reference 'k' and
1029 * current frame is positive; and 0 otherwise.
1030 */
1031 int ref_frame_sign_bias[REF_FRAMES];
1032 /*!
1033 * ref_frame_side[k] is 1 if relative distance between reference 'k' and
1034 * current frame is positive, -1 if relative distance is 0; and 0 otherwise.
1035 * TODO(jingning): This can be combined with sign_bias later.
1036 */
1037 int8_t ref_frame_side[REF_FRAMES];
1038
1039 /*!
1040 * Temporal layer ID of this frame
1041 * (in the range 0 ... (number_temporal_layers - 1)).
1042 */
1043 int temporal_layer_id;
1044
1045 /*!
1046 * Spatial layer ID of this frame
1047 * (in the range 0 ... (number_spatial_layers - 1)).
1048 */
1049 int spatial_layer_id;
1050
1051 #if TXCOEFF_TIMER
1052 int64_t cum_txcoeff_timer;
1053 int64_t txcoeff_timer;
1054 int txb_count;
1055 #endif // TXCOEFF_TIMER
1056
1057 #if TXCOEFF_COST_TIMER
1058 int64_t cum_txcoeff_cost_timer;
1059 int64_t txcoeff_cost_timer;
1060 int64_t txcoeff_cost_count;
1061 #endif // TXCOEFF_COST_TIMER
1062 } AV1_COMMON;
1063
1064 /*!\cond */
1065
1066 // TODO(hkuang): Don't need to lock the whole pool after implementing atomic
1067 // frame reference count.
1068 static void lock_buffer_pool(BufferPool *const pool) {
1069 #if CONFIG_MULTITHREAD
1070 pthread_mutex_lock(&pool->pool_mutex);
1071 #else
1072 (void)pool;
1073 #endif
1074 }
1075
1076 static void unlock_buffer_pool(BufferPool *const pool) {
1077 #if CONFIG_MULTITHREAD
1078 pthread_mutex_unlock(&pool->pool_mutex);
1079 #else
1080 (void)pool;
1081 #endif
1082 }
1083
1084 static INLINE YV12_BUFFER_CONFIG *get_ref_frame(AV1_COMMON *cm, int index) {
1085 if (index < 0 || index >= REF_FRAMES) return NULL;
1086 if (cm->ref_frame_map[index] == NULL) return NULL;
1087 return &cm->ref_frame_map[index]->buf;
1088 }
1089
1090 static INLINE int get_free_fb(AV1_COMMON *cm) {
1091 RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
1092 int i;
1093
1094 lock_buffer_pool(cm->buffer_pool);
1095 for (i = 0; i < FRAME_BUFFERS; ++i)
1096 if (frame_bufs[i].ref_count == 0) break;
1097
1098 if (i != FRAME_BUFFERS) {
1099 if (frame_bufs[i].buf.use_external_reference_buffers) {
1100 // This frame buffer's y_buffer, u_buffer, and v_buffer point to external
1101 // reference buffers. Restore the buffer pointers to point to the
1102 // internally allocated memory.
1103 YV12_BUFFER_CONFIG *ybf = &frame_bufs[i].buf;
1104 ybf->y_buffer = ybf->store_buf_adr[0];
1105 ybf->u_buffer = ybf->store_buf_adr[1];
1106 ybf->v_buffer = ybf->store_buf_adr[2];
1107 ybf->use_external_reference_buffers = 0;
1108 }
1109
1110 frame_bufs[i].ref_count = 1;
1111 } else {
1112 // We should never run out of free buffers. If this assertion fails, there
1113 // is a reference leak.
1114 assert(0 && "Ran out of free frame buffers. Likely a reference leak.");
1115 // Reset i to be INVALID_IDX to indicate no free buffer found.
1116 i = INVALID_IDX;
1117 }
1118
1119 unlock_buffer_pool(cm->buffer_pool);
1120 return i;
1121 }
1122
1123 static INLINE RefCntBuffer *assign_cur_frame_new_fb(AV1_COMMON *const cm) {
1124 // Release the previously-used frame-buffer
1125 if (cm->cur_frame != NULL) {
1126 --cm->cur_frame->ref_count;
1127 cm->cur_frame = NULL;
1128 }
1129
1130 // Assign a new framebuffer
1131 const int new_fb_idx = get_free_fb(cm);
1132 if (new_fb_idx == INVALID_IDX) return NULL;
1133
1134 cm->cur_frame = &cm->buffer_pool->frame_bufs[new_fb_idx];
1135 cm->cur_frame->buf.buf_8bit_valid = 0;
1136 av1_zero(cm->cur_frame->interp_filter_selected);
1137 return cm->cur_frame;
1138 }
1139
1140 // Modify 'lhs_ptr' to reference the buffer at 'rhs_ptr', and update the ref
1141 // counts accordingly.
1142 static INLINE void assign_frame_buffer_p(RefCntBuffer **lhs_ptr,
1143 RefCntBuffer *rhs_ptr) {
1144 RefCntBuffer *const old_ptr = *lhs_ptr;
1145 if (old_ptr != NULL) {
1146 assert(old_ptr->ref_count > 0);
1147 // One less reference to the buffer at 'old_ptr', so decrease ref count.
1148 --old_ptr->ref_count;
1149 }
1150
1151 *lhs_ptr = rhs_ptr;
1152 // One more reference to the buffer at 'rhs_ptr', so increase ref count.
1153 ++rhs_ptr->ref_count;
1154 }
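// Illustrative sketch (not part of libaom): typical use of
// assign_frame_buffer_p() when a newly coded frame refreshes one slot of
// cm->ref_frame_map[], keeping the ref counts of both the old and the new
// buffer consistent. The function name is hypothetical.
static INLINE void example_refresh_ref_slot(AV1_COMMON *cm, int slot,
                                            RefCntBuffer *new_buf) {
  assign_frame_buffer_p(&cm->ref_frame_map[slot], new_buf);
}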
1155
1156 static INLINE int frame_is_intra_only(const AV1_COMMON *const cm) {
1157 return cm->current_frame.frame_type == KEY_FRAME ||
1158 cm->current_frame.frame_type == INTRA_ONLY_FRAME;
1159 }
1160
1161 static INLINE int frame_is_sframe(const AV1_COMMON *cm) {
1162 return cm->current_frame.frame_type == S_FRAME;
1163 }
1164
1165 // These functions take a reference frame label between LAST_FRAME and
1166 // EXTREF_FRAME inclusive. Note that this is different from the indexing
1167 // previously used by the frame_refs[] array.
1168 static INLINE int get_ref_frame_map_idx(const AV1_COMMON *const cm,
1169 const MV_REFERENCE_FRAME ref_frame) {
1170 return (ref_frame >= LAST_FRAME && ref_frame <= EXTREF_FRAME)
1171 ? cm->remapped_ref_idx[ref_frame - LAST_FRAME]
1172 : INVALID_IDX;
1173 }
1174
1175 static INLINE RefCntBuffer *get_ref_frame_buf(
1176 const AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame) {
1177 const int map_idx = get_ref_frame_map_idx(cm, ref_frame);
1178 return (map_idx != INVALID_IDX) ? cm->ref_frame_map[map_idx] : NULL;
1179 }
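// Illustrative sketch (not part of libaom): fetching the YV12 buffer for a
// reference label via get_ref_frame_buf(), which returns NULL when that
// reference is not set up. The function name is hypothetical.
static INLINE const YV12_BUFFER_CONFIG *example_get_last_frame_buf(
    const AV1_COMMON *cm) {
  const RefCntBuffer *const buf = get_ref_frame_buf(cm, LAST_FRAME);
  return buf != NULL ? &buf->buf : NULL;
}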
1180
1181 // Both const and non-const versions of this function are provided so that it
1182 // can be used with a const AV1_COMMON if needed.
1183 static INLINE const struct scale_factors *get_ref_scale_factors_const(
1184 const AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame) {
1185 const int map_idx = get_ref_frame_map_idx(cm, ref_frame);
1186 return (map_idx != INVALID_IDX) ? &cm->ref_scale_factors[map_idx] : NULL;
1187 }
1188
1189 static INLINE struct scale_factors *get_ref_scale_factors(
1190 AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame) {
1191 const int map_idx = get_ref_frame_map_idx(cm, ref_frame);
1192 return (map_idx != INVALID_IDX) ? &cm->ref_scale_factors[map_idx] : NULL;
1193 }
1194
1195 static INLINE RefCntBuffer *get_primary_ref_frame_buf(
1196 const AV1_COMMON *const cm) {
1197 const int primary_ref_frame = cm->features.primary_ref_frame;
1198 if (primary_ref_frame == PRIMARY_REF_NONE) return NULL;
1199 const int map_idx = get_ref_frame_map_idx(cm, primary_ref_frame + 1);
1200 return (map_idx != INVALID_IDX) ? cm->ref_frame_map[map_idx] : NULL;
1201 }
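// Illustrative sketch (not part of libaom): how the starting CDFs for a frame
// follow from 'features.primary_ref_frame', per the comments on 'fc' and
// 'default_frame_context' in AV1_COMMON: either the frame_context stored in
// the primary reference buffer or the default context. The function name is
// hypothetical.
static INLINE const FRAME_CONTEXT *example_starting_frame_context(
    const AV1_COMMON *cm) {
  const RefCntBuffer *const primary_ref_buf = get_primary_ref_frame_buf(cm);
  if (primary_ref_buf == NULL) return cm->default_frame_context;
  return &primary_ref_buf->frame_context;
}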
1202
1203 // Returns 1 if this frame might allow mvs from some reference frame.
1204 static INLINE int frame_might_allow_ref_frame_mvs(const AV1_COMMON *cm) {
1205 return !cm->features.error_resilient_mode &&
1206 cm->seq_params->order_hint_info.enable_ref_frame_mvs &&
1207 cm->seq_params->order_hint_info.enable_order_hint &&
1208 !frame_is_intra_only(cm);
1209 }
1210
1211 // Returns 1 if this frame might use warped_motion
1212 static INLINE int frame_might_allow_warped_motion(const AV1_COMMON *cm) {
1213 return !cm->features.error_resilient_mode && !frame_is_intra_only(cm) &&
1214 cm->seq_params->enable_warped_motion;
1215 }
1216
1217 static INLINE void ensure_mv_buffer(RefCntBuffer *buf, AV1_COMMON *cm) {
1218 const int buf_rows = buf->mi_rows;
1219 const int buf_cols = buf->mi_cols;
1220 const CommonModeInfoParams *const mi_params = &cm->mi_params;
1221
1222 if (buf->mvs == NULL || buf_rows != mi_params->mi_rows ||
1223 buf_cols != mi_params->mi_cols) {
1224 aom_free(buf->mvs);
1225 buf->mi_rows = mi_params->mi_rows;
1226 buf->mi_cols = mi_params->mi_cols;
1227 CHECK_MEM_ERROR(cm, buf->mvs,
1228 (MV_REF *)aom_calloc(((mi_params->mi_rows + 1) >> 1) *
1229 ((mi_params->mi_cols + 1) >> 1),
1230 sizeof(*buf->mvs)));
1231 aom_free(buf->seg_map);
1232 CHECK_MEM_ERROR(
1233 cm, buf->seg_map,
1234 (uint8_t *)aom_calloc(mi_params->mi_rows * mi_params->mi_cols,
1235 sizeof(*buf->seg_map)));
1236 }
1237
1238 const int mem_size =
1239 ((mi_params->mi_rows + MAX_MIB_SIZE) >> 1) * (mi_params->mi_stride >> 1);
1240 int realloc = cm->tpl_mvs == NULL;
1241 if (cm->tpl_mvs) realloc |= cm->tpl_mvs_mem_size < mem_size;
1242
1243 if (realloc) {
1244 aom_free(cm->tpl_mvs);
1245 CHECK_MEM_ERROR(cm, cm->tpl_mvs,
1246 (TPL_MV_REF *)aom_calloc(mem_size, sizeof(*cm->tpl_mvs)));
1247 cm->tpl_mvs_mem_size = mem_size;
1248 }
1249 }
1250
1251 void cfl_init(CFL_CTX *cfl, const SequenceHeader *seq_params);
1252
1253 static INLINE int av1_num_planes(const AV1_COMMON *cm) {
1254 return cm->seq_params->monochrome ? 1 : MAX_MB_PLANE;
1255 }
1256
1257 static INLINE void av1_init_above_context(CommonContexts *above_contexts,
1258 int num_planes, int tile_row,
1259 MACROBLOCKD *xd) {
1260 for (int i = 0; i < num_planes; ++i) {
1261 xd->above_entropy_context[i] = above_contexts->entropy[i][tile_row];
1262 }
1263 xd->above_partition_context = above_contexts->partition[tile_row];
1264 xd->above_txfm_context = above_contexts->txfm[tile_row];
1265 }
1266
1267 static INLINE void av1_init_macroblockd(AV1_COMMON *cm, MACROBLOCKD *xd) {
1268 const int num_planes = av1_num_planes(cm);
1269 const CommonQuantParams *const quant_params = &cm->quant_params;
1270
1271 for (int i = 0; i < num_planes; ++i) {
1272 if (xd->plane[i].plane_type == PLANE_TYPE_Y) {
1273 memcpy(xd->plane[i].seg_dequant_QTX, quant_params->y_dequant_QTX,
1274 sizeof(quant_params->y_dequant_QTX));
1275 memcpy(xd->plane[i].seg_iqmatrix, quant_params->y_iqmatrix,
1276 sizeof(quant_params->y_iqmatrix));
1277
1278 } else {
1279 if (i == AOM_PLANE_U) {
1280 memcpy(xd->plane[i].seg_dequant_QTX, quant_params->u_dequant_QTX,
1281 sizeof(quant_params->u_dequant_QTX));
1282 memcpy(xd->plane[i].seg_iqmatrix, quant_params->u_iqmatrix,
1283 sizeof(quant_params->u_iqmatrix));
1284 } else {
1285 memcpy(xd->plane[i].seg_dequant_QTX, quant_params->v_dequant_QTX,
1286 sizeof(quant_params->v_dequant_QTX));
1287 memcpy(xd->plane[i].seg_iqmatrix, quant_params->v_iqmatrix,
1288 sizeof(quant_params->v_iqmatrix));
1289 }
1290 }
1291 }
1292 xd->mi_stride = cm->mi_params.mi_stride;
1293 xd->error_info = cm->error;
1294 cfl_init(&xd->cfl, cm->seq_params);
1295 }
1296
1297 static INLINE void set_entropy_context(MACROBLOCKD *xd, int mi_row, int mi_col,
1298 const int num_planes) {
1299 int i;
1300 int row_offset = mi_row;
1301 int col_offset = mi_col;
1302 for (i = 0; i < num_planes; ++i) {
1303 struct macroblockd_plane *const pd = &xd->plane[i];
1304 // Offset the buffer pointer
1305 const BLOCK_SIZE bsize = xd->mi[0]->bsize;
1306 if (pd->subsampling_y && (mi_row & 0x01) && (mi_size_high[bsize] == 1))
1307 row_offset = mi_row - 1;
1308 if (pd->subsampling_x && (mi_col & 0x01) && (mi_size_wide[bsize] == 1))
1309 col_offset = mi_col - 1;
1310 int above_idx = col_offset;
1311 int left_idx = row_offset & MAX_MIB_MASK;
1312 pd->above_entropy_context =
1313 &xd->above_entropy_context[i][above_idx >> pd->subsampling_x];
1314 pd->left_entropy_context =
1315 &xd->left_entropy_context[i][left_idx >> pd->subsampling_y];
1316 }
1317 }
1318
1319 static INLINE int calc_mi_size(int len) {
1320 // len is in mi units. Align to a multiple of SBs.
1321 return ALIGN_POWER_OF_TWO(len, MAX_MIB_SIZE_LOG2);
1322 }
1323
1324 static INLINE void set_plane_n4(MACROBLOCKD *const xd, int bw, int bh,
1325 const int num_planes) {
1326 int i;
1327 for (i = 0; i < num_planes; i++) {
1328 xd->plane[i].width = (bw * MI_SIZE) >> xd->plane[i].subsampling_x;
1329 xd->plane[i].height = (bh * MI_SIZE) >> xd->plane[i].subsampling_y;
1330
1331 xd->plane[i].width = AOMMAX(xd->plane[i].width, 4);
1332 xd->plane[i].height = AOMMAX(xd->plane[i].height, 4);
1333 }
1334 }
1335
1336 static INLINE void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile,
1337 int mi_row, int bh, int mi_col, int bw,
1338 int mi_rows, int mi_cols) {
1339 xd->mb_to_top_edge = -GET_MV_SUBPEL(mi_row * MI_SIZE);
1340 xd->mb_to_bottom_edge = GET_MV_SUBPEL((mi_rows - bh - mi_row) * MI_SIZE);
1341 xd->mb_to_left_edge = -GET_MV_SUBPEL((mi_col * MI_SIZE));
1342 xd->mb_to_right_edge = GET_MV_SUBPEL((mi_cols - bw - mi_col) * MI_SIZE);
1343
1344 xd->mi_row = mi_row;
1345 xd->mi_col = mi_col;
1346
1347 // Are edges available for intra prediction?
1348 xd->up_available = (mi_row > tile->mi_row_start);
1349
1350 const int ss_x = xd->plane[1].subsampling_x;
1351 const int ss_y = xd->plane[1].subsampling_y;
1352
1353 xd->left_available = (mi_col > tile->mi_col_start);
1354 xd->chroma_up_available = xd->up_available;
1355 xd->chroma_left_available = xd->left_available;
1356 if (ss_x && bw < mi_size_wide[BLOCK_8X8])
1357 xd->chroma_left_available = (mi_col - 1) > tile->mi_col_start;
1358 if (ss_y && bh < mi_size_high[BLOCK_8X8])
1359 xd->chroma_up_available = (mi_row - 1) > tile->mi_row_start;
1360 if (xd->up_available) {
1361 xd->above_mbmi = xd->mi[-xd->mi_stride];
1362 } else {
1363 xd->above_mbmi = NULL;
1364 }
1365
1366 if (xd->left_available) {
1367 xd->left_mbmi = xd->mi[-1];
1368 } else {
1369 xd->left_mbmi = NULL;
1370 }
1371
1372 const int chroma_ref = ((mi_row & 0x01) || !(bh & 0x01) || !ss_y) &&
1373 ((mi_col & 0x01) || !(bw & 0x01) || !ss_x);
1374 xd->is_chroma_ref = chroma_ref;
1375 if (chroma_ref) {
1376 // To help calculate the "above" and "left" chroma blocks, note that the
1377 // current block may cover multiple luma blocks (e.g., if partitioned into
1378 // 4x4 luma blocks).
1379 // First, find the top-left-most luma block covered by this chroma block
1380 MB_MODE_INFO **base_mi =
1381 &xd->mi[-(mi_row & ss_y) * xd->mi_stride - (mi_col & ss_x)];
1382
1383 // Then, we consider the luma region covered by the left or above 4x4 chroma
1384 // prediction. We want to point to the chroma reference block in that
1385 // region, which is the bottom-right-most mi unit.
1386 // This leads to the following offsets:
1387 MB_MODE_INFO *chroma_above_mi =
1388 xd->chroma_up_available ? base_mi[-xd->mi_stride + ss_x] : NULL;
1389 xd->chroma_above_mbmi = chroma_above_mi;
1390
1391 MB_MODE_INFO *chroma_left_mi =
1392 xd->chroma_left_available ? base_mi[ss_y * xd->mi_stride - 1] : NULL;
1393 xd->chroma_left_mbmi = chroma_left_mi;
1394 }
1395
1396 xd->height = bh;
1397 xd->width = bw;
1398
1399 xd->is_last_vertical_rect = 0;
1400 if (xd->width < xd->height) {
1401 if (!((mi_col + xd->width) & (xd->height - 1))) {
1402 xd->is_last_vertical_rect = 1;
1403 }
1404 }
1405
1406 xd->is_first_horizontal_rect = 0;
1407 if (xd->width > xd->height)
1408 if (!(mi_row & (xd->width - 1))) xd->is_first_horizontal_rect = 1;
1409 }
1410
1411 static INLINE aom_cdf_prob *get_y_mode_cdf(FRAME_CONTEXT *tile_ctx,
1412 const MB_MODE_INFO *above_mi,
1413 const MB_MODE_INFO *left_mi) {
1414 const PREDICTION_MODE above = av1_above_block_mode(above_mi);
1415 const PREDICTION_MODE left = av1_left_block_mode(left_mi);
1416 const int above_ctx = intra_mode_context[above];
1417 const int left_ctx = intra_mode_context[left];
1418 return tile_ctx->kf_y_cdf[above_ctx][left_ctx];
1419 }
1420
1421 static INLINE void update_partition_context(MACROBLOCKD *xd, int mi_row,
1422 int mi_col, BLOCK_SIZE subsize,
1423 BLOCK_SIZE bsize) {
1424 PARTITION_CONTEXT *const above_ctx = xd->above_partition_context + mi_col;
1425 PARTITION_CONTEXT *const left_ctx =
1426 xd->left_partition_context + (mi_row & MAX_MIB_MASK);
1427
1428 const int bw = mi_size_wide[bsize];
1429 const int bh = mi_size_high[bsize];
1430 memset(above_ctx, partition_context_lookup[subsize].above, bw);
1431 memset(left_ctx, partition_context_lookup[subsize].left, bh);
1432 }
1433
1434 static INLINE int is_chroma_reference(int mi_row, int mi_col, BLOCK_SIZE bsize,
1435 int subsampling_x, int subsampling_y) {
1436 assert(bsize < BLOCK_SIZES_ALL);
1437 const int bw = mi_size_wide[bsize];
1438 const int bh = mi_size_high[bsize];
1439 int ref_pos = ((mi_row & 0x01) || !(bh & 0x01) || !subsampling_y) &&
1440 ((mi_col & 0x01) || !(bw & 0x01) || !subsampling_x);
1441 return ref_pos;
1442 }
1443
1444 static INLINE aom_cdf_prob cdf_element_prob(const aom_cdf_prob *cdf,
1445 size_t element) {
1446 assert(cdf != NULL);
1447 return (element > 0 ? cdf[element - 1] : CDF_PROB_TOP) - cdf[element];
1448 }
1449
1450 static INLINE void partition_gather_horz_alike(aom_cdf_prob *out,
1451 const aom_cdf_prob *const in,
1452 BLOCK_SIZE bsize) {
1453 (void)bsize;
1454 out[0] = CDF_PROB_TOP;
1455 out[0] -= cdf_element_prob(in, PARTITION_HORZ);
1456 out[0] -= cdf_element_prob(in, PARTITION_SPLIT);
1457 out[0] -= cdf_element_prob(in, PARTITION_HORZ_A);
1458 out[0] -= cdf_element_prob(in, PARTITION_HORZ_B);
1459 out[0] -= cdf_element_prob(in, PARTITION_VERT_A);
1460 if (bsize != BLOCK_128X128) out[0] -= cdf_element_prob(in, PARTITION_HORZ_4);
1461 out[0] = AOM_ICDF(out[0]);
1462 out[1] = AOM_ICDF(CDF_PROB_TOP);
1463 }
1464
1465 static INLINE void partition_gather_vert_alike(aom_cdf_prob *out,
1466 const aom_cdf_prob *const in,
1467 BLOCK_SIZE bsize) {
1468 (void)bsize;
1469 out[0] = CDF_PROB_TOP;
1470 out[0] -= cdf_element_prob(in, PARTITION_VERT);
1471 out[0] -= cdf_element_prob(in, PARTITION_SPLIT);
1472 out[0] -= cdf_element_prob(in, PARTITION_HORZ_A);
1473 out[0] -= cdf_element_prob(in, PARTITION_VERT_A);
1474 out[0] -= cdf_element_prob(in, PARTITION_VERT_B);
1475 if (bsize != BLOCK_128X128) out[0] -= cdf_element_prob(in, PARTITION_VERT_4);
1476 out[0] = AOM_ICDF(out[0]);
1477 out[1] = AOM_ICDF(CDF_PROB_TOP);
1478 }
1479
1480 static INLINE void update_ext_partition_context(MACROBLOCKD *xd, int mi_row,
1481 int mi_col, BLOCK_SIZE subsize,
1482 BLOCK_SIZE bsize,
1483 PARTITION_TYPE partition) {
1484 if (bsize >= BLOCK_8X8) {
1485 const int hbs = mi_size_wide[bsize] / 2;
1486 BLOCK_SIZE bsize2 = get_partition_subsize(bsize, PARTITION_SPLIT);
1487 switch (partition) {
1488 case PARTITION_SPLIT:
1489 if (bsize != BLOCK_8X8) break;
1490 AOM_FALLTHROUGH_INTENDED;
1491 case PARTITION_NONE:
1492 case PARTITION_HORZ:
1493 case PARTITION_VERT:
1494 case PARTITION_HORZ_4:
1495 case PARTITION_VERT_4:
1496 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
1497 break;
1498 case PARTITION_HORZ_A:
1499 update_partition_context(xd, mi_row, mi_col, bsize2, subsize);
1500 update_partition_context(xd, mi_row + hbs, mi_col, subsize, subsize);
1501 break;
1502 case PARTITION_HORZ_B:
1503 update_partition_context(xd, mi_row, mi_col, subsize, subsize);
1504 update_partition_context(xd, mi_row + hbs, mi_col, bsize2, subsize);
1505 break;
1506 case PARTITION_VERT_A:
1507 update_partition_context(xd, mi_row, mi_col, bsize2, subsize);
1508 update_partition_context(xd, mi_row, mi_col + hbs, subsize, subsize);
1509 break;
1510 case PARTITION_VERT_B:
1511 update_partition_context(xd, mi_row, mi_col, subsize, subsize);
1512 update_partition_context(xd, mi_row, mi_col + hbs, bsize2, subsize);
1513 break;
1514 default: assert(0 && "Invalid partition type");
1515 }
1516 }
1517 }
1518
1519 static INLINE int partition_plane_context(const MACROBLOCKD *xd, int mi_row,
1520 int mi_col, BLOCK_SIZE bsize) {
1521 const PARTITION_CONTEXT *above_ctx = xd->above_partition_context + mi_col;
1522 const PARTITION_CONTEXT *left_ctx =
1523 xd->left_partition_context + (mi_row & MAX_MIB_MASK);
1524 // Minimum partition point is 8x8. Offset the bsl accordingly.
1525 const int bsl = mi_size_wide_log2[bsize] - mi_size_wide_log2[BLOCK_8X8];
1526 int above = (*above_ctx >> bsl) & 1, left = (*left_ctx >> bsl) & 1;
1527
1528 assert(mi_size_wide_log2[bsize] == mi_size_high_log2[bsize]);
1529 assert(bsl >= 0);
1530
1531 return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
1532 }
1533
1534 // Return the number of elements in the partition CDF when
1535 // partitioning the (square) block with luma block size of bsize.
1536 static INLINE int partition_cdf_length(BLOCK_SIZE bsize) {
1537 if (bsize <= BLOCK_8X8)
1538 return PARTITION_TYPES;
1539 else if (bsize == BLOCK_128X128)
1540 return EXT_PARTITION_TYPES - 2;
1541 else
1542 return EXT_PARTITION_TYPES;
1543 }
1544
1545 static INLINE int max_block_wide(const MACROBLOCKD *xd, BLOCK_SIZE bsize,
1546 int plane) {
1547 assert(bsize < BLOCK_SIZES_ALL);
1548 int max_blocks_wide = block_size_wide[bsize];
1549
1550 if (xd->mb_to_right_edge < 0) {
1551 const struct macroblockd_plane *const pd = &xd->plane[plane];
1552 max_blocks_wide += xd->mb_to_right_edge >> (3 + pd->subsampling_x);
1553 }
1554
1555 // Scale the width in the transform block unit.
1556 return max_blocks_wide >> MI_SIZE_LOG2;
1557 }
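
// Worked example (illustrative; it assumes xd->mb_to_right_edge is stored in
// 1/8-pel units, which is why the code shifts by 3 + subsampling_x): for a
// BLOCK_64X64 luma block whose last 16 pixel columns fall outside the frame,
// mb_to_right_edge == -16 * 8, so
//   max_blocks_wide = 64 + (-128 >> 3) = 48 pixels,
// and the function returns 48 >> MI_SIZE_LOG2 == 12 4x4 units for plane 0.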

static INLINE int max_block_high(const MACROBLOCKD *xd, BLOCK_SIZE bsize,
                                 int plane) {
  int max_blocks_high = block_size_high[bsize];

  if (xd->mb_to_bottom_edge < 0) {
    const struct macroblockd_plane *const pd = &xd->plane[plane];
    max_blocks_high += xd->mb_to_bottom_edge >> (3 + pd->subsampling_y);
  }

  // Scale the height to units of 4x4 blocks (MI_SIZE).
  return max_blocks_high >> MI_SIZE_LOG2;
}

static INLINE void av1_zero_above_context(AV1_COMMON *const cm,
                                          const MACROBLOCKD *xd,
                                          int mi_col_start, int mi_col_end,
                                          const int tile_row) {
  const SequenceHeader *const seq_params = cm->seq_params;
  const int num_planes = av1_num_planes(cm);
  const int width = mi_col_end - mi_col_start;
  const int aligned_width =
      ALIGN_POWER_OF_TWO(width, seq_params->mib_size_log2);
  const int offset_y = mi_col_start;
  const int width_y = aligned_width;
  const int offset_uv = offset_y >> seq_params->subsampling_x;
  const int width_uv = width_y >> seq_params->subsampling_x;
  CommonContexts *const above_contexts = &cm->above_contexts;

  av1_zero_array(above_contexts->entropy[0][tile_row] + offset_y, width_y);
  if (num_planes > 1) {
    if (above_contexts->entropy[1][tile_row] &&
        above_contexts->entropy[2][tile_row]) {
      av1_zero_array(above_contexts->entropy[1][tile_row] + offset_uv,
                     width_uv);
      av1_zero_array(above_contexts->entropy[2][tile_row] + offset_uv,
                     width_uv);
    } else {
      aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
                         "Invalid value of planes");
    }
  }

  av1_zero_array(above_contexts->partition[tile_row] + mi_col_start,
                 aligned_width);

  memset(above_contexts->txfm[tile_row] + mi_col_start,
         tx_size_wide[TX_SIZES_LARGEST], aligned_width * sizeof(TXFM_CONTEXT));
}
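
// Typical usage (illustrative sketch; 'tile_info' stands for a TileInfo the
// caller has already set up for the tile being processed):
//   av1_zero_above_context(cm, xd, tile_info.mi_col_start,
//                          tile_info.mi_col_end, tile_row);
//   av1_zero_left_context(xd);
// Roughly speaking, the above contexts are cleared when a tile is started and
// the left contexts are cleared at the start of each superblock row inside it.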

static INLINE void av1_zero_left_context(MACROBLOCKD *const xd) {
  av1_zero(xd->left_entropy_context);
  av1_zero(xd->left_partition_context);

  memset(xd->left_txfm_context_buffer, tx_size_high[TX_SIZES_LARGEST],
         sizeof(xd->left_txfm_context_buffer));
}

// Disable array-bounds checks as the TX_SIZE enum contains values larger than
// TX_SIZES_ALL (TX_INVALID) which make extending the array as a workaround
// infeasible. The assert is enough for static analysis, and tools such as
// asan or valgrind would catch an out-of-bounds access at runtime.
#if defined(__GNUC__) && __GNUC__ >= 4
#pragma GCC diagnostic ignored "-Warray-bounds"
#endif

#if defined(__GNUC__) && __GNUC__ >= 4
#pragma GCC diagnostic warning "-Warray-bounds"
#endif

static INLINE void set_txfm_ctx(TXFM_CONTEXT *txfm_ctx, uint8_t txs, int len) {
  int i;
  for (i = 0; i < len; ++i) txfm_ctx[i] = txs;
}

static INLINE void set_txfm_ctxs(TX_SIZE tx_size, int n4_w, int n4_h, int skip,
                                 const MACROBLOCKD *xd) {
  uint8_t bw = tx_size_wide[tx_size];
  uint8_t bh = tx_size_high[tx_size];

  if (skip) {
    bw = n4_w * MI_SIZE;
    bh = n4_h * MI_SIZE;
  }

  set_txfm_ctx(xd->above_txfm_context, bw, n4_w);
  set_txfm_ctx(xd->left_txfm_context, bh, n4_h);
}

static INLINE int get_mi_grid_idx(const CommonModeInfoParams *const mi_params,
                                  int mi_row, int mi_col) {
  return mi_row * mi_params->mi_stride + mi_col;
}

static INLINE int get_alloc_mi_idx(const CommonModeInfoParams *const mi_params,
                                   int mi_row, int mi_col) {
  const int mi_alloc_size_1d = mi_size_wide[mi_params->mi_alloc_bsize];
  const int mi_alloc_row = mi_row / mi_alloc_size_1d;
  const int mi_alloc_col = mi_col / mi_alloc_size_1d;

  return mi_alloc_row * mi_params->mi_alloc_stride + mi_alloc_col;
}

// For this partition block, set pointers in mi_params->mi_grid_base and xd->mi.
static INLINE void set_mi_offsets(const CommonModeInfoParams *const mi_params,
                                  MACROBLOCKD *const xd, int mi_row,
                                  int mi_col) {
  // The 'mi_grid_base' entry should point at the backing 'mi_alloc' element.
  const int mi_grid_idx = get_mi_grid_idx(mi_params, mi_row, mi_col);
  const int mi_alloc_idx = get_alloc_mi_idx(mi_params, mi_row, mi_col);
  mi_params->mi_grid_base[mi_grid_idx] = &mi_params->mi_alloc[mi_alloc_idx];
  // 'xd->mi' should point to an offset in 'mi_grid_base'.
  xd->mi = mi_params->mi_grid_base + mi_grid_idx;
  // 'xd->tx_type_map' should point to an offset in 'mi_params->tx_type_map'.
  xd->tx_type_map = mi_params->tx_type_map + mi_grid_idx;
  xd->tx_type_map_stride = mi_params->mi_stride;
}
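
// Example (illustrative sketch): after the offsets are set, the mode info for
// the current block is reached through xd->mi[0], e.g.
//   set_mi_offsets(&cm->mi_params, xd, mi_row, mi_col);
//   MB_MODE_INFO *mbmi = xd->mi[0];
//   mbmi->bsize = bsize;  // record the coded block size for this block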

static INLINE void txfm_partition_update(TXFM_CONTEXT *above_ctx,
                                         TXFM_CONTEXT *left_ctx,
                                         TX_SIZE tx_size, TX_SIZE txb_size) {
  BLOCK_SIZE bsize = txsize_to_bsize[txb_size];
  int bh = mi_size_high[bsize];
  int bw = mi_size_wide[bsize];
  uint8_t txw = tx_size_wide[tx_size];
  uint8_t txh = tx_size_high[tx_size];
  int i;
  for (i = 0; i < bh; ++i) left_ctx[i] = txh;
  for (i = 0; i < bw; ++i) above_ctx[i] = txw;
}

static INLINE TX_SIZE get_sqr_tx_size(int tx_dim) {
  switch (tx_dim) {
    case 128:
    case 64: return TX_64X64; break;
    case 32: return TX_32X32; break;
    case 16: return TX_16X16; break;
    case 8: return TX_8X8; break;
    default: return TX_4X4;
  }
}

static INLINE TX_SIZE get_tx_size(int width, int height) {
  if (width == height) {
    return get_sqr_tx_size(width);
  }
  if (width < height) {
    if (width + width == height) {
      switch (width) {
        case 4: return TX_4X8; break;
        case 8: return TX_8X16; break;
        case 16: return TX_16X32; break;
        case 32: return TX_32X64; break;
      }
    } else {
      switch (width) {
        case 4: return TX_4X16; break;
        case 8: return TX_8X32; break;
        case 16: return TX_16X64; break;
      }
    }
  } else {
    if (height + height == width) {
      switch (height) {
        case 4: return TX_8X4; break;
        case 8: return TX_16X8; break;
        case 16: return TX_32X16; break;
        case 32: return TX_64X32; break;
      }
    } else {
      switch (height) {
        case 4: return TX_16X4; break;
        case 8: return TX_32X8; break;
        case 16: return TX_64X16; break;
      }
    }
  }
  assert(0);
  return TX_4X4;
}
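
// Example mappings (these follow directly from the switch statements above):
//   get_tx_size(16, 16);    // -> TX_16X16
//   get_tx_size(16, 32);    // -> TX_16X32 (1:2 aspect ratio)
//   get_tx_size(64, 16);    // -> TX_64X16 (4:1 aspect ratio)
//   get_tx_size(128, 128);  // -> TX_64X64, the largest available transform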

static INLINE int txfm_partition_context(const TXFM_CONTEXT *const above_ctx,
                                         const TXFM_CONTEXT *const left_ctx,
                                         BLOCK_SIZE bsize, TX_SIZE tx_size) {
  const uint8_t txw = tx_size_wide[tx_size];
  const uint8_t txh = tx_size_high[tx_size];
  const int above = *above_ctx < txw;
  const int left = *left_ctx < txh;
  int category = TXFM_PARTITION_CONTEXTS;

  // Dummy return value; callers do not use the context for 4x4 transforms.
  if (tx_size <= TX_4X4) return 0;

  TX_SIZE max_tx_size =
      get_sqr_tx_size(AOMMAX(block_size_wide[bsize], block_size_high[bsize]));

  if (max_tx_size >= TX_8X8) {
    category =
        (txsize_sqr_up_map[tx_size] != max_tx_size && max_tx_size > TX_8X8) +
        (TX_SIZES - 1 - max_tx_size) * 2;
  }
  assert(category != TXFM_PARTITION_CONTEXTS);
  return category * 3 + above + left;
}
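
// Worked example (illustrative; it assumes TX_SIZES == 5 and the square-size
// ordering TX_4X4 .. TX_64X64 from enums.h): for bsize == BLOCK_32X32 and
// tx_size == TX_16X16, max_tx_size is TX_32X32. Since
// txsize_sqr_up_map[TX_16X16] != TX_32X32 and TX_32X32 > TX_8X8, the first
// term is 1, and the second term is (TX_SIZES - 1 - TX_32X32) * 2 == 2.
// So category == 3 and the returned context is 3 * 3 + above + left,
// i.e. 9, 10 or 11.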

// Compute the next partition in the direction of the sb_type stored in the mi
// array, starting with bsize.
static INLINE PARTITION_TYPE get_partition(const AV1_COMMON *const cm,
                                           int mi_row, int mi_col,
                                           BLOCK_SIZE bsize) {
  const CommonModeInfoParams *const mi_params = &cm->mi_params;
  if (mi_row >= mi_params->mi_rows || mi_col >= mi_params->mi_cols)
    return PARTITION_INVALID;

  const int offset = mi_row * mi_params->mi_stride + mi_col;
  MB_MODE_INFO **mi = mi_params->mi_grid_base + offset;
  const BLOCK_SIZE subsize = mi[0]->bsize;

  assert(bsize < BLOCK_SIZES_ALL);

  if (subsize == bsize) return PARTITION_NONE;

  const int bhigh = mi_size_high[bsize];
  const int bwide = mi_size_wide[bsize];
  const int sshigh = mi_size_high[subsize];
  const int sswide = mi_size_wide[subsize];

  if (bsize > BLOCK_8X8 && mi_row + bhigh / 2 < mi_params->mi_rows &&
      mi_col + bwide / 2 < mi_params->mi_cols) {
    // In this case, the block might be using an extended partition
    // type.
    const MB_MODE_INFO *const mbmi_right = mi[bwide / 2];
    const MB_MODE_INFO *const mbmi_below = mi[bhigh / 2 * mi_params->mi_stride];

    if (sswide == bwide) {
      // Smaller height but same width. Is PARTITION_HORZ_4, PARTITION_HORZ or
      // PARTITION_HORZ_B. To distinguish the latter two, check if the lower
      // half was split.
      if (sshigh * 4 == bhigh) return PARTITION_HORZ_4;
      assert(sshigh * 2 == bhigh);

      if (mbmi_below->bsize == subsize)
        return PARTITION_HORZ;
      else
        return PARTITION_HORZ_B;
    } else if (sshigh == bhigh) {
      // Smaller width but same height. Is PARTITION_VERT_4, PARTITION_VERT or
      // PARTITION_VERT_B. To distinguish the latter two, check if the right
      // half was split.
      if (sswide * 4 == bwide) return PARTITION_VERT_4;
      assert(sswide * 2 == bwide);

      if (mbmi_right->bsize == subsize)
        return PARTITION_VERT;
      else
        return PARTITION_VERT_B;
    } else {
      // Smaller width and smaller height. Might be PARTITION_SPLIT or could be
      // PARTITION_HORZ_A or PARTITION_VERT_A. If subsize isn't halved in both
      // dimensions, we immediately know this is a split (which will recurse to
      // get to subsize). Otherwise look down and to the right. With
      // PARTITION_VERT_A, the right block will have height bhigh; with
      // PARTITION_HORZ_A, the lower block will have width bwide. Otherwise
      // it's PARTITION_SPLIT.
      if (sswide * 2 != bwide || sshigh * 2 != bhigh) return PARTITION_SPLIT;

      if (mi_size_wide[mbmi_below->bsize] == bwide) return PARTITION_HORZ_A;
      if (mi_size_high[mbmi_right->bsize] == bhigh) return PARTITION_VERT_A;

      return PARTITION_SPLIT;
    }
  }
  const int vert_split = sswide < bwide;
  const int horz_split = sshigh < bhigh;
  const int split_idx = (vert_split << 1) | horz_split;
  assert(split_idx != 0);

  static const PARTITION_TYPE base_partitions[4] = {
    PARTITION_INVALID, PARTITION_HORZ, PARTITION_VERT, PARTITION_SPLIT
  };

  return base_partitions[split_idx];
}
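
// Typical usage when re-walking an already coded superblock (illustrative
// sketch; get_partition_subsize() is the helper used earlier in this file):
//   const PARTITION_TYPE partition = get_partition(cm, mi_row, mi_col, bsize);
//   const BLOCK_SIZE subsize = get_partition_subsize(bsize, partition);
//   // Recurse into the sub-blocks implied by 'partition' and 'subsize'.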

static INLINE void set_sb_size(SequenceHeader *const seq_params,
                               BLOCK_SIZE sb_size) {
  seq_params->sb_size = sb_size;
  seq_params->mib_size = mi_size_wide[seq_params->sb_size];
  seq_params->mib_size_log2 = mi_size_wide_log2[seq_params->sb_size];
}
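
// Worked example (illustrative; MI units are 4x4 luma samples):
//   set_sb_size(seq_params, BLOCK_128X128);  // mib_size = 32, mib_size_log2 = 5
//   set_sb_size(seq_params, BLOCK_64X64);    // mib_size = 16, mib_size_log2 = 4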

// Returns true if the frame is fully lossless at the coded resolution.
// Note: If super-resolution is used, such a frame will still NOT be lossless at
// the upscaled resolution.
static INLINE int is_coded_lossless(const AV1_COMMON *cm,
                                    const MACROBLOCKD *xd) {
  int coded_lossless = 1;
  if (cm->seg.enabled) {
    for (int i = 0; i < MAX_SEGMENTS; ++i) {
      if (!xd->lossless[i]) {
        coded_lossless = 0;
        break;
      }
    }
  } else {
    coded_lossless = xd->lossless[0];
  }
  return coded_lossless;
}

static INLINE int is_valid_seq_level_idx(AV1_LEVEL seq_level_idx) {
  return seq_level_idx == SEQ_LEVEL_MAX ||
         (seq_level_idx < SEQ_LEVELS &&
          // The following levels are currently undefined.
          seq_level_idx != SEQ_LEVEL_2_2 && seq_level_idx != SEQ_LEVEL_2_3 &&
          seq_level_idx != SEQ_LEVEL_3_2 && seq_level_idx != SEQ_LEVEL_3_3 &&
          seq_level_idx != SEQ_LEVEL_4_2 && seq_level_idx != SEQ_LEVEL_4_3);
}

/*!\endcond */

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // AOM_AV1_COMMON_AV1_COMMON_INT_H_