/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#include "config/aom_config.h"
#include "config/aom_scale_rtcd.h"

#include "aom/aom_codec.h"
#include "aom/aom_image.h"
#include "aom/internal/aom_codec_internal.h"
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/binary_codes_reader.h"
#include "aom_dsp/bitreader.h"
#include "aom_dsp/bitreader_buffer.h"
#include "aom_dsp/txfm_common.h"
#include "aom_mem/aom_mem.h"
#include "aom_ports/aom_timer.h"
#include "aom_ports/mem.h"
#include "aom_ports/mem_ops.h"
#include "aom_scale/aom_scale.h"
#include "aom_scale/yv12config.h"
#include "aom_util/aom_pthread.h"
#include "aom_util/aom_thread.h"

#if CONFIG_BITSTREAM_DEBUG || CONFIG_MISMATCH_DEBUG
#include "aom_util/debug_util.h"
#endif  // CONFIG_BITSTREAM_DEBUG || CONFIG_MISMATCH_DEBUG

#include "av1/common/alloccommon.h"
#include "av1/common/av1_common_int.h"
#include "av1/common/blockd.h"
#include "av1/common/cdef.h"
#include "av1/common/cfl.h"
#include "av1/common/common_data.h"
#include "av1/common/common.h"
#include "av1/common/entropy.h"
#include "av1/common/entropymode.h"
#include "av1/common/entropymv.h"
#include "av1/common/enums.h"
#include "av1/common/frame_buffers.h"
#include "av1/common/idct.h"
#include "av1/common/mv.h"
#include "av1/common/mvref_common.h"
#include "av1/common/obmc.h"
#include "av1/common/pred_common.h"
#include "av1/common/quant_common.h"
#include "av1/common/reconinter.h"
#include "av1/common/reconintra.h"
#include "av1/common/resize.h"
#include "av1/common/restoration.h"
#include "av1/common/scale.h"
#include "av1/common/seg_common.h"
#include "av1/common/thread_common.h"
#include "av1/common/tile_common.h"
#include "av1/common/warped_motion.h"

#include "av1/decoder/decodeframe.h"
#include "av1/decoder/decodemv.h"
#include "av1/decoder/decoder.h"
#include "av1/decoder/decodetxb.h"
#include "av1/decoder/detokenize.h"
#if CONFIG_INSPECTION
#include "av1/decoder/inspection.h"
#endif

#define ACCT_STR __func__

#define AOM_MIN_THREADS_PER_TILE 1
#define AOM_MAX_THREADS_PER_TILE 2

// This is needed by ext_tile related unit tests.
#define EXT_TILE_DEBUG 1
#define MC_TEMP_BUF_PELS                       \
  (((MAX_SB_SIZE)*2 + (AOM_INTERP_EXTEND)*2) * \
   ((MAX_SB_SIZE)*2 + (AOM_INTERP_EXTEND)*2))
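// With the current MAX_SB_SIZE of 128 and AOM_INTERP_EXTEND of 4, this is
// (256 + 8) * (256 + 8) = 69696 pels: room for a reference block of up to
// twice the superblock size plus an interpolation border on each side.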

// Checks that the remaining bits start with a 1 and end with 0s.
// It consumes an additional byte, if already byte aligned before the check.
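// For example, with five bits left before the next byte boundary, the only
// valid trailing pattern is 0b10000 (decimal 16, i.e. 1 << 4).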
int av1_check_trailing_bits(AV1Decoder *pbi, struct aom_read_bit_buffer *rb) {
  // bit_offset is set to 0 (mod 8) when the reader is already byte aligned
  int bits_before_alignment = 8 - rb->bit_offset % 8;
  int trailing = aom_rb_read_literal(rb, bits_before_alignment);
  if (trailing != (1 << (bits_before_alignment - 1))) {
    pbi->error.error_code = AOM_CODEC_CORRUPT_FRAME;
    return -1;
  }
  return 0;
}

// Use only_chroma = 1 to only set the chroma planes
static AOM_INLINE void set_planes_to_neutral_grey(
    const SequenceHeader *const seq_params, const YV12_BUFFER_CONFIG *const buf,
    int only_chroma) {
  if (seq_params->use_highbitdepth) {
    const int val = 1 << (seq_params->bit_depth - 1);
    for (int plane = only_chroma; plane < MAX_MB_PLANE; plane++) {
      const int is_uv = plane > 0;
      uint16_t *const base = CONVERT_TO_SHORTPTR(buf->buffers[plane]);
      // Set the first row to neutral grey. Then copy the first row to all
      // subsequent rows.
      if (buf->crop_heights[is_uv] > 0) {
        aom_memset16(base, val, buf->crop_widths[is_uv]);
        for (int row_idx = 1; row_idx < buf->crop_heights[is_uv]; row_idx++) {
          memcpy(&base[row_idx * buf->strides[is_uv]], base,
                 sizeof(*base) * buf->crop_widths[is_uv]);
        }
      }
    }
  } else {
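    // 1 << 7 is 128, the neutral grey value for 8-bit planes.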
    for (int plane = only_chroma; plane < MAX_MB_PLANE; plane++) {
      const int is_uv = plane > 0;
      for (int row_idx = 0; row_idx < buf->crop_heights[is_uv]; row_idx++) {
        memset(&buf->buffers[plane][row_idx * buf->strides[is_uv]], 1 << 7,
               buf->crop_widths[is_uv]);
      }
    }
  }
}

static AOM_INLINE void loop_restoration_read_sb_coeffs(
    const AV1_COMMON *const cm, MACROBLOCKD *xd, aom_reader *const r, int plane,
    int runit_idx);

static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
  return len != 0 && len <= (size_t)(end - start);
}

static TX_MODE read_tx_mode(struct aom_read_bit_buffer *rb,
                            int coded_lossless) {
  if (coded_lossless) return ONLY_4X4;
  return aom_rb_read_bit(rb) ? TX_MODE_SELECT : TX_MODE_LARGEST;
}

static REFERENCE_MODE read_frame_reference_mode(
    const AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
  if (frame_is_intra_only(cm)) {
    return SINGLE_REFERENCE;
  } else {
    return aom_rb_read_bit(rb) ? REFERENCE_MODE_SELECT : SINGLE_REFERENCE;
  }
}

static AOM_INLINE void inverse_transform_block(DecoderCodingBlock *dcb,
                                               int plane, const TX_TYPE tx_type,
                                               const TX_SIZE tx_size,
                                               uint8_t *dst, int stride,
                                               int reduced_tx_set) {
  tran_low_t *const dqcoeff = dcb->dqcoeff_block[plane] + dcb->cb_offset[plane];
  eob_info *eob_data = dcb->eob_data[plane] + dcb->txb_offset[plane];
  uint16_t scan_line = eob_data->max_scan_line;
  uint16_t eob = eob_data->eob;
  av1_inverse_transform_block(&dcb->xd, dqcoeff, plane, tx_type, tx_size, dst,
                              stride, eob, reduced_tx_set);
  memset(dqcoeff, 0, (scan_line + 1) * sizeof(dqcoeff[0]));
}

static AOM_INLINE void read_coeffs_tx_intra_block(
    const AV1_COMMON *const cm, DecoderCodingBlock *dcb, aom_reader *const r,
    const int plane, const int row, const int col, const TX_SIZE tx_size) {
  MB_MODE_INFO *mbmi = dcb->xd.mi[0];
  if (!mbmi->skip_txfm) {
#if TXCOEFF_TIMER
    struct aom_usec_timer timer;
    aom_usec_timer_start(&timer);
#endif
    av1_read_coeffs_txb_facade(cm, dcb, r, plane, row, col, tx_size);
#if TXCOEFF_TIMER
    aom_usec_timer_mark(&timer);
    const int64_t elapsed_time = aom_usec_timer_elapsed(&timer);
    cm->txcoeff_timer += elapsed_time;
    ++cm->txb_count;
#endif
  }
}

static AOM_INLINE void decode_block_void(const AV1_COMMON *const cm,
                                         DecoderCodingBlock *dcb,
                                         aom_reader *const r, const int plane,
                                         const int row, const int col,
                                         const TX_SIZE tx_size) {
  (void)cm;
  (void)dcb;
  (void)r;
  (void)plane;
  (void)row;
  (void)col;
  (void)tx_size;
}

static AOM_INLINE void predict_inter_block_void(AV1_COMMON *const cm,
                                                DecoderCodingBlock *dcb,
                                                BLOCK_SIZE bsize) {
  (void)cm;
  (void)dcb;
  (void)bsize;
}

static AOM_INLINE void cfl_store_inter_block_void(AV1_COMMON *const cm,
                                                  MACROBLOCKD *const xd) {
  (void)cm;
  (void)xd;
}

static AOM_INLINE void predict_and_reconstruct_intra_block(
    const AV1_COMMON *const cm, DecoderCodingBlock *dcb, aom_reader *const r,
    const int plane, const int row, const int col, const TX_SIZE tx_size) {
  (void)r;
  MACROBLOCKD *const xd = &dcb->xd;
  MB_MODE_INFO *mbmi = xd->mi[0];
  PLANE_TYPE plane_type = get_plane_type(plane);

  av1_predict_intra_block_facade(cm, xd, plane, col, row, tx_size);

  if (!mbmi->skip_txfm) {
    eob_info *eob_data = dcb->eob_data[plane] + dcb->txb_offset[plane];
    if (eob_data->eob) {
      const bool reduced_tx_set_used = cm->features.reduced_tx_set_used;
      // tx_type was read out in av1_read_coeffs_txb.
      const TX_TYPE tx_type = av1_get_tx_type(xd, plane_type, row, col, tx_size,
                                              reduced_tx_set_used);
      struct macroblockd_plane *const pd = &xd->plane[plane];
      uint8_t *dst = &pd->dst.buf[(row * pd->dst.stride + col) << MI_SIZE_LOG2];
      inverse_transform_block(dcb, plane, tx_type, tx_size, dst, pd->dst.stride,
                              reduced_tx_set_used);
    }
  }
  if (plane == AOM_PLANE_Y && store_cfl_required(cm, xd)) {
    cfl_store_tx(xd, row, col, tx_size, mbmi->bsize);
  }
}

static AOM_INLINE void inverse_transform_inter_block(
    const AV1_COMMON *const cm, DecoderCodingBlock *dcb, aom_reader *const r,
    const int plane, const int blk_row, const int blk_col,
    const TX_SIZE tx_size) {
  (void)r;
  MACROBLOCKD *const xd = &dcb->xd;
  PLANE_TYPE plane_type = get_plane_type(plane);
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const bool reduced_tx_set_used = cm->features.reduced_tx_set_used;
  // tx_type was read out in av1_read_coeffs_txb.
  const TX_TYPE tx_type = av1_get_tx_type(xd, plane_type, blk_row, blk_col,
                                          tx_size, reduced_tx_set_used);

  uint8_t *dst =
      &pd->dst.buf[(blk_row * pd->dst.stride + blk_col) << MI_SIZE_LOG2];
  inverse_transform_block(dcb, plane, tx_type, tx_size, dst, pd->dst.stride,
                          reduced_tx_set_used);
#if CONFIG_MISMATCH_DEBUG
  int pixel_c, pixel_r;
  BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
  int blk_w = block_size_wide[bsize];
  int blk_h = block_size_high[bsize];
  const int mi_row = -xd->mb_to_top_edge >> (3 + MI_SIZE_LOG2);
  const int mi_col = -xd->mb_to_left_edge >> (3 + MI_SIZE_LOG2);
  mi_to_pixel_loc(&pixel_c, &pixel_r, mi_col, mi_row, blk_col, blk_row,
                  pd->subsampling_x, pd->subsampling_y);
  mismatch_check_block_tx(dst, pd->dst.stride, cm->current_frame.order_hint,
                          plane, pixel_c, pixel_r, blk_w, blk_h,
                          xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH);
#endif
}

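// cb_offset[plane] counts coefficients decoded so far in the current coding
// block. Dividing by the minimum transform-block area (TX_SIZE_W_MIN *
// TX_SIZE_H_MIN = 4 * 4) converts it into an index in 4x4 transform-block
// units, the granularity at which eob_data is stored.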
static AOM_INLINE void set_cb_buffer_offsets(DecoderCodingBlock *dcb,
                                             TX_SIZE tx_size, int plane) {
  dcb->cb_offset[plane] += tx_size_wide[tx_size] * tx_size_high[tx_size];
  dcb->txb_offset[plane] =
      dcb->cb_offset[plane] / (TX_SIZE_W_MIN * TX_SIZE_H_MIN);
}

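// Recursively walks the transform partition tree of an inter block, reading
// coefficients and applying the inverse transform at each leaf transform
// block. Chroma planes never split and always use their maximum transform
// size.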
static AOM_INLINE void decode_reconstruct_tx(
    AV1_COMMON *cm, ThreadData *const td, aom_reader *r,
    MB_MODE_INFO *const mbmi, int plane, BLOCK_SIZE plane_bsize, int blk_row,
    int blk_col, int block, TX_SIZE tx_size, int *eob_total) {
  DecoderCodingBlock *const dcb = &td->dcb;
  MACROBLOCKD *const xd = &dcb->xd;
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const TX_SIZE plane_tx_size =
      plane ? av1_get_max_uv_txsize(mbmi->bsize, pd->subsampling_x,
                                    pd->subsampling_y)
            : mbmi->inter_tx_size[av1_get_txb_size_index(plane_bsize, blk_row,
                                                         blk_col)];
  // Scale to match transform block unit.
  const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
  const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);

  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;

  if (tx_size == plane_tx_size || plane) {
    td->read_coeffs_tx_inter_block_visit(cm, dcb, r, plane, blk_row, blk_col,
                                         tx_size);

    td->inverse_tx_inter_block_visit(cm, dcb, r, plane, blk_row, blk_col,
                                     tx_size);
    eob_info *eob_data = dcb->eob_data[plane] + dcb->txb_offset[plane];
    *eob_total += eob_data->eob;
    set_cb_buffer_offsets(dcb, tx_size, plane);
  } else {
    const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
    assert(IMPLIES(tx_size <= TX_4X4, sub_txs == tx_size));
    assert(IMPLIES(tx_size > TX_4X4, sub_txs < tx_size));
    const int bsw = tx_size_wide_unit[sub_txs];
    const int bsh = tx_size_high_unit[sub_txs];
    const int sub_step = bsw * bsh;
    const int row_end =
        AOMMIN(tx_size_high_unit[tx_size], max_blocks_high - blk_row);
    const int col_end =
        AOMMIN(tx_size_wide_unit[tx_size], max_blocks_wide - blk_col);

    assert(bsw > 0 && bsh > 0);

    for (int row = 0; row < row_end; row += bsh) {
      const int offsetr = blk_row + row;
      for (int col = 0; col < col_end; col += bsw) {
        const int offsetc = blk_col + col;

        decode_reconstruct_tx(cm, td, r, mbmi, plane, plane_bsize, offsetr,
                              offsetc, block, sub_txs, eob_total);
        block += sub_step;
      }
    }
  }
}

static AOM_INLINE void set_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd,
                                   BLOCK_SIZE bsize, int mi_row, int mi_col,
                                   int bw, int bh, int x_mis, int y_mis) {
  const int num_planes = av1_num_planes(cm);
  const CommonModeInfoParams *const mi_params = &cm->mi_params;
  const TileInfo *const tile = &xd->tile;

  set_mi_offsets(mi_params, xd, mi_row, mi_col);
  xd->mi[0]->bsize = bsize;
#if CONFIG_RD_DEBUG
  xd->mi[0]->mi_row = mi_row;
  xd->mi[0]->mi_col = mi_col;
#endif

  assert(x_mis && y_mis);
  for (int x = 1; x < x_mis; ++x) xd->mi[x] = xd->mi[0];
  int idx = mi_params->mi_stride;
  for (int y = 1; y < y_mis; ++y) {
    memcpy(&xd->mi[idx], &xd->mi[0], x_mis * sizeof(xd->mi[0]));
    idx += mi_params->mi_stride;
  }

  set_plane_n4(xd, bw, bh, num_planes);
  set_entropy_context(xd, mi_row, mi_col, num_planes);

  // Distances of the MB to the various image edges. These are specified in
  // 1/8th pel units, as they are always compared against values in 1/8th pel
  // units.
  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, mi_params->mi_rows,
                 mi_params->mi_cols);

  av1_setup_dst_planes(xd->plane, bsize, &cm->cur_frame->buf, mi_row, mi_col, 0,
                       num_planes);
}

static AOM_INLINE void decode_mbmi_block(AV1Decoder *const pbi,
                                         DecoderCodingBlock *dcb, int mi_row,
                                         int mi_col, aom_reader *r,
                                         PARTITION_TYPE partition,
                                         BLOCK_SIZE bsize) {
  AV1_COMMON *const cm = &pbi->common;
  const SequenceHeader *const seq_params = cm->seq_params;
  const int bw = mi_size_wide[bsize];
  const int bh = mi_size_high[bsize];
  const int x_mis = AOMMIN(bw, cm->mi_params.mi_cols - mi_col);
  const int y_mis = AOMMIN(bh, cm->mi_params.mi_rows - mi_row);
  MACROBLOCKD *const xd = &dcb->xd;

#if CONFIG_ACCOUNTING
  aom_accounting_set_context(&pbi->accounting, mi_col, mi_row);
#endif
  set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis);
  xd->mi[0]->partition = partition;
  av1_read_mode_info(pbi, dcb, r, x_mis, y_mis);
  if (bsize >= BLOCK_8X8 &&
      (seq_params->subsampling_x || seq_params->subsampling_y)) {
    const BLOCK_SIZE uv_subsize =
        av1_ss_size_lookup[bsize][seq_params->subsampling_x]
                          [seq_params->subsampling_y];
    if (uv_subsize == BLOCK_INVALID)
      aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
                         "Invalid block size.");
  }
}

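// Bounding box of a reference block, in pixels, stored as the half-open
// intervals [x0, x1) x [y0, y1).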
typedef struct PadBlock {
  int x0;
  int x1;
  int y0;
  int y1;
} PadBlock;

#if CONFIG_AV1_HIGHBITDEPTH
static AOM_INLINE void highbd_build_mc_border(const uint8_t *src8,
                                              int src_stride, uint8_t *dst8,
                                              int dst_stride, int x, int y,
                                              int b_w, int b_h, int w, int h) {
  // Get a pointer to the start of the real data for this row.
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  const uint16_t *ref_row = src - x - y * src_stride;

  if (y >= h)
    ref_row += (h - 1) * src_stride;
  else if (y > 0)
    ref_row += y * src_stride;

  do {
    int right = 0, copy;
    int left = x < 0 ? -x : 0;

    if (left > b_w) left = b_w;

    if (x + b_w > w) right = x + b_w - w;

    if (right > b_w) right = b_w;

    copy = b_w - left - right;

    if (left) aom_memset16(dst, ref_row[0], left);

    if (copy) memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));

    if (right) aom_memset16(dst + left + copy, ref_row[w - 1], right);

    dst += dst_stride;
    ++y;

    if (y > 0 && y < h) ref_row += src_stride;
  } while (--b_h);
}
#endif  // CONFIG_AV1_HIGHBITDEPTH

static AOM_INLINE void build_mc_border(const uint8_t *src, int src_stride,
                                       uint8_t *dst, int dst_stride, int x,
                                       int y, int b_w, int b_h, int w, int h) {
  // Get a pointer to the start of the real data for this row.
  const uint8_t *ref_row = src - x - y * src_stride;

  if (y >= h)
    ref_row += (h - 1) * src_stride;
  else if (y > 0)
    ref_row += y * src_stride;

  do {
    int right = 0, copy;
    int left = x < 0 ? -x : 0;

    if (left > b_w) left = b_w;

    if (x + b_w > w) right = x + b_w - w;

    if (right > b_w) right = b_w;

    copy = b_w - left - right;

    if (left) memset(dst, ref_row[0], left);

    if (copy) memcpy(dst + left, ref_row + x + left, copy);

    if (right) memset(dst + left + copy, ref_row[w - 1], right);

    dst += dst_stride;
    ++y;

    if (y > 0 && y < h) ref_row += src_stride;
  } while (--b_h);
}

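// Returns 1 if border extension is needed, i.e. the reference block, grown
// by the interpolation filter extent, would cross the frame boundary;
// returns 0 otherwise.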
static INLINE int update_extend_mc_border_params(
    const struct scale_factors *const sf, struct buf_2d *const pre_buf,
    MV32 scaled_mv, PadBlock *block, int subpel_x_mv, int subpel_y_mv,
    int do_warp, int is_intrabc, int *x_pad, int *y_pad) {
  const int is_scaled = av1_is_scaled(sf);
  // Get reference width and height.
  int frame_width = pre_buf->width;
  int frame_height = pre_buf->height;

  // Do border extension if there is motion or
  // width/height is not a multiple of 8 pixels.
  if ((!is_intrabc) && (!do_warp) &&
      (is_scaled || scaled_mv.col || scaled_mv.row || (frame_width & 0x7) ||
       (frame_height & 0x7))) {
    if (subpel_x_mv || (sf->x_step_q4 != SUBPEL_SHIFTS)) {
      block->x0 -= AOM_INTERP_EXTEND - 1;
      block->x1 += AOM_INTERP_EXTEND;
      *x_pad = 1;
    }

    if (subpel_y_mv || (sf->y_step_q4 != SUBPEL_SHIFTS)) {
      block->y0 -= AOM_INTERP_EXTEND - 1;
      block->y1 += AOM_INTERP_EXTEND;
      *y_pad = 1;
    }

    // Extension is only needed if the extended block is not fully inside
    // the frame.
    if (block->x0 < 0 || block->x1 > frame_width - 1 || block->y0 < 0 ||
        block->y1 > frame_height - 1) {
      return 1;
    }
  }
  return 0;
}

static INLINE void extend_mc_border(const struct scale_factors *const sf,
                                    struct buf_2d *const pre_buf,
                                    MV32 scaled_mv, PadBlock block,
                                    int subpel_x_mv, int subpel_y_mv,
                                    int do_warp, int is_intrabc, int highbd,
                                    uint8_t *mc_buf, uint8_t **pre,
                                    int *src_stride) {
  int x_pad = 0, y_pad = 0;
  if (update_extend_mc_border_params(sf, pre_buf, scaled_mv, &block,
                                     subpel_x_mv, subpel_y_mv, do_warp,
                                     is_intrabc, &x_pad, &y_pad)) {
    // Get reference block pointer.
    const uint8_t *const buf_ptr =
        pre_buf->buf0 + block.y0 * pre_buf->stride + block.x0;
    int buf_stride = pre_buf->stride;
    const int b_w = block.x1 - block.x0;
    const int b_h = block.y1 - block.y0;

#if CONFIG_AV1_HIGHBITDEPTH
    // Extend the border.
    if (highbd) {
      highbd_build_mc_border(buf_ptr, buf_stride, mc_buf, b_w, block.x0,
                             block.y0, b_w, b_h, pre_buf->width,
                             pre_buf->height);
    } else {
      build_mc_border(buf_ptr, buf_stride, mc_buf, b_w, block.x0, block.y0, b_w,
                      b_h, pre_buf->width, pre_buf->height);
    }
#else
    (void)highbd;
    build_mc_border(buf_ptr, buf_stride, mc_buf, b_w, block.x0, block.y0, b_w,
                    b_h, pre_buf->width, pre_buf->height);
#endif
    *src_stride = b_w;
    *pre = mc_buf + y_pad * (AOM_INTERP_EXTEND - 1) * b_w +
           x_pad * (AOM_INTERP_EXTEND - 1);
  }
}

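// Computes the sub-pixel interpolation parameters and the bounding box of
// the required reference block for a single MV, handling both the scaled
// and the unscaled reference path.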
static AOM_INLINE void dec_calc_subpel_params(
    const MV *const src_mv, InterPredParams *const inter_pred_params,
    const MACROBLOCKD *const xd, int mi_x, int mi_y, uint8_t **pre,
    SubpelParams *subpel_params, int *src_stride, PadBlock *block,
    MV32 *scaled_mv, int *subpel_x_mv, int *subpel_y_mv) {
  const struct scale_factors *sf = inter_pred_params->scale_factors;
  struct buf_2d *pre_buf = &inter_pred_params->ref_frame_buf;
  const int bw = inter_pred_params->block_width;
  const int bh = inter_pred_params->block_height;
  const int is_scaled = av1_is_scaled(sf);
  if (is_scaled) {
    int ssx = inter_pred_params->subsampling_x;
    int ssy = inter_pred_params->subsampling_y;
    int orig_pos_y = inter_pred_params->pix_row << SUBPEL_BITS;
    orig_pos_y += src_mv->row * (1 << (1 - ssy));
    int orig_pos_x = inter_pred_params->pix_col << SUBPEL_BITS;
    orig_pos_x += src_mv->col * (1 << (1 - ssx));
    int pos_y = av1_scaled_y(orig_pos_y, sf);
    int pos_x = av1_scaled_x(orig_pos_x, sf);
    pos_x += SCALE_EXTRA_OFF;
    pos_y += SCALE_EXTRA_OFF;

    const int top = -AOM_LEFT_TOP_MARGIN_SCALED(ssy);
    const int left = -AOM_LEFT_TOP_MARGIN_SCALED(ssx);
    const int bottom = (pre_buf->height + AOM_INTERP_EXTEND)
                       << SCALE_SUBPEL_BITS;
    const int right = (pre_buf->width + AOM_INTERP_EXTEND) << SCALE_SUBPEL_BITS;
    pos_y = clamp(pos_y, top, bottom);
    pos_x = clamp(pos_x, left, right);

    subpel_params->subpel_x = pos_x & SCALE_SUBPEL_MASK;
    subpel_params->subpel_y = pos_y & SCALE_SUBPEL_MASK;
    subpel_params->xs = sf->x_step_q4;
    subpel_params->ys = sf->y_step_q4;

    // Get reference block top left coordinate.
    block->x0 = pos_x >> SCALE_SUBPEL_BITS;
    block->y0 = pos_y >> SCALE_SUBPEL_BITS;

    // Get reference block bottom right coordinate.
    block->x1 =
        ((pos_x + (bw - 1) * subpel_params->xs) >> SCALE_SUBPEL_BITS) + 1;
    block->y1 =
        ((pos_y + (bh - 1) * subpel_params->ys) >> SCALE_SUBPEL_BITS) + 1;

    MV temp_mv;
    temp_mv = clamp_mv_to_umv_border_sb(xd, src_mv, bw, bh,
                                        inter_pred_params->subsampling_x,
                                        inter_pred_params->subsampling_y);
    *scaled_mv = av1_scale_mv(&temp_mv, mi_x, mi_y, sf);
    scaled_mv->row += SCALE_EXTRA_OFF;
    scaled_mv->col += SCALE_EXTRA_OFF;

    *subpel_x_mv = scaled_mv->col & SCALE_SUBPEL_MASK;
    *subpel_y_mv = scaled_mv->row & SCALE_SUBPEL_MASK;
  } else {
    // Get block position in current frame.
    int pos_x = inter_pred_params->pix_col << SUBPEL_BITS;
    int pos_y = inter_pred_params->pix_row << SUBPEL_BITS;

    const MV mv_q4 = clamp_mv_to_umv_border_sb(
        xd, src_mv, bw, bh, inter_pred_params->subsampling_x,
        inter_pred_params->subsampling_y);
    subpel_params->xs = subpel_params->ys = SCALE_SUBPEL_SHIFTS;
    subpel_params->subpel_x = (mv_q4.col & SUBPEL_MASK) << SCALE_EXTRA_BITS;
    subpel_params->subpel_y = (mv_q4.row & SUBPEL_MASK) << SCALE_EXTRA_BITS;

    // Get reference block top left coordinate.
    pos_x += mv_q4.col;
    pos_y += mv_q4.row;
    block->x0 = pos_x >> SUBPEL_BITS;
    block->y0 = pos_y >> SUBPEL_BITS;

    // Get reference block bottom right coordinate.
    block->x1 = (pos_x >> SUBPEL_BITS) + (bw - 1) + 1;
    block->y1 = (pos_y >> SUBPEL_BITS) + (bh - 1) + 1;

    scaled_mv->row = mv_q4.row;
    scaled_mv->col = mv_q4.col;
    *subpel_x_mv = scaled_mv->col & SUBPEL_MASK;
    *subpel_y_mv = scaled_mv->row & SUBPEL_MASK;
  }
  *pre = pre_buf->buf0 + block->y0 * pre_buf->stride + block->x0;
  *src_stride = pre_buf->stride;
}

static AOM_INLINE void dec_calc_subpel_params_and_extend(
    const MV *const src_mv, InterPredParams *const inter_pred_params,
    MACROBLOCKD *const xd, int mi_x, int mi_y, int ref, uint8_t **mc_buf,
    uint8_t **pre, SubpelParams *subpel_params, int *src_stride) {
  PadBlock block;
  MV32 scaled_mv;
  int subpel_x_mv, subpel_y_mv;
  dec_calc_subpel_params(src_mv, inter_pred_params, xd, mi_x, mi_y, pre,
                         subpel_params, src_stride, &block, &scaled_mv,
                         &subpel_x_mv, &subpel_y_mv);
  extend_mc_border(
      inter_pred_params->scale_factors, &inter_pred_params->ref_frame_buf,
      scaled_mv, block, subpel_x_mv, subpel_y_mv,
      inter_pred_params->mode == WARP_PRED, inter_pred_params->is_intrabc,
      inter_pred_params->use_hbd_buf, mc_buf[ref], pre, src_stride);
}

#define IS_DEC 1
#include "av1/common/reconinter_template.inc"
#undef IS_DEC
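// The template included above expands to build_inter_predictors(); with
// IS_DEC defined, it routes sub-pel parameter computation through the
// decoder-side dec_calc_subpel_params_and_extend() helper above.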

static void dec_build_inter_predictors(const AV1_COMMON *cm,
                                       DecoderCodingBlock *dcb, int plane,
                                       const MB_MODE_INFO *mi,
                                       int build_for_obmc, int bw, int bh,
                                       int mi_x, int mi_y) {
  build_inter_predictors(cm, &dcb->xd, plane, mi, build_for_obmc, bw, bh, mi_x,
                         mi_y, dcb->mc_buf);
}

static AOM_INLINE void dec_build_inter_predictor(const AV1_COMMON *cm,
                                                 DecoderCodingBlock *dcb,
                                                 int mi_row, int mi_col,
                                                 BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &dcb->xd;
  const int num_planes = av1_num_planes(cm);
  for (int plane = 0; plane < num_planes; ++plane) {
    if (plane && !xd->is_chroma_ref) break;
    const int mi_x = mi_col * MI_SIZE;
    const int mi_y = mi_row * MI_SIZE;
    dec_build_inter_predictors(cm, dcb, plane, xd->mi[0], 0,
                               xd->plane[plane].width, xd->plane[plane].height,
                               mi_x, mi_y);
    if (is_interintra_pred(xd->mi[0])) {
      BUFFER_SET ctx = { { xd->plane[0].dst.buf, xd->plane[1].dst.buf,
                           xd->plane[2].dst.buf },
                         { xd->plane[0].dst.stride, xd->plane[1].dst.stride,
                           xd->plane[2].dst.stride } };
      av1_build_interintra_predictor(cm, xd, xd->plane[plane].dst.buf,
                                     xd->plane[plane].dst.stride, &ctx, plane,
                                     bsize);
    }
  }
}

static INLINE void dec_build_prediction_by_above_pred(
    MACROBLOCKD *const xd, int rel_mi_row, int rel_mi_col, uint8_t op_mi_size,
    int dir, MB_MODE_INFO *above_mbmi, void *fun_ctxt, const int num_planes) {
  struct build_prediction_ctxt *ctxt = (struct build_prediction_ctxt *)fun_ctxt;
  const int above_mi_col = xd->mi_col + rel_mi_col;
  int mi_x, mi_y;
  MB_MODE_INFO backup_mbmi = *above_mbmi;

  (void)rel_mi_row;
  (void)dir;

  av1_setup_build_prediction_by_above_pred(xd, rel_mi_col, op_mi_size,
                                           &backup_mbmi, ctxt, num_planes);
  mi_x = above_mi_col << MI_SIZE_LOG2;
  mi_y = xd->mi_row << MI_SIZE_LOG2;

  const BLOCK_SIZE bsize = xd->mi[0]->bsize;

  for (int j = 0; j < num_planes; ++j) {
    const struct macroblockd_plane *pd = &xd->plane[j];
    int bw = (op_mi_size * MI_SIZE) >> pd->subsampling_x;
    int bh = clamp(block_size_high[bsize] >> (pd->subsampling_y + 1), 4,
                   block_size_high[BLOCK_64X64] >> (pd->subsampling_y + 1));

    if (av1_skip_u4x4_pred_in_obmc(bsize, pd, 0)) continue;
    dec_build_inter_predictors(ctxt->cm, (DecoderCodingBlock *)ctxt->dcb, j,
                               &backup_mbmi, 1, bw, bh, mi_x, mi_y);
  }
}

static AOM_INLINE void dec_build_prediction_by_above_preds(
    const AV1_COMMON *cm, DecoderCodingBlock *dcb,
    uint8_t *tmp_buf[MAX_MB_PLANE], int tmp_width[MAX_MB_PLANE],
    int tmp_height[MAX_MB_PLANE], int tmp_stride[MAX_MB_PLANE]) {
  MACROBLOCKD *const xd = &dcb->xd;
  if (!xd->up_available) return;

  // Adjust mb_to_bottom_edge to have the correct value for the OBMC
  // prediction block. This is half the height of the original block,
  // except for blocks taller than 64, where the height is capped at 32.
  const int this_height = xd->height * MI_SIZE;
  const int pred_height = AOMMIN(this_height / 2, 32);
  xd->mb_to_bottom_edge += GET_MV_SUBPEL(this_height - pred_height);
  struct build_prediction_ctxt ctxt = {
    cm, tmp_buf, tmp_width, tmp_height, tmp_stride, xd->mb_to_right_edge, dcb
  };
  const BLOCK_SIZE bsize = xd->mi[0]->bsize;
  foreach_overlappable_nb_above(cm, xd,
                                max_neighbor_obmc[mi_size_wide_log2[bsize]],
                                dec_build_prediction_by_above_pred, &ctxt);

  xd->mb_to_left_edge = -GET_MV_SUBPEL(xd->mi_col * MI_SIZE);
  xd->mb_to_right_edge = ctxt.mb_to_far_edge;
  xd->mb_to_bottom_edge -= GET_MV_SUBPEL(this_height - pred_height);
}

static INLINE void dec_build_prediction_by_left_pred(
    MACROBLOCKD *const xd, int rel_mi_row, int rel_mi_col, uint8_t op_mi_size,
    int dir, MB_MODE_INFO *left_mbmi, void *fun_ctxt, const int num_planes) {
  struct build_prediction_ctxt *ctxt = (struct build_prediction_ctxt *)fun_ctxt;
  const int left_mi_row = xd->mi_row + rel_mi_row;
  int mi_x, mi_y;
  MB_MODE_INFO backup_mbmi = *left_mbmi;

  (void)rel_mi_col;
  (void)dir;

  av1_setup_build_prediction_by_left_pred(xd, rel_mi_row, op_mi_size,
                                          &backup_mbmi, ctxt, num_planes);
  mi_x = xd->mi_col << MI_SIZE_LOG2;
  mi_y = left_mi_row << MI_SIZE_LOG2;
  const BLOCK_SIZE bsize = xd->mi[0]->bsize;

  for (int j = 0; j < num_planes; ++j) {
    const struct macroblockd_plane *pd = &xd->plane[j];
    int bw = clamp(block_size_wide[bsize] >> (pd->subsampling_x + 1), 4,
                   block_size_wide[BLOCK_64X64] >> (pd->subsampling_x + 1));
    int bh = (op_mi_size << MI_SIZE_LOG2) >> pd->subsampling_y;

    if (av1_skip_u4x4_pred_in_obmc(bsize, pd, 1)) continue;
    dec_build_inter_predictors(ctxt->cm, (DecoderCodingBlock *)ctxt->dcb, j,
                               &backup_mbmi, 1, bw, bh, mi_x, mi_y);
  }
}

static AOM_INLINE void dec_build_prediction_by_left_preds(
    const AV1_COMMON *cm, DecoderCodingBlock *dcb,
    uint8_t *tmp_buf[MAX_MB_PLANE], int tmp_width[MAX_MB_PLANE],
    int tmp_height[MAX_MB_PLANE], int tmp_stride[MAX_MB_PLANE]) {
  MACROBLOCKD *const xd = &dcb->xd;
  if (!xd->left_available) return;

  // Adjust mb_to_right_edge to have the correct value for the OBMC
  // prediction block. This is half the width of the original block,
  // except for 128-wide blocks, where we only use a width of 32.
  const int this_width = xd->width * MI_SIZE;
  const int pred_width = AOMMIN(this_width / 2, 32);
  xd->mb_to_right_edge += GET_MV_SUBPEL(this_width - pred_width);

  struct build_prediction_ctxt ctxt = {
    cm, tmp_buf, tmp_width, tmp_height, tmp_stride, xd->mb_to_bottom_edge, dcb
  };
  const BLOCK_SIZE bsize = xd->mi[0]->bsize;
  foreach_overlappable_nb_left(cm, xd,
                               max_neighbor_obmc[mi_size_high_log2[bsize]],
                               dec_build_prediction_by_left_pred, &ctxt);

  xd->mb_to_top_edge = -GET_MV_SUBPEL(xd->mi_row * MI_SIZE);
  xd->mb_to_right_edge -= GET_MV_SUBPEL(this_width - pred_width);
  xd->mb_to_bottom_edge = ctxt.mb_to_far_edge;
}

static AOM_INLINE void dec_build_obmc_inter_predictors_sb(
    const AV1_COMMON *cm, DecoderCodingBlock *dcb) {
  const int num_planes = av1_num_planes(cm);
  uint8_t *dst_buf1[MAX_MB_PLANE], *dst_buf2[MAX_MB_PLANE];
  int dst_stride1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
  int dst_stride2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
  int dst_width1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
  int dst_width2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
  int dst_height1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
  int dst_height2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };

  MACROBLOCKD *const xd = &dcb->xd;
  av1_setup_obmc_dst_bufs(xd, dst_buf1, dst_buf2);

  dec_build_prediction_by_above_preds(cm, dcb, dst_buf1, dst_width1,
                                      dst_height1, dst_stride1);
  dec_build_prediction_by_left_preds(cm, dcb, dst_buf2, dst_width2, dst_height2,
                                     dst_stride2);
  const int mi_row = xd->mi_row;
  const int mi_col = xd->mi_col;
  av1_setup_dst_planes(xd->plane, xd->mi[0]->bsize, &cm->cur_frame->buf, mi_row,
                       mi_col, 0, num_planes);
  av1_build_obmc_inter_prediction(cm, xd, dst_buf1, dst_stride1, dst_buf2,
                                  dst_stride2);
}

static AOM_INLINE void cfl_store_inter_block(AV1_COMMON *const cm,
                                             MACROBLOCKD *const xd) {
  MB_MODE_INFO *mbmi = xd->mi[0];
  if (store_cfl_required(cm, xd)) {
    cfl_store_block(xd, mbmi->bsize, mbmi->tx_size);
  }
}

static AOM_INLINE void predict_inter_block(AV1_COMMON *const cm,
                                           DecoderCodingBlock *dcb,
                                           BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &dcb->xd;
  MB_MODE_INFO *mbmi = xd->mi[0];
  const int num_planes = av1_num_planes(cm);
  const int mi_row = xd->mi_row;
  const int mi_col = xd->mi_col;
  for (int ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
    const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
    if (frame < LAST_FRAME) {
      assert(is_intrabc_block(mbmi));
      assert(frame == INTRA_FRAME);
      assert(ref == 0);
    } else {
      const RefCntBuffer *ref_buf = get_ref_frame_buf(cm, frame);
      const struct scale_factors *ref_scale_factors =
          get_ref_scale_factors_const(cm, frame);

      xd->block_ref_scale_factors[ref] = ref_scale_factors;
      av1_setup_pre_planes(xd, ref, &ref_buf->buf, mi_row, mi_col,
                           ref_scale_factors, num_planes);
    }
  }

  dec_build_inter_predictor(cm, dcb, mi_row, mi_col, bsize);
  if (mbmi->motion_mode == OBMC_CAUSAL) {
    dec_build_obmc_inter_predictors_sb(cm, dcb);
  }
#if CONFIG_MISMATCH_DEBUG
  for (int plane = 0; plane < num_planes; ++plane) {
    const struct macroblockd_plane *pd = &xd->plane[plane];
    int pixel_c, pixel_r;
    mi_to_pixel_loc(&pixel_c, &pixel_r, mi_col, mi_row, 0, 0, pd->subsampling_x,
                    pd->subsampling_y);
    if (!is_chroma_reference(mi_row, mi_col, bsize, pd->subsampling_x,
                             pd->subsampling_y))
      continue;
    mismatch_check_block_pre(pd->dst.buf, pd->dst.stride,
                             cm->current_frame.order_hint, plane, pixel_c,
                             pixel_r, pd->width, pd->height,
                             xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH);
  }
#endif
}

static AOM_INLINE void set_color_index_map_offset(MACROBLOCKD *const xd,
                                                  int plane, aom_reader *r) {
  (void)r;
  Av1ColorMapParam params;
  const MB_MODE_INFO *const mbmi = xd->mi[0];
  av1_get_block_dimensions(mbmi->bsize, plane, xd, &params.plane_width,
                           &params.plane_height, NULL, NULL);
  xd->color_index_map_offset[plane] +=
      params.plane_width * params.plane_height;
}

static AOM_INLINE void decode_token_recon_block(AV1Decoder *const pbi,
                                                ThreadData *const td,
                                                aom_reader *r,
                                                BLOCK_SIZE bsize) {
  AV1_COMMON *const cm = &pbi->common;
  DecoderCodingBlock *const dcb = &td->dcb;
  MACROBLOCKD *const xd = &dcb->xd;
  const int num_planes = av1_num_planes(cm);
  MB_MODE_INFO *mbmi = xd->mi[0];

  if (!is_inter_block(mbmi)) {
    int row, col;
    assert(bsize == get_plane_block_size(bsize, xd->plane[0].subsampling_x,
                                         xd->plane[0].subsampling_y));
    const int max_blocks_wide = max_block_wide(xd, bsize, 0);
    const int max_blocks_high = max_block_high(xd, bsize, 0);
    const BLOCK_SIZE max_unit_bsize = BLOCK_64X64;
    int mu_blocks_wide = mi_size_wide[max_unit_bsize];
    int mu_blocks_high = mi_size_high[max_unit_bsize];
    mu_blocks_wide = AOMMIN(max_blocks_wide, mu_blocks_wide);
    mu_blocks_high = AOMMIN(max_blocks_high, mu_blocks_high);

    for (row = 0; row < max_blocks_high; row += mu_blocks_high) {
      for (col = 0; col < max_blocks_wide; col += mu_blocks_wide) {
        for (int plane = 0; plane < num_planes; ++plane) {
          if (plane && !xd->is_chroma_ref) break;
          const struct macroblockd_plane *const pd = &xd->plane[plane];
          const TX_SIZE tx_size = av1_get_tx_size(plane, xd);
          const int stepr = tx_size_high_unit[tx_size];
          const int stepc = tx_size_wide_unit[tx_size];

          const int unit_height = ROUND_POWER_OF_TWO(
              AOMMIN(mu_blocks_high + row, max_blocks_high), pd->subsampling_y);
          const int unit_width = ROUND_POWER_OF_TWO(
              AOMMIN(mu_blocks_wide + col, max_blocks_wide), pd->subsampling_x);

          for (int blk_row = row >> pd->subsampling_y; blk_row < unit_height;
               blk_row += stepr) {
            for (int blk_col = col >> pd->subsampling_x; blk_col < unit_width;
                 blk_col += stepc) {
              td->read_coeffs_tx_intra_block_visit(cm, dcb, r, plane, blk_row,
                                                   blk_col, tx_size);
              td->predict_and_recon_intra_block_visit(
                  cm, dcb, r, plane, blk_row, blk_col, tx_size);
              set_cb_buffer_offsets(dcb, tx_size, plane);
            }
          }
        }
      }
    }
  } else {
    td->predict_inter_block_visit(cm, dcb, bsize);
    // Reconstruction
    if (!mbmi->skip_txfm) {
      int eobtotal = 0;

      const int max_blocks_wide = max_block_wide(xd, bsize, 0);
      const int max_blocks_high = max_block_high(xd, bsize, 0);
      int row, col;

      const BLOCK_SIZE max_unit_bsize = BLOCK_64X64;
      assert(max_unit_bsize ==
             get_plane_block_size(BLOCK_64X64, xd->plane[0].subsampling_x,
                                  xd->plane[0].subsampling_y));
      int mu_blocks_wide = mi_size_wide[max_unit_bsize];
      int mu_blocks_high = mi_size_high[max_unit_bsize];

      mu_blocks_wide = AOMMIN(max_blocks_wide, mu_blocks_wide);
      mu_blocks_high = AOMMIN(max_blocks_high, mu_blocks_high);

      for (row = 0; row < max_blocks_high; row += mu_blocks_high) {
        for (col = 0; col < max_blocks_wide; col += mu_blocks_wide) {
          for (int plane = 0; plane < num_planes; ++plane) {
            if (plane && !xd->is_chroma_ref) break;
            const struct macroblockd_plane *const pd = &xd->plane[plane];
            const int ss_x = pd->subsampling_x;
            const int ss_y = pd->subsampling_y;
            const BLOCK_SIZE plane_bsize =
                get_plane_block_size(bsize, ss_x, ss_y);
            const TX_SIZE max_tx_size =
                get_vartx_max_txsize(xd, plane_bsize, plane);
            const int bh_var_tx = tx_size_high_unit[max_tx_size];
            const int bw_var_tx = tx_size_wide_unit[max_tx_size];
            int block = 0;
            int step =
                tx_size_wide_unit[max_tx_size] * tx_size_high_unit[max_tx_size];
            int blk_row, blk_col;
            const int unit_height = ROUND_POWER_OF_TWO(
                AOMMIN(mu_blocks_high + row, max_blocks_high), ss_y);
            const int unit_width = ROUND_POWER_OF_TWO(
                AOMMIN(mu_blocks_wide + col, max_blocks_wide), ss_x);

            for (blk_row = row >> ss_y; blk_row < unit_height;
                 blk_row += bh_var_tx) {
              for (blk_col = col >> ss_x; blk_col < unit_width;
                   blk_col += bw_var_tx) {
                decode_reconstruct_tx(cm, td, r, mbmi, plane, plane_bsize,
                                      blk_row, blk_col, block, max_tx_size,
                                      &eobtotal);
                block += step;
              }
            }
          }
        }
      }
    }
    td->cfl_store_inter_block_visit(cm, xd);
  }

  av1_visit_palette(pbi, xd, r, set_color_index_map_offset);
}

static AOM_INLINE void set_inter_tx_size(MB_MODE_INFO *mbmi, int stride_log2,
                                         int tx_w_log2, int tx_h_log2,
                                         int min_txs, int split_size, int txs,
                                         int blk_row, int blk_col) {
  for (int idy = 0; idy < tx_size_high_unit[split_size];
       idy += tx_size_high_unit[min_txs]) {
    for (int idx = 0; idx < tx_size_wide_unit[split_size];
         idx += tx_size_wide_unit[min_txs]) {
      const int index = (((blk_row + idy) >> tx_h_log2) << stride_log2) +
                        ((blk_col + idx) >> tx_w_log2);
      mbmi->inter_tx_size[index] = txs;
    }
  }
}

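// Reads the variable transform-size partition tree for an inter block,
// recursing while the coded split symbols indicate further splits and
// stopping at TX_4X4 or at MAX_VARTX_DEPTH.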
static AOM_INLINE void read_tx_size_vartx(MACROBLOCKD *xd, MB_MODE_INFO *mbmi,
                                          TX_SIZE tx_size, int depth,
                                          int blk_row, int blk_col,
                                          aom_reader *r) {
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
  int is_split = 0;
  const BLOCK_SIZE bsize = mbmi->bsize;
  const int max_blocks_high = max_block_high(xd, bsize, 0);
  const int max_blocks_wide = max_block_wide(xd, bsize, 0);
  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
  assert(tx_size > TX_4X4);
  TX_SIZE txs = max_txsize_rect_lookup[bsize];
  for (int level = 0; level < MAX_VARTX_DEPTH - 1; ++level)
    txs = sub_tx_size_map[txs];
  const int tx_w_log2 = tx_size_wide_log2[txs] - MI_SIZE_LOG2;
  const int tx_h_log2 = tx_size_high_log2[txs] - MI_SIZE_LOG2;
  const int bw_log2 = mi_size_wide_log2[bsize];
  const int stride_log2 = bw_log2 - tx_w_log2;

  if (depth == MAX_VARTX_DEPTH) {
    set_inter_tx_size(mbmi, stride_log2, tx_w_log2, tx_h_log2, txs, tx_size,
                      tx_size, blk_row, blk_col);
    mbmi->tx_size = tx_size;
    txfm_partition_update(xd->above_txfm_context + blk_col,
                          xd->left_txfm_context + blk_row, tx_size, tx_size);
    return;
  }

  const int ctx = txfm_partition_context(xd->above_txfm_context + blk_col,
                                         xd->left_txfm_context + blk_row,
                                         mbmi->bsize, tx_size);
  is_split = aom_read_symbol(r, ec_ctx->txfm_partition_cdf[ctx], 2, ACCT_STR);

  if (is_split) {
    const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
    const int bsw = tx_size_wide_unit[sub_txs];
    const int bsh = tx_size_high_unit[sub_txs];

    if (sub_txs == TX_4X4) {
      set_inter_tx_size(mbmi, stride_log2, tx_w_log2, tx_h_log2, txs, tx_size,
                        sub_txs, blk_row, blk_col);
      mbmi->tx_size = sub_txs;
      txfm_partition_update(xd->above_txfm_context + blk_col,
                            xd->left_txfm_context + blk_row, sub_txs, tx_size);
      return;
    }

    assert(bsw > 0 && bsh > 0);
    for (int row = 0; row < tx_size_high_unit[tx_size]; row += bsh) {
      for (int col = 0; col < tx_size_wide_unit[tx_size]; col += bsw) {
        int offsetr = blk_row + row;
        int offsetc = blk_col + col;
        read_tx_size_vartx(xd, mbmi, sub_txs, depth + 1, offsetr, offsetc, r);
      }
    }
  } else {
    set_inter_tx_size(mbmi, stride_log2, tx_w_log2, tx_h_log2, txs, tx_size,
                      tx_size, blk_row, blk_col);
    mbmi->tx_size = tx_size;
    txfm_partition_update(xd->above_txfm_context + blk_col,
                          xd->left_txfm_context + blk_row, tx_size, tx_size);
  }
}

static TX_SIZE read_selected_tx_size(const MACROBLOCKD *const xd,
                                     aom_reader *r) {
  // TODO(debargha): Clean up the logic here. This function should only
  // be called for intra.
  const BLOCK_SIZE bsize = xd->mi[0]->bsize;
  const int32_t tx_size_cat = bsize_to_tx_size_cat(bsize);
  const int max_depths = bsize_to_max_depth(bsize);
  const int ctx = get_tx_size_context(xd);
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
  const int depth = aom_read_symbol(r, ec_ctx->tx_size_cdf[tx_size_cat][ctx],
                                    max_depths + 1, ACCT_STR);
  assert(depth >= 0 && depth <= max_depths);
  const TX_SIZE tx_size = depth_to_tx_size(depth, bsize);
  return tx_size;
}

static TX_SIZE read_tx_size(const MACROBLOCKD *const xd, TX_MODE tx_mode,
                            int is_inter, int allow_select_inter,
                            aom_reader *r) {
  const BLOCK_SIZE bsize = xd->mi[0]->bsize;
  if (xd->lossless[xd->mi[0]->segment_id]) return TX_4X4;

  if (block_signals_txsize(bsize)) {
    if ((!is_inter || allow_select_inter) && tx_mode == TX_MODE_SELECT) {
      const TX_SIZE coded_tx_size = read_selected_tx_size(xd, r);
      return coded_tx_size;
    } else {
      return tx_size_from_tx_mode(bsize, tx_mode);
    }
  } else {
    assert(IMPLIES(tx_mode == ONLY_4X4, bsize == BLOCK_4X4));
    return max_txsize_rect_lookup[bsize];
  }
}

static AOM_INLINE void parse_decode_block(AV1Decoder *const pbi,
                                          ThreadData *const td, int mi_row,
                                          int mi_col, aom_reader *r,
                                          PARTITION_TYPE partition,
                                          BLOCK_SIZE bsize) {
  DecoderCodingBlock *const dcb = &td->dcb;
  MACROBLOCKD *const xd = &dcb->xd;
  decode_mbmi_block(pbi, dcb, mi_row, mi_col, r, partition, bsize);

  av1_visit_palette(pbi, xd, r, av1_decode_palette_tokens);

  AV1_COMMON *cm = &pbi->common;
  const int num_planes = av1_num_planes(cm);
  MB_MODE_INFO *mbmi = xd->mi[0];
  int inter_block_tx = is_inter_block(mbmi) || is_intrabc_block(mbmi);
  if (cm->features.tx_mode == TX_MODE_SELECT && block_signals_txsize(bsize) &&
      !mbmi->skip_txfm && inter_block_tx && !xd->lossless[mbmi->segment_id]) {
    const TX_SIZE max_tx_size = max_txsize_rect_lookup[bsize];
    const int bh = tx_size_high_unit[max_tx_size];
    const int bw = tx_size_wide_unit[max_tx_size];
    const int width = mi_size_wide[bsize];
    const int height = mi_size_high[bsize];

    for (int idy = 0; idy < height; idy += bh)
      for (int idx = 0; idx < width; idx += bw)
        read_tx_size_vartx(xd, mbmi, max_tx_size, 0, idy, idx, r);
  } else {
    mbmi->tx_size = read_tx_size(xd, cm->features.tx_mode, inter_block_tx,
                                 !mbmi->skip_txfm, r);
    if (inter_block_tx)
      memset(mbmi->inter_tx_size, mbmi->tx_size, sizeof(mbmi->inter_tx_size));
    set_txfm_ctxs(mbmi->tx_size, xd->width, xd->height,
                  mbmi->skip_txfm && is_inter_block(mbmi), xd);
  }

  if (cm->delta_q_info.delta_q_present_flag) {
    for (int i = 0; i < MAX_SEGMENTS; i++) {
      const int current_qindex =
          av1_get_qindex(&cm->seg, i, xd->current_base_qindex);
      const CommonQuantParams *const quant_params = &cm->quant_params;
      for (int j = 0; j < num_planes; ++j) {
        const int dc_delta_q = j == 0 ? quant_params->y_dc_delta_q
                                      : (j == 1 ? quant_params->u_dc_delta_q
                                                : quant_params->v_dc_delta_q);
        const int ac_delta_q = j == 0 ? 0
                                      : (j == 1 ? quant_params->u_ac_delta_q
                                                : quant_params->v_ac_delta_q);
        xd->plane[j].seg_dequant_QTX[i][0] = av1_dc_quant_QTX(
            current_qindex, dc_delta_q, cm->seq_params->bit_depth);
        xd->plane[j].seg_dequant_QTX[i][1] = av1_ac_quant_QTX(
            current_qindex, ac_delta_q, cm->seq_params->bit_depth);
      }
    }
  }
  if (mbmi->skip_txfm) av1_reset_entropy_context(xd, bsize, num_planes);

  decode_token_recon_block(pbi, td, r, bsize);
}

static AOM_INLINE void set_offsets_for_pred_and_recon(AV1Decoder *const pbi,
                                                      ThreadData *const td,
                                                      int mi_row, int mi_col,
                                                      BLOCK_SIZE bsize) {
  AV1_COMMON *const cm = &pbi->common;
  const CommonModeInfoParams *const mi_params = &cm->mi_params;
  DecoderCodingBlock *const dcb = &td->dcb;
  MACROBLOCKD *const xd = &dcb->xd;
  const int bw = mi_size_wide[bsize];
  const int bh = mi_size_high[bsize];
  const int num_planes = av1_num_planes(cm);

  const int offset = mi_row * mi_params->mi_stride + mi_col;
  const TileInfo *const tile = &xd->tile;

  xd->mi = mi_params->mi_grid_base + offset;
  xd->tx_type_map =
      &mi_params->tx_type_map[mi_row * mi_params->mi_stride + mi_col];
  xd->tx_type_map_stride = mi_params->mi_stride;

  set_plane_n4(xd, bw, bh, num_planes);

  // Distances of the MB to the various image edges. These are specified in
  // 1/8th pel units, as they are always compared against values in 1/8th pel
  // units.
  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, mi_params->mi_rows,
                 mi_params->mi_cols);

  av1_setup_dst_planes(xd->plane, bsize, &cm->cur_frame->buf, mi_row, mi_col, 0,
                       num_planes);
}

static AOM_INLINE void decode_block(AV1Decoder *const pbi, ThreadData *const td,
                                    int mi_row, int mi_col, aom_reader *r,
                                    PARTITION_TYPE partition,
                                    BLOCK_SIZE bsize) {
  (void)partition;
  set_offsets_for_pred_and_recon(pbi, td, mi_row, mi_col, bsize);
  decode_token_recon_block(pbi, td, r, bsize);
}

static PARTITION_TYPE read_partition(MACROBLOCKD *xd, int mi_row, int mi_col,
                                     aom_reader *r, int has_rows, int has_cols,
                                     BLOCK_SIZE bsize) {
  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;

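  // When the block extends past both the bottom and the right edge of the
  // frame, the split is implied and no partition symbol is coded.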
  if (!has_rows && !has_cols) return PARTITION_SPLIT;

  assert(ctx >= 0);
  aom_cdf_prob *partition_cdf = ec_ctx->partition_cdf[ctx];
  if (has_rows && has_cols) {
    return (PARTITION_TYPE)aom_read_symbol(
        r, partition_cdf, partition_cdf_length(bsize), ACCT_STR);
  } else if (!has_rows && has_cols) {
    assert(bsize > BLOCK_8X8);
    aom_cdf_prob cdf[2];
    partition_gather_vert_alike(cdf, partition_cdf, bsize);
    assert(cdf[1] == AOM_ICDF(CDF_PROB_TOP));
    return aom_read_cdf(r, cdf, 2, ACCT_STR) ? PARTITION_SPLIT : PARTITION_HORZ;
  } else {
    assert(has_rows && !has_cols);
    assert(bsize > BLOCK_8X8);
    aom_cdf_prob cdf[2];
    partition_gather_horz_alike(cdf, partition_cdf, bsize);
    assert(cdf[1] == AOM_ICDF(CDF_PROB_TOP));
    return aom_read_cdf(r, cdf, 2, ACCT_STR) ? PARTITION_SPLIT : PARTITION_VERT;
  }
}

// TODO(slavarnway): eliminate bsize and subsize in future commits
static AOM_INLINE void decode_partition(AV1Decoder *const pbi,
                                        ThreadData *const td, int mi_row,
                                        int mi_col, aom_reader *reader,
                                        BLOCK_SIZE bsize,
                                        int parse_decode_flag) {
  assert(bsize < BLOCK_SIZES_ALL);
  AV1_COMMON *const cm = &pbi->common;
  DecoderCodingBlock *const dcb = &td->dcb;
  MACROBLOCKD *const xd = &dcb->xd;
  const int bw = mi_size_wide[bsize];
  const int hbs = bw >> 1;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  const int quarter_step = bw / 4;
  BLOCK_SIZE bsize2 = get_partition_subsize(bsize, PARTITION_SPLIT);
  const int has_rows = (mi_row + hbs) < cm->mi_params.mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_params.mi_cols;

  if (mi_row >= cm->mi_params.mi_rows || mi_col >= cm->mi_params.mi_cols)
    return;

  // parse_decode_flag takes the following values:
  // 01 - do parse only
  // 10 - do decode only
  // 11 - do parse and decode
  static const block_visitor_fn_t block_visit[4] = { NULL, parse_decode_block,
                                                     decode_block,
                                                     parse_decode_block };

  if (parse_decode_flag & 1) {
    const int num_planes = av1_num_planes(cm);
    for (int plane = 0; plane < num_planes; ++plane) {
      int rcol0, rcol1, rrow0, rrow1;

      // Skip some unnecessary work if loop restoration is disabled
      if (cm->rst_info[plane].frame_restoration_type == RESTORE_NONE) continue;

      if (av1_loop_restoration_corners_in_sb(cm, plane, mi_row, mi_col, bsize,
                                             &rcol0, &rcol1, &rrow0, &rrow1)) {
        const int rstride = cm->rst_info[plane].horz_units;
        for (int rrow = rrow0; rrow < rrow1; ++rrow) {
          for (int rcol = rcol0; rcol < rcol1; ++rcol) {
            const int runit_idx = rcol + rrow * rstride;
            loop_restoration_read_sb_coeffs(cm, xd, reader, plane, runit_idx);
          }
        }
      }
    }

    partition = (bsize < BLOCK_8X8) ? PARTITION_NONE
                                    : read_partition(xd, mi_row, mi_col, reader,
                                                     has_rows, has_cols, bsize);
  } else {
    partition = get_partition(cm, mi_row, mi_col, bsize);
  }
  subsize = get_partition_subsize(bsize, partition);
  if (subsize == BLOCK_INVALID) {
    // When an internal error occurs, ensure that xd->mi_row is set
    // appropriately w.r.t. the current tile; it is used to signal that
    // processing of the current row is done.
    xd->mi_row = mi_row;
    aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
                       "Partition is invalid for block size %dx%d",
                       block_size_wide[bsize], block_size_high[bsize]);
  }
  // Check the bitstream is conformant: if there is subsampling on the
  // chroma planes, subsize must subsample to a valid block size.
  const struct macroblockd_plane *const pd_u = &xd->plane[1];
  if (get_plane_block_size(subsize, pd_u->subsampling_x, pd_u->subsampling_y) ==
      BLOCK_INVALID) {
    // When an internal error occurs, ensure that xd->mi_row is set
    // appropriately w.r.t. the current tile; it is used to signal that
    // processing of the current row is done.
    xd->mi_row = mi_row;
    aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
                       "Block size %dx%d invalid with this subsampling mode",
                       block_size_wide[subsize], block_size_high[subsize]);
  }

1335 #define DEC_BLOCK_STX_ARG
1336 #define DEC_BLOCK_EPT_ARG partition,
1337 #define DEC_BLOCK(db_r, db_c, db_subsize) \
1338 block_visit[parse_decode_flag](pbi, td, DEC_BLOCK_STX_ARG(db_r), (db_c), \
1339 reader, DEC_BLOCK_EPT_ARG(db_subsize))
1340 #define DEC_PARTITION(db_r, db_c, db_subsize) \
1341 decode_partition(pbi, td, DEC_BLOCK_STX_ARG(db_r), (db_c), reader, \
1342 (db_subsize), parse_decode_flag)
1343
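// Of the cases below, only PARTITION_SPLIT recurses (via DEC_PARTITION);
// every other partition type emits its leaf blocks directly via DEC_BLOCK.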
1344 switch (partition) {
1345 case PARTITION_NONE: DEC_BLOCK(mi_row, mi_col, subsize); break;
1346 case PARTITION_HORZ:
1347 DEC_BLOCK(mi_row, mi_col, subsize);
1348 if (has_rows) DEC_BLOCK(mi_row + hbs, mi_col, subsize);
1349 break;
1350 case PARTITION_VERT:
1351 DEC_BLOCK(mi_row, mi_col, subsize);
1352 if (has_cols) DEC_BLOCK(mi_row, mi_col + hbs, subsize);
1353 break;
1354 case PARTITION_SPLIT:
1355 DEC_PARTITION(mi_row, mi_col, subsize);
1356 DEC_PARTITION(mi_row, mi_col + hbs, subsize);
1357 DEC_PARTITION(mi_row + hbs, mi_col, subsize);
1358 DEC_PARTITION(mi_row + hbs, mi_col + hbs, subsize);
1359 break;
1360 case PARTITION_HORZ_A:
1361 DEC_BLOCK(mi_row, mi_col, bsize2);
1362 DEC_BLOCK(mi_row, mi_col + hbs, bsize2);
1363 DEC_BLOCK(mi_row + hbs, mi_col, subsize);
1364 break;
1365 case PARTITION_HORZ_B:
1366 DEC_BLOCK(mi_row, mi_col, subsize);
1367 DEC_BLOCK(mi_row + hbs, mi_col, bsize2);
1368 DEC_BLOCK(mi_row + hbs, mi_col + hbs, bsize2);
1369 break;
1370 case PARTITION_VERT_A:
1371 DEC_BLOCK(mi_row, mi_col, bsize2);
1372 DEC_BLOCK(mi_row + hbs, mi_col, bsize2);
1373 DEC_BLOCK(mi_row, mi_col + hbs, subsize);
1374 break;
1375 case PARTITION_VERT_B:
1376 DEC_BLOCK(mi_row, mi_col, subsize);
1377 DEC_BLOCK(mi_row, mi_col + hbs, bsize2);
1378 DEC_BLOCK(mi_row + hbs, mi_col + hbs, bsize2);
1379 break;
1380 case PARTITION_HORZ_4:
1381 for (int i = 0; i < 4; ++i) {
1382 int this_mi_row = mi_row + i * quarter_step;
1383 if (i > 0 && this_mi_row >= cm->mi_params.mi_rows) break;
1384 DEC_BLOCK(this_mi_row, mi_col, subsize);
1385 }
1386 break;
1387 case PARTITION_VERT_4:
1388 for (int i = 0; i < 4; ++i) {
1389 int this_mi_col = mi_col + i * quarter_step;
1390 if (i > 0 && this_mi_col >= cm->mi_params.mi_cols) break;
1391 DEC_BLOCK(mi_row, this_mi_col, subsize);
1392 }
1393 break;
1394 default: assert(0 && "Invalid partition type");
1395 }
1396
1397 #undef DEC_PARTITION
1398 #undef DEC_BLOCK
1399 #undef DEC_BLOCK_EPT_ARG
1400 #undef DEC_BLOCK_STX_ARG
1401
1402 if (parse_decode_flag & 1)
1403 update_ext_partition_context(xd, mi_row, mi_col, subsize, bsize, partition);
1404 }
1405
1406 static AOM_INLINE void setup_bool_decoder(
1407 MACROBLOCKD *const xd, const uint8_t *data, const uint8_t *data_end,
1408 const size_t read_size, struct aom_internal_error_info *error_info,
1409 aom_reader *r, uint8_t allow_update_cdf) {
1410 // Validate the calculated partition length. If the buffer
1411 // described by the partition can't be fully read, then restrict
1412 // it to the portion that can be (for EC mode) or throw an error.
1413 if (!read_is_valid(data, read_size, data_end)) {
1414 // When an internal error occurs, ensure that xd->mi_row is set
1415 // appropriately w.r.t. the current tile; it is used to signal that
1416 // processing of the current row is done in row-mt decoding.
1417 xd->mi_row = xd->tile.mi_row_start;
1418
1419 aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
1420 "Truncated packet or corrupt tile length");
1421 }
1422 if (aom_reader_init(r, data, read_size)) {
1423 // When an internal error occurs, ensure that xd->mi_row is set
1424 // appropriately w.r.t. the current tile; it is used to signal that
1425 // processing of the current row is done in row-mt decoding.
1426 xd->mi_row = xd->tile.mi_row_start;
1427
1428 aom_internal_error(error_info, AOM_CODEC_MEM_ERROR,
1429 "Failed to allocate bool decoder %d", 1);
1430 }
1431
1432 r->allow_update_cdf = allow_update_cdf;
1433 }
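// Illustrative sketch (hypothetical call site, not part of this file): a
// caller typically initializes one symbol reader per tile from its tile
// buffer, e.g.:
#if 0
  TileBufferDec *const tile_buf = &tile_buffers[row][col];
  setup_bool_decoder(xd, tile_buf->data, data_end, tile_buf->size, &pbi->error,
                     td->bit_reader, allow_update_cdf);
#endif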
1434
1435 static AOM_INLINE void setup_segmentation(AV1_COMMON *const cm,
1436 struct aom_read_bit_buffer *rb) {
1437 struct segmentation *const seg = &cm->seg;
1438
1439 seg->update_map = 0;
1440 seg->update_data = 0;
1441 seg->temporal_update = 0;
1442
1443 seg->enabled = aom_rb_read_bit(rb);
1444 if (!seg->enabled) {
1445 if (cm->cur_frame->seg_map) {
1446 memset(cm->cur_frame->seg_map, 0,
1447 (cm->cur_frame->mi_rows * cm->cur_frame->mi_cols));
1448 }
1449
1450 memset(seg, 0, sizeof(*seg));
1451 segfeatures_copy(&cm->cur_frame->seg, seg);
1452 return;
1453 }
1454 if (cm->seg.enabled && cm->prev_frame &&
1455 (cm->mi_params.mi_rows == cm->prev_frame->mi_rows) &&
1456 (cm->mi_params.mi_cols == cm->prev_frame->mi_cols)) {
1457 cm->last_frame_seg_map = cm->prev_frame->seg_map;
1458 } else {
1459 cm->last_frame_seg_map = NULL;
1460 }
1461 // Read update flags
1462 if (cm->features.primary_ref_frame == PRIMARY_REF_NONE) {
1463 // These frames can't reference previous frames, so the segmentation map and features must be signaled explicitly.
1464 seg->update_map = 1;
1465 seg->temporal_update = 0;
1466 seg->update_data = 1;
1467 } else {
1468 seg->update_map = aom_rb_read_bit(rb);
1469 if (seg->update_map) {
1470 seg->temporal_update = aom_rb_read_bit(rb);
1471 } else {
1472 seg->temporal_update = 0;
1473 }
1474 seg->update_data = aom_rb_read_bit(rb);
1475 }
1476
1477 // Segmentation data update
1478 if (seg->update_data) {
1479 av1_clearall_segfeatures(seg);
1480
1481 for (int i = 0; i < MAX_SEGMENTS; i++) {
1482 for (int j = 0; j < SEG_LVL_MAX; j++) {
1483 int data = 0;
1484 const int feature_enabled = aom_rb_read_bit(rb);
1485 if (feature_enabled) {
1486 av1_enable_segfeature(seg, i, j);
1487
1488 const int data_max = av1_seg_feature_data_max(j);
1489 const int data_min = -data_max;
1490 const int ubits = get_unsigned_bits(data_max);
1491
1492 if (av1_is_segfeature_signed(j)) {
1493 data = aom_rb_read_inv_signed_literal(rb, ubits);
1494 } else {
1495 data = aom_rb_read_literal(rb, ubits);
1496 }
1497
1498 data = clamp(data, data_min, data_max);
1499 }
1500 av1_set_segdata(seg, i, j, data);
1501 }
1502 }
1503 av1_calculate_segdata(seg);
1504 } else if (cm->prev_frame) {
1505 segfeatures_copy(seg, &cm->prev_frame->seg);
1506 }
1507 segfeatures_copy(&cm->cur_frame->seg, seg);
1508 }
1509
1510 static AOM_INLINE void decode_restoration_mode(AV1_COMMON *cm,
1511 struct aom_read_bit_buffer *rb) {
1512 assert(!cm->features.all_lossless);
1513 const int num_planes = av1_num_planes(cm);
1514 if (cm->features.allow_intrabc) return;
1515 int all_none = 1, chroma_none = 1;
1516 for (int p = 0; p < num_planes; ++p) {
1517 RestorationInfo *rsi = &cm->rst_info[p];
1518 if (aom_rb_read_bit(rb)) {
1519 rsi->frame_restoration_type =
1520 aom_rb_read_bit(rb) ? RESTORE_SGRPROJ : RESTORE_WIENER;
1521 } else {
1522 rsi->frame_restoration_type =
1523 aom_rb_read_bit(rb) ? RESTORE_SWITCHABLE : RESTORE_NONE;
1524 }
1525 if (rsi->frame_restoration_type != RESTORE_NONE) {
1526 all_none = 0;
1527 chroma_none &= p == 0;
1528 }
1529 }
1530 if (!all_none) {
1531 assert(cm->seq_params->sb_size == BLOCK_64X64 ||
1532 cm->seq_params->sb_size == BLOCK_128X128);
1533 const int sb_size = cm->seq_params->sb_size == BLOCK_128X128 ? 128 : 64;
1534
1535 for (int p = 0; p < num_planes; ++p)
1536 cm->rst_info[p].restoration_unit_size = sb_size;
1537
1538 RestorationInfo *rsi = &cm->rst_info[0];
1539
1540 if (sb_size == 64) {
1541 rsi->restoration_unit_size <<= aom_rb_read_bit(rb);
1542 }
1543 if (rsi->restoration_unit_size > 64) {
1544 rsi->restoration_unit_size <<= aom_rb_read_bit(rb);
1545 }
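// Worked example: with 64x64 superblocks the luma restoration unit starts at
// 64 and each bit read above can double it (64 -> 128 -> 256); with 128x128
// superblocks it starts at 128, so only the second bit applies. 256
// (RESTORATION_UNITSIZE_MAX) is the largest signalable size.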
1546 } else {
1547 const int size = RESTORATION_UNITSIZE_MAX;
1548 for (int p = 0; p < num_planes; ++p)
1549 cm->rst_info[p].restoration_unit_size = size;
1550 }
1551
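// For subsampled chroma (e.g. 4:2:0, where s == 1 below), one bit selects
// whether the chroma restoration unit matches the luma unit or is half its
// size; the V plane always mirrors U.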
1552 if (num_planes > 1) {
1553 int s =
1554 AOMMIN(cm->seq_params->subsampling_x, cm->seq_params->subsampling_y);
1555 if (s && !chroma_none) {
1556 cm->rst_info[1].restoration_unit_size =
1557 cm->rst_info[0].restoration_unit_size >> (aom_rb_read_bit(rb) * s);
1558 } else {
1559 cm->rst_info[1].restoration_unit_size =
1560 cm->rst_info[0].restoration_unit_size;
1561 }
1562 cm->rst_info[2].restoration_unit_size =
1563 cm->rst_info[1].restoration_unit_size;
1564 }
1565 }
1566
1567 static AOM_INLINE void read_wiener_filter(int wiener_win,
1568 WienerInfo *wiener_info,
1569 WienerInfo *ref_wiener_info,
1570 aom_reader *rb) {
1571 memset(wiener_info->vfilter, 0, sizeof(wiener_info->vfilter));
1572 memset(wiener_info->hfilter, 0, sizeof(wiener_info->hfilter));
1573
1574 if (wiener_win == WIENER_WIN)
1575 wiener_info->vfilter[0] = wiener_info->vfilter[WIENER_WIN - 1] =
1576 aom_read_primitive_refsubexpfin(
1577 rb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1,
1578 WIENER_FILT_TAP0_SUBEXP_K,
1579 ref_wiener_info->vfilter[0] - WIENER_FILT_TAP0_MINV, ACCT_STR) +
1580 WIENER_FILT_TAP0_MINV;
1581 else
1582 wiener_info->vfilter[0] = wiener_info->vfilter[WIENER_WIN - 1] = 0;
1583 wiener_info->vfilter[1] = wiener_info->vfilter[WIENER_WIN - 2] =
1584 aom_read_primitive_refsubexpfin(
1585 rb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1,
1586 WIENER_FILT_TAP1_SUBEXP_K,
1587 ref_wiener_info->vfilter[1] - WIENER_FILT_TAP1_MINV, ACCT_STR) +
1588 WIENER_FILT_TAP1_MINV;
1589 wiener_info->vfilter[2] = wiener_info->vfilter[WIENER_WIN - 3] =
1590 aom_read_primitive_refsubexpfin(
1591 rb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1,
1592 WIENER_FILT_TAP2_SUBEXP_K,
1593 ref_wiener_info->vfilter[2] - WIENER_FILT_TAP2_MINV, ACCT_STR) +
1594 WIENER_FILT_TAP2_MINV;
1595 // The central element has an implicit +WIENER_FILT_STEP
1596 wiener_info->vfilter[WIENER_HALFWIN] =
1597 -2 * (wiener_info->vfilter[0] + wiener_info->vfilter[1] +
1598 wiener_info->vfilter[2]);
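// Worked example (assuming WIENER_FILT_STEP == 128): symmetric taps t0 = 3,
// t1 = -7, t2 = 15 store a center of -2 * (3 - 7 + 15) = -22; the effective
// center tap is then 128 - 22 = 106, so the 7-tap filter sums to
// WIENER_FILT_STEP.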
1599
1600 if (wiener_win == WIENER_WIN)
1601 wiener_info->hfilter[0] = wiener_info->hfilter[WIENER_WIN - 1] =
1602 aom_read_primitive_refsubexpfin(
1603 rb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1,
1604 WIENER_FILT_TAP0_SUBEXP_K,
1605 ref_wiener_info->hfilter[0] - WIENER_FILT_TAP0_MINV, ACCT_STR) +
1606 WIENER_FILT_TAP0_MINV;
1607 else
1608 wiener_info->hfilter[0] = wiener_info->hfilter[WIENER_WIN - 1] = 0;
1609 wiener_info->hfilter[1] = wiener_info->hfilter[WIENER_WIN - 2] =
1610 aom_read_primitive_refsubexpfin(
1611 rb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1,
1612 WIENER_FILT_TAP1_SUBEXP_K,
1613 ref_wiener_info->hfilter[1] - WIENER_FILT_TAP1_MINV, ACCT_STR) +
1614 WIENER_FILT_TAP1_MINV;
1615 wiener_info->hfilter[2] = wiener_info->hfilter[WIENER_WIN - 3] =
1616 aom_read_primitive_refsubexpfin(
1617 rb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1,
1618 WIENER_FILT_TAP2_SUBEXP_K,
1619 ref_wiener_info->hfilter[2] - WIENER_FILT_TAP2_MINV, ACCT_STR) +
1620 WIENER_FILT_TAP2_MINV;
1621 // The central element has an implicit +WIENER_FILT_STEP
1622 wiener_info->hfilter[WIENER_HALFWIN] =
1623 -2 * (wiener_info->hfilter[0] + wiener_info->hfilter[1] +
1624 wiener_info->hfilter[2]);
1625 memcpy(ref_wiener_info, wiener_info, sizeof(*wiener_info));
1626 }
1627
1628 static AOM_INLINE void read_sgrproj_filter(SgrprojInfo *sgrproj_info,
1629 SgrprojInfo *ref_sgrproj_info,
1630 aom_reader *rb) {
1631 sgrproj_info->ep = aom_read_literal(rb, SGRPROJ_PARAMS_BITS, ACCT_STR);
1632 const sgr_params_type *params = &av1_sgr_params[sgrproj_info->ep];
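// 'ep' indexes the predefined self-guided parameter sets in av1_sgr_params[];
// a radius r[i] of 0 disables that pass, in which case the corresponding
// projection weight is derived rather than coded (see the branches below).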
1633
1634 if (params->r[0] == 0) {
1635 sgrproj_info->xqd[0] = 0;
1636 sgrproj_info->xqd[1] =
1637 aom_read_primitive_refsubexpfin(
1638 rb, SGRPROJ_PRJ_MAX1 - SGRPROJ_PRJ_MIN1 + 1, SGRPROJ_PRJ_SUBEXP_K,
1639 ref_sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1, ACCT_STR) +
1640 SGRPROJ_PRJ_MIN1;
1641 } else if (params->r[1] == 0) {
1642 sgrproj_info->xqd[0] =
1643 aom_read_primitive_refsubexpfin(
1644 rb, SGRPROJ_PRJ_MAX0 - SGRPROJ_PRJ_MIN0 + 1, SGRPROJ_PRJ_SUBEXP_K,
1645 ref_sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0, ACCT_STR) +
1646 SGRPROJ_PRJ_MIN0;
1647 sgrproj_info->xqd[1] = clamp((1 << SGRPROJ_PRJ_BITS) - sgrproj_info->xqd[0],
1648 SGRPROJ_PRJ_MIN1, SGRPROJ_PRJ_MAX1);
1649 } else {
1650 sgrproj_info->xqd[0] =
1651 aom_read_primitive_refsubexpfin(
1652 rb, SGRPROJ_PRJ_MAX0 - SGRPROJ_PRJ_MIN0 + 1, SGRPROJ_PRJ_SUBEXP_K,
1653 ref_sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0, ACCT_STR) +
1654 SGRPROJ_PRJ_MIN0;
1655 sgrproj_info->xqd[1] =
1656 aom_read_primitive_refsubexpfin(
1657 rb, SGRPROJ_PRJ_MAX1 - SGRPROJ_PRJ_MIN1 + 1, SGRPROJ_PRJ_SUBEXP_K,
1658 ref_sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1, ACCT_STR) +
1659 SGRPROJ_PRJ_MIN1;
1660 }
1661
1662 memcpy(ref_sgrproj_info, sgrproj_info, sizeof(*sgrproj_info));
1663 }
1664
1665 static AOM_INLINE void loop_restoration_read_sb_coeffs(
1666 const AV1_COMMON *const cm, MACROBLOCKD *xd, aom_reader *const r, int plane,
1667 int runit_idx) {
1668 const RestorationInfo *rsi = &cm->rst_info[plane];
1669 RestorationUnitInfo *rui = &rsi->unit_info[runit_idx];
1670 assert(rsi->frame_restoration_type != RESTORE_NONE);
1671
1672 assert(!cm->features.all_lossless);
1673
1674 const int wiener_win = (plane > 0) ? WIENER_WIN_CHROMA : WIENER_WIN;
1675 WienerInfo *wiener_info = xd->wiener_info + plane;
1676 SgrprojInfo *sgrproj_info = xd->sgrproj_info + plane;
1677
1678 if (rsi->frame_restoration_type == RESTORE_SWITCHABLE) {
1679 rui->restoration_type =
1680 aom_read_symbol(r, xd->tile_ctx->switchable_restore_cdf,
1681 RESTORE_SWITCHABLE_TYPES, ACCT_STR);
1682 switch (rui->restoration_type) {
1683 case RESTORE_WIENER:
1684 read_wiener_filter(wiener_win, &rui->wiener_info, wiener_info, r);
1685 break;
1686 case RESTORE_SGRPROJ:
1687 read_sgrproj_filter(&rui->sgrproj_info, sgrproj_info, r);
1688 break;
1689 default: assert(rui->restoration_type == RESTORE_NONE); break;
1690 }
1691 } else if (rsi->frame_restoration_type == RESTORE_WIENER) {
1692 if (aom_read_symbol(r, xd->tile_ctx->wiener_restore_cdf, 2, ACCT_STR)) {
1693 rui->restoration_type = RESTORE_WIENER;
1694 read_wiener_filter(wiener_win, &rui->wiener_info, wiener_info, r);
1695 } else {
1696 rui->restoration_type = RESTORE_NONE;
1697 }
1698 } else if (rsi->frame_restoration_type == RESTORE_SGRPROJ) {
1699 if (aom_read_symbol(r, xd->tile_ctx->sgrproj_restore_cdf, 2, ACCT_STR)) {
1700 rui->restoration_type = RESTORE_SGRPROJ;
1701 read_sgrproj_filter(&rui->sgrproj_info, sgrproj_info, r);
1702 } else {
1703 rui->restoration_type = RESTORE_NONE;
1704 }
1705 }
1706 }
1707
1708 static AOM_INLINE void setup_loopfilter(AV1_COMMON *cm,
1709 struct aom_read_bit_buffer *rb) {
1710 const int num_planes = av1_num_planes(cm);
1711 struct loopfilter *lf = &cm->lf;
1712
1713 if (cm->features.allow_intrabc || cm->features.coded_lossless) {
1714 // write default deltas to frame buffer
1715 av1_set_default_ref_deltas(cm->cur_frame->ref_deltas);
1716 av1_set_default_mode_deltas(cm->cur_frame->mode_deltas);
1717 return;
1718 }
1719 assert(!cm->features.coded_lossless);
1720 if (cm->prev_frame) {
1721 // write deltas to frame buffer
1722 memcpy(lf->ref_deltas, cm->prev_frame->ref_deltas, REF_FRAMES);
1723 memcpy(lf->mode_deltas, cm->prev_frame->mode_deltas, MAX_MODE_LF_DELTAS);
1724 } else {
1725 av1_set_default_ref_deltas(lf->ref_deltas);
1726 av1_set_default_mode_deltas(lf->mode_deltas);
1727 }
1728 lf->filter_level[0] = aom_rb_read_literal(rb, 6);
1729 lf->filter_level[1] = aom_rb_read_literal(rb, 6);
1730 if (num_planes > 1) {
1731 if (lf->filter_level[0] || lf->filter_level[1]) {
1732 lf->filter_level_u = aom_rb_read_literal(rb, 6);
1733 lf->filter_level_v = aom_rb_read_literal(rb, 6);
1734 }
1735 }
1736 lf->sharpness_level = aom_rb_read_literal(rb, 3);
1737
1738 // Read in loop filter deltas applied at the MB level based on mode or ref
1739 // frame.
1740 lf->mode_ref_delta_update = 0;
1741
1742 lf->mode_ref_delta_enabled = aom_rb_read_bit(rb);
1743 if (lf->mode_ref_delta_enabled) {
1744 lf->mode_ref_delta_update = aom_rb_read_bit(rb);
1745 if (lf->mode_ref_delta_update) {
1746 for (int i = 0; i < REF_FRAMES; i++)
1747 if (aom_rb_read_bit(rb))
1748 lf->ref_deltas[i] = aom_rb_read_inv_signed_literal(rb, 6);
1749
1750 for (int i = 0; i < MAX_MODE_LF_DELTAS; i++)
1751 if (aom_rb_read_bit(rb))
1752 lf->mode_deltas[i] = aom_rb_read_inv_signed_literal(rb, 6);
1753 }
1754 }
1755
1756 // write deltas to frame buffer
1757 memcpy(cm->cur_frame->ref_deltas, lf->ref_deltas, REF_FRAMES);
1758 memcpy(cm->cur_frame->mode_deltas, lf->mode_deltas, MAX_MODE_LF_DELTAS);
1759 }
1760
1761 static AOM_INLINE void setup_cdef(AV1_COMMON *cm,
1762 struct aom_read_bit_buffer *rb) {
1763 const int num_planes = av1_num_planes(cm);
1764 CdefInfo *const cdef_info = &cm->cdef_info;
1765
1766 if (cm->features.allow_intrabc) return;
1767 cdef_info->cdef_damping = aom_rb_read_literal(rb, 2) + 3;
1768 cdef_info->cdef_bits = aom_rb_read_literal(rb, 2);
1769 cdef_info->nb_cdef_strengths = 1 << cdef_info->cdef_bits;
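// e.g. cdef_bits == 2 signals 1 << 2 = 4 strength pairs below, and the
// damping read above is a 2-bit value biased by +3, i.e. in [3, 6].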
1770 for (int i = 0; i < cdef_info->nb_cdef_strengths; i++) {
1771 cdef_info->cdef_strengths[i] = aom_rb_read_literal(rb, CDEF_STRENGTH_BITS);
1772 cdef_info->cdef_uv_strengths[i] =
1773 num_planes > 1 ? aom_rb_read_literal(rb, CDEF_STRENGTH_BITS) : 0;
1774 }
1775 }
1776
1777 static INLINE int read_delta_q(struct aom_read_bit_buffer *rb) {
1778 return aom_rb_read_bit(rb) ? aom_rb_read_inv_signed_literal(rb, 6) : 0;
1779 }
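// A delta-q is either absent (a single 0 bit) or coded as a 6-bit inv-signed
// literal, i.e. a sign-extended 7-bit delta covering roughly [-64, 63].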
1780
1781 static AOM_INLINE void setup_quantization(CommonQuantParams *quant_params,
1782 int num_planes,
1783 bool separate_uv_delta_q,
1784 struct aom_read_bit_buffer *rb) {
1785 quant_params->base_qindex = aom_rb_read_literal(rb, QINDEX_BITS);
1786 quant_params->y_dc_delta_q = read_delta_q(rb);
1787 if (num_planes > 1) {
1788 int diff_uv_delta = 0;
1789 if (separate_uv_delta_q) diff_uv_delta = aom_rb_read_bit(rb);
1790 quant_params->u_dc_delta_q = read_delta_q(rb);
1791 quant_params->u_ac_delta_q = read_delta_q(rb);
1792 if (diff_uv_delta) {
1793 quant_params->v_dc_delta_q = read_delta_q(rb);
1794 quant_params->v_ac_delta_q = read_delta_q(rb);
1795 } else {
1796 quant_params->v_dc_delta_q = quant_params->u_dc_delta_q;
1797 quant_params->v_ac_delta_q = quant_params->u_ac_delta_q;
1798 }
1799 } else {
1800 quant_params->u_dc_delta_q = 0;
1801 quant_params->u_ac_delta_q = 0;
1802 quant_params->v_dc_delta_q = 0;
1803 quant_params->v_ac_delta_q = 0;
1804 }
1805 quant_params->using_qmatrix = aom_rb_read_bit(rb);
1806 if (quant_params->using_qmatrix) {
1807 quant_params->qmatrix_level_y = aom_rb_read_literal(rb, QM_LEVEL_BITS);
1808 quant_params->qmatrix_level_u = aom_rb_read_literal(rb, QM_LEVEL_BITS);
1809 if (!separate_uv_delta_q)
1810 quant_params->qmatrix_level_v = quant_params->qmatrix_level_u;
1811 else
1812 quant_params->qmatrix_level_v = aom_rb_read_literal(rb, QM_LEVEL_BITS);
1813 } else {
1814 quant_params->qmatrix_level_y = 0;
1815 quant_params->qmatrix_level_u = 0;
1816 quant_params->qmatrix_level_v = 0;
1817 }
1818 }
1819
1820 // Build y/uv dequant values based on segmentation.
1821 static AOM_INLINE void setup_segmentation_dequant(AV1_COMMON *const cm,
1822 MACROBLOCKD *const xd) {
1823 const int bit_depth = cm->seq_params->bit_depth;
1824 // When segmentation is disabled, only the first value is used. The
1825 // remaining entries are don't-cares.
1826 const int max_segments = cm->seg.enabled ? MAX_SEGMENTS : 1;
1827 CommonQuantParams *const quant_params = &cm->quant_params;
1828 for (int i = 0; i < max_segments; ++i) {
1829 const int qindex = xd->qindex[i];
1830 quant_params->y_dequant_QTX[i][0] =
1831 av1_dc_quant_QTX(qindex, quant_params->y_dc_delta_q, bit_depth);
1832 quant_params->y_dequant_QTX[i][1] = av1_ac_quant_QTX(qindex, 0, bit_depth);
1833 quant_params->u_dequant_QTX[i][0] =
1834 av1_dc_quant_QTX(qindex, quant_params->u_dc_delta_q, bit_depth);
1835 quant_params->u_dequant_QTX[i][1] =
1836 av1_ac_quant_QTX(qindex, quant_params->u_ac_delta_q, bit_depth);
1837 quant_params->v_dequant_QTX[i][0] =
1838 av1_dc_quant_QTX(qindex, quant_params->v_dc_delta_q, bit_depth);
1839 quant_params->v_dequant_QTX[i][1] =
1840 av1_ac_quant_QTX(qindex, quant_params->v_ac_delta_q, bit_depth);
1841 const int use_qmatrix = av1_use_qmatrix(quant_params, xd, i);
1842 // NB: depends on base index so there is only 1 set per frame
1843 // No quant weighting when lossless or signalled not using QM
1844 const int qmlevel_y =
1845 use_qmatrix ? quant_params->qmatrix_level_y : NUM_QM_LEVELS - 1;
1846 for (int j = 0; j < TX_SIZES_ALL; ++j) {
1847 quant_params->y_iqmatrix[i][j] =
1848 av1_iqmatrix(quant_params, qmlevel_y, AOM_PLANE_Y, j);
1849 }
1850 const int qmlevel_u =
1851 use_qmatrix ? quant_params->qmatrix_level_u : NUM_QM_LEVELS - 1;
1852 for (int j = 0; j < TX_SIZES_ALL; ++j) {
1853 quant_params->u_iqmatrix[i][j] =
1854 av1_iqmatrix(quant_params, qmlevel_u, AOM_PLANE_U, j);
1855 }
1856 const int qmlevel_v =
1857 use_qmatrix ? quant_params->qmatrix_level_v : NUM_QM_LEVELS - 1;
1858 for (int j = 0; j < TX_SIZES_ALL; ++j) {
1859 quant_params->v_iqmatrix[i][j] =
1860 av1_iqmatrix(quant_params, qmlevel_v, AOM_PLANE_V, j);
1861 }
1862 }
1863 }
1864
1865 static InterpFilter read_frame_interp_filter(struct aom_read_bit_buffer *rb) {
1866 return aom_rb_read_bit(rb) ? SWITCHABLE
1867 : aom_rb_read_literal(rb, LOG_SWITCHABLE_FILTERS);
1868 }
1869
1870 static AOM_INLINE void setup_render_size(AV1_COMMON *cm,
1871 struct aom_read_bit_buffer *rb) {
1872 cm->render_width = cm->superres_upscaled_width;
1873 cm->render_height = cm->superres_upscaled_height;
1874 if (aom_rb_read_bit(rb))
1875 av1_read_frame_size(rb, 16, 16, &cm->render_width, &cm->render_height);
1876 }
1877
1878 // TODO(afergs): make "struct aom_read_bit_buffer *const rb"?
1879 static AOM_INLINE void setup_superres(AV1_COMMON *const cm,
1880 struct aom_read_bit_buffer *rb,
1881 int *width, int *height) {
1882 cm->superres_upscaled_width = *width;
1883 cm->superres_upscaled_height = *height;
1884
1885 const SequenceHeader *const seq_params = cm->seq_params;
1886 if (!seq_params->enable_superres) return;
1887
1888 if (aom_rb_read_bit(rb)) {
1889 cm->superres_scale_denominator =
1890 (uint8_t)aom_rb_read_literal(rb, SUPERRES_SCALE_BITS);
1891 cm->superres_scale_denominator += SUPERRES_SCALE_DENOMINATOR_MIN;
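// e.g. with SUPERRES_SCALE_BITS == 3 and a minimum denominator of 9, the
// denominator lands in [9, 16] against a numerator of SCALE_NUMERATOR (8); a
// coded denominator of 16 therefore halves the width (superres scales
// horizontally only).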
1892 // Don't edit cm->width or cm->height directly, or the buffers won't get
1893 // resized correctly
1894 av1_calculate_scaled_superres_size(width, height,
1895 cm->superres_scale_denominator);
1896 } else {
1897 // 1:1 scaling, i.e. no scaling; scale not provided
1898 cm->superres_scale_denominator = SCALE_NUMERATOR;
1899 }
1900 }
1901
1902 static AOM_INLINE void resize_context_buffers(AV1_COMMON *cm, int width,
1903 int height) {
1904 #if CONFIG_SIZE_LIMIT
1905 if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
1906 aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME,
1907 "Dimensions of %dx%d beyond allowed size of %dx%d.",
1908 width, height, DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT);
1909 #endif
1910 if (cm->width != width || cm->height != height) {
1911 const int new_mi_rows = CEIL_POWER_OF_TWO(height, MI_SIZE_LOG2);
1912 const int new_mi_cols = CEIL_POWER_OF_TWO(width, MI_SIZE_LOG2);
1913
1914 // Allocations in av1_alloc_context_buffers() depend on individual
1915 // dimensions as well as the overall size.
1916 if (new_mi_cols > cm->mi_params.mi_cols ||
1917 new_mi_rows > cm->mi_params.mi_rows) {
1918 if (av1_alloc_context_buffers(cm, width, height, BLOCK_4X4)) {
1919 // The cm->mi_* values have been cleared and any existing context
1920 // buffers have been freed. Clear cm->width and cm->height to be
1921 // consistent and to force a realloc next time.
1922 cm->width = 0;
1923 cm->height = 0;
1924 aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
1925 "Failed to allocate context buffers");
1926 }
1927 } else {
1928 cm->mi_params.set_mb_mi(&cm->mi_params, width, height, BLOCK_4X4);
1929 }
1930 av1_init_mi_buffers(&cm->mi_params);
1931 cm->width = width;
1932 cm->height = height;
1933 }
1934
1935 ensure_mv_buffer(cm->cur_frame, cm);
1936 cm->cur_frame->width = cm->width;
1937 cm->cur_frame->height = cm->height;
1938 }
1939
1940 static AOM_INLINE void setup_buffer_pool(AV1_COMMON *cm) {
1941 BufferPool *const pool = cm->buffer_pool;
1942 const SequenceHeader *const seq_params = cm->seq_params;
1943
1944 lock_buffer_pool(pool);
1945 if (aom_realloc_frame_buffer(
1946 &cm->cur_frame->buf, cm->width, cm->height, seq_params->subsampling_x,
1947 seq_params->subsampling_y, seq_params->use_highbitdepth,
1948 AOM_DEC_BORDER_IN_PIXELS, cm->features.byte_alignment,
1949 &cm->cur_frame->raw_frame_buffer, pool->get_fb_cb, pool->cb_priv,
1950 false, 0)) {
1951 unlock_buffer_pool(pool);
1952 aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
1953 "Failed to allocate frame buffer");
1954 }
1955 unlock_buffer_pool(pool);
1956
1957 cm->cur_frame->buf.bit_depth = (unsigned int)seq_params->bit_depth;
1958 cm->cur_frame->buf.color_primaries = seq_params->color_primaries;
1959 cm->cur_frame->buf.transfer_characteristics =
1960 seq_params->transfer_characteristics;
1961 cm->cur_frame->buf.matrix_coefficients = seq_params->matrix_coefficients;
1962 cm->cur_frame->buf.monochrome = seq_params->monochrome;
1963 cm->cur_frame->buf.chroma_sample_position =
1964 seq_params->chroma_sample_position;
1965 cm->cur_frame->buf.color_range = seq_params->color_range;
1966 cm->cur_frame->buf.render_width = cm->render_width;
1967 cm->cur_frame->buf.render_height = cm->render_height;
1968 }
1969
1970 static AOM_INLINE void setup_frame_size(AV1_COMMON *cm,
1971 int frame_size_override_flag,
1972 struct aom_read_bit_buffer *rb) {
1973 const SequenceHeader *const seq_params = cm->seq_params;
1974 int width, height;
1975
1976 if (frame_size_override_flag) {
1977 int num_bits_width = seq_params->num_bits_width;
1978 int num_bits_height = seq_params->num_bits_height;
1979 av1_read_frame_size(rb, num_bits_width, num_bits_height, &width, &height);
1980 if (width > seq_params->max_frame_width ||
1981 height > seq_params->max_frame_height) {
1982 aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME,
1983 "Frame dimensions are larger than the maximum values");
1984 }
1985 } else {
1986 width = seq_params->max_frame_width;
1987 height = seq_params->max_frame_height;
1988 }
1989
1990 setup_superres(cm, rb, &width, &height);
1991 resize_context_buffers(cm, width, height);
1992 setup_render_size(cm, rb);
1993 setup_buffer_pool(cm);
1994 }
1995
1996 static AOM_INLINE void setup_sb_size(SequenceHeader *seq_params,
1997 struct aom_read_bit_buffer *rb) {
1998 set_sb_size(seq_params, aom_rb_read_bit(rb) ? BLOCK_128X128 : BLOCK_64X64);
1999 }
2000
2001 static INLINE int valid_ref_frame_img_fmt(aom_bit_depth_t ref_bit_depth,
2002 int ref_xss, int ref_yss,
2003 aom_bit_depth_t this_bit_depth,
2004 int this_xss, int this_yss) {
2005 return ref_bit_depth == this_bit_depth && ref_xss == this_xss &&
2006 ref_yss == this_yss;
2007 }
2008
2009 static AOM_INLINE void setup_frame_size_with_refs(
2010 AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
2011 int width, height;
2012 int found = 0;
2013 int has_valid_ref_frame = 0;
2014 for (int i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
2015 if (aom_rb_read_bit(rb)) {
2016 const RefCntBuffer *const ref_buf = get_ref_frame_buf(cm, i);
2017 // This will never be NULL in a normal stream, as streams are required to
2018 // have a shown keyframe before any inter frames, which would refresh all
2019 // the reference buffers. However, it might be NULL if we're starting in
2020 // the middle of a stream, and static analysis will flag an error if we
2021 // don't do a NULL check here.
2022 if (ref_buf == NULL) {
2023 aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME,
2024 "Invalid condition: invalid reference buffer");
2025 } else {
2026 const YV12_BUFFER_CONFIG *const buf = &ref_buf->buf;
2027 width = buf->y_crop_width;
2028 height = buf->y_crop_height;
2029 cm->render_width = buf->render_width;
2030 cm->render_height = buf->render_height;
2031 setup_superres(cm, rb, &width, &height);
2032 resize_context_buffers(cm, width, height);
2033 found = 1;
2034 break;
2035 }
2036 }
2037 }
2038
2039 const SequenceHeader *const seq_params = cm->seq_params;
2040 if (!found) {
2041 int num_bits_width = seq_params->num_bits_width;
2042 int num_bits_height = seq_params->num_bits_height;
2043
2044 av1_read_frame_size(rb, num_bits_width, num_bits_height, &width, &height);
2045 setup_superres(cm, rb, &width, &height);
2046 resize_context_buffers(cm, width, height);
2047 setup_render_size(cm, rb);
2048 }
2049
2050 if (width <= 0 || height <= 0)
2051 aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME,
2052 "Invalid frame size");
2053
2054 // Check to make sure at least one of frames that this frame references
2055 // has valid dimensions.
2056 for (int i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
2057 const RefCntBuffer *const ref_frame = get_ref_frame_buf(cm, i);
2058 has_valid_ref_frame |=
2059 valid_ref_frame_size(ref_frame->buf.y_crop_width,
2060 ref_frame->buf.y_crop_height, width, height);
2061 }
2062 if (!has_valid_ref_frame)
2063 aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME,
2064 "Referenced frame has invalid size");
2065 for (int i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
2066 const RefCntBuffer *const ref_frame = get_ref_frame_buf(cm, i);
2067 if (!valid_ref_frame_img_fmt(
2068 ref_frame->buf.bit_depth, ref_frame->buf.subsampling_x,
2069 ref_frame->buf.subsampling_y, seq_params->bit_depth,
2070 seq_params->subsampling_x, seq_params->subsampling_y))
2071 aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME,
2072 "Referenced frame has incompatible color format");
2073 }
2074 setup_buffer_pool(cm);
2075 }
2076
2077 // Same function as av1_read_uniform, but reads from the uncompressed header bit buffer rb.
2078 static int rb_read_uniform(struct aom_read_bit_buffer *const rb, int n) {
2079 const int l = get_unsigned_bits(n);
2080 const int m = (1 << l) - n;
2081 assert(l != 0);
2082 const int v = aom_rb_read_literal(rb, l - 1);
2083 if (v < m)
2084 return v;
2085 else
2086 return (v << 1) - m + aom_rb_read_bit(rb);
2087 }
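// Worked example: n == 5 gives l == 3 and m == 3, so symbols 0..2 are coded
// in 2 bits and symbols 3..4 in 3 bits -- a minimal-redundancy uniform code.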
2088
2089 static AOM_INLINE void read_tile_info_max_tile(
2090 AV1_COMMON *const cm, struct aom_read_bit_buffer *const rb) {
2091 const SequenceHeader *const seq_params = cm->seq_params;
2092 CommonTileParams *const tiles = &cm->tiles;
2093 int width_sb =
2094 CEIL_POWER_OF_TWO(cm->mi_params.mi_cols, seq_params->mib_size_log2);
2095 int height_sb =
2096 CEIL_POWER_OF_TWO(cm->mi_params.mi_rows, seq_params->mib_size_log2);
2097
2098 av1_get_tile_limits(cm);
2099 tiles->uniform_spacing = aom_rb_read_bit(rb);
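// Two encodings follow: with uniform spacing, log2 of the tile count is coded
// as a chain of increment bits; otherwise each tile's width/height is coded
// explicitly in superblock units via rb_read_uniform().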
2100
2101 // Read tile columns
2102 if (tiles->uniform_spacing) {
2103 tiles->log2_cols = tiles->min_log2_cols;
2104 while (tiles->log2_cols < tiles->max_log2_cols) {
2105 if (!aom_rb_read_bit(rb)) {
2106 break;
2107 }
2108 tiles->log2_cols++;
2109 }
2110 } else {
2111 int i;
2112 int start_sb;
2113 for (i = 0, start_sb = 0; width_sb > 0 && i < MAX_TILE_COLS; i++) {
2114 const int size_sb =
2115 1 + rb_read_uniform(rb, AOMMIN(width_sb, tiles->max_width_sb));
2116 tiles->col_start_sb[i] = start_sb;
2117 start_sb += size_sb;
2118 width_sb -= size_sb;
2119 }
2120 tiles->cols = i;
2121 tiles->col_start_sb[i] = start_sb + width_sb;
2122 }
2123 av1_calculate_tile_cols(seq_params, cm->mi_params.mi_rows,
2124 cm->mi_params.mi_cols, tiles);
2125
2126 // Read tile rows
2127 if (tiles->uniform_spacing) {
2128 tiles->log2_rows = tiles->min_log2_rows;
2129 while (tiles->log2_rows < tiles->max_log2_rows) {
2130 if (!aom_rb_read_bit(rb)) {
2131 break;
2132 }
2133 tiles->log2_rows++;
2134 }
2135 } else {
2136 int i;
2137 int start_sb;
2138 for (i = 0, start_sb = 0; height_sb > 0 && i < MAX_TILE_ROWS; i++) {
2139 const int size_sb =
2140 1 + rb_read_uniform(rb, AOMMIN(height_sb, tiles->max_height_sb));
2141 tiles->row_start_sb[i] = start_sb;
2142 start_sb += size_sb;
2143 height_sb -= size_sb;
2144 }
2145 tiles->rows = i;
2146 tiles->row_start_sb[i] = start_sb + height_sb;
2147 }
2148 av1_calculate_tile_rows(seq_params, cm->mi_params.mi_rows, tiles);
2149 }
2150
2151 void av1_set_single_tile_decoding_mode(AV1_COMMON *const cm) {
2152 cm->tiles.single_tile_decoding = 0;
2153 if (cm->tiles.large_scale) {
2154 struct loopfilter *lf = &cm->lf;
2155 RestorationInfo *const rst_info = cm->rst_info;
2156 const CdefInfo *const cdef_info = &cm->cdef_info;
2157
2158 // Figure out single_tile_decoding by loopfilter_level.
2159 const int no_loopfilter = !(lf->filter_level[0] || lf->filter_level[1]);
2160 const int no_cdef = cdef_info->cdef_bits == 0 &&
2161 cdef_info->cdef_strengths[0] == 0 &&
2162 cdef_info->cdef_uv_strengths[0] == 0;
2163 const int no_restoration =
2164 rst_info[0].frame_restoration_type == RESTORE_NONE &&
2165 rst_info[1].frame_restoration_type == RESTORE_NONE &&
2166 rst_info[2].frame_restoration_type == RESTORE_NONE;
2167 assert(IMPLIES(cm->features.coded_lossless, no_loopfilter && no_cdef));
2168 assert(IMPLIES(cm->features.all_lossless, no_restoration));
2169 cm->tiles.single_tile_decoding = no_loopfilter && no_cdef && no_restoration;
2170 }
2171 }
2172
2173 static AOM_INLINE void read_tile_info(AV1Decoder *const pbi,
2174 struct aom_read_bit_buffer *const rb) {
2175 AV1_COMMON *const cm = &pbi->common;
2176
2177 read_tile_info_max_tile(cm, rb);
2178
2179 pbi->context_update_tile_id = 0;
2180 if (cm->tiles.rows * cm->tiles.cols > 1) {
2181 // tile to use for cdf update
2182 pbi->context_update_tile_id =
2183 aom_rb_read_literal(rb, cm->tiles.log2_rows + cm->tiles.log2_cols);
2184 if (pbi->context_update_tile_id >= cm->tiles.rows * cm->tiles.cols) {
2185 aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
2186 "Invalid context_update_tile_id");
2187 }
2188 // tile size magnitude
2189 pbi->tile_size_bytes = aom_rb_read_literal(rb, 2) + 1;
2190 }
2191 }
2192
2193 #if EXT_TILE_DEBUG
2194 static AOM_INLINE void read_ext_tile_info(
2195 AV1Decoder *const pbi, struct aom_read_bit_buffer *const rb) {
2196 AV1_COMMON *const cm = &pbi->common;
2197
2198 // This information starts at a new byte, so skip to byte alignment before reading it.
2199 int mod = rb->bit_offset % CHAR_BIT;
2200 if (mod > 0) aom_rb_read_literal(rb, CHAR_BIT - mod);
2201 assert(rb->bit_offset % CHAR_BIT == 0);
2202
2203 if (cm->tiles.cols * cm->tiles.rows > 1) {
2204 // Read the number of bytes used to store tile size
2205 pbi->tile_col_size_bytes = aom_rb_read_literal(rb, 2) + 1;
2206 pbi->tile_size_bytes = aom_rb_read_literal(rb, 2) + 1;
2207 }
2208 }
2209 #endif // EXT_TILE_DEBUG
2210
2211 static size_t mem_get_varsize(const uint8_t *src, int sz) {
2212 switch (sz) {
2213 case 1: return src[0];
2214 case 2: return mem_get_le16(src);
2215 case 3: return mem_get_le24(src);
2216 case 4: return mem_get_le32(src);
2217 default: assert(0 && "Invalid size"); return -1;
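// NB: in the invalid case above, -1 converts to SIZE_MAX; callers rely on
// their subsequent size-validity checks to reject it.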
2218 }
2219 }
2220
2221 #if EXT_TILE_DEBUG
2222 // Reads the next tile, returning its size and adjusting '*data'
2223 // accordingly. On return, '*data' is updated to point to the end of the
2224 // raw tile buffer in the bit stream.
2225 static AOM_INLINE void get_ls_tile_buffer(
2226 const uint8_t *const data_end, struct aom_internal_error_info *error_info,
2227 const uint8_t **data, TileBufferDec (*const tile_buffers)[MAX_TILE_COLS],
2228 int tile_size_bytes, int col, int row, int tile_copy_mode) {
2229 size_t size;
2230
2231 size_t copy_size = 0;
2232 const uint8_t *copy_data = NULL;
2233
2234 if (!read_is_valid(*data, tile_size_bytes, data_end))
2235 aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
2236 "Truncated packet or corrupt tile length");
2237 size = mem_get_varsize(*data, tile_size_bytes);
2238
2239 // If tile_copy_mode = 1, then the top bit of the tile header indicates copy
2240 // mode.
2241 if (tile_copy_mode && (size >> (tile_size_bytes * 8 - 1)) == 1) {
2242 // The remaining bits in the top byte signal the row offset
2243 int offset = (size >> (tile_size_bytes - 1) * 8) & 0x7f;
2244
2245 // Currently, only tiles in the same column can be used as reference tiles.
2246 copy_data = tile_buffers[row - offset][col].data;
2247 copy_size = tile_buffers[row - offset][col].size;
2248 size = 0;
2249 } else {
2250 size += AV1_MIN_TILE_SIZE_BYTES;
2251 }
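// Worked example (tile_size_bytes == 2, tile_copy_mode == 1): header bytes
// 03 83 read little-endian as 0x8303; the top bit marks copy mode and the
// remaining bits of the top byte give a row offset of 3, so this tile reuses
// the buffer of the tile 3 rows above in the same column.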
2252
2253 *data += tile_size_bytes;
2254
2255 if (size > (size_t)(data_end - *data))
2256 aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
2257 "Truncated packet or corrupt tile size");
2258
2259 if (size > 0) {
2260 tile_buffers[row][col].data = *data;
2261 tile_buffers[row][col].size = size;
2262 } else {
2263 tile_buffers[row][col].data = copy_data;
2264 tile_buffers[row][col].size = copy_size;
2265 }
2266
2267 *data += size;
2268 }
2269
2270 // Returns the end of the last tile buffer
2271 // (tile_buffers[cm->tiles.rows - 1][cm->tiles.cols - 1]).
2272 static const uint8_t *get_ls_tile_buffers(
2273 AV1Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
2274 TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
2275 AV1_COMMON *const cm = &pbi->common;
2276 const int tile_cols = cm->tiles.cols;
2277 const int tile_rows = cm->tiles.rows;
2278 const int have_tiles = tile_cols * tile_rows > 1;
2279 const uint8_t *raw_data_end; // The end of the last tile buffer
2280
2281 if (!have_tiles) {
2282 const size_t tile_size = data_end - data;
2283 tile_buffers[0][0].data = data;
2284 tile_buffers[0][0].size = tile_size;
2285 raw_data_end = NULL;
2286 } else {
2287 // We locate only the tile buffers that are required, which are the ones
2288 // specified by pbi->dec_tile_col and pbi->dec_tile_row. Also, we always
2289 // need the last (bottom right) tile buffer, as we need to know where the
2290 // end of the compressed frame buffer is for proper superframe decoding.
2291
2292 const uint8_t *tile_col_data_end[MAX_TILE_COLS] = { NULL };
2293 const uint8_t *const data_start = data;
2294
2295 const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
2296 const int single_row = pbi->dec_tile_row >= 0;
2297 const int tile_rows_start = single_row ? dec_tile_row : 0;
2298 const int tile_rows_end = single_row ? tile_rows_start + 1 : tile_rows;
2299 const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
2300 const int single_col = pbi->dec_tile_col >= 0;
2301 const int tile_cols_start = single_col ? dec_tile_col : 0;
2302 const int tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
2303
2304 const int tile_col_size_bytes = pbi->tile_col_size_bytes;
2305 const int tile_size_bytes = pbi->tile_size_bytes;
2306 int tile_width, tile_height;
2307 if (!av1_get_uniform_tile_size(cm, &tile_width, &tile_height)) {
2308 aom_internal_error(
2309 &pbi->error, AOM_CODEC_CORRUPT_FRAME,
2310 "Not all the tiles in the tile list have the same size.");
2311 }
2312 const int tile_copy_mode =
2313 ((AOMMAX(tile_width, tile_height) << MI_SIZE_LOG2) <= 256) ? 1 : 0;
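// Copy mode is only permitted when every tile is at most 256 pixels in each
// dimension (tile_width/tile_height are in MI units, hence the MI_SIZE_LOG2
// shift above).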
2314 // Read tile column sizes for all columns (we need the last tile buffer)
2315 for (int c = 0; c < tile_cols; ++c) {
2316 const int is_last = c == tile_cols - 1;
2317 size_t tile_col_size;
2318
2319 if (!is_last) {
2320 if (tile_col_size_bytes > data_end - data) {
2321 aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
2322 "Not enough data to read tile_col_size");
2323 }
2324 tile_col_size = mem_get_varsize(data, tile_col_size_bytes);
2325 data += tile_col_size_bytes;
2326 if (tile_col_size > (size_t)(data_end - data)) {
2327 aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
2328 "tile_col_data_end[%d] is out of bound", c);
2329 }
2330 tile_col_data_end[c] = data + tile_col_size;
2331 } else {
2332 tile_col_size = data_end - data;
2333 tile_col_data_end[c] = data_end;
2334 }
2335 data += tile_col_size;
2336 }
2337
2338 data = data_start;
2339
2340 // Read the required tile sizes.
2341 for (int c = tile_cols_start; c < tile_cols_end; ++c) {
2342 const int is_last = c == tile_cols - 1;
2343
2344 if (c > 0) data = tile_col_data_end[c - 1];
2345
2346 if (!is_last) data += tile_col_size_bytes;
2347
2348 // Get the whole of the last column, otherwise stop at the required tile.
2349 for (int r = 0; r < (is_last ? tile_rows : tile_rows_end); ++r) {
2350 get_ls_tile_buffer(tile_col_data_end[c], &pbi->error, &data,
2351 tile_buffers, tile_size_bytes, c, r, tile_copy_mode);
2352 }
2353 }
2354
2355 // If we have not read the last column, then read it to get the last tile.
2356 if (tile_cols_end != tile_cols) {
2357 const int c = tile_cols - 1;
2358
2359 data = tile_col_data_end[c - 1];
2360
2361 for (int r = 0; r < tile_rows; ++r) {
2362 get_ls_tile_buffer(tile_col_data_end[c], &pbi->error, &data,
2363 tile_buffers, tile_size_bytes, c, r, tile_copy_mode);
2364 }
2365 }
2366 raw_data_end = data;
2367 }
2368 return raw_data_end;
2369 }
2370 #endif // EXT_TILE_DEBUG
2371
2372 static const uint8_t *get_ls_single_tile_buffer(
2373 AV1Decoder *pbi, const uint8_t *data,
2374 TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
2375 assert(pbi->dec_tile_row >= 0 && pbi->dec_tile_col >= 0);
2376 tile_buffers[pbi->dec_tile_row][pbi->dec_tile_col].data = data;
2377 tile_buffers[pbi->dec_tile_row][pbi->dec_tile_col].size =
2378 (size_t)pbi->coded_tile_data_size;
2379 return data + pbi->coded_tile_data_size;
2380 }
2381
2382 // Reads the next tile returning its size and adjusting '*data' accordingly
2383 // based on 'is_last'.
2384 static AOM_INLINE void get_tile_buffer(
2385 const uint8_t *const data_end, const int tile_size_bytes, int is_last,
2386 struct aom_internal_error_info *error_info, const uint8_t **data,
2387 TileBufferDec *const buf) {
2388 size_t size;
2389
2390 if (!is_last) {
2391 if (!read_is_valid(*data, tile_size_bytes, data_end))
2392 aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
2393 "Not enough data to read tile size");
2394
2395 size = mem_get_varsize(*data, tile_size_bytes) + AV1_MIN_TILE_SIZE_BYTES;
2396 *data += tile_size_bytes;
2397
2398 if (size > (size_t)(data_end - *data))
2399 aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
2400 "Truncated packet or corrupt tile size");
2401 } else {
2402 size = data_end - *data;
2403 }
2404
2405 buf->data = *data;
2406 buf->size = size;
2407
2408 *data += size;
2409 }
2410
2411 static AOM_INLINE void get_tile_buffers(
2412 AV1Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
2413 TileBufferDec (*const tile_buffers)[MAX_TILE_COLS], int start_tile,
2414 int end_tile) {
2415 AV1_COMMON *const cm = &pbi->common;
2416 const int tile_cols = cm->tiles.cols;
2417 const int tile_rows = cm->tiles.rows;
2418 int tc = 0;
2419
2420 for (int r = 0; r < tile_rows; ++r) {
2421 for (int c = 0; c < tile_cols; ++c, ++tc) {
2422 TileBufferDec *const buf = &tile_buffers[r][c];
2423
2424 const int is_last = (tc == end_tile);
2425 const size_t hdr_offset = 0;
2426
2427 if (tc < start_tile || tc > end_tile) continue;
2428
2429 if (data + hdr_offset >= data_end)
2430 aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
2431 "Data ended before all tiles were read.");
2432 data += hdr_offset;
2433 get_tile_buffer(data_end, pbi->tile_size_bytes, is_last, &pbi->error,
2434 &data, buf);
2435 }
2436 }
2437 }
2438
2439 static AOM_INLINE void set_cb_buffer(AV1Decoder *pbi, DecoderCodingBlock *dcb,
2440 CB_BUFFER *cb_buffer_base,
2441 const int num_planes, int mi_row,
2442 int mi_col) {
2443 AV1_COMMON *const cm = &pbi->common;
2444 int mib_size_log2 = cm->seq_params->mib_size_log2;
2445 int stride = (cm->mi_params.mi_cols >> mib_size_log2) + 1;
2446 int offset = (mi_row >> mib_size_log2) * stride + (mi_col >> mib_size_log2);
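// cb_buffer_base is a per-superblock grid of CB_BUFFERs; the +1 in the stride
// covers a partial superblock column, and offset selects this superblock's
// entry.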
2447 CB_BUFFER *cb_buffer = cb_buffer_base + offset;
2448
2449 for (int plane = 0; plane < num_planes; ++plane) {
2450 dcb->dqcoeff_block[plane] = cb_buffer->dqcoeff[plane];
2451 dcb->eob_data[plane] = cb_buffer->eob_data[plane];
2452 dcb->cb_offset[plane] = 0;
2453 dcb->txb_offset[plane] = 0;
2454 }
2455 MACROBLOCKD *const xd = &dcb->xd;
2456 xd->plane[0].color_index_map = cb_buffer->color_index_map[0];
2457 xd->plane[1].color_index_map = cb_buffer->color_index_map[1];
2458 xd->color_index_map_offset[0] = 0;
2459 xd->color_index_map_offset[1] = 0;
2460 }
2461
2462 static AOM_INLINE void decoder_alloc_tile_data(AV1Decoder *pbi,
2463 const int n_tiles) {
2464 AV1_COMMON *const cm = &pbi->common;
2465 aom_free(pbi->tile_data);
2466 pbi->allocated_tiles = 0;
2467 CHECK_MEM_ERROR(cm, pbi->tile_data,
2468 aom_memalign(32, n_tiles * sizeof(*pbi->tile_data)));
2469 pbi->allocated_tiles = n_tiles;
2470 for (int i = 0; i < n_tiles; i++) {
2471 TileDataDec *const tile_data = pbi->tile_data + i;
2472 av1_zero(tile_data->dec_row_mt_sync);
2473 }
2474 pbi->allocated_row_mt_sync_rows = 0;
2475 }
2476
2477 // Set up nsync by width.
2478 static INLINE int get_sync_range(int width) {
2479 // nsync numbers are picked by testing.
2480 #if 0
2481 if (width < 640)
2482 return 1;
2483 else if (width <= 1280)
2484 return 2;
2485 else if (width <= 4096)
2486 return 4;
2487 else
2488 return 8;
2489 #else
2490 (void)width;
2491 #endif
2492 return 1;
2493 }
2494
2495 // Allocate memory for decoder row synchronization
2496 static AOM_INLINE void dec_row_mt_alloc(AV1DecRowMTSync *dec_row_mt_sync,
2497 AV1_COMMON *cm, int rows) {
2498 dec_row_mt_sync->allocated_sb_rows = rows;
2499 #if CONFIG_MULTITHREAD
2500 {
2501 int i;
2502
2503 CHECK_MEM_ERROR(cm, dec_row_mt_sync->mutex_,
2504 aom_malloc(sizeof(*(dec_row_mt_sync->mutex_)) * rows));
2505 if (dec_row_mt_sync->mutex_) {
2506 for (i = 0; i < rows; ++i) {
2507 pthread_mutex_init(&dec_row_mt_sync->mutex_[i], NULL);
2508 }
2509 }
2510
2511 CHECK_MEM_ERROR(cm, dec_row_mt_sync->cond_,
2512 aom_malloc(sizeof(*(dec_row_mt_sync->cond_)) * rows));
2513 if (dec_row_mt_sync->cond_) {
2514 for (i = 0; i < rows; ++i) {
2515 pthread_cond_init(&dec_row_mt_sync->cond_[i], NULL);
2516 }
2517 }
2518 }
2519 #endif // CONFIG_MULTITHREAD
2520
2521 CHECK_MEM_ERROR(cm, dec_row_mt_sync->cur_sb_col,
2522 aom_malloc(sizeof(*(dec_row_mt_sync->cur_sb_col)) * rows));
2523
2524 // Set up nsync.
2525 dec_row_mt_sync->sync_range = get_sync_range(cm->width);
2526 }
2527
2528 // Deallocate decoder row synchronization related mutex and data
2529 void av1_dec_row_mt_dealloc(AV1DecRowMTSync *dec_row_mt_sync) {
2530 if (dec_row_mt_sync != NULL) {
2531 #if CONFIG_MULTITHREAD
2532 int i;
2533 if (dec_row_mt_sync->mutex_ != NULL) {
2534 for (i = 0; i < dec_row_mt_sync->allocated_sb_rows; ++i) {
2535 pthread_mutex_destroy(&dec_row_mt_sync->mutex_[i]);
2536 }
2537 aom_free(dec_row_mt_sync->mutex_);
2538 }
2539 if (dec_row_mt_sync->cond_ != NULL) {
2540 for (i = 0; i < dec_row_mt_sync->allocated_sb_rows; ++i) {
2541 pthread_cond_destroy(&dec_row_mt_sync->cond_[i]);
2542 }
2543 aom_free(dec_row_mt_sync->cond_);
2544 }
2545 #endif // CONFIG_MULTITHREAD
2546 aom_free(dec_row_mt_sync->cur_sb_col);
2547
2548 // Clear the structure: the source of this call may be a resize, in which
2549 // case this call will be followed by an _alloc() that may fail.
2550 av1_zero(*dec_row_mt_sync);
2551 }
2552 }
2553
2554 static INLINE void sync_read(AV1DecRowMTSync *const dec_row_mt_sync, int r,
2555 int c) {
2556 #if CONFIG_MULTITHREAD
2557 const int nsync = dec_row_mt_sync->sync_range;
2558
2559 if (r && !(c & (nsync - 1))) {
2560 pthread_mutex_t *const mutex = &dec_row_mt_sync->mutex_[r - 1];
2561 pthread_mutex_lock(mutex);
2562
2563 while (c > dec_row_mt_sync->cur_sb_col[r - 1] - nsync -
2564 dec_row_mt_sync->intrabc_extra_top_right_sb_delay) {
2565 pthread_cond_wait(&dec_row_mt_sync->cond_[r - 1], mutex);
2566 }
2567 pthread_mutex_unlock(mutex);
2568 }
2569 #else
2570 (void)dec_row_mt_sync;
2571 (void)r;
2572 (void)c;
2573 #endif // CONFIG_MULTITHREAD
2574 }
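// Net effect: before decoding superblock (r, c), a worker waits until row
// r - 1 has decoded at least c + nsync columns (plus the extra top-right
// delay required when intra block copy is allowed).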
2575
2576 static INLINE void sync_write(AV1DecRowMTSync *const dec_row_mt_sync, int r,
2577 int c, const int sb_cols) {
2578 #if CONFIG_MULTITHREAD
2579 const int nsync = dec_row_mt_sync->sync_range;
2580 int cur;
2581 int sig = 1;
2582
2583 if (c < sb_cols - 1) {
2584 cur = c;
2585 if (c % nsync) sig = 0;
2586 } else {
2587 cur = sb_cols + nsync + dec_row_mt_sync->intrabc_extra_top_right_sb_delay;
2588 }
2589
2590 if (sig) {
2591 pthread_mutex_lock(&dec_row_mt_sync->mutex_[r]);
2592
2593 dec_row_mt_sync->cur_sb_col[r] = cur;
2594
2595 pthread_cond_signal(&dec_row_mt_sync->cond_[r]);
2596 pthread_mutex_unlock(&dec_row_mt_sync->mutex_[r]);
2597 }
2598 #else
2599 (void)dec_row_mt_sync;
2600 (void)r;
2601 (void)c;
2602 (void)sb_cols;
2603 #endif // CONFIG_MULTITHREAD
2604 }
2605
2606 static INLINE void signal_decoding_done_for_erroneous_row(
2607 AV1Decoder *const pbi, const MACROBLOCKD *const xd) {
2608 AV1_COMMON *const cm = &pbi->common;
2609 const TileInfo *const tile = &xd->tile;
2610 const int sb_row_in_tile =
2611 ((xd->mi_row - tile->mi_row_start) >> cm->seq_params->mib_size_log2);
2612 const int sb_cols_in_tile = av1_get_sb_cols_in_tile(cm, tile);
2613 TileDataDec *const tile_data =
2614 pbi->tile_data + tile->tile_row * cm->tiles.cols + tile->tile_col;
2615 AV1DecRowMTSync *dec_row_mt_sync = &tile_data->dec_row_mt_sync;
2616
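// Report the entire SB row as decoded so that workers blocked in sync_read()
// on this row can make progress despite the error.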
2617 sync_write(dec_row_mt_sync, sb_row_in_tile, sb_cols_in_tile - 1,
2618 sb_cols_in_tile);
2619 }
2620
2621 static AOM_INLINE void decode_tile_sb_row(AV1Decoder *pbi, ThreadData *const td,
2622 const TileInfo *tile_info,
2623 const int mi_row) {
2624 AV1_COMMON *const cm = &pbi->common;
2625 const int num_planes = av1_num_planes(cm);
2626 TileDataDec *const tile_data = pbi->tile_data +
2627 tile_info->tile_row * cm->tiles.cols +
2628 tile_info->tile_col;
2629 const int sb_cols_in_tile = av1_get_sb_cols_in_tile(cm, tile_info);
2630 const int sb_row_in_tile =
2631 (mi_row - tile_info->mi_row_start) >> cm->seq_params->mib_size_log2;
2632 int sb_col_in_tile = 0;
2633 int row_mt_exit = 0;
2634
2635 for (int mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
2636 mi_col += cm->seq_params->mib_size, sb_col_in_tile++) {
2637 set_cb_buffer(pbi, &td->dcb, pbi->cb_buffer_base, num_planes, mi_row,
2638 mi_col);
2639
2640 sync_read(&tile_data->dec_row_mt_sync, sb_row_in_tile, sb_col_in_tile);
2641
2642 #if CONFIG_MULTITHREAD
2643 pthread_mutex_lock(pbi->row_mt_mutex_);
2644 #endif
2645 row_mt_exit = pbi->frame_row_mt_info.row_mt_exit;
2646 #if CONFIG_MULTITHREAD
2647 pthread_mutex_unlock(pbi->row_mt_mutex_);
2648 #endif
2649
2650 if (!row_mt_exit) {
2651 // Decoding of the super-block
2652 decode_partition(pbi, td, mi_row, mi_col, td->bit_reader,
2653 cm->seq_params->sb_size, 0x2);
2654 }
2655
2656 sync_write(&tile_data->dec_row_mt_sync, sb_row_in_tile, sb_col_in_tile,
2657 sb_cols_in_tile);
2658 }
2659 }
2660
2661 static int check_trailing_bits_after_symbol_coder(aom_reader *r) {
2662 if (aom_reader_has_overflowed(r)) return -1;
2663
2664 uint32_t nb_bits = aom_reader_tell(r);
2665 uint32_t nb_bytes = (nb_bits + 7) >> 3;
2666 const uint8_t *p = aom_reader_find_begin(r) + nb_bytes;
2667
2668 // aom_reader_tell() returns 1 for a newly initialized decoder, and the
2669 // return value only increases as values are decoded. So nb_bits > 0, and
2670 // thus p > p_begin. Therefore accessing p[-1] is safe.
2671 uint8_t last_byte = p[-1];
2672 uint8_t pattern = 128 >> ((nb_bits - 1) & 7);
2673 if ((last_byte & (2 * pattern - 1)) != pattern) return -1;
2674
2675 // Make sure that all padding bytes are zero as required by the spec.
2676 const uint8_t *p_end = aom_reader_find_end(r);
2677 while (p < p_end) {
2678 if (*p != 0) return -1;
2679 p++;
2680 }
2681 return 0;
2682 }
2683
2684 static AOM_INLINE void set_decode_func_pointers(ThreadData *td,
2685 int parse_decode_flag) {
2686 td->read_coeffs_tx_intra_block_visit = decode_block_void;
2687 td->predict_and_recon_intra_block_visit = decode_block_void;
2688 td->read_coeffs_tx_inter_block_visit = decode_block_void;
2689 td->inverse_tx_inter_block_visit = decode_block_void;
2690 td->predict_inter_block_visit = predict_inter_block_void;
2691 td->cfl_store_inter_block_visit = cfl_store_inter_block_void;
2692
2693 if (parse_decode_flag & 0x1) {
2694 td->read_coeffs_tx_intra_block_visit = read_coeffs_tx_intra_block;
2695 td->read_coeffs_tx_inter_block_visit = av1_read_coeffs_txb_facade;
2696 }
2697 if (parse_decode_flag & 0x2) {
2698 td->predict_and_recon_intra_block_visit =
2699 predict_and_reconstruct_intra_block;
2700 td->inverse_tx_inter_block_visit = inverse_transform_inter_block;
2701 td->predict_inter_block_visit = predict_inter_block;
2702 td->cfl_store_inter_block_visit = cfl_store_inter_block;
2703 }
2704 }
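// The single-threaded tile path parses and reconstructs in one pass (0x3,
// cf. decode_tile() below), while the row-MT path runs a parse-only pass
// (0x1) followed by a reconstruct-only pass (0x2, cf. decode_tile_sb_row()
// above).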
2705
2706 static AOM_INLINE void decode_tile(AV1Decoder *pbi, ThreadData *const td,
2707 int tile_row, int tile_col) {
2708 TileInfo tile_info;
2709
2710 AV1_COMMON *const cm = &pbi->common;
2711 const int num_planes = av1_num_planes(cm);
2712
2713 av1_tile_set_row(&tile_info, cm, tile_row);
2714 av1_tile_set_col(&tile_info, cm, tile_col);
2715 DecoderCodingBlock *const dcb = &td->dcb;
2716 MACROBLOCKD *const xd = &dcb->xd;
2717
2718 av1_zero_above_context(cm, xd, tile_info.mi_col_start, tile_info.mi_col_end,
2719 tile_row);
2720 av1_reset_loop_filter_delta(xd, num_planes);
2721 av1_reset_loop_restoration(xd, num_planes);
2722
2723 for (int mi_row = tile_info.mi_row_start; mi_row < tile_info.mi_row_end;
2724 mi_row += cm->seq_params->mib_size) {
2725 av1_zero_left_context(xd);
2726
2727 for (int mi_col = tile_info.mi_col_start; mi_col < tile_info.mi_col_end;
2728 mi_col += cm->seq_params->mib_size) {
2729 set_cb_buffer(pbi, dcb, &td->cb_buffer_base, num_planes, 0, 0);
2730
2731 // Bit-stream parsing and decoding of the superblock
2732 decode_partition(pbi, td, mi_row, mi_col, td->bit_reader,
2733 cm->seq_params->sb_size, 0x3);
2734
2735 if (aom_reader_has_overflowed(td->bit_reader)) {
2736 aom_merge_corrupted_flag(&dcb->corrupted, 1);
2737 return;
2738 }
2739 }
2740 }
2741
2742 int corrupted =
2743 (check_trailing_bits_after_symbol_coder(td->bit_reader)) ? 1 : 0;
2744 aom_merge_corrupted_flag(&dcb->corrupted, corrupted);
2745 }
2746
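// Single-threaded tile decoding: tiles are parsed and reconstructed one at a
// time on the calling thread. The multi-threaded counterparts below are
// decode_tiles_mt() (one worker per tile) and decode_tiles_row_mt() (workers
// scheduled per superblock row).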
static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
                                   const uint8_t *data_end, int start_tile,
                                   int end_tile) {
  AV1_COMMON *const cm = &pbi->common;
  ThreadData *const td = &pbi->td;
  CommonTileParams *const tiles = &cm->tiles;
  const int tile_cols = tiles->cols;
  const int tile_rows = tiles->rows;
  const int n_tiles = tile_cols * tile_rows;
  TileBufferDec(*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
  const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
  const int single_row = pbi->dec_tile_row >= 0;
  const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
  const int single_col = pbi->dec_tile_col >= 0;
  int tile_rows_start;
  int tile_rows_end;
  int tile_cols_start;
  int tile_cols_end;
  int inv_col_order;
  int inv_row_order;
  int tile_row, tile_col;
  uint8_t allow_update_cdf;
  const uint8_t *raw_data_end = NULL;

  if (tiles->large_scale) {
    tile_rows_start = single_row ? dec_tile_row : 0;
    tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
    tile_cols_start = single_col ? dec_tile_col : 0;
    tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
    inv_col_order = pbi->inv_tile_order && !single_col;
    inv_row_order = pbi->inv_tile_order && !single_row;
    allow_update_cdf = 0;
  } else {
    tile_rows_start = 0;
    tile_rows_end = tile_rows;
    tile_cols_start = 0;
    tile_cols_end = tile_cols;
    inv_col_order = pbi->inv_tile_order;
    inv_row_order = pbi->inv_tile_order;
    allow_update_cdf = 1;
  }

  // No tiles to decode.
  if (tile_rows_end <= tile_rows_start || tile_cols_end <= tile_cols_start ||
      // The index of the first tile in the range is greater than end_tile.
      tile_rows_start * tiles->cols + tile_cols_start > end_tile ||
      // The index of the last tile in the range is smaller than start_tile.
      (tile_rows_end - 1) * tiles->cols + tile_cols_end - 1 < start_tile)
    return data;

  allow_update_cdf = allow_update_cdf && !cm->features.disable_cdf_update;

  assert(tile_rows <= MAX_TILE_ROWS);
  assert(tile_cols <= MAX_TILE_COLS);

#if EXT_TILE_DEBUG
  if (tiles->large_scale && !pbi->ext_tile_debug)
    raw_data_end = get_ls_single_tile_buffer(pbi, data, tile_buffers);
  else if (tiles->large_scale && pbi->ext_tile_debug)
    raw_data_end = get_ls_tile_buffers(pbi, data, data_end, tile_buffers);
  else
#endif  // EXT_TILE_DEBUG
    get_tile_buffers(pbi, data, data_end, tile_buffers, start_tile, end_tile);

  if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
    decoder_alloc_tile_data(pbi, n_tiles);
  }
  if (pbi->dcb.xd.seg_mask == NULL)
    CHECK_MEM_ERROR(cm, pbi->dcb.xd.seg_mask,
                    (uint8_t *)aom_memalign(
                        16, 2 * MAX_SB_SQUARE * sizeof(*pbi->dcb.xd.seg_mask)));
#if CONFIG_ACCOUNTING
  if (pbi->acct_enabled) {
    aom_accounting_reset(&pbi->accounting);
  }
#endif

  set_decode_func_pointers(&pbi->td, 0x3);

  // Load all tile information into thread_data.
  td->dcb = pbi->dcb;

  td->dcb.corrupted = 0;
  td->dcb.mc_buf[0] = td->mc_buf[0];
  td->dcb.mc_buf[1] = td->mc_buf[1];
  td->dcb.xd.tmp_conv_dst = td->tmp_conv_dst;
  for (int j = 0; j < 2; ++j) {
    td->dcb.xd.tmp_obmc_bufs[j] = td->tmp_obmc_bufs[j];
  }

  for (tile_row = tile_rows_start; tile_row < tile_rows_end; ++tile_row) {
    const int row = inv_row_order ? tile_rows - 1 - tile_row : tile_row;

    for (tile_col = tile_cols_start; tile_col < tile_cols_end; ++tile_col) {
      const int col = inv_col_order ? tile_cols - 1 - tile_col : tile_col;
      TileDataDec *const tile_data = pbi->tile_data + row * tiles->cols + col;
      const TileBufferDec *const tile_bs_buf = &tile_buffers[row][col];

      if (row * tiles->cols + col < start_tile ||
          row * tiles->cols + col > end_tile)
        continue;

      td->bit_reader = &tile_data->bit_reader;
      av1_zero(td->cb_buffer_base.dqcoeff);
      av1_tile_init(&td->dcb.xd.tile, cm, row, col);
      td->dcb.xd.current_base_qindex = cm->quant_params.base_qindex;
      setup_bool_decoder(&td->dcb.xd, tile_bs_buf->data, data_end,
                         tile_bs_buf->size, &pbi->error, td->bit_reader,
                         allow_update_cdf);
#if CONFIG_ACCOUNTING
      if (pbi->acct_enabled) {
        td->bit_reader->accounting = &pbi->accounting;
        td->bit_reader->accounting->last_tell_frac =
            aom_reader_tell_frac(td->bit_reader);
      } else {
        td->bit_reader->accounting = NULL;
      }
#endif
      av1_init_macroblockd(cm, &td->dcb.xd);
      av1_init_above_context(&cm->above_contexts, av1_num_planes(cm), row,
                             &td->dcb.xd);

      // Initialise the tile context from the frame context
      tile_data->tctx = *cm->fc;
      td->dcb.xd.tile_ctx = &tile_data->tctx;

      // decode tile
      decode_tile(pbi, td, row, col);
      aom_merge_corrupted_flag(&pbi->dcb.corrupted, td->dcb.corrupted);
      if (pbi->dcb.corrupted)
        aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                           "Failed to decode tile data");
    }
  }

  if (tiles->large_scale) {
    if (n_tiles == 1) {
      // Find the end of the single tile buffer
      return aom_reader_find_end(&pbi->tile_data->bit_reader);
    }
    // Return the end of the last tile buffer
    return raw_data_end;
  }
  TileDataDec *const tile_data = pbi->tile_data + end_tile;

  return aom_reader_find_end(&tile_data->bit_reader);
}

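// Pops the next tile job off the shared queue, or returns NULL once all
// enqueued jobs have been handed out. The queue indices are protected by
// job_mutex when multithreading is compiled in.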
static TileJobsDec *get_dec_job_info(AV1DecTileMT *tile_mt_info) {
  TileJobsDec *cur_job_info = NULL;
#if CONFIG_MULTITHREAD
  pthread_mutex_lock(tile_mt_info->job_mutex);

  if (tile_mt_info->jobs_dequeued < tile_mt_info->jobs_enqueued) {
    cur_job_info = tile_mt_info->job_queue + tile_mt_info->jobs_dequeued;
    tile_mt_info->jobs_dequeued++;
  }

  pthread_mutex_unlock(tile_mt_info->job_mutex);
#else
  (void)tile_mt_info;
#endif
  return cur_job_info;
}

static AOM_INLINE void tile_worker_hook_init(
    AV1Decoder *const pbi, DecWorkerData *const thread_data,
    const TileBufferDec *const tile_buffer, TileDataDec *const tile_data,
    uint8_t allow_update_cdf) {
  AV1_COMMON *cm = &pbi->common;
  ThreadData *const td = thread_data->td;
  int tile_row = tile_data->tile_info.tile_row;
  int tile_col = tile_data->tile_info.tile_col;

  td->bit_reader = &tile_data->bit_reader;
  av1_zero(td->cb_buffer_base.dqcoeff);

  MACROBLOCKD *const xd = &td->dcb.xd;
  av1_tile_init(&xd->tile, cm, tile_row, tile_col);
  xd->current_base_qindex = cm->quant_params.base_qindex;

  setup_bool_decoder(xd, tile_buffer->data, thread_data->data_end,
                     tile_buffer->size, &thread_data->error_info,
                     td->bit_reader, allow_update_cdf);
#if CONFIG_ACCOUNTING
  if (pbi->acct_enabled) {
    td->bit_reader->accounting = &pbi->accounting;
    td->bit_reader->accounting->last_tell_frac =
        aom_reader_tell_frac(td->bit_reader);
  } else {
    td->bit_reader->accounting = NULL;
  }
#endif
  av1_init_macroblockd(cm, xd);
  xd->error_info = &thread_data->error_info;
  av1_init_above_context(&cm->above_contexts, av1_num_planes(cm), tile_row, xd);

  // Initialise the tile context from the frame context
  tile_data->tctx = *cm->fc;
  xd->tile_ctx = &tile_data->tctx;
#if CONFIG_ACCOUNTING
  if (pbi->acct_enabled) {
    tile_data->bit_reader.accounting->last_tell_frac =
        aom_reader_tell_frac(&tile_data->bit_reader);
  }
#endif
}

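// Worker hook for tile-parallel decoding: each worker repeatedly grabs a tile
// job and runs the full parse + reconstruct pass (0x3) on it. Returns 1 on
// success and 0 if the tile data turned out to be corrupted.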
static int tile_worker_hook(void *arg1, void *arg2) {
  DecWorkerData *const thread_data = (DecWorkerData *)arg1;
  AV1Decoder *const pbi = (AV1Decoder *)arg2;
  AV1_COMMON *cm = &pbi->common;
  ThreadData *const td = thread_data->td;
  uint8_t allow_update_cdf;

  // The jmp_buf is valid only for the duration of the function that calls
  // setjmp(). Therefore, this function must reset the 'setjmp' field to 0
  // before it returns.
  if (setjmp(thread_data->error_info.jmp)) {
    thread_data->error_info.setjmp = 0;
    thread_data->td->dcb.corrupted = 1;
    return 0;
  }
  thread_data->error_info.setjmp = 1;

  allow_update_cdf = cm->tiles.large_scale ? 0 : 1;
  allow_update_cdf = allow_update_cdf && !cm->features.disable_cdf_update;

  set_decode_func_pointers(td, 0x3);

  assert(cm->tiles.cols > 0);
  while (!td->dcb.corrupted) {
    TileJobsDec *cur_job_info = get_dec_job_info(&pbi->tile_mt_info);

    if (cur_job_info != NULL) {
      const TileBufferDec *const tile_buffer = cur_job_info->tile_buffer;
      TileDataDec *const tile_data = cur_job_info->tile_data;
      tile_worker_hook_init(pbi, thread_data, tile_buffer, tile_data,
                            allow_update_cdf);
      // decode tile
      int tile_row = tile_data->tile_info.tile_row;
      int tile_col = tile_data->tile_info.tile_col;
      decode_tile(pbi, td, tile_row, tile_col);
    } else {
      break;
    }
  }
  thread_data->error_info.setjmp = 0;
  return !td->dcb.corrupted;
}

static INLINE int get_max_row_mt_workers_per_tile(AV1_COMMON *cm,
                                                  const TileInfo *tile) {
  // NOTE: Currently the maximum number of workers is derived from the parse
  // and decode times. As per the theoretical estimate, when the parse and
  // decode stages take an equal share of the time, the number of workers
  // needed to parse + decode a tile cannot exceed 2.
  // TODO(any): Modify this value if parsing is optimized in future.
  int sb_rows = av1_get_sb_rows_in_tile(cm, tile);
  int max_workers =
      sb_rows == 1 ? AOM_MIN_THREADS_PER_TILE : AOM_MAX_THREADS_PER_TILE;
  return max_workers;
}

// The caller must hold pbi->row_mt_mutex_ when calling this function.
// Returns 1 when either the next job has been stored in *next_job_info or
// end-of-frame has been signalled by storing 1 in *end_of_frame.
// NOTE: The caller waits on pbi->row_mt_cond_ if this function returns 0.
// The return value of this function depends on the following variables:
// - frame_row_mt_info->mi_rows_parse_done
// - frame_row_mt_info->mi_rows_decode_started
// - frame_row_mt_info->row_mt_exit
// Therefore we may need to signal or broadcast pbi->row_mt_cond_ if any of
// these variables is modified.
static int get_next_job_info(AV1Decoder *const pbi,
                             AV1DecRowMTJobInfo *next_job_info,
                             int *end_of_frame) {
  AV1_COMMON *cm = &pbi->common;
  TileDataDec *tile_data;
  AV1DecRowMTSync *dec_row_mt_sync;
  AV1DecRowMTInfo *frame_row_mt_info = &pbi->frame_row_mt_info;
  const int tile_rows_start = frame_row_mt_info->tile_rows_start;
  const int tile_rows_end = frame_row_mt_info->tile_rows_end;
  const int tile_cols_start = frame_row_mt_info->tile_cols_start;
  const int tile_cols_end = frame_row_mt_info->tile_cols_end;
  const int start_tile = frame_row_mt_info->start_tile;
  const int end_tile = frame_row_mt_info->end_tile;
  const int sb_mi_size = mi_size_wide[cm->seq_params->sb_size];
  int num_mis_to_decode, num_threads_working;
  int num_mis_waiting_for_decode;
  int min_threads_working = INT_MAX;
  int max_mis_to_decode = 0;
  int tile_row_idx, tile_col_idx;
  int tile_row = -1;
  int tile_col = -1;

  memset(next_job_info, 0, sizeof(*next_job_info));

  // Frame decode is completed or error is encountered.
  *end_of_frame = (frame_row_mt_info->mi_rows_decode_started ==
                   frame_row_mt_info->mi_rows_to_decode) ||
                  (frame_row_mt_info->row_mt_exit == 1);
  if (*end_of_frame) {
    return 1;
  }

  // Decoding cannot start as bit-stream parsing is not complete.
  assert(frame_row_mt_info->mi_rows_parse_done >=
         frame_row_mt_info->mi_rows_decode_started);
  if (frame_row_mt_info->mi_rows_parse_done ==
      frame_row_mt_info->mi_rows_decode_started)
    return 0;

  // Choose the tile to decode.
  for (tile_row_idx = tile_rows_start; tile_row_idx < tile_rows_end;
       ++tile_row_idx) {
    for (tile_col_idx = tile_cols_start; tile_col_idx < tile_cols_end;
         ++tile_col_idx) {
      if (tile_row_idx * cm->tiles.cols + tile_col_idx < start_tile ||
          tile_row_idx * cm->tiles.cols + tile_col_idx > end_tile)
        continue;

      tile_data =
          pbi->tile_data + tile_row_idx * cm->tiles.cols + tile_col_idx;
      dec_row_mt_sync = &tile_data->dec_row_mt_sync;

      num_threads_working = dec_row_mt_sync->num_threads_working;
      num_mis_waiting_for_decode = (dec_row_mt_sync->mi_rows_parse_done -
                                    dec_row_mt_sync->mi_rows_decode_started) *
                                   dec_row_mt_sync->mi_cols;
      num_mis_to_decode =
          (dec_row_mt_sync->mi_rows - dec_row_mt_sync->mi_rows_decode_started) *
          dec_row_mt_sync->mi_cols;

      assert(num_mis_to_decode >= num_mis_waiting_for_decode);

      // Pick the tile which has minimum number of threads working on it.
      if (num_mis_waiting_for_decode > 0) {
        if (num_threads_working < min_threads_working) {
          min_threads_working = num_threads_working;
          max_mis_to_decode = 0;
        }
        if (num_threads_working == min_threads_working &&
            num_mis_to_decode > max_mis_to_decode &&
            num_threads_working <
                get_max_row_mt_workers_per_tile(cm, &tile_data->tile_info)) {
          max_mis_to_decode = num_mis_to_decode;
          tile_row = tile_row_idx;
          tile_col = tile_col_idx;
        }
      }
    }
  }
  // No job found to process
  if (tile_row == -1 || tile_col == -1) return 0;

  tile_data = pbi->tile_data + tile_row * cm->tiles.cols + tile_col;
  dec_row_mt_sync = &tile_data->dec_row_mt_sync;

  next_job_info->tile_row = tile_row;
  next_job_info->tile_col = tile_col;
  next_job_info->mi_row = dec_row_mt_sync->mi_rows_decode_started +
                          tile_data->tile_info.mi_row_start;

  dec_row_mt_sync->num_threads_working++;
  dec_row_mt_sync->mi_rows_decode_started += sb_mi_size;
  frame_row_mt_info->mi_rows_decode_started += sb_mi_size;
  assert(frame_row_mt_info->mi_rows_parse_done >=
         frame_row_mt_info->mi_rows_decode_started);
#if CONFIG_MULTITHREAD
  if (frame_row_mt_info->mi_rows_decode_started ==
      frame_row_mt_info->mi_rows_to_decode) {
    pthread_cond_broadcast(pbi->row_mt_cond_);
  }
#endif

  return 1;
}

static INLINE void signal_parse_sb_row_done(AV1Decoder *const pbi,
                                            TileDataDec *const tile_data,
                                            const int sb_mi_size) {
  AV1DecRowMTInfo *frame_row_mt_info = &pbi->frame_row_mt_info;
#if CONFIG_MULTITHREAD
  pthread_mutex_lock(pbi->row_mt_mutex_);
#endif
  assert(frame_row_mt_info->mi_rows_parse_done >=
         frame_row_mt_info->mi_rows_decode_started);
  tile_data->dec_row_mt_sync.mi_rows_parse_done += sb_mi_size;
  frame_row_mt_info->mi_rows_parse_done += sb_mi_size;
#if CONFIG_MULTITHREAD
  // A new decode job is available. Wake up one worker thread to handle the
  // new decode job.
  // NOTE: This assumes we bump mi_rows_parse_done and mi_rows_decode_started
  // by the same increment (sb_mi_size).
  pthread_cond_signal(pbi->row_mt_cond_);
  pthread_mutex_unlock(pbi->row_mt_mutex_);
#endif
}

// This function is very similar to decode_tile(). It would be good to figure
// out how to share code.
static AOM_INLINE void parse_tile_row_mt(AV1Decoder *pbi, ThreadData *const td,
                                         TileDataDec *const tile_data) {
  AV1_COMMON *const cm = &pbi->common;
  const int sb_mi_size = mi_size_wide[cm->seq_params->sb_size];
  const int num_planes = av1_num_planes(cm);
  const TileInfo *const tile_info = &tile_data->tile_info;
  int tile_row = tile_info->tile_row;
  DecoderCodingBlock *const dcb = &td->dcb;
  MACROBLOCKD *const xd = &dcb->xd;

  av1_zero_above_context(cm, xd, tile_info->mi_col_start, tile_info->mi_col_end,
                         tile_row);
  av1_reset_loop_filter_delta(xd, num_planes);
  av1_reset_loop_restoration(xd, num_planes);

  for (int mi_row = tile_info->mi_row_start; mi_row < tile_info->mi_row_end;
       mi_row += cm->seq_params->mib_size) {
    av1_zero_left_context(xd);

    for (int mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
         mi_col += cm->seq_params->mib_size) {
      set_cb_buffer(pbi, dcb, pbi->cb_buffer_base, num_planes, mi_row, mi_col);

      // Bit-stream parsing of the superblock
      decode_partition(pbi, td, mi_row, mi_col, td->bit_reader,
                       cm->seq_params->sb_size, 0x1);

      if (aom_reader_has_overflowed(td->bit_reader)) {
        aom_merge_corrupted_flag(&dcb->corrupted, 1);
        return;
      }
    }
    signal_parse_sb_row_done(pbi, tile_data, sb_mi_size);
  }

  int corrupted =
      (check_trailing_bits_after_symbol_coder(td->bit_reader)) ? 1 : 0;
  aom_merge_corrupted_flag(&dcb->corrupted, corrupted);
}

static int row_mt_worker_hook(void *arg1, void *arg2) {
  DecWorkerData *const thread_data = (DecWorkerData *)arg1;
  AV1Decoder *const pbi = (AV1Decoder *)arg2;
  ThreadData *const td = thread_data->td;
  uint8_t allow_update_cdf;
  AV1DecRowMTInfo *frame_row_mt_info = &pbi->frame_row_mt_info;
  td->dcb.corrupted = 0;

  // The jmp_buf is valid only for the duration of the function that calls
  // setjmp(). Therefore, this function must reset the 'setjmp' field to 0
  // before it returns.
  if (setjmp(thread_data->error_info.jmp)) {
    thread_data->error_info.setjmp = 0;
    thread_data->td->dcb.corrupted = 1;
#if CONFIG_MULTITHREAD
    pthread_mutex_lock(pbi->row_mt_mutex_);
#endif
    frame_row_mt_info->row_mt_exit = 1;
#if CONFIG_MULTITHREAD
    pthread_cond_broadcast(pbi->row_mt_cond_);
    pthread_mutex_unlock(pbi->row_mt_mutex_);
#endif
    // If a thread hits an internal error while processing an SB row, the
    // other threads must be told that decoding of the erroneous row is
    // complete. This ensures that threads waiting on the completion of SBs
    // in the erroneous row do not wait indefinitely.
    signal_decoding_done_for_erroneous_row(pbi, &thread_data->td->dcb.xd);
    return 0;
  }
  thread_data->error_info.setjmp = 1;

  AV1_COMMON *cm = &pbi->common;
  allow_update_cdf = cm->tiles.large_scale ? 0 : 1;
  allow_update_cdf = allow_update_cdf && !cm->features.disable_cdf_update;

  set_decode_func_pointers(td, 0x1);

  assert(cm->tiles.cols > 0);
  while (!td->dcb.corrupted) {
    TileJobsDec *cur_job_info = get_dec_job_info(&pbi->tile_mt_info);

    if (cur_job_info != NULL) {
      const TileBufferDec *const tile_buffer = cur_job_info->tile_buffer;
      TileDataDec *const tile_data = cur_job_info->tile_data;
      tile_worker_hook_init(pbi, thread_data, tile_buffer, tile_data,
                            allow_update_cdf);
#if CONFIG_MULTITHREAD
      pthread_mutex_lock(pbi->row_mt_mutex_);
#endif
      tile_data->dec_row_mt_sync.num_threads_working++;
#if CONFIG_MULTITHREAD
      pthread_mutex_unlock(pbi->row_mt_mutex_);
#endif
      // parse tile
      parse_tile_row_mt(pbi, td, tile_data);
#if CONFIG_MULTITHREAD
      pthread_mutex_lock(pbi->row_mt_mutex_);
#endif
      tile_data->dec_row_mt_sync.num_threads_working--;
#if CONFIG_MULTITHREAD
      pthread_mutex_unlock(pbi->row_mt_mutex_);
#endif
    } else {
      break;
    }
  }

  if (td->dcb.corrupted) {
    thread_data->error_info.setjmp = 0;
#if CONFIG_MULTITHREAD
    pthread_mutex_lock(pbi->row_mt_mutex_);
#endif
    frame_row_mt_info->row_mt_exit = 1;
#if CONFIG_MULTITHREAD
    pthread_cond_broadcast(pbi->row_mt_cond_);
    pthread_mutex_unlock(pbi->row_mt_mutex_);
#endif
    return 0;
  }

  set_decode_func_pointers(td, 0x2);

  while (1) {
    AV1DecRowMTJobInfo next_job_info;
    int end_of_frame = 0;

#if CONFIG_MULTITHREAD
    pthread_mutex_lock(pbi->row_mt_mutex_);
#endif
    while (!get_next_job_info(pbi, &next_job_info, &end_of_frame)) {
#if CONFIG_MULTITHREAD
      pthread_cond_wait(pbi->row_mt_cond_, pbi->row_mt_mutex_);
#endif
    }
#if CONFIG_MULTITHREAD
    pthread_mutex_unlock(pbi->row_mt_mutex_);
#endif

    if (end_of_frame) break;

    int tile_row = next_job_info.tile_row;
    int tile_col = next_job_info.tile_col;
    int mi_row = next_job_info.mi_row;

    TileDataDec *tile_data =
        pbi->tile_data + tile_row * cm->tiles.cols + tile_col;
    AV1DecRowMTSync *dec_row_mt_sync = &tile_data->dec_row_mt_sync;

    av1_tile_init(&td->dcb.xd.tile, cm, tile_row, tile_col);
    av1_init_macroblockd(cm, &td->dcb.xd);
    td->dcb.xd.error_info = &thread_data->error_info;

    decode_tile_sb_row(pbi, td, &tile_data->tile_info, mi_row);

#if CONFIG_MULTITHREAD
    pthread_mutex_lock(pbi->row_mt_mutex_);
#endif
    dec_row_mt_sync->num_threads_working--;
#if CONFIG_MULTITHREAD
    pthread_mutex_unlock(pbi->row_mt_mutex_);
#endif
  }
  thread_data->error_info.setjmp = 0;
  return !td->dcb.corrupted;
}

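// The tile job queue is sorted with the comparator below so that the largest
// tile buffers come first; since buffer size is a reasonable proxy for decode
// time, this should let workers start on the slowest tiles early and balance
// the load.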
// Sorts in descending order of tile buffer size.
static int compare_tile_buffers(const void *a, const void *b) {
  const TileJobsDec *const buf1 = (const TileJobsDec *)a;
  const TileJobsDec *const buf2 = (const TileJobsDec *)b;
  return (((int)buf2->tile_buffer->size) - ((int)buf1->tile_buffer->size));
}

static AOM_INLINE void enqueue_tile_jobs(AV1Decoder *pbi, AV1_COMMON *cm,
                                         int tile_rows_start, int tile_rows_end,
                                         int tile_cols_start, int tile_cols_end,
                                         int start_tile, int end_tile) {
  AV1DecTileMT *tile_mt_info = &pbi->tile_mt_info;
  TileJobsDec *tile_job_queue = tile_mt_info->job_queue;
  tile_mt_info->jobs_enqueued = 0;
  tile_mt_info->jobs_dequeued = 0;

  for (int row = tile_rows_start; row < tile_rows_end; row++) {
    for (int col = tile_cols_start; col < tile_cols_end; col++) {
      if (row * cm->tiles.cols + col < start_tile ||
          row * cm->tiles.cols + col > end_tile)
        continue;
      tile_job_queue->tile_buffer = &pbi->tile_buffers[row][col];
      tile_job_queue->tile_data = pbi->tile_data + row * cm->tiles.cols + col;
      tile_job_queue++;
      tile_mt_info->jobs_enqueued++;
    }
  }
}

static AOM_INLINE void alloc_dec_jobs(AV1DecTileMT *tile_mt_info,
                                      AV1_COMMON *cm, int tile_rows,
                                      int tile_cols) {
  tile_mt_info->alloc_tile_rows = tile_rows;
  tile_mt_info->alloc_tile_cols = tile_cols;
  int num_tiles = tile_rows * tile_cols;
#if CONFIG_MULTITHREAD
  {
    CHECK_MEM_ERROR(cm, tile_mt_info->job_mutex,
                    aom_malloc(sizeof(*tile_mt_info->job_mutex) * num_tiles));

    for (int i = 0; i < num_tiles; i++) {
      pthread_mutex_init(&tile_mt_info->job_mutex[i], NULL);
    }
  }
#endif
  CHECK_MEM_ERROR(cm, tile_mt_info->job_queue,
                  aom_malloc(sizeof(*tile_mt_info->job_queue) * num_tiles));
}

void av1_free_mc_tmp_buf(ThreadData *thread_data) {
  int ref;
  for (ref = 0; ref < 2; ref++) {
    if (thread_data->mc_buf_use_highbd)
      aom_free(CONVERT_TO_SHORTPTR(thread_data->mc_buf[ref]));
    else
      aom_free(thread_data->mc_buf[ref]);
    thread_data->mc_buf[ref] = NULL;
  }
  thread_data->mc_buf_size = 0;
  thread_data->mc_buf_use_highbd = 0;

  aom_free(thread_data->tmp_conv_dst);
  thread_data->tmp_conv_dst = NULL;
  aom_free(thread_data->seg_mask);
  thread_data->seg_mask = NULL;
  for (int i = 0; i < 2; ++i) {
    aom_free(thread_data->tmp_obmc_bufs[i]);
    thread_data->tmp_obmc_bufs[i] = NULL;
  }
}

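// Allocates the per-thread scratch buffers used during motion compensation
// and reconstruction. buf_size is derived from MC_TEMP_BUF_PELS and doubled
// for high bit-depth (see decode_mt_init()), since samples are then stored
// as uint16_t.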
static AOM_INLINE void allocate_mc_tmp_buf(AV1_COMMON *const cm,
                                           ThreadData *thread_data,
                                           int buf_size, int use_highbd) {
  for (int ref = 0; ref < 2; ref++) {
    // The mc_buf/hbd_mc_buf must be zeroed to fix an intermittent valgrind
    // error ('Conditional jump or move depends on uninitialised value') from
    // the loop filter. Uninitialized reads from mc_buf/hbd_mc_buf in the
    // convolve functions (e.g. the horiz_4tap path in
    // av1_convolve_2d_sr_avx2()) appear to be the cause of this issue.
    if (use_highbd) {
      uint16_t *hbd_mc_buf;
      CHECK_MEM_ERROR(cm, hbd_mc_buf, (uint16_t *)aom_memalign(16, buf_size));
      memset(hbd_mc_buf, 0, buf_size);
      thread_data->mc_buf[ref] = CONVERT_TO_BYTEPTR(hbd_mc_buf);
    } else {
      CHECK_MEM_ERROR(cm, thread_data->mc_buf[ref],
                      (uint8_t *)aom_memalign(16, buf_size));
      memset(thread_data->mc_buf[ref], 0, buf_size);
    }
  }
  thread_data->mc_buf_size = buf_size;
  thread_data->mc_buf_use_highbd = use_highbd;

  CHECK_MEM_ERROR(cm, thread_data->tmp_conv_dst,
                  aom_memalign(32, MAX_SB_SIZE * MAX_SB_SIZE *
                                       sizeof(*thread_data->tmp_conv_dst)));
  CHECK_MEM_ERROR(cm, thread_data->seg_mask,
                  (uint8_t *)aom_memalign(
                      16, 2 * MAX_SB_SQUARE * sizeof(*thread_data->seg_mask)));

  for (int i = 0; i < 2; ++i) {
    CHECK_MEM_ERROR(
        cm, thread_data->tmp_obmc_bufs[i],
        aom_memalign(16, 2 * MAX_MB_PLANE * MAX_SB_SQUARE *
                             sizeof(*thread_data->tmp_obmc_bufs[i])));
  }
}

static AOM_INLINE void reset_dec_workers(AV1Decoder *pbi,
                                         AVxWorkerHook worker_hook,
                                         int num_workers) {
  const AVxWorkerInterface *const winterface = aom_get_worker_interface();

  // Reset tile decoding hook
  for (int worker_idx = 0; worker_idx < num_workers; ++worker_idx) {
    AVxWorker *const worker = &pbi->tile_workers[worker_idx];
    DecWorkerData *const thread_data = pbi->thread_data + worker_idx;
    thread_data->td->dcb = pbi->dcb;
    thread_data->td->dcb.corrupted = 0;
    thread_data->td->dcb.mc_buf[0] = thread_data->td->mc_buf[0];
    thread_data->td->dcb.mc_buf[1] = thread_data->td->mc_buf[1];
    thread_data->td->dcb.xd.tmp_conv_dst = thread_data->td->tmp_conv_dst;
    if (worker_idx)
      thread_data->td->dcb.xd.seg_mask = thread_data->td->seg_mask;
    for (int j = 0; j < 2; ++j) {
      thread_data->td->dcb.xd.tmp_obmc_bufs[j] =
          thread_data->td->tmp_obmc_bufs[j];
    }
    winterface->sync(worker);

    worker->hook = worker_hook;
    worker->data1 = thread_data;
    worker->data2 = pbi;
  }
#if CONFIG_ACCOUNTING
  if (pbi->acct_enabled) {
    aom_accounting_reset(&pbi->accounting);
  }
#endif
}

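// Kicks off decoding on all workers. Note that worker 0 is the main thread:
// it runs its hook synchronously via execute(), while the remaining workers
// are launched asynchronously and joined later in sync_dec_workers().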
static AOM_INLINE void launch_dec_workers(AV1Decoder *pbi,
                                          const uint8_t *data_end,
                                          int num_workers) {
  const AVxWorkerInterface *const winterface = aom_get_worker_interface();

  for (int worker_idx = num_workers - 1; worker_idx >= 0; --worker_idx) {
    AVxWorker *const worker = &pbi->tile_workers[worker_idx];
    DecWorkerData *const thread_data = (DecWorkerData *)worker->data1;

    thread_data->data_end = data_end;

    worker->had_error = 0;
    if (worker_idx == 0) {
      winterface->execute(worker);
    } else {
      winterface->launch(worker);
    }
  }
}

static AOM_INLINE void sync_dec_workers(AV1Decoder *pbi, int num_workers) {
  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
  int corrupted = 0;

  for (int worker_idx = num_workers; worker_idx > 0; --worker_idx) {
    AVxWorker *const worker = &pbi->tile_workers[worker_idx - 1];
    aom_merge_corrupted_flag(&corrupted, !winterface->sync(worker));
  }

  pbi->dcb.corrupted = corrupted;
}

static AOM_INLINE void decode_mt_init(AV1Decoder *pbi) {
  AV1_COMMON *const cm = &pbi->common;
  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
  int worker_idx;

  // Create workers and thread_data
  if (pbi->num_workers == 0) {
    const int num_threads = pbi->max_threads;
    CHECK_MEM_ERROR(cm, pbi->tile_workers,
                    aom_malloc(num_threads * sizeof(*pbi->tile_workers)));
    CHECK_MEM_ERROR(cm, pbi->thread_data,
                    aom_calloc(num_threads, sizeof(*pbi->thread_data)));

    for (worker_idx = 0; worker_idx < num_threads; ++worker_idx) {
      AVxWorker *const worker = &pbi->tile_workers[worker_idx];
      DecWorkerData *const thread_data = pbi->thread_data + worker_idx;

      winterface->init(worker);
      worker->thread_name = "aom tile worker";
      if (worker_idx != 0 && !winterface->reset(worker)) {
        aom_internal_error(&pbi->error, AOM_CODEC_ERROR,
                           "Tile decoder thread creation failed");
      }
      ++pbi->num_workers;

      if (worker_idx != 0) {
        // Allocate thread data.
        CHECK_MEM_ERROR(cm, thread_data->td,
                        aom_memalign(32, sizeof(*thread_data->td)));
        av1_zero(*thread_data->td);
      } else {
        // Main thread acts as a worker and uses the thread data in pbi
        thread_data->td = &pbi->td;
      }
      thread_data->error_info.error_code = AOM_CODEC_OK;
      thread_data->error_info.setjmp = 0;
    }
  }
  const int use_highbd = cm->seq_params->use_highbitdepth;
  const int buf_size = MC_TEMP_BUF_PELS << use_highbd;
  for (worker_idx = 1; worker_idx < pbi->max_threads; ++worker_idx) {
    DecWorkerData *const thread_data = pbi->thread_data + worker_idx;
    if (thread_data->td->mc_buf_size != buf_size) {
      av1_free_mc_tmp_buf(thread_data->td);
      allocate_mc_tmp_buf(cm, thread_data->td, buf_size, use_highbd);
    }
  }
}

static AOM_INLINE void tile_mt_queue(AV1Decoder *pbi, int tile_cols,
                                     int tile_rows, int tile_rows_start,
                                     int tile_rows_end, int tile_cols_start,
                                     int tile_cols_end, int start_tile,
                                     int end_tile) {
  AV1_COMMON *const cm = &pbi->common;
  if (pbi->tile_mt_info.alloc_tile_cols != tile_cols ||
      pbi->tile_mt_info.alloc_tile_rows != tile_rows) {
    av1_dealloc_dec_jobs(&pbi->tile_mt_info);
    alloc_dec_jobs(&pbi->tile_mt_info, cm, tile_rows, tile_cols);
  }
  enqueue_tile_jobs(pbi, cm, tile_rows_start, tile_rows_end, tile_cols_start,
                    tile_cols_end, start_tile, end_tile);
  qsort(pbi->tile_mt_info.job_queue, pbi->tile_mt_info.jobs_enqueued,
        sizeof(pbi->tile_mt_info.job_queue[0]), compare_tile_buffers);
}

static const uint8_t *decode_tiles_mt(AV1Decoder *pbi, const uint8_t *data,
                                      const uint8_t *data_end, int start_tile,
                                      int end_tile) {
  AV1_COMMON *const cm = &pbi->common;
  CommonTileParams *const tiles = &cm->tiles;
  const int tile_cols = tiles->cols;
  const int tile_rows = tiles->rows;
  const int n_tiles = tile_cols * tile_rows;
  TileBufferDec(*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
  const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
  const int single_row = pbi->dec_tile_row >= 0;
  const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
  const int single_col = pbi->dec_tile_col >= 0;
  int tile_rows_start;
  int tile_rows_end;
  int tile_cols_start;
  int tile_cols_end;
  int tile_count_tg;
  int num_workers;
  const uint8_t *raw_data_end = NULL;

  if (tiles->large_scale) {
    tile_rows_start = single_row ? dec_tile_row : 0;
    tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
    tile_cols_start = single_col ? dec_tile_col : 0;
    tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
  } else {
    tile_rows_start = 0;
    tile_rows_end = tile_rows;
    tile_cols_start = 0;
    tile_cols_end = tile_cols;
  }
  tile_count_tg = end_tile - start_tile + 1;
  num_workers = AOMMIN(pbi->max_threads, tile_count_tg);

  // No tiles to decode.
  if (tile_rows_end <= tile_rows_start || tile_cols_end <= tile_cols_start ||
      // The index of the first tile in the range is greater than end_tile.
      tile_rows_start * tile_cols + tile_cols_start > end_tile ||
      // The index of the last tile in the range is smaller than start_tile.
      (tile_rows_end - 1) * tile_cols + tile_cols_end - 1 < start_tile)
    return data;

  assert(tile_rows <= MAX_TILE_ROWS);
  assert(tile_cols <= MAX_TILE_COLS);
  assert(tile_count_tg > 0);
  assert(num_workers > 0);
  assert(start_tile <= end_tile);
  assert(start_tile >= 0 && end_tile < n_tiles);

  decode_mt_init(pbi);

  // get tile size in tile group
#if EXT_TILE_DEBUG
  if (tiles->large_scale) assert(pbi->ext_tile_debug == 1);
  if (tiles->large_scale)
    raw_data_end = get_ls_tile_buffers(pbi, data, data_end, tile_buffers);
  else
#endif  // EXT_TILE_DEBUG
    get_tile_buffers(pbi, data, data_end, tile_buffers, start_tile, end_tile);

  if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
    decoder_alloc_tile_data(pbi, n_tiles);
  }
  if (pbi->dcb.xd.seg_mask == NULL)
    CHECK_MEM_ERROR(cm, pbi->dcb.xd.seg_mask,
                    (uint8_t *)aom_memalign(
                        16, 2 * MAX_SB_SQUARE * sizeof(*pbi->dcb.xd.seg_mask)));

  for (int row = 0; row < tile_rows; row++) {
    for (int col = 0; col < tile_cols; col++) {
      TileDataDec *tile_data = pbi->tile_data + row * tiles->cols + col;
      av1_tile_init(&tile_data->tile_info, cm, row, col);
    }
  }

  tile_mt_queue(pbi, tile_cols, tile_rows, tile_rows_start, tile_rows_end,
                tile_cols_start, tile_cols_end, start_tile, end_tile);

  reset_dec_workers(pbi, tile_worker_hook, num_workers);
  launch_dec_workers(pbi, data_end, num_workers);
  sync_dec_workers(pbi, num_workers);

  if (pbi->dcb.corrupted)
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                       "Failed to decode tile data");

  if (tiles->large_scale) {
    if (n_tiles == 1) {
      // Find the end of the single tile buffer
      return aom_reader_find_end(&pbi->tile_data->bit_reader);
    }
    // Return the end of the last tile buffer
    return raw_data_end;
  }
  TileDataDec *const tile_data = pbi->tile_data + end_tile;

  return aom_reader_find_end(&tile_data->bit_reader);
}

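// Allocates one CB_BUFFER per superblock for the row-MT path. The grid size
// is (sb_rows + 1) * (sb_cols + 1); the +1 terms absorb the truncation of the
// right shifts for frames that are not a whole number of superblocks. Parse
// results are kept here so the reconstruction pass can consume them later.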
static AOM_INLINE void dec_alloc_cb_buf(AV1Decoder *pbi) {
  AV1_COMMON *const cm = &pbi->common;
  int size = ((cm->mi_params.mi_rows >> cm->seq_params->mib_size_log2) + 1) *
             ((cm->mi_params.mi_cols >> cm->seq_params->mib_size_log2) + 1);

  if (pbi->cb_buffer_alloc_size < size) {
    av1_dec_free_cb_buf(pbi);
    CHECK_MEM_ERROR(cm, pbi->cb_buffer_base,
                    aom_memalign(32, sizeof(*pbi->cb_buffer_base) * size));
    memset(pbi->cb_buffer_base, 0, sizeof(*pbi->cb_buffer_base) * size);
    pbi->cb_buffer_alloc_size = size;
  }
}

static AOM_INLINE void row_mt_frame_init(AV1Decoder *pbi, int tile_rows_start,
                                         int tile_rows_end, int tile_cols_start,
                                         int tile_cols_end, int start_tile,
                                         int end_tile, int max_sb_rows) {
  AV1_COMMON *const cm = &pbi->common;
  AV1DecRowMTInfo *frame_row_mt_info = &pbi->frame_row_mt_info;

  frame_row_mt_info->tile_rows_start = tile_rows_start;
  frame_row_mt_info->tile_rows_end = tile_rows_end;
  frame_row_mt_info->tile_cols_start = tile_cols_start;
  frame_row_mt_info->tile_cols_end = tile_cols_end;
  frame_row_mt_info->start_tile = start_tile;
  frame_row_mt_info->end_tile = end_tile;
  frame_row_mt_info->mi_rows_to_decode = 0;
  frame_row_mt_info->mi_rows_parse_done = 0;
  frame_row_mt_info->mi_rows_decode_started = 0;
  frame_row_mt_info->row_mt_exit = 0;

  for (int tile_row = tile_rows_start; tile_row < tile_rows_end; ++tile_row) {
    for (int tile_col = tile_cols_start; tile_col < tile_cols_end; ++tile_col) {
      if (tile_row * cm->tiles.cols + tile_col < start_tile ||
          tile_row * cm->tiles.cols + tile_col > end_tile)
        continue;

      TileDataDec *const tile_data =
          pbi->tile_data + tile_row * cm->tiles.cols + tile_col;
      const TileInfo *const tile_info = &tile_data->tile_info;

      tile_data->dec_row_mt_sync.mi_rows_parse_done = 0;
      tile_data->dec_row_mt_sync.mi_rows_decode_started = 0;
      tile_data->dec_row_mt_sync.num_threads_working = 0;
      tile_data->dec_row_mt_sync.mi_rows =
          ALIGN_POWER_OF_TWO(tile_info->mi_row_end - tile_info->mi_row_start,
                             cm->seq_params->mib_size_log2);
      tile_data->dec_row_mt_sync.mi_cols =
          ALIGN_POWER_OF_TWO(tile_info->mi_col_end - tile_info->mi_col_start,
                             cm->seq_params->mib_size_log2);
      tile_data->dec_row_mt_sync.intrabc_extra_top_right_sb_delay =
          av1_get_intrabc_extra_top_right_sb_delay(cm);

      frame_row_mt_info->mi_rows_to_decode +=
          tile_data->dec_row_mt_sync.mi_rows;

      // Initialize cur_sb_col to -1 for all SB rows.
      memset(tile_data->dec_row_mt_sync.cur_sb_col, -1,
             sizeof(*tile_data->dec_row_mt_sync.cur_sb_col) * max_sb_rows);
    }
  }

#if CONFIG_MULTITHREAD
  if (pbi->row_mt_mutex_ == NULL) {
    CHECK_MEM_ERROR(cm, pbi->row_mt_mutex_,
                    aom_malloc(sizeof(*(pbi->row_mt_mutex_))));
    if (pbi->row_mt_mutex_) {
      pthread_mutex_init(pbi->row_mt_mutex_, NULL);
    }
  }

  if (pbi->row_mt_cond_ == NULL) {
    CHECK_MEM_ERROR(cm, pbi->row_mt_cond_,
                    aom_malloc(sizeof(*(pbi->row_mt_cond_))));
    if (pbi->row_mt_cond_) {
      pthread_cond_init(pbi->row_mt_cond_, NULL);
    }
  }
#endif
}

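// Row-based multi-threaded decoding proceeds in two phases (see
// row_mt_worker_hook()): every tile is first parsed in full (0x1), with each
// completed superblock row published via signal_parse_sb_row_done(); workers
// then pick up per-superblock-row reconstruction jobs (0x2) handed out by
// get_next_job_info().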
static const uint8_t *decode_tiles_row_mt(AV1Decoder *pbi, const uint8_t *data,
                                          const uint8_t *data_end,
                                          int start_tile, int end_tile) {
  AV1_COMMON *const cm = &pbi->common;
  CommonTileParams *const tiles = &cm->tiles;
  const int tile_cols = tiles->cols;
  const int tile_rows = tiles->rows;
  const int n_tiles = tile_cols * tile_rows;
  TileBufferDec(*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
  const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
  const int single_row = pbi->dec_tile_row >= 0;
  const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
  const int single_col = pbi->dec_tile_col >= 0;
  int tile_rows_start;
  int tile_rows_end;
  int tile_cols_start;
  int tile_cols_end;
  int tile_count_tg;
  int num_workers = 0;
  int max_threads;
  const uint8_t *raw_data_end = NULL;
  int max_sb_rows = 0;

  if (tiles->large_scale) {
    tile_rows_start = single_row ? dec_tile_row : 0;
    tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
    tile_cols_start = single_col ? dec_tile_col : 0;
    tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
  } else {
    tile_rows_start = 0;
    tile_rows_end = tile_rows;
    tile_cols_start = 0;
    tile_cols_end = tile_cols;
  }
  tile_count_tg = end_tile - start_tile + 1;
  max_threads = pbi->max_threads;

  // No tiles to decode.
  if (tile_rows_end <= tile_rows_start || tile_cols_end <= tile_cols_start ||
      // The index of the first tile in the range is greater than end_tile.
      tile_rows_start * tile_cols + tile_cols_start > end_tile ||
      // The index of the last tile in the range is smaller than start_tile.
      (tile_rows_end - 1) * tile_cols + tile_cols_end - 1 < start_tile)
    return data;

  assert(tile_rows <= MAX_TILE_ROWS);
  assert(tile_cols <= MAX_TILE_COLS);
  assert(tile_count_tg > 0);
  assert(max_threads > 0);
  assert(start_tile <= end_tile);
  assert(start_tile >= 0 && end_tile < n_tiles);

  (void)tile_count_tg;

  decode_mt_init(pbi);

  // get tile size in tile group
#if EXT_TILE_DEBUG
  if (tiles->large_scale) assert(pbi->ext_tile_debug == 1);
  if (tiles->large_scale)
    raw_data_end = get_ls_tile_buffers(pbi, data, data_end, tile_buffers);
  else
#endif  // EXT_TILE_DEBUG
    get_tile_buffers(pbi, data, data_end, tile_buffers, start_tile, end_tile);

  if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
    if (pbi->tile_data != NULL) {
      for (int i = 0; i < pbi->allocated_tiles; i++) {
        TileDataDec *const tile_data = pbi->tile_data + i;
        av1_dec_row_mt_dealloc(&tile_data->dec_row_mt_sync);
      }
    }
    decoder_alloc_tile_data(pbi, n_tiles);
  }
  if (pbi->dcb.xd.seg_mask == NULL)
    CHECK_MEM_ERROR(cm, pbi->dcb.xd.seg_mask,
                    (uint8_t *)aom_memalign(
                        16, 2 * MAX_SB_SQUARE * sizeof(*pbi->dcb.xd.seg_mask)));

  for (int row = 0; row < tile_rows; row++) {
    for (int col = 0; col < tile_cols; col++) {
      TileDataDec *tile_data = pbi->tile_data + row * tiles->cols + col;
      av1_tile_init(&tile_data->tile_info, cm, row, col);

      max_sb_rows = AOMMAX(max_sb_rows,
                           av1_get_sb_rows_in_tile(cm, &tile_data->tile_info));
      num_workers += get_max_row_mt_workers_per_tile(cm, &tile_data->tile_info);
    }
  }
  num_workers = AOMMIN(num_workers, max_threads);

  if (pbi->allocated_row_mt_sync_rows != max_sb_rows) {
    for (int i = 0; i < n_tiles; ++i) {
      TileDataDec *const tile_data = pbi->tile_data + i;
      av1_dec_row_mt_dealloc(&tile_data->dec_row_mt_sync);
      dec_row_mt_alloc(&tile_data->dec_row_mt_sync, cm, max_sb_rows);
    }
    pbi->allocated_row_mt_sync_rows = max_sb_rows;
  }

  tile_mt_queue(pbi, tile_cols, tile_rows, tile_rows_start, tile_rows_end,
                tile_cols_start, tile_cols_end, start_tile, end_tile);

  dec_alloc_cb_buf(pbi);

  row_mt_frame_init(pbi, tile_rows_start, tile_rows_end, tile_cols_start,
                    tile_cols_end, start_tile, end_tile, max_sb_rows);

  reset_dec_workers(pbi, row_mt_worker_hook, num_workers);
  launch_dec_workers(pbi, data_end, num_workers);
  sync_dec_workers(pbi, num_workers);

  if (pbi->dcb.corrupted)
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                       "Failed to decode tile data");

  if (tiles->large_scale) {
    if (n_tiles == 1) {
      // Find the end of the single tile buffer
      return aom_reader_find_end(&pbi->tile_data->bit_reader);
    }
    // Return the end of the last tile buffer
    return raw_data_end;
  }
  TileDataDec *const tile_data = pbi->tile_data + end_tile;

  return aom_reader_find_end(&tile_data->bit_reader);
}

static AOM_INLINE void error_handler(void *data) {
  AV1_COMMON *const cm = (AV1_COMMON *)data;
  aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME, "Truncated packet");
}

// Reads the high_bitdepth and twelve_bit fields in color_config() and sets
// seq_params->bit_depth based on the values of those fields and
// seq_params->profile. Reports errors by calling rb->error_handler() or
// aom_internal_error().
static AOM_INLINE void read_bitdepth(
    struct aom_read_bit_buffer *rb, SequenceHeader *seq_params,
    struct aom_internal_error_info *error_info) {
  const int high_bitdepth = aom_rb_read_bit(rb);
  if (seq_params->profile == PROFILE_2 && high_bitdepth) {
    const int twelve_bit = aom_rb_read_bit(rb);
    seq_params->bit_depth = twelve_bit ? AOM_BITS_12 : AOM_BITS_10;
  } else if (seq_params->profile <= PROFILE_2) {
    seq_params->bit_depth = high_bitdepth ? AOM_BITS_10 : AOM_BITS_8;
  } else {
    aom_internal_error(error_info, AOM_CODEC_UNSUP_BITSTREAM,
                       "Unsupported profile/bit-depth combination");
  }
#if !CONFIG_AV1_HIGHBITDEPTH
  if (seq_params->bit_depth > AOM_BITS_8) {
    aom_internal_error(error_info, AOM_CODEC_UNSUP_BITSTREAM,
                       "Bit-depth %d not supported", seq_params->bit_depth);
  }
#endif
}

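// Parses the film_grain_params() syntax of the AV1 bitstream and validates
// the conformance requirements cited inline below, erroring out via
// aom_internal_error() on violations.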
void av1_read_film_grain_params(AV1_COMMON *cm,
                                struct aom_read_bit_buffer *rb) {
  aom_film_grain_t *pars = &cm->film_grain_params;
  const SequenceHeader *const seq_params = cm->seq_params;

  pars->apply_grain = aom_rb_read_bit(rb);
  if (!pars->apply_grain) {
    memset(pars, 0, sizeof(*pars));
    return;
  }

  pars->random_seed = aom_rb_read_literal(rb, 16);
  if (cm->current_frame.frame_type == INTER_FRAME)
    pars->update_parameters = aom_rb_read_bit(rb);
  else
    pars->update_parameters = 1;

  pars->bit_depth = seq_params->bit_depth;

  if (!pars->update_parameters) {
    // inherit parameters from a previous reference frame
    int film_grain_params_ref_idx = aom_rb_read_literal(rb, 3);
    // Section 6.8.20: It is a requirement of bitstream conformance that
    // film_grain_params_ref_idx is equal to ref_frame_idx[ j ] for some value
    // of j in the range 0 to REFS_PER_FRAME - 1.
    int found = 0;
    for (int i = 0; i < INTER_REFS_PER_FRAME; ++i) {
      if (film_grain_params_ref_idx == cm->remapped_ref_idx[i]) {
        found = 1;
        break;
      }
    }
    if (!found) {
      aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                         "Invalid film grain reference idx %d. ref_frame_idx = "
                         "{%d, %d, %d, %d, %d, %d, %d}",
                         film_grain_params_ref_idx, cm->remapped_ref_idx[0],
                         cm->remapped_ref_idx[1], cm->remapped_ref_idx[2],
                         cm->remapped_ref_idx[3], cm->remapped_ref_idx[4],
                         cm->remapped_ref_idx[5], cm->remapped_ref_idx[6]);
    }
    RefCntBuffer *const buf = cm->ref_frame_map[film_grain_params_ref_idx];
    if (buf == NULL) {
      aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                         "Invalid film grain reference idx");
    }
    if (!buf->film_grain_params_present) {
      aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                         "Film grain reference parameters not available");
    }
    uint16_t random_seed = pars->random_seed;
    *pars = buf->film_grain_params;   // inherit parameters
    pars->random_seed = random_seed;  // with new random seed
    return;
  }

  // Scaling functions parameters
  pars->num_y_points = aom_rb_read_literal(rb, 4);  // max 14
  if (pars->num_y_points > 14)
    aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                       "Number of points for film grain luma scaling function "
                       "exceeds the maximum value.");
  for (int i = 0; i < pars->num_y_points; i++) {
    pars->scaling_points_y[i][0] = aom_rb_read_literal(rb, 8);
    if (i && pars->scaling_points_y[i - 1][0] >= pars->scaling_points_y[i][0])
      aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                         "First coordinate of the scaling function points "
                         "shall be increasing.");
    pars->scaling_points_y[i][1] = aom_rb_read_literal(rb, 8);
  }

  if (!seq_params->monochrome)
    pars->chroma_scaling_from_luma = aom_rb_read_bit(rb);
  else
    pars->chroma_scaling_from_luma = 0;

  if (seq_params->monochrome || pars->chroma_scaling_from_luma ||
      ((seq_params->subsampling_x == 1) && (seq_params->subsampling_y == 1) &&
       (pars->num_y_points == 0))) {
    pars->num_cb_points = 0;
    pars->num_cr_points = 0;
  } else {
    pars->num_cb_points = aom_rb_read_literal(rb, 4);  // max 10
    if (pars->num_cb_points > 10)
      aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                         "Number of points for film grain cb scaling function "
                         "exceeds the maximum value.");
    for (int i = 0; i < pars->num_cb_points; i++) {
      pars->scaling_points_cb[i][0] = aom_rb_read_literal(rb, 8);
      if (i &&
          pars->scaling_points_cb[i - 1][0] >= pars->scaling_points_cb[i][0])
        aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                           "First coordinate of the scaling function points "
                           "shall be increasing.");
      pars->scaling_points_cb[i][1] = aom_rb_read_literal(rb, 8);
    }

    pars->num_cr_points = aom_rb_read_literal(rb, 4);  // max 10
    if (pars->num_cr_points > 10)
      aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                         "Number of points for film grain cr scaling function "
                         "exceeds the maximum value.");
    for (int i = 0; i < pars->num_cr_points; i++) {
      pars->scaling_points_cr[i][0] = aom_rb_read_literal(rb, 8);
      if (i &&
          pars->scaling_points_cr[i - 1][0] >= pars->scaling_points_cr[i][0])
        aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                           "First coordinate of the scaling function points "
                           "shall be increasing.");
      pars->scaling_points_cr[i][1] = aom_rb_read_literal(rb, 8);
    }

    if ((seq_params->subsampling_x == 1) && (seq_params->subsampling_y == 1) &&
        (((pars->num_cb_points == 0) && (pars->num_cr_points != 0)) ||
         ((pars->num_cb_points != 0) && (pars->num_cr_points == 0))))
      aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                         "In YCbCr 4:2:0, film grain shall be applied "
                         "to both chroma components or neither.");
  }

  pars->scaling_shift = aom_rb_read_literal(rb, 2) + 8;  // 8 + value

  // AR coefficients
  // Only sent if the corresponding scaling function has
  // more than 0 points

  pars->ar_coeff_lag = aom_rb_read_literal(rb, 2);

  int num_pos_luma = 2 * pars->ar_coeff_lag * (pars->ar_coeff_lag + 1);
  int num_pos_chroma = num_pos_luma;
  if (pars->num_y_points > 0) ++num_pos_chroma;

  if (pars->num_y_points)
    for (int i = 0; i < num_pos_luma; i++)
      pars->ar_coeffs_y[i] = aom_rb_read_literal(rb, 8) - 128;

  if (pars->num_cb_points || pars->chroma_scaling_from_luma)
    for (int i = 0; i < num_pos_chroma; i++)
      pars->ar_coeffs_cb[i] = aom_rb_read_literal(rb, 8) - 128;

  if (pars->num_cr_points || pars->chroma_scaling_from_luma)
    for (int i = 0; i < num_pos_chroma; i++)
      pars->ar_coeffs_cr[i] = aom_rb_read_literal(rb, 8) - 128;

  pars->ar_coeff_shift = aom_rb_read_literal(rb, 2) + 6;  // 6 + value

  pars->grain_scale_shift = aom_rb_read_literal(rb, 2);

  if (pars->num_cb_points) {
    pars->cb_mult = aom_rb_read_literal(rb, 8);
    pars->cb_luma_mult = aom_rb_read_literal(rb, 8);
    pars->cb_offset = aom_rb_read_literal(rb, 9);
  }

  if (pars->num_cr_points) {
    pars->cr_mult = aom_rb_read_literal(rb, 8);
    pars->cr_luma_mult = aom_rb_read_literal(rb, 8);
    pars->cr_offset = aom_rb_read_literal(rb, 9);
  }

  pars->overlap_flag = aom_rb_read_bit(rb);

  pars->clip_to_restricted_range = aom_rb_read_bit(rb);
}

static AOM_INLINE void read_film_grain(AV1_COMMON *cm,
                                       struct aom_read_bit_buffer *rb) {
  if (cm->seq_params->film_grain_params_present &&
      (cm->show_frame || cm->showable_frame)) {
    av1_read_film_grain_params(cm, rb);
  } else {
    memset(&cm->film_grain_params, 0, sizeof(cm->film_grain_params));
  }
  cm->film_grain_params.bit_depth = cm->seq_params->bit_depth;
  memcpy(&cm->cur_frame->film_grain_params, &cm->film_grain_params,
         sizeof(aom_film_grain_t));
}

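// Chroma subsampling as resolved by the logic below, per profile:
//   PROFILE_0: 4:2:0 (or 4:0:0 when monochrome)
//   PROFILE_1: 4:4:4
//   PROFILE_2: 4:2:2 at 8/10 bit; 4:4:4, 4:2:2 or 4:2:0 selectable at 12 bit
// The sRGB/identity-matrix combination additionally forces 4:4:4.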
void av1_read_color_config(struct aom_read_bit_buffer *rb,
                           int allow_lowbitdepth, SequenceHeader *seq_params,
                           struct aom_internal_error_info *error_info) {
  read_bitdepth(rb, seq_params, error_info);

  seq_params->use_highbitdepth =
      seq_params->bit_depth > AOM_BITS_8 || !allow_lowbitdepth;
  // monochrome bit (not needed for PROFILE_1)
  const int is_monochrome =
      seq_params->profile != PROFILE_1 ? aom_rb_read_bit(rb) : 0;
  seq_params->monochrome = is_monochrome;
  int color_description_present_flag = aom_rb_read_bit(rb);
  if (color_description_present_flag) {
    seq_params->color_primaries = aom_rb_read_literal(rb, 8);
    seq_params->transfer_characteristics = aom_rb_read_literal(rb, 8);
    seq_params->matrix_coefficients = aom_rb_read_literal(rb, 8);
  } else {
    seq_params->color_primaries = AOM_CICP_CP_UNSPECIFIED;
    seq_params->transfer_characteristics = AOM_CICP_TC_UNSPECIFIED;
    seq_params->matrix_coefficients = AOM_CICP_MC_UNSPECIFIED;
  }
  if (is_monochrome) {
    // [16,235] (including xvycc) vs [0,255] range
    seq_params->color_range = aom_rb_read_bit(rb);
    seq_params->subsampling_y = seq_params->subsampling_x = 1;
    seq_params->chroma_sample_position = AOM_CSP_UNKNOWN;
    seq_params->separate_uv_delta_q = 0;
    return;
  }
  if (seq_params->color_primaries == AOM_CICP_CP_BT_709 &&
      seq_params->transfer_characteristics == AOM_CICP_TC_SRGB &&
      seq_params->matrix_coefficients == AOM_CICP_MC_IDENTITY) {
    seq_params->subsampling_y = seq_params->subsampling_x = 0;
    seq_params->color_range = 1;  // assume full color-range
    if (!(seq_params->profile == PROFILE_1 ||
          (seq_params->profile == PROFILE_2 &&
           seq_params->bit_depth == AOM_BITS_12))) {
      aom_internal_error(
          error_info, AOM_CODEC_UNSUP_BITSTREAM,
          "sRGB colorspace not compatible with specified profile");
    }
  } else {
    // [16,235] (including xvycc) vs [0,255] range
    seq_params->color_range = aom_rb_read_bit(rb);
    if (seq_params->profile == PROFILE_0) {
      // 420 only
      seq_params->subsampling_x = seq_params->subsampling_y = 1;
    } else if (seq_params->profile == PROFILE_1) {
      // 444 only
      seq_params->subsampling_x = seq_params->subsampling_y = 0;
    } else {
      assert(seq_params->profile == PROFILE_2);
      if (seq_params->bit_depth == AOM_BITS_12) {
        seq_params->subsampling_x = aom_rb_read_bit(rb);
        if (seq_params->subsampling_x)
          seq_params->subsampling_y = aom_rb_read_bit(rb);  // 422 or 420
        else
          seq_params->subsampling_y = 0;  // 444
      } else {
        // 422
        seq_params->subsampling_x = 1;
        seq_params->subsampling_y = 0;
      }
    }
    if (seq_params->matrix_coefficients == AOM_CICP_MC_IDENTITY &&
        (seq_params->subsampling_x || seq_params->subsampling_y)) {
      aom_internal_error(
          error_info, AOM_CODEC_UNSUP_BITSTREAM,
          "Identity CICP Matrix incompatible with non 4:4:4 color sampling");
    }
    if (seq_params->subsampling_x && seq_params->subsampling_y) {
      seq_params->chroma_sample_position = aom_rb_read_literal(rb, 2);
    }
  }
  seq_params->separate_uv_delta_q = aom_rb_read_bit(rb);
}

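// Illustrative example: num_units_in_display_tick = 1001 with
// time_scale = 30000 yields a display tick of 1001/30000 s; with
// equal_picture_interval set and num_ticks_per_picture = 1 that corresponds
// to the familiar 29.97 fps.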
av1_read_timing_info_header(aom_timing_info_t * timing_info,struct aom_internal_error_info * error,struct aom_read_bit_buffer * rb)4152 void av1_read_timing_info_header(aom_timing_info_t *timing_info,
4153 struct aom_internal_error_info *error,
4154 struct aom_read_bit_buffer *rb) {
4155 timing_info->num_units_in_display_tick =
4156 aom_rb_read_unsigned_literal(rb,
4157 32); // Number of units in a display tick
4158 timing_info->time_scale = aom_rb_read_unsigned_literal(rb, 32); // Time scale
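  // A display tick lasts num_units_in_display_tick / time_scale seconds. For
  // example, num_units_in_display_tick = 1001 with time_scale = 30000 gives a
  // tick duration of 1001/30000 s, i.e. about 29.97 ticks per second.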
  if (timing_info->num_units_in_display_tick == 0 ||
      timing_info->time_scale == 0) {
    aom_internal_error(
        error, AOM_CODEC_UNSUP_BITSTREAM,
        "num_units_in_display_tick and time_scale must be greater than 0.");
  }
  timing_info->equal_picture_interval =
      aom_rb_read_bit(rb);  // Equal picture interval bit
  if (timing_info->equal_picture_interval) {
    const uint32_t num_ticks_per_picture_minus_1 = aom_rb_read_uvlc(rb);
    if (num_ticks_per_picture_minus_1 == UINT32_MAX) {
      aom_internal_error(
          error, AOM_CODEC_UNSUP_BITSTREAM,
          "num_ticks_per_picture_minus_1 cannot be (1 << 32) - 1.");
    }
    timing_info->num_ticks_per_picture = num_ticks_per_picture_minus_1 + 1;
  }
}

void av1_read_decoder_model_info(aom_dec_model_info_t *decoder_model_info,
                                 struct aom_read_bit_buffer *rb) {
  decoder_model_info->encoder_decoder_buffer_delay_length =
      aom_rb_read_literal(rb, 5) + 1;
  decoder_model_info->num_units_in_decoding_tick =
      aom_rb_read_unsigned_literal(rb,
                                   32);  // Number of units in a decoding tick
  decoder_model_info->buffer_removal_time_length =
      aom_rb_read_literal(rb, 5) + 1;
  decoder_model_info->frame_presentation_time_length =
      aom_rb_read_literal(rb, 5) + 1;
}

void av1_read_op_parameters_info(aom_dec_model_op_parameters_t *op_params,
                                 int buffer_delay_length,
                                 struct aom_read_bit_buffer *rb) {
  op_params->decoder_buffer_delay =
      aom_rb_read_unsigned_literal(rb, buffer_delay_length);
  op_params->encoder_buffer_delay =
      aom_rb_read_unsigned_literal(rb, buffer_delay_length);
  op_params->low_delay_mode_flag = aom_rb_read_bit(rb);
}

static AOM_INLINE void read_temporal_point_info(
    AV1_COMMON *const cm, struct aom_read_bit_buffer *rb) {
  cm->frame_presentation_time = aom_rb_read_unsigned_literal(
      rb, cm->seq_params->decoder_model_info.frame_presentation_time_length);
}

void av1_read_sequence_header(AV1_COMMON *cm, struct aom_read_bit_buffer *rb,
                              SequenceHeader *seq_params) {
  const int num_bits_width = aom_rb_read_literal(rb, 4) + 1;
  const int num_bits_height = aom_rb_read_literal(rb, 4) + 1;
  const int max_frame_width = aom_rb_read_literal(rb, num_bits_width) + 1;
  const int max_frame_height = aom_rb_read_literal(rb, num_bits_height) + 1;

  seq_params->num_bits_width = num_bits_width;
  seq_params->num_bits_height = num_bits_height;
  seq_params->max_frame_width = max_frame_width;
  seq_params->max_frame_height = max_frame_height;

  if (seq_params->reduced_still_picture_hdr) {
    seq_params->frame_id_numbers_present_flag = 0;
  } else {
    seq_params->frame_id_numbers_present_flag = aom_rb_read_bit(rb);
  }
  if (seq_params->frame_id_numbers_present_flag) {
    // We must always have delta_frame_id_length < frame_id_length,
    // in order for a frame to be referenced with a unique delta.
    // Avoid wasting bits by using a coding that enforces this restriction.
    seq_params->delta_frame_id_length = aom_rb_read_literal(rb, 4) + 2;
    seq_params->frame_id_length =
        aom_rb_read_literal(rb, 3) + seq_params->delta_frame_id_length + 1;
    if (seq_params->frame_id_length > 16)
      aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME,
                         "Invalid frame_id_length");
  }

  setup_sb_size(seq_params, rb);

  seq_params->enable_filter_intra = aom_rb_read_bit(rb);
  seq_params->enable_intra_edge_filter = aom_rb_read_bit(rb);

  if (seq_params->reduced_still_picture_hdr) {
    seq_params->enable_interintra_compound = 0;
    seq_params->enable_masked_compound = 0;
    seq_params->enable_warped_motion = 0;
    seq_params->enable_dual_filter = 0;
    seq_params->order_hint_info.enable_order_hint = 0;
    seq_params->order_hint_info.enable_dist_wtd_comp = 0;
    seq_params->order_hint_info.enable_ref_frame_mvs = 0;
    seq_params->force_screen_content_tools = 2;  // SELECT_SCREEN_CONTENT_TOOLS
    seq_params->force_integer_mv = 2;            // SELECT_INTEGER_MV
    seq_params->order_hint_info.order_hint_bits_minus_1 = -1;
  } else {
    seq_params->enable_interintra_compound = aom_rb_read_bit(rb);
    seq_params->enable_masked_compound = aom_rb_read_bit(rb);
    seq_params->enable_warped_motion = aom_rb_read_bit(rb);
    seq_params->enable_dual_filter = aom_rb_read_bit(rb);

    seq_params->order_hint_info.enable_order_hint = aom_rb_read_bit(rb);
    seq_params->order_hint_info.enable_dist_wtd_comp =
        seq_params->order_hint_info.enable_order_hint ? aom_rb_read_bit(rb) : 0;
    seq_params->order_hint_info.enable_ref_frame_mvs =
        seq_params->order_hint_info.enable_order_hint ? aom_rb_read_bit(rb) : 0;

    if (aom_rb_read_bit(rb)) {
      seq_params->force_screen_content_tools =
          2;  // SELECT_SCREEN_CONTENT_TOOLS
    } else {
      seq_params->force_screen_content_tools = aom_rb_read_bit(rb);
    }

    if (seq_params->force_screen_content_tools > 0) {
      if (aom_rb_read_bit(rb)) {
        seq_params->force_integer_mv = 2;  // SELECT_INTEGER_MV
      } else {
        seq_params->force_integer_mv = aom_rb_read_bit(rb);
      }
    } else {
      seq_params->force_integer_mv = 2;  // SELECT_INTEGER_MV
    }
    seq_params->order_hint_info.order_hint_bits_minus_1 =
        seq_params->order_hint_info.enable_order_hint
            ? aom_rb_read_literal(rb, 3)
            : -1;
  }

  seq_params->enable_superres = aom_rb_read_bit(rb);
  seq_params->enable_cdef = aom_rb_read_bit(rb);
  seq_params->enable_restoration = aom_rb_read_bit(rb);
}

static int read_global_motion_params(WarpedMotionParams *params,
                                     const WarpedMotionParams *ref_params,
                                     struct aom_read_bit_buffer *rb,
                                     int allow_hp) {
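  // The transformation type is coded as a tree:
  //   first bit  - IDENTITY vs. any other model,
  //   second bit - ROTZOOM vs. the remaining two,
  //   third bit  - TRANSLATION vs. AFFINE.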
  TransformationType type = aom_rb_read_bit(rb);
  if (type != IDENTITY) {
    if (aom_rb_read_bit(rb))
      type = ROTZOOM;
    else
      type = aom_rb_read_bit(rb) ? TRANSLATION : AFFINE;
  }

  *params = default_warp_params;
  params->wmtype = type;

  if (type >= ROTZOOM) {
    params->wmmat[2] = aom_rb_read_signed_primitive_refsubexpfin(
                           rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
                           (ref_params->wmmat[2] >> GM_ALPHA_PREC_DIFF) -
                               (1 << GM_ALPHA_PREC_BITS)) *
                           GM_ALPHA_DECODE_FACTOR +
                       (1 << WARPEDMODEL_PREC_BITS);
    params->wmmat[3] = aom_rb_read_signed_primitive_refsubexpfin(
                           rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
                           (ref_params->wmmat[3] >> GM_ALPHA_PREC_DIFF)) *
                       GM_ALPHA_DECODE_FACTOR;
  }

  if (type >= AFFINE) {
    params->wmmat[4] = aom_rb_read_signed_primitive_refsubexpfin(
                           rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
                           (ref_params->wmmat[4] >> GM_ALPHA_PREC_DIFF)) *
                       GM_ALPHA_DECODE_FACTOR;
    params->wmmat[5] = aom_rb_read_signed_primitive_refsubexpfin(
                           rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
                           (ref_params->wmmat[5] >> GM_ALPHA_PREC_DIFF) -
                               (1 << GM_ALPHA_PREC_BITS)) *
                           GM_ALPHA_DECODE_FACTOR +
                       (1 << WARPEDMODEL_PREC_BITS);
  } else {
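    // ROTZOOM models a scaled rotation: the 2x2 linear part of the warp is
    // [ wmmat[2] wmmat[3]; -wmmat[3] wmmat[2] ], so the second row is derived
    // rather than coded.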
    params->wmmat[4] = -params->wmmat[3];
    params->wmmat[5] = params->wmmat[2];
  }

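  // Translation-only models are coded at reduced precision (reduced further
  // when high-precision MVs are disallowed), while ROTZOOM/AFFINE models
  // carry their translation terms at full precision.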
  if (type >= TRANSLATION) {
    const int trans_bits = (type == TRANSLATION)
                               ? GM_ABS_TRANS_ONLY_BITS - !allow_hp
                               : GM_ABS_TRANS_BITS;
    const int trans_dec_factor =
        (type == TRANSLATION) ? GM_TRANS_ONLY_DECODE_FACTOR * (1 << !allow_hp)
                              : GM_TRANS_DECODE_FACTOR;
    const int trans_prec_diff = (type == TRANSLATION)
                                    ? GM_TRANS_ONLY_PREC_DIFF + !allow_hp
                                    : GM_TRANS_PREC_DIFF;
    params->wmmat[0] = aom_rb_read_signed_primitive_refsubexpfin(
                           rb, (1 << trans_bits) + 1, SUBEXPFIN_K,
                           (ref_params->wmmat[0] >> trans_prec_diff)) *
                       trans_dec_factor;
    params->wmmat[1] = aom_rb_read_signed_primitive_refsubexpfin(
                           rb, (1 << trans_bits) + 1, SUBEXPFIN_K,
                           (ref_params->wmmat[1] >> trans_prec_diff)) *
                       trans_dec_factor;
  }

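  // Derive the shear parameters used by the warp filter. If the model falls
  // outside the filter's limits, report failure so the caller can mark the
  // model invalid.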
  return av1_get_shear_params(params);
}

static AOM_INLINE void read_global_motion(AV1_COMMON *cm,
                                          struct aom_read_bit_buffer *rb) {
  for (int frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
    const WarpedMotionParams *ref_params =
        cm->prev_frame ? &cm->prev_frame->global_motion[frame]
                       : &default_warp_params;
    int good_params =
        read_global_motion_params(&cm->global_motion[frame], ref_params, rb,
                                  cm->features.allow_high_precision_mv);
    if (!good_params) {
#if WARPED_MOTION_DEBUG
      printf("Warning: unexpected global motion shear params from aomenc\n");
#endif
      cm->global_motion[frame].invalid = 1;
    }

    // TODO(sarahparker, debargha): The logic in the commented out code below
    // does not work currently and causes mismatches when resize is on. Fix it
    // before turning the optimization back on.
    /*
    YV12_BUFFER_CONFIG *ref_buf = get_ref_frame(cm, frame);
    if (cm->width == ref_buf->y_crop_width &&
        cm->height == ref_buf->y_crop_height) {
      read_global_motion_params(&cm->global_motion[frame],
                                &cm->prev_frame->global_motion[frame], rb,
                                cm->features.allow_high_precision_mv);
    } else {
      cm->global_motion[frame] = default_warp_params;
    }
    */
    /*
    printf("Dec Ref %d [%d/%d]: %d %d %d %d\n",
           frame, cm->current_frame.frame_number, cm->show_frame,
           cm->global_motion[frame].wmmat[0],
           cm->global_motion[frame].wmmat[1],
           cm->global_motion[frame].wmmat[2],
           cm->global_motion[frame].wmmat[3]);
    */
  }
  memcpy(cm->cur_frame->global_motion, cm->global_motion,
         REF_FRAMES * sizeof(WarpedMotionParams));
}

// Release the references to the frame buffers in cm->ref_frame_map and reset
// all elements of cm->ref_frame_map to NULL.
static AOM_INLINE void reset_ref_frame_map(AV1_COMMON *const cm) {
  BufferPool *const pool = cm->buffer_pool;

  for (int i = 0; i < REF_FRAMES; i++) {
    decrease_ref_count(cm->ref_frame_map[i], pool);
    cm->ref_frame_map[i] = NULL;
  }
}

// If the refresh_frame_flags bitmask is set, update reference frame id values
// and mark frames as valid for reference.
static AOM_INLINE void update_ref_frame_id(AV1Decoder *const pbi) {
  AV1_COMMON *const cm = &pbi->common;
  int refresh_frame_flags = cm->current_frame.refresh_frame_flags;
  for (int i = 0; i < REF_FRAMES; i++) {
    if ((refresh_frame_flags >> i) & 1) {
      cm->ref_frame_id[i] = cm->current_frame_id;
      pbi->valid_for_referencing[i] = 1;
    }
  }
}

static AOM_INLINE void show_existing_frame_reset(AV1Decoder *const pbi,
                                                 int existing_frame_idx) {
  AV1_COMMON *const cm = &pbi->common;

  assert(cm->show_existing_frame);

  cm->current_frame.frame_type = KEY_FRAME;

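  // Showing an existing key frame resets the decoder state, so all reference
  // slots are refreshed, just as for a newly decoded key frame.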
  cm->current_frame.refresh_frame_flags = (1 << REF_FRAMES) - 1;

  for (int i = 0; i < INTER_REFS_PER_FRAME; ++i) {
    cm->remapped_ref_idx[i] = INVALID_IDX;
  }

  if (pbi->need_resync) {
    reset_ref_frame_map(cm);
    pbi->need_resync = 0;
  }

  // Note that the displayed frame must be valid for referencing in order to
  // have been selected.
  cm->current_frame_id = cm->ref_frame_id[existing_frame_idx];
  update_ref_frame_id(pbi);

  cm->features.refresh_frame_context = REFRESH_FRAME_CONTEXT_DISABLED;
}

static INLINE void reset_frame_buffers(AV1_COMMON *cm) {
  RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
  int i;

  lock_buffer_pool(cm->buffer_pool);
  reset_ref_frame_map(cm);
  assert(cm->cur_frame->ref_count == 1);
  for (i = 0; i < cm->buffer_pool->num_frame_bufs; ++i) {
    // Reset all unreferenced frame buffers. We can also reset cm->cur_frame
    // because we are the sole owner of cm->cur_frame.
    if (frame_bufs[i].ref_count > 0 && &frame_bufs[i] != cm->cur_frame) {
      continue;
    }
    frame_bufs[i].order_hint = 0;
    av1_zero(frame_bufs[i].ref_order_hints);
  }
  av1_zero_unused_internal_frame_buffers(&cm->buffer_pool->int_frame_buffers);
  unlock_buffer_pool(cm->buffer_pool);
}

// On success, returns 0. On failure, calls aom_internal_error and does not
// return.
static int read_uncompressed_header(AV1Decoder *pbi,
                                    struct aom_read_bit_buffer *rb) {
  AV1_COMMON *const cm = &pbi->common;
  const SequenceHeader *const seq_params = cm->seq_params;
  CurrentFrame *const current_frame = &cm->current_frame;
  FeatureFlags *const features = &cm->features;
  MACROBLOCKD *const xd = &pbi->dcb.xd;
  BufferPool *const pool = cm->buffer_pool;
  RefCntBuffer *const frame_bufs = pool->frame_bufs;
  aom_s_frame_info *sframe_info = &pbi->sframe_info;
  sframe_info->is_s_frame = 0;
  sframe_info->is_s_frame_at_altref = 0;

  if (!pbi->sequence_header_ready) {
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                       "No sequence header");
  }

  if (seq_params->reduced_still_picture_hdr) {
    cm->show_existing_frame = 0;
    cm->show_frame = 1;
    current_frame->frame_type = KEY_FRAME;
    if (pbi->sequence_header_changed) {
      // This is the start of a new coded video sequence.
      pbi->sequence_header_changed = 0;
      pbi->decoding_first_frame = 1;
      reset_frame_buffers(cm);
    }
    features->error_resilient_mode = 1;
  } else {
    cm->show_existing_frame = aom_rb_read_bit(rb);
    pbi->reset_decoder_state = 0;

    if (cm->show_existing_frame) {
      if (pbi->sequence_header_changed) {
        aom_internal_error(
            &pbi->error, AOM_CODEC_CORRUPT_FRAME,
            "New sequence header starts with a show_existing_frame.");
      }
      // Show an existing frame directly.
      const int existing_frame_idx = aom_rb_read_literal(rb, 3);
      RefCntBuffer *const frame_to_show = cm->ref_frame_map[existing_frame_idx];
      if (frame_to_show == NULL) {
        aom_internal_error(&pbi->error, AOM_CODEC_UNSUP_BITSTREAM,
                           "Buffer does not contain a decoded frame");
      }
      if (seq_params->decoder_model_info_present_flag &&
          seq_params->timing_info.equal_picture_interval == 0) {
        read_temporal_point_info(cm, rb);
      }
      if (seq_params->frame_id_numbers_present_flag) {
        int frame_id_length = seq_params->frame_id_length;
        int display_frame_id = aom_rb_read_literal(rb, frame_id_length);
        /* Compare display_frame_id with ref_frame_id and check valid for
         * referencing */
        if (display_frame_id != cm->ref_frame_id[existing_frame_idx] ||
            pbi->valid_for_referencing[existing_frame_idx] == 0)
          aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                             "Reference buffer frame ID mismatch");
      }
      lock_buffer_pool(pool);
      assert(frame_to_show->ref_count > 0);
      // cm->cur_frame should be the buffer referenced by the return value
      // of the get_free_fb() call in assign_cur_frame_new_fb() (called by
      // av1_receive_compressed_data()), so the ref_count should be 1.
      assert(cm->cur_frame->ref_count == 1);
      // assign_frame_buffer_p() decrements ref_count directly rather than
      // call decrease_ref_count(). If cm->cur_frame->raw_frame_buffer has
      // already been allocated, it will not be released by
      // assign_frame_buffer_p()!
      assert(!cm->cur_frame->raw_frame_buffer.data);
      assign_frame_buffer_p(&cm->cur_frame, frame_to_show);
      pbi->reset_decoder_state = frame_to_show->frame_type == KEY_FRAME;
      unlock_buffer_pool(pool);

      cm->lf.filter_level[0] = 0;
      cm->lf.filter_level[1] = 0;
      cm->show_frame = 1;
      current_frame->order_hint = frame_to_show->order_hint;

      // Section 6.8.2: It is a requirement of bitstream conformance that when
      // show_existing_frame is used to show a previous frame, that the value
      // of showable_frame for the previous frame was equal to 1.
      if (!frame_to_show->showable_frame) {
        aom_internal_error(&pbi->error, AOM_CODEC_UNSUP_BITSTREAM,
                           "Buffer does not contain a showable frame");
      }
      // Section 6.8.2: It is a requirement of bitstream conformance that when
      // show_existing_frame is used to show a previous frame with
      // RefFrameType[ frame_to_show_map_idx ] equal to KEY_FRAME, that the
      // frame is output via the show_existing_frame mechanism at most once.
      if (pbi->reset_decoder_state) frame_to_show->showable_frame = 0;

      cm->film_grain_params = frame_to_show->film_grain_params;

      if (pbi->reset_decoder_state) {
        show_existing_frame_reset(pbi, existing_frame_idx);
      } else {
        current_frame->refresh_frame_flags = 0;
      }

      return 0;
    }

    current_frame->frame_type = (FRAME_TYPE)aom_rb_read_literal(rb, 2);
    if (pbi->sequence_header_changed) {
      if (current_frame->frame_type == KEY_FRAME) {
        // This is the start of a new coded video sequence.
        pbi->sequence_header_changed = 0;
        pbi->decoding_first_frame = 1;
        reset_frame_buffers(cm);
      } else {
        aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                           "Sequence header has changed without a keyframe.");
      }
    }

    cm->show_frame = aom_rb_read_bit(rb);
    if (cm->show_frame == 0) pbi->is_arf_frame_present = 1;
    if (cm->show_frame == 0 && cm->current_frame.frame_type == KEY_FRAME)
      pbi->is_fwd_kf_present = 1;
    if (cm->current_frame.frame_type == S_FRAME) {
      sframe_info->is_s_frame = 1;
      sframe_info->is_s_frame_at_altref = cm->show_frame ? 0 : 1;
    }
    if (seq_params->still_picture &&
        (current_frame->frame_type != KEY_FRAME || !cm->show_frame)) {
      aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                         "Still pictures must be coded as shown keyframes");
    }
    cm->showable_frame = current_frame->frame_type != KEY_FRAME;
    if (cm->show_frame) {
      if (seq_params->decoder_model_info_present_flag &&
          seq_params->timing_info.equal_picture_interval == 0)
        read_temporal_point_info(cm, rb);
    } else {
      // See if this frame can be used as show_existing_frame in future
      cm->showable_frame = aom_rb_read_bit(rb);
    }
    cm->cur_frame->showable_frame = cm->showable_frame;
    features->error_resilient_mode =
        frame_is_sframe(cm) ||
                (current_frame->frame_type == KEY_FRAME && cm->show_frame)
            ? 1
            : aom_rb_read_bit(rb);
  }

  if (current_frame->frame_type == KEY_FRAME && cm->show_frame) {
    /* All frames need to be marked as not valid for referencing */
    for (int i = 0; i < REF_FRAMES; i++) {
      pbi->valid_for_referencing[i] = 0;
    }
  }
  features->disable_cdf_update = aom_rb_read_bit(rb);
  if (seq_params->force_screen_content_tools == 2) {
    features->allow_screen_content_tools = aom_rb_read_bit(rb);
  } else {
    features->allow_screen_content_tools =
        seq_params->force_screen_content_tools;
  }

  if (features->allow_screen_content_tools) {
    if (seq_params->force_integer_mv == 2) {
      features->cur_frame_force_integer_mv = aom_rb_read_bit(rb);
    } else {
      features->cur_frame_force_integer_mv = seq_params->force_integer_mv;
    }
  } else {
    features->cur_frame_force_integer_mv = 0;
  }

  int frame_size_override_flag = 0;
  features->allow_intrabc = 0;
  features->primary_ref_frame = PRIMARY_REF_NONE;

  if (!seq_params->reduced_still_picture_hdr) {
    if (seq_params->frame_id_numbers_present_flag) {
      int frame_id_length = seq_params->frame_id_length;
      int diff_len = seq_params->delta_frame_id_length;
      int prev_frame_id = 0;
      int have_prev_frame_id =
          !pbi->decoding_first_frame &&
          !(current_frame->frame_type == KEY_FRAME && cm->show_frame);
      if (have_prev_frame_id) {
        prev_frame_id = cm->current_frame_id;
      }
      cm->current_frame_id = aom_rb_read_literal(rb, frame_id_length);

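      // frame_id is coded modulo (1 << frame_id_length), so the forward
      // distance from the previous frame_id must account for wraparound.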
      if (have_prev_frame_id) {
        int diff_frame_id;
        if (cm->current_frame_id > prev_frame_id) {
          diff_frame_id = cm->current_frame_id - prev_frame_id;
        } else {
          diff_frame_id =
              (1 << frame_id_length) + cm->current_frame_id - prev_frame_id;
        }
        /* Check current_frame_id for conformance */
        if (prev_frame_id == cm->current_frame_id ||
            diff_frame_id >= (1 << (frame_id_length - 1))) {
          aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                             "Invalid value of current_frame_id");
        }
      }
      /* Check if some frames need to be marked as not valid for referencing */
      for (int i = 0; i < REF_FRAMES; i++) {
        if (cm->current_frame_id - (1 << diff_len) > 0) {
          if (cm->ref_frame_id[i] > cm->current_frame_id ||
              cm->ref_frame_id[i] < cm->current_frame_id - (1 << diff_len))
            pbi->valid_for_referencing[i] = 0;
        } else {
          if (cm->ref_frame_id[i] > cm->current_frame_id &&
              cm->ref_frame_id[i] < (1 << frame_id_length) +
                                        cm->current_frame_id - (1 << diff_len))
            pbi->valid_for_referencing[i] = 0;
        }
      }
    }

    frame_size_override_flag = frame_is_sframe(cm) ? 1 : aom_rb_read_bit(rb);

    current_frame->order_hint = aom_rb_read_literal(
        rb, seq_params->order_hint_info.order_hint_bits_minus_1 + 1);

    if (seq_params->order_hint_info.enable_order_hint)
      current_frame->frame_number = current_frame->order_hint;

    if (!features->error_resilient_mode && !frame_is_intra_only(cm)) {
      features->primary_ref_frame = aom_rb_read_literal(rb, PRIMARY_REF_BITS);
    }
  }

  if (seq_params->decoder_model_info_present_flag) {
    pbi->buffer_removal_time_present = aom_rb_read_bit(rb);
    if (pbi->buffer_removal_time_present) {
      for (int op_num = 0;
           op_num < seq_params->operating_points_cnt_minus_1 + 1; op_num++) {
        if (seq_params->op_params[op_num].decoder_model_param_present_flag) {
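          // operating_point_idc: bits 0..7 select the temporal layers and
          // bits 8..11 the spatial layers included in this operating point;
          // the value 0 means the operating point covers the full bitstream.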
          if (seq_params->operating_point_idc[op_num] == 0 ||
              (((seq_params->operating_point_idc[op_num] >>
                 cm->temporal_layer_id) &
                0x1) &&
               ((seq_params->operating_point_idc[op_num] >>
                 (cm->spatial_layer_id + 8)) &
                0x1))) {
            cm->buffer_removal_times[op_num] = aom_rb_read_unsigned_literal(
                rb, seq_params->decoder_model_info.buffer_removal_time_length);
          } else {
            cm->buffer_removal_times[op_num] = 0;
          }
        } else {
          cm->buffer_removal_times[op_num] = 0;
        }
      }
    }
  }
  if (current_frame->frame_type == KEY_FRAME) {
    if (!cm->show_frame) {  // unshown keyframe (forward keyframe)
      current_frame->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
    } else {  // shown keyframe
      current_frame->refresh_frame_flags = (1 << REF_FRAMES) - 1;
    }

    for (int i = 0; i < INTER_REFS_PER_FRAME; ++i) {
      cm->remapped_ref_idx[i] = INVALID_IDX;
    }
    if (pbi->need_resync) {
      reset_ref_frame_map(cm);
      pbi->need_resync = 0;
    }
  } else {
    if (current_frame->frame_type == INTRA_ONLY_FRAME) {
      current_frame->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
      if (current_frame->refresh_frame_flags == 0xFF) {
        aom_internal_error(&pbi->error, AOM_CODEC_UNSUP_BITSTREAM,
                           "Intra only frames cannot have refresh flags 0xFF");
      }
      if (pbi->need_resync) {
        reset_ref_frame_map(cm);
        pbi->need_resync = 0;
      }
    } else if (pbi->need_resync != 1) { /* Skip if need resync */
      current_frame->refresh_frame_flags =
          frame_is_sframe(cm) ? 0xFF : aom_rb_read_literal(rb, REF_FRAMES);
    }
  }

  if (!frame_is_intra_only(cm) || current_frame->refresh_frame_flags != 0xFF) {
    // Read all ref frame order hints if error_resilient_mode == 1
    if (features->error_resilient_mode &&
        seq_params->order_hint_info.enable_order_hint) {
      for (int ref_idx = 0; ref_idx < REF_FRAMES; ref_idx++) {
        // Read order hint from bit stream
        unsigned int order_hint = aom_rb_read_literal(
            rb, seq_params->order_hint_info.order_hint_bits_minus_1 + 1);
        // Get buffer
        RefCntBuffer *buf = cm->ref_frame_map[ref_idx];
        if (buf == NULL || order_hint != buf->order_hint) {
          if (buf != NULL) {
            lock_buffer_pool(pool);
            decrease_ref_count(buf, pool);
            unlock_buffer_pool(pool);
            cm->ref_frame_map[ref_idx] = NULL;
          }
          // If no corresponding buffer exists, allocate a new buffer with all
          // pixels set to neutral grey.
          int buf_idx = get_free_fb(cm);
          if (buf_idx == INVALID_IDX) {
            aom_internal_error(&pbi->error, AOM_CODEC_MEM_ERROR,
                               "Unable to find free frame buffer");
          }
          buf = &frame_bufs[buf_idx];
          lock_buffer_pool(pool);
          if (aom_realloc_frame_buffer(
                  &buf->buf, seq_params->max_frame_width,
                  seq_params->max_frame_height, seq_params->subsampling_x,
                  seq_params->subsampling_y, seq_params->use_highbitdepth,
                  AOM_BORDER_IN_PIXELS, features->byte_alignment,
                  &buf->raw_frame_buffer, pool->get_fb_cb, pool->cb_priv, false,
                  0)) {
            decrease_ref_count(buf, pool);
            unlock_buffer_pool(pool);
            aom_internal_error(&pbi->error, AOM_CODEC_MEM_ERROR,
                               "Failed to allocate frame buffer");
          }
          unlock_buffer_pool(pool);
          // According to the specification, valid bitstreams are required to
          // never use missing reference frames so the filling process for
          // missing frames is not normatively defined and RefValid for missing
          // frames is set to 0.

          // To make libaom more robust when the bitstream has been corrupted
          // by the loss of some frames of data, this code adds a neutral grey
          // buffer in place of missing frames, i.e.
          //
          set_planes_to_neutral_grey(seq_params, &buf->buf, 0);
          //
          // and allows the frames to be used for referencing, i.e.
          //
          pbi->valid_for_referencing[ref_idx] = 1;
          //
          // Please note such behavior is not normative and other decoders may
          // use a different approach.
          cm->ref_frame_map[ref_idx] = buf;
          buf->order_hint = order_hint;
        }
      }
    }
  }

  if (current_frame->frame_type == KEY_FRAME) {
    setup_frame_size(cm, frame_size_override_flag, rb);

    if (features->allow_screen_content_tools && !av1_superres_scaled(cm))
      features->allow_intrabc = aom_rb_read_bit(rb);
    features->allow_ref_frame_mvs = 0;
    cm->prev_frame = NULL;
  } else {
    features->allow_ref_frame_mvs = 0;

    if (current_frame->frame_type == INTRA_ONLY_FRAME) {
      cm->cur_frame->film_grain_params_present =
          seq_params->film_grain_params_present;
      setup_frame_size(cm, frame_size_override_flag, rb);
      if (features->allow_screen_content_tools && !av1_superres_scaled(cm))
        features->allow_intrabc = aom_rb_read_bit(rb);

    } else if (pbi->need_resync != 1) { /* Skip if need resync */
      int frame_refs_short_signaling = 0;
      // Frame refs short signaling is off when error resilient mode is on.
      if (seq_params->order_hint_info.enable_order_hint)
        frame_refs_short_signaling = aom_rb_read_bit(rb);

      if (frame_refs_short_signaling) {
        // == LAST_FRAME ==
        const int lst_ref = aom_rb_read_literal(rb, REF_FRAMES_LOG2);
        const RefCntBuffer *const lst_buf = cm->ref_frame_map[lst_ref];

        // == GOLDEN_FRAME ==
        const int gld_ref = aom_rb_read_literal(rb, REF_FRAMES_LOG2);
        const RefCntBuffer *const gld_buf = cm->ref_frame_map[gld_ref];

        // Most of the time, streams start with a keyframe. In that case,
        // ref_frame_map will have been filled in at that point and will not
        // contain any NULLs. However, streams are explicitly allowed to start
        // with an intra-only frame, so long as they don't then signal a
        // reference to a slot that hasn't been set yet. That's what we are
        // checking here.
        if (lst_buf == NULL)
          aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                             "Inter frame requests nonexistent reference");
        if (gld_buf == NULL)
          aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                             "Inter frame requests nonexistent reference");

        av1_set_frame_refs(cm, cm->remapped_ref_idx, lst_ref, gld_ref);
      }
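      // With short signaling only LAST_FRAME and GOLDEN_FRAME are coded;
      // av1_set_frame_refs() derives the remaining references from the
      // reference order hints, so the loop below only validates them instead
      // of reading an index for each.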

      for (int i = 0; i < INTER_REFS_PER_FRAME; ++i) {
        int ref = 0;
        if (!frame_refs_short_signaling) {
          ref = aom_rb_read_literal(rb, REF_FRAMES_LOG2);

          // Most of the time, streams start with a keyframe. In that case,
          // ref_frame_map will have been filled in at that point and will not
          // contain any NULLs. However, streams are explicitly allowed to
          // start with an intra-only frame, so long as they don't then signal
          // a reference to a slot that hasn't been set yet. That's what we
          // are checking here.
          if (cm->ref_frame_map[ref] == NULL)
            aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                               "Inter frame requests nonexistent reference");
          cm->remapped_ref_idx[i] = ref;
        } else {
          ref = cm->remapped_ref_idx[i];
        }
        // Check valid for referencing
        if (pbi->valid_for_referencing[ref] == 0)
          aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                             "Reference frame not valid for referencing");

        cm->ref_frame_sign_bias[LAST_FRAME + i] = 0;

        if (seq_params->frame_id_numbers_present_flag) {
          int frame_id_length = seq_params->frame_id_length;
          int diff_len = seq_params->delta_frame_id_length;
          int delta_frame_id_minus_1 = aom_rb_read_literal(rb, diff_len);
          int ref_frame_id =
              ((cm->current_frame_id - (delta_frame_id_minus_1 + 1) +
                (1 << frame_id_length)) %
               (1 << frame_id_length));
          // Compare values derived from delta_frame_id_minus_1 and
          // refresh_frame_flags.
          if (ref_frame_id != cm->ref_frame_id[ref])
            aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                               "Reference buffer frame ID mismatch");
        }
      }

      if (!features->error_resilient_mode && frame_size_override_flag) {
        setup_frame_size_with_refs(cm, rb);
      } else {
        setup_frame_size(cm, frame_size_override_flag, rb);
      }

      if (features->cur_frame_force_integer_mv) {
        features->allow_high_precision_mv = 0;
      } else {
        features->allow_high_precision_mv = aom_rb_read_bit(rb);
      }
      features->interp_filter = read_frame_interp_filter(rb);
      features->switchable_motion_mode = aom_rb_read_bit(rb);
    }

    cm->prev_frame = get_primary_ref_frame_buf(cm);
    if (features->primary_ref_frame != PRIMARY_REF_NONE &&
        get_primary_ref_frame_buf(cm) == NULL) {
      aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                         "Reference frame containing this frame's initial "
                         "frame context is unavailable.");
    }

    if (!(current_frame->frame_type == INTRA_ONLY_FRAME) &&
        pbi->need_resync != 1) {
      if (frame_might_allow_ref_frame_mvs(cm))
        features->allow_ref_frame_mvs = aom_rb_read_bit(rb);
      else
        features->allow_ref_frame_mvs = 0;

      for (int i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
        const RefCntBuffer *const ref_buf = get_ref_frame_buf(cm, i);
        struct scale_factors *const ref_scale_factors =
            get_ref_scale_factors(cm, i);
        av1_setup_scale_factors_for_frame(
            ref_scale_factors, ref_buf->buf.y_crop_width,
            ref_buf->buf.y_crop_height, cm->width, cm->height);
        if ((!av1_is_valid_scale(ref_scale_factors)))
          aom_internal_error(&pbi->error, AOM_CODEC_UNSUP_BITSTREAM,
                             "Reference frame has invalid dimensions");
      }
    }
  }

  av1_setup_frame_buf_refs(cm);

  av1_setup_frame_sign_bias(cm);

  cm->cur_frame->frame_type = current_frame->frame_type;

  update_ref_frame_id(pbi);

  const int might_bwd_adapt = !(seq_params->reduced_still_picture_hdr) &&
                              !(features->disable_cdf_update);
  if (might_bwd_adapt) {
    features->refresh_frame_context = aom_rb_read_bit(rb)
                                          ? REFRESH_FRAME_CONTEXT_DISABLED
                                          : REFRESH_FRAME_CONTEXT_BACKWARD;
  } else {
    features->refresh_frame_context = REFRESH_FRAME_CONTEXT_DISABLED;
  }

  cm->cur_frame->buf.bit_depth = seq_params->bit_depth;
  cm->cur_frame->buf.color_primaries = seq_params->color_primaries;
  cm->cur_frame->buf.transfer_characteristics =
      seq_params->transfer_characteristics;
  cm->cur_frame->buf.matrix_coefficients = seq_params->matrix_coefficients;
  cm->cur_frame->buf.monochrome = seq_params->monochrome;
  cm->cur_frame->buf.chroma_sample_position =
      seq_params->chroma_sample_position;
  cm->cur_frame->buf.color_range = seq_params->color_range;
  cm->cur_frame->buf.render_width = cm->render_width;
  cm->cur_frame->buf.render_height = cm->render_height;

  if (pbi->need_resync) {
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                       "Keyframe / intra-only frame required to reset decoder"
                       " state");
  }

  if (features->allow_intrabc) {
    // Set parameters corresponding to no filtering.
    struct loopfilter *lf = &cm->lf;
    lf->filter_level[0] = 0;
    lf->filter_level[1] = 0;
    cm->cdef_info.cdef_bits = 0;
    cm->cdef_info.cdef_strengths[0] = 0;
    cm->cdef_info.nb_cdef_strengths = 1;
    cm->cdef_info.cdef_uv_strengths[0] = 0;
    cm->rst_info[0].frame_restoration_type = RESTORE_NONE;
    cm->rst_info[1].frame_restoration_type = RESTORE_NONE;
    cm->rst_info[2].frame_restoration_type = RESTORE_NONE;
  }

  read_tile_info(pbi, rb);
  if (!av1_is_min_tile_width_satisfied(cm)) {
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                       "Minimum tile width requirement not satisfied");
  }

  CommonQuantParams *const quant_params = &cm->quant_params;
  setup_quantization(quant_params, av1_num_planes(cm),
                     cm->seq_params->separate_uv_delta_q, rb);
  xd->bd = (int)seq_params->bit_depth;

  CommonContexts *const above_contexts = &cm->above_contexts;
  if (above_contexts->num_planes < av1_num_planes(cm) ||
      above_contexts->num_mi_cols < cm->mi_params.mi_cols ||
      above_contexts->num_tile_rows < cm->tiles.rows) {
    av1_free_above_context_buffers(above_contexts);
    if (av1_alloc_above_context_buffers(above_contexts, cm->tiles.rows,
                                        cm->mi_params.mi_cols,
                                        av1_num_planes(cm))) {
      aom_internal_error(&pbi->error, AOM_CODEC_MEM_ERROR,
                         "Failed to allocate context buffers");
    }
  }

  if (features->primary_ref_frame == PRIMARY_REF_NONE) {
    av1_setup_past_independence(cm);
  }

  setup_segmentation(cm, rb);

  cm->delta_q_info.delta_q_res = 1;
  cm->delta_q_info.delta_lf_res = 1;
  cm->delta_q_info.delta_lf_present_flag = 0;
  cm->delta_q_info.delta_lf_multi = 0;
  cm->delta_q_info.delta_q_present_flag =
      quant_params->base_qindex > 0 ? aom_rb_read_bit(rb) : 0;
  if (cm->delta_q_info.delta_q_present_flag) {
    xd->current_base_qindex = quant_params->base_qindex;
    cm->delta_q_info.delta_q_res = 1 << aom_rb_read_literal(rb, 2);
    if (!features->allow_intrabc)
      cm->delta_q_info.delta_lf_present_flag = aom_rb_read_bit(rb);
    if (cm->delta_q_info.delta_lf_present_flag) {
      cm->delta_q_info.delta_lf_res = 1 << aom_rb_read_literal(rb, 2);
      cm->delta_q_info.delta_lf_multi = aom_rb_read_bit(rb);
      av1_reset_loop_filter_delta(xd, av1_num_planes(cm));
    }
  }

  xd->cur_frame_force_integer_mv = features->cur_frame_force_integer_mv;

  for (int i = 0; i < MAX_SEGMENTS; ++i) {
    const int qindex = av1_get_qindex(&cm->seg, i, quant_params->base_qindex);
    xd->lossless[i] =
        qindex == 0 && quant_params->y_dc_delta_q == 0 &&
        quant_params->u_dc_delta_q == 0 && quant_params->u_ac_delta_q == 0 &&
        quant_params->v_dc_delta_q == 0 && quant_params->v_ac_delta_q == 0;
    xd->qindex[i] = qindex;
  }
  features->coded_lossless = is_coded_lossless(cm, xd);
  features->all_lossless = features->coded_lossless && !av1_superres_scaled(cm);
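  // coded_lossless describes the frame as coded; once superres upscaling is
  // applied the output is no longer bit-exact, so all_lossless additionally
  // requires that superres is not in use.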
  setup_segmentation_dequant(cm, xd);
  if (features->coded_lossless) {
    cm->lf.filter_level[0] = 0;
    cm->lf.filter_level[1] = 0;
  }
  if (features->coded_lossless || !seq_params->enable_cdef) {
    cm->cdef_info.cdef_bits = 0;
    cm->cdef_info.cdef_strengths[0] = 0;
    cm->cdef_info.cdef_uv_strengths[0] = 0;
  }
  if (features->all_lossless || !seq_params->enable_restoration) {
    cm->rst_info[0].frame_restoration_type = RESTORE_NONE;
    cm->rst_info[1].frame_restoration_type = RESTORE_NONE;
    cm->rst_info[2].frame_restoration_type = RESTORE_NONE;
  }
  setup_loopfilter(cm, rb);

  if (!features->coded_lossless && seq_params->enable_cdef) {
    setup_cdef(cm, rb);
  }
  if (!features->all_lossless && seq_params->enable_restoration) {
    decode_restoration_mode(cm, rb);
  }

  features->tx_mode = read_tx_mode(rb, features->coded_lossless);
  current_frame->reference_mode = read_frame_reference_mode(cm, rb);

  av1_setup_skip_mode_allowed(cm);
  current_frame->skip_mode_info.skip_mode_flag =
      current_frame->skip_mode_info.skip_mode_allowed ? aom_rb_read_bit(rb) : 0;

  if (frame_might_allow_warped_motion(cm))
    features->allow_warped_motion = aom_rb_read_bit(rb);
  else
    features->allow_warped_motion = 0;

  features->reduced_tx_set_used = aom_rb_read_bit(rb);

  if (features->allow_ref_frame_mvs && !frame_might_allow_ref_frame_mvs(cm)) {
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                       "Frame wrongly requests reference frame MVs");
  }

  if (!frame_is_intra_only(cm)) read_global_motion(cm, rb);

  cm->cur_frame->film_grain_params_present =
      seq_params->film_grain_params_present;
  read_film_grain(cm, rb);

#if EXT_TILE_DEBUG
  if (pbi->ext_tile_debug && cm->tiles.large_scale) {
    read_ext_tile_info(pbi, rb);
    av1_set_single_tile_decoding_mode(cm);
  }
#endif  // EXT_TILE_DEBUG
  return 0;
}

struct aom_read_bit_buffer *av1_init_read_bit_buffer(
    AV1Decoder *pbi, struct aom_read_bit_buffer *rb, const uint8_t *data,
    const uint8_t *data_end) {
  rb->bit_offset = 0;
  rb->error_handler = error_handler;
  rb->error_handler_data = &pbi->common;
  rb->bit_buffer = data;
  rb->bit_buffer_end = data_end;
  return rb;
}

void av1_read_frame_size(struct aom_read_bit_buffer *rb, int num_bits_width,
                         int num_bits_height, int *width, int *height) {
  *width = aom_rb_read_literal(rb, num_bits_width) + 1;
  *height = aom_rb_read_literal(rb, num_bits_height) + 1;
}

BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb) {
  int profile = aom_rb_read_literal(rb, PROFILE_BITS);
  return (BITSTREAM_PROFILE)profile;
}

static AOM_INLINE void superres_post_decode(AV1Decoder *pbi) {
  AV1_COMMON *const cm = &pbi->common;
  BufferPool *const pool = cm->buffer_pool;

  if (!av1_superres_scaled(cm)) return;
  assert(!cm->features.all_lossless);

  av1_superres_upscale(cm, pool, 0);
}

uint32_t av1_decode_frame_headers_and_setup(AV1Decoder *pbi,
                                            struct aom_read_bit_buffer *rb,
                                            int trailing_bits_present) {
  AV1_COMMON *const cm = &pbi->common;
  const int num_planes = av1_num_planes(cm);
  MACROBLOCKD *const xd = &pbi->dcb.xd;

#if CONFIG_BITSTREAM_DEBUG
  if (cm->seq_params->order_hint_info.enable_order_hint) {
    aom_bitstream_queue_set_frame_read(cm->current_frame.order_hint * 2 +
                                       cm->show_frame);
  } else {
    // This is currently used in RTC encoding. cm->show_frame is always 1.
    assert(cm->show_frame);
    aom_bitstream_queue_set_frame_read(cm->current_frame.frame_number);
  }
#endif
#if CONFIG_MISMATCH_DEBUG
  mismatch_move_frame_idx_r();
#endif

  for (int i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
    cm->global_motion[i] = default_warp_params;
    cm->cur_frame->global_motion[i] = default_warp_params;
  }
  xd->global_motion = cm->global_motion;

  read_uncompressed_header(pbi, rb);

  if (trailing_bits_present) av1_check_trailing_bits(pbi, rb);

  if (!cm->tiles.single_tile_decoding &&
      (pbi->dec_tile_row >= 0 || pbi->dec_tile_col >= 0)) {
    pbi->dec_tile_row = -1;
    pbi->dec_tile_col = -1;
  }

  const uint32_t uncomp_hdr_size =
      (uint32_t)aom_rb_bytes_read(rb);  // Size of the uncompressed header
  YV12_BUFFER_CONFIG *new_fb = &cm->cur_frame->buf;
  xd->cur_buf = new_fb;
  if (av1_allow_intrabc(cm)) {
    av1_setup_scale_factors_for_frame(
        &cm->sf_identity, xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height,
        xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height);
  }

  // Showing a frame directly.
  if (cm->show_existing_frame) {
    if (pbi->reset_decoder_state) {
      // Use the default frame context values.
      *cm->fc = *cm->default_frame_context;
      if (!cm->fc->initialized)
        aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                           "Uninitialized entropy context.");
    }
    return uncomp_hdr_size;
  }

  cm->mi_params.setup_mi(&cm->mi_params);

  av1_calculate_ref_frame_side(cm);
  if (cm->features.allow_ref_frame_mvs) av1_setup_motion_field(cm);

  av1_setup_block_planes(xd, cm->seq_params->subsampling_x,
                         cm->seq_params->subsampling_y, num_planes);
  if (cm->features.primary_ref_frame == PRIMARY_REF_NONE) {
    // use the default frame context values
    *cm->fc = *cm->default_frame_context;
  } else {
    *cm->fc = get_primary_ref_frame_buf(cm)->frame_context;
  }
  if (!cm->fc->initialized)
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                       "Uninitialized entropy context.");

  pbi->dcb.corrupted = 0;
  return uncomp_hdr_size;
}

// Once-per-frame initialization
static AOM_INLINE void setup_frame_info(AV1Decoder *pbi) {
  AV1_COMMON *const cm = &pbi->common;

  if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
      cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
      cm->rst_info[2].frame_restoration_type != RESTORE_NONE) {
    av1_alloc_restoration_buffers(cm, /*is_sgr_enabled =*/true);
    for (int p = 0; p < av1_num_planes(cm); p++) {
      av1_alloc_restoration_struct(cm, &cm->rst_info[p], p > 0);
    }
  }

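  // The temporary MC buffer stores samples as 16-bit values in high bit depth
  // mode, so its byte size doubles when use_highbitdepth is set.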
  const int use_highbd = cm->seq_params->use_highbitdepth;
  const int buf_size = MC_TEMP_BUF_PELS << use_highbd;
  if (pbi->td.mc_buf_size != buf_size) {
    av1_free_mc_tmp_buf(&pbi->td);
    allocate_mc_tmp_buf(cm, &pbi->td, buf_size, use_highbd);
  }
}

void av1_decode_tg_tiles_and_wrapup(AV1Decoder *pbi, const uint8_t *data,
                                    const uint8_t *data_end,
                                    const uint8_t **p_data_end, int start_tile,
                                    int end_tile, int initialize_flag) {
  AV1_COMMON *const cm = &pbi->common;
  CommonTileParams *const tiles = &cm->tiles;
  MACROBLOCKD *const xd = &pbi->dcb.xd;
  const int tile_count_tg = end_tile - start_tile + 1;

  xd->error_info = cm->error;
  if (initialize_flag) setup_frame_info(pbi);
  const int num_planes = av1_num_planes(cm);

  if (pbi->max_threads > 1 && !(tiles->large_scale && !pbi->ext_tile_debug) &&
      pbi->row_mt)
    *p_data_end =
        decode_tiles_row_mt(pbi, data, data_end, start_tile, end_tile);
  else if (pbi->max_threads > 1 && tile_count_tg > 1 &&
           !(tiles->large_scale && !pbi->ext_tile_debug))
    *p_data_end = decode_tiles_mt(pbi, data, data_end, start_tile, end_tile);
  else
    *p_data_end = decode_tiles(pbi, data, data_end, start_tile, end_tile);

  // If the bit stream is monochrome, set the U and V buffers to a constant.
  if (num_planes < 3) {
    set_planes_to_neutral_grey(cm->seq_params, xd->cur_buf, 1);
  }

  if (end_tile != tiles->rows * tiles->cols - 1) {
    return;
  }

  av1_alloc_cdef_buffers(cm, &pbi->cdef_worker, &pbi->cdef_sync,
                         pbi->num_workers, 1);
  av1_alloc_cdef_sync(cm, &pbi->cdef_sync, pbi->num_workers);

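  // Post-decode, in-loop filtering runs in the order mandated for the
  // reconstruction path: deblocking, then CDEF, then superres upscaling,
  // then loop restoration.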
  if (!cm->features.allow_intrabc && !tiles->single_tile_decoding) {
    if (cm->lf.filter_level[0] || cm->lf.filter_level[1]) {
      av1_loop_filter_frame_mt(&cm->cur_frame->buf, cm, &pbi->dcb.xd, 0,
                               num_planes, 0, pbi->tile_workers,
                               pbi->num_workers, &pbi->lf_row_sync, 0);
    }

    const int do_cdef =
        !pbi->skip_loop_filter && !cm->features.coded_lossless &&
        (cm->cdef_info.cdef_bits || cm->cdef_info.cdef_strengths[0] ||
         cm->cdef_info.cdef_uv_strengths[0]);
    const int do_superres = av1_superres_scaled(cm);
    const int optimized_loop_restoration = !do_cdef && !do_superres;
    const int do_loop_restoration =
        cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
        cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
        cm->rst_info[2].frame_restoration_type != RESTORE_NONE;
    // Frame border extension is not required in the decoder
    // as it happens in extend_mc_border().
    int do_extend_border_mt = 0;
    if (!optimized_loop_restoration) {
      if (do_loop_restoration)
        av1_loop_restoration_save_boundary_lines(&pbi->common.cur_frame->buf,
                                                 cm, 0);

      if (do_cdef) {
        if (pbi->num_workers > 1) {
          av1_cdef_frame_mt(cm, &pbi->dcb.xd, pbi->cdef_worker,
                            pbi->tile_workers, &pbi->cdef_sync,
                            pbi->num_workers, av1_cdef_init_fb_row_mt,
                            do_extend_border_mt);
        } else {
          av1_cdef_frame(&pbi->common.cur_frame->buf, cm, &pbi->dcb.xd,
                         av1_cdef_init_fb_row);
        }
      }

      superres_post_decode(pbi);

      if (do_loop_restoration) {
        av1_loop_restoration_save_boundary_lines(&pbi->common.cur_frame->buf,
                                                 cm, 1);
        if (pbi->num_workers > 1) {
          av1_loop_restoration_filter_frame_mt(
              (YV12_BUFFER_CONFIG *)xd->cur_buf, cm, optimized_loop_restoration,
              pbi->tile_workers, pbi->num_workers, &pbi->lr_row_sync,
              &pbi->lr_ctxt, do_extend_border_mt);
        } else {
          av1_loop_restoration_filter_frame((YV12_BUFFER_CONFIG *)xd->cur_buf,
                                            cm, optimized_loop_restoration,
                                            &pbi->lr_ctxt);
        }
      }
    } else {
      // In the no-CDEF, no-superres case, run an optimized version of the
      // loop restoration filter.
      if (do_loop_restoration) {
        if (pbi->num_workers > 1) {
          av1_loop_restoration_filter_frame_mt(
              (YV12_BUFFER_CONFIG *)xd->cur_buf, cm, optimized_loop_restoration,
              pbi->tile_workers, pbi->num_workers, &pbi->lr_row_sync,
              &pbi->lr_ctxt, do_extend_border_mt);
        } else {
          av1_loop_restoration_filter_frame((YV12_BUFFER_CONFIG *)xd->cur_buf,
                                            cm, optimized_loop_restoration,
                                            &pbi->lr_ctxt);
        }
      }
    }
  }

  if (!pbi->dcb.corrupted) {
    if (cm->features.refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
      assert(pbi->context_update_tile_id < pbi->allocated_tiles);
      *cm->fc = pbi->tile_data[pbi->context_update_tile_id].tctx;
      av1_reset_cdf_symbol_counters(cm->fc);
    }
  } else {
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
                       "Decode failed. Frame data is corrupted.");
  }

#if CONFIG_INSPECTION
  if (pbi->inspect_cb != NULL) {
    (*pbi->inspect_cb)(pbi, pbi->inspect_ctx);
  }
#endif

  // Non frame parallel update frame context here.
  if (!tiles->large_scale) {
    cm->cur_frame->frame_context = *cm->fc;
  }

  if (cm->show_frame && !cm->seq_params->order_hint_info.enable_order_hint) {
    ++cm->current_frame.frame_number;
  }
}