/*
 * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 codec.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#ifndef AVCODEC_H264DEC_H
#define AVCODEC_H264DEC_H

#include "libavutil/buffer.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/thread.h"

#include "cabac.h"
#include "error_resilience.h"
#include "h264_parse.h"
#include "h264_ps.h"
#include "h264_sei.h"
#include "h2645_parse.h"
#include "h264chroma.h"
#include "h264dsp.h"
#include "h264pred.h"
#include "h264qpel.h"
#include "internal.h"
#include "mpegutils.h"
#include "parser.h"
#include "qpeldsp.h"
#include "rectangle.h"
#include "videodsp.h"

#define H264_MAX_PICTURE_COUNT 36

#define MAX_MMCO_COUNT         66

#define MAX_DELAYED_PIC_COUNT  16

/* Compiling in interlaced support reduces the speed
 * of progressive decoding by about 2%. */
#define ALLOW_INTERLACE

#define FMO 0

/**
 * The maximum number of slices supported by the decoder.
 * Must be a power of 2.
 */
#define MAX_SLICES 32

#ifdef ALLOW_INTERLACE
#define MB_MBAFF(h)      (h)->mb_mbaff
#define MB_FIELD(sl)     (sl)->mb_field_decoding_flag
#define FRAME_MBAFF(h)   (h)->mb_aff_frame
#define FIELD_PICTURE(h) ((h)->picture_structure != PICT_FRAME)
#define LEFT_MBS 2
#define LTOP     0
#define LBOT     1
#define LEFT(i)  (i)
#else
#define MB_MBAFF(h)      0
#define MB_FIELD(sl)     0
#define FRAME_MBAFF(h)   0
#define FIELD_PICTURE(h) 0
#undef  IS_INTERLACED
#define IS_INTERLACED(mb_type) 0
#define LEFT_MBS 1
#define LTOP     0
#define LBOT     0
#define LEFT(i)  0
#endif
#define FIELD_OR_MBAFF_PICTURE(h) (FRAME_MBAFF(h) || FIELD_PICTURE(h))

#ifndef CABAC
#define CABAC(h) (h)->ps.pps->cabac
#endif

#define CHROMA(h)    ((h)->ps.sps->chroma_format_idc)
#define CHROMA422(h) ((h)->ps.sps->chroma_format_idc == 2)
#define CHROMA444(h) ((h)->ps.sps->chroma_format_idc == 3)

#define MB_TYPE_REF0   MB_TYPE_ACPRED // dirty, but it fits in 16 bits
#define MB_TYPE_8x8DCT 0x01000000
#define IS_REF0(a)     ((a) & MB_TYPE_REF0)
#define IS_8x8DCT(a)   ((a) & MB_TYPE_8x8DCT)

/**
 * Memory management control operation opcode.
 */
typedef enum MMCOOpcode {
    MMCO_END = 0,
    MMCO_SHORT2UNUSED,
    MMCO_LONG2UNUSED,
    MMCO_SHORT2LONG,
    MMCO_SET_MAX_LONG,
    MMCO_RESET,
    MMCO_LONG,
} MMCOOpcode;

/**
 * Memory management control operation.
 */
typedef struct MMCO {
    MMCOOpcode opcode;
    int short_pic_num; ///< pic_num without wrapping (pic_num & max_pic_num)
    int long_arg;      ///< index, pic_num, or num long refs depending on opcode
} MMCO;
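
/* Illustrative note (not used by the code): the enum above follows the
 * memory_management_control_operation codes of the H.264 spec, so e.g. a slice
 * whose dec_ref_pic_marking() carries operation 1 (mark a short-term picture as
 * unused) would roughly be stored as
 *
 *     MMCO mmco = { .opcode = MMCO_SHORT2UNUSED, .short_pic_num = pic_num };
 *
 * with MMCO_END terminating the list and MMCO_RESET marking all references
 * unused. */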

typedef struct H264Picture {
    AVFrame *f;
    ThreadFrame tf;

    AVBufferRef *qscale_table_buf;
    int8_t *qscale_table;

    AVBufferRef *motion_val_buf[2];
    int16_t (*motion_val[2])[2];

    AVBufferRef *mb_type_buf;
    uint32_t *mb_type;

    AVBufferRef *hwaccel_priv_buf;
    void *hwaccel_picture_private; ///< hardware accelerator private data

    AVBufferRef *ref_index_buf[2];
    int8_t *ref_index[2];

    int field_poc[2];   ///< top/bottom POC
    int poc;            ///< frame POC
    int frame_num;      ///< frame_num (raw frame_num from slice header)
    int mmco_reset;     /**< MMCO_RESET sets this to 1. Reordering code must
                             not mix pictures before and after MMCO_RESET. */
    int pic_id;         /**< pic_num (short -> no wrap version of pic_num,
                             pic_num & max_pic_num; long -> long_pic_num) */
    int long_ref;       ///< 1->long term reference 0->short term reference
    int ref_poc[2][2][32];  ///< POCs of the frames/fields used as reference (FIXME need per slice)
    int ref_count[2][2];    ///< number of entries in ref_poc (FIXME need per slice)
    int mbaff;          ///< 1 -> MBAFF frame 0 -> not MBAFF
    int field_picture;  ///< whether or not picture was encoded in separate fields

    int reference;
    int recovered;      ///< picture at IDR or recovery point + recovery count
    int invalid_gap;
    int sei_recovery_frame_cnt;

    AVBufferRef *pps_buf;
    const PPS *pps;

    int mb_width, mb_height;
    int mb_stride;
} H264Picture;

typedef struct H264Ref {
    uint8_t *data[3];
    int linesize[3];

    int reference;
    int poc;
    int pic_id;

    H264Picture *parent;
} H264Ref;

typedef struct H264SliceContext {
    struct H264Context *h264;
    GetBitContext gb;
    ERContext er;

    int slice_num;
    int slice_type;
    int slice_type_nos; ///< slice type without the switching (S) variants: SI/SP are remapped to I/P
    int slice_type_fixed;

    int qscale;
    int chroma_qp[2];   // QPc
    int qp_thresh;      ///< QP threshold to skip loopfilter
    int last_qscale_diff;

    // deblock
    int deblocking_filter;      ///< disable_deblocking_filter_idc with 1 <-> 0
    int slice_alpha_c0_offset;
    int slice_beta_offset;

    H264PredWeightTable pwt;

    int prev_mb_skipped;
    int next_mb_skipped;

    int chroma_pred_mode;
    int intra16x16_pred_mode;

    int8_t intra4x4_pred_mode_cache[5 * 8];
    int8_t(*intra4x4_pred_mode);

    int topleft_mb_xy;
    int top_mb_xy;
    int topright_mb_xy;
    int left_mb_xy[LEFT_MBS];

    int topleft_type;
    int top_type;
    int topright_type;
    int left_type[LEFT_MBS];

    const uint8_t *left_block;
    int topleft_partition;

    unsigned int topleft_samples_available;
    unsigned int top_samples_available;
    unsigned int topright_samples_available;
    unsigned int left_samples_available;

    ptrdiff_t linesize, uvlinesize;
    ptrdiff_t mb_linesize;      ///< may be equal to linesize or 2 * linesize, for mbaff
    ptrdiff_t mb_uvlinesize;

    int mb_x, mb_y;
    int mb_xy;
    int resync_mb_x;
    int resync_mb_y;
    unsigned int first_mb_addr;
    // index of the first MB of the next slice
    int next_slice_idx;
    int mb_skip_run;
    int is_complex;

    int picture_structure;
    int mb_field_decoding_flag;
    int mb_mbaff;       ///< mb_aff_frame && mb_field_decoding_flag

    int redundant_pic_count;

    /**
     * number of neighbors (top and/or left) that used 8x8 dct
     */
    int neighbor_transform_size;

    int direct_spatial_mv_pred;
    int col_parity;
    int col_fieldoff;

    int cbp;
    int top_cbp;
    int left_cbp;

    int dist_scale_factor[32];
    int dist_scale_factor_field[2][32];
    int map_col_to_list0[2][16 + 32];
    int map_col_to_list0_field[2][2][16 + 32];

    /**
     * num_ref_idx_l0/1_active_minus1 + 1
     */
    unsigned int ref_count[2];  ///< counts frames or fields, depending on current mb mode
    unsigned int list_count;
    H264Ref ref_list[2][48];    /**< 0..15: frame refs, 16..47: mbaff field refs.
                                 *   Reordered version of default_ref_list
                                 *   according to picture reordering in slice header */
    struct {
        uint8_t op;
        uint32_t val;
    } ref_modifications[2][32];
    int nb_ref_modifications[2];

    unsigned int pps_id;

    const uint8_t *intra_pcm_ptr;
    int16_t *dc_val_base;

    uint8_t *bipred_scratchpad;
    uint8_t *edge_emu_buffer;
    uint8_t (*top_borders[2])[(16 * 3) * 2];
    int bipred_scratchpad_allocated;
    int edge_emu_buffer_allocated;
    int top_borders_allocated[2];

    /**
     * non zero coeff count cache.
     * is 64 if not available.
     */
    DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8];

    /**
     * Motion vector cache.
     */
    DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
    DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8];
    DECLARE_ALIGNED(16, uint8_t, mvd_cache)[2][5 * 8][2];
    uint8_t direct_cache[5 * 8];

    DECLARE_ALIGNED(8, uint16_t, sub_mb_type)[4];

    ///< as a DCT coefficient is int32_t in high depth, we need to reserve twice the space.
    DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];
    DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];
    ///< as mb is addressed by scantable[i] and scantable is uint8_t we can either
    ///< check that i is not too large or ensure that there is some unused stuff after mb
    int16_t mb_padding[256 * 2];

    uint8_t (*mvd_table[2])[2];

    /**
     * Cabac
     */
    CABACContext cabac;
    uint8_t cabac_state[1024];
    int cabac_init_idc;

    MMCO mmco[MAX_MMCO_COUNT];
    int nb_mmco;
    int explicit_ref_marking;

    int frame_num;
    int poc_lsb;
    int delta_poc_bottom;
    int delta_poc[2];
    int curr_pic_num;
    int max_pic_num;
} H264SliceContext;

/**
 * H264Context
 */
typedef struct H264Context {
    const AVClass *class;
    AVCodecContext *avctx;
    VideoDSPContext vdsp;
    H264DSPContext h264dsp;
    H264ChromaContext h264chroma;
    H264QpelContext h264qpel;

    H264Picture DPB[H264_MAX_PICTURE_COUNT];
    H264Picture *cur_pic_ptr;
    H264Picture cur_pic;
    H264Picture last_pic_for_ec;

    H264SliceContext *slice_ctx;
    int nb_slice_ctx;
    int nb_slice_ctx_queued;

    H2645Packet pkt;

    int pixel_shift;    ///< 0 for 8-bit H.264, 1 for high-bit-depth H.264

    /* coded dimensions -- 16 * mb w/h */
    int width, height;
    int chroma_x_shift, chroma_y_shift;

    int droppable;
    int coded_picture_number;

    int context_initialized;
    int flags;
    int workaround_bugs;
    int x264_build;
    /* Set when slice threading is used and at least one slice uses deblocking
     * mode 1 (i.e. across slice boundaries). Then we disable the loop filter
     * during normal MB decoding and execute it serially at the end.
     */
    int postpone_filter;

    /*
     * Set to 1 when the current picture is IDR, 0 otherwise.
     */
    int picture_idr;

    int crop_left;
    int crop_right;
    int crop_top;
    int crop_bottom;

    int8_t(*intra4x4_pred_mode);
    H264PredContext hpc;

    uint8_t (*non_zero_count)[48];

#define LIST_NOT_USED -1 // FIXME rename?
#define PART_NOT_AVAILABLE -2

    /**
     * block_offset[ 0..23] for frame macroblocks
     * block_offset[24..47] for field macroblocks
     */
    int block_offset[2 * (16 * 3)];

    uint32_t *mb2b_xy;  // FIXME are these 4 a good idea?
    uint32_t *mb2br_xy;
    int b_stride;       // FIXME use s->b4_stride

    uint16_t *slice_table;      ///< slice_table_base + 2*mb_stride + 1

    // interlacing specific flags
    int mb_aff_frame;
    int picture_structure;
    int first_field;

    uint8_t *list_counts;       ///< Array of list_count per MB specifying the slice type

    /* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0, 1, 2), 0x0? luma_cbp */
    uint16_t *cbp_table;
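    /* Illustrative use of the layout described above (a sketch, not code from
     * the decoder):
     *     cbp_table[mb_xy] & 0x100        -- luma DC has non-zero coefficients
     *     cbp_table[mb_xy] & (0x80 >> i)  -- chroma DC of plane i (0 = cb, 1 = cr) is non-zero
     *     cbp_table[mb_xy] & 0x0F         -- the 4-bit luma coded_block_pattern
     */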

    /* chroma_pred_mode for i4x4 or i16x16, else 0 */
    uint8_t *chroma_pred_mode_table;
    uint8_t (*mvd_table[2])[2];
    uint8_t *direct_table;

    uint8_t scan_padding[16];
    uint8_t zigzag_scan[16];
    uint8_t zigzag_scan8x8[64];
    uint8_t zigzag_scan8x8_cavlc[64];
    uint8_t field_scan[16];
    uint8_t field_scan8x8[64];
    uint8_t field_scan8x8_cavlc[64];
    uint8_t zigzag_scan_q0[16];
    uint8_t zigzag_scan8x8_q0[64];
    uint8_t zigzag_scan8x8_cavlc_q0[64];
    uint8_t field_scan_q0[16];
    uint8_t field_scan8x8_q0[64];
    uint8_t field_scan8x8_cavlc_q0[64];

    int mb_y;
    int mb_height, mb_width;
    int mb_stride;
    int mb_num;

    // =============================================================
    // Things below are not used in the MB or more inner code

    int nal_ref_idc;
    int nal_unit_type;

    int has_slice;      ///< a slice NAL was found in the packet; set by decode_nal_units(). Its state does not need to be preserved outside h264_decode_frame().

    /**
     * Used to parse AVC variant of H.264
     */
    int is_avc;             ///< this flag is != 0 if codec is avc1
    int nal_length_size;    ///< Number of bytes used for nal length (1, 2 or 4)
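    /* Illustrative sketch (not decoder code): in the "avc1"/avcC layout each
     * NAL unit is preceded by a big-endian length field of nal_length_size
     * bytes instead of a start code, so a packet is walked roughly as
     *
     *     nal_size = 0;
     *     for (i = 0; i < nal_length_size; i++)
     *         nal_size = (nal_size << 8) | buf[pos++];
     *
     * followed by nal_size bytes of NAL payload. */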

    int bit_depth_luma;         ///< luma bit depth from sps to detect changes
    int chroma_format_idc;      ///< chroma format from sps to detect changes

    H264ParamSets ps;

    uint16_t *slice_table_base;

    H264POCContext poc;

    H264Ref default_ref[2];
    H264Picture *short_ref[32];
    H264Picture *long_ref[32];
    H264Picture *delayed_pic[MAX_DELAYED_PIC_COUNT + 2]; // FIXME size?
    int last_pocs[MAX_DELAYED_PIC_COUNT];
    H264Picture *next_output_pic;
    int next_outputed_poc;

    /**
     * memory management control operations buffer.
     */
    MMCO mmco[MAX_MMCO_COUNT];
    int nb_mmco;
    int mmco_reset;
    int explicit_ref_marking;

    int long_ref_count;     ///< number of actual long term references
    int short_ref_count;    ///< number of actual short term references

    /**
     * @name Members for slice based multithreading
     * @{
     */
    /**
     * current slice number, used to initialize slice_num of each thread/context
     */
    int current_slice;

    /** @} */

    /**
     * Complements sei_pic_struct: SEI_PIC_STRUCT_TOP_BOTTOM and
     * SEI_PIC_STRUCT_BOTTOM_TOP normally indicate interlaced frames, but
     * soft-telecined material may carry these values as well.
     * This is used in an attempt to flag soft telecine as progressive.
     */
    int prev_interlaced_frame;

    /**
     * Whether the SEI recovery points look valid.
     */
    int valid_recovery_point;

    /**
     * recovery_frame is the frame_num at which the next frame should
     * be fully constructed.
     *
     * Set to -1 when not expecting a recovery point.
     */
    int recovery_frame;

    /**
     * We have seen an IDR, so all the following frames in coded order are correctly
     * decodable.
     */
#define FRAME_RECOVERED_IDR  (1 << 0)
    /**
     * Sufficient number of frames have been decoded since a SEI recovery point,
     * so all the following frames in presentation order are correct.
     */
#define FRAME_RECOVERED_SEI  (1 << 1)

    int frame_recovered;    ///< Initial frame has been completely recovered
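    /* Rough usage sketch (illustrative, not copied from the decoder): when an
     * IDR picture starts, the recovery state would be updated along the lines of
     *
     *     h->frame_recovered |= FRAME_RECOVERED_IDR;
     *
     * and output code can then treat pictures as displayable once the relevant
     * FRAME_RECOVERED_* bit has been propagated to them. */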

    int has_recovery_point;

    int missing_fields;

    /* for frame threading, this is set to 1
     * after finish_setup() has been called, so we cannot modify
     * some context properties (which are supposed to stay constant between
     * slices) anymore */
    int setup_finished;

    int cur_chroma_format_idc;
    int cur_bit_depth_luma;
    int16_t slice_row[MAX_SLICES];  ///< to detect when MAX_SLICES is too low

    /* original AVCodecContext dimensions, used to handle container
     * cropping */
    int width_from_caller;
    int height_from_caller;

    int enable_er;

    H264SEIContext sei;

    AVBufferPool *qscale_table_pool;
    AVBufferPool *mb_type_pool;
    AVBufferPool *motion_val_pool;
    AVBufferPool *ref_index_pool;
    int ref2frm[MAX_SLICES][2][64]; ///< reference to frame number lists, used in the loop filter, the first 2 are for -2,-1
} H264Context;

extern const uint16_t ff_h264_mb_sizes[4];

/**
 * Reconstruct bitstream slice_type.
 */
int ff_h264_get_slice_type(const H264SliceContext *sl);

/**
 * Allocate tables.
 * Needs width/height.
 */
int ff_h264_alloc_tables(H264Context *h);

int ff_h264_decode_ref_pic_list_reordering(H264SliceContext *sl, void *logctx);
int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl);
void ff_h264_remove_all_refs(H264Context *h);

/**
 * Execute the reference picture marking (memory management control operations).
 */
int ff_h264_execute_ref_pic_marking(H264Context *h);

int ff_h264_decode_ref_pic_marking(H264SliceContext *sl, GetBitContext *gb,
                                   const H2645NAL *nal, void *logctx);

void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl);
void ff_h264_decode_init_vlc(void);

/**
 * Decode a macroblock
 * @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR on error
 */
int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl);

/**
 * Decode a CABAC coded macroblock
 * @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR on error
 */
int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl);

void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl);

void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl);
void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl);
void ff_h264_pred_direct_motion(const H264Context *const h, H264SliceContext *sl,
                                int *mb_type);

void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y,
                            uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
                            unsigned int linesize, unsigned int uvlinesize);
void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y,
                       uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
                       unsigned int linesize, unsigned int uvlinesize);

/*
 * o-o o-o
 *  / / /
 * o-o o-o
 *   ,---'
 * o-o o-o
 *  / / /
 * o-o o-o
 */

/* Scan8 organization:
 *    0 1 2 3 4 5 6 7
 * 0  DY    y y y y y
 * 1        y Y Y Y Y
 * 2        y Y Y Y Y
 * 3        y Y Y Y Y
 * 4        y Y Y Y Y
 * 5  DU    u u u u u
 * 6        u U U U U
 * 7        u U U U U
 * 8        u U U U U
 * 9        u U U U U
 * 10 DV    v v v v v
 * 11       v V V V V
 * 12       v V V V V
 * 13       v V V V V
 * 14       v V V V V
 * DY/DU/DV are for luma/chroma DC.
 */

#define LUMA_DC_BLOCK_INDEX   48
#define CHROMA_DC_BLOCK_INDEX 49

// This table must be here because scan8[constant] must be known at compile time
static const uint8_t scan8[16 * 3 + 3] = {
    4 +  1 * 8, 5 +  1 * 8, 4 +  2 * 8, 5 +  2 * 8,
    6 +  1 * 8, 7 +  1 * 8, 6 +  2 * 8, 7 +  2 * 8,
    4 +  3 * 8, 5 +  3 * 8, 4 +  4 * 8, 5 +  4 * 8,
    6 +  3 * 8, 7 +  3 * 8, 6 +  4 * 8, 7 +  4 * 8,
    4 +  6 * 8, 5 +  6 * 8, 4 +  7 * 8, 5 +  7 * 8,
    6 +  6 * 8, 7 +  6 * 8, 6 +  7 * 8, 7 +  7 * 8,
    4 +  8 * 8, 5 +  8 * 8, 4 +  9 * 8, 5 +  9 * 8,
    6 +  8 * 8, 7 +  8 * 8, 6 +  9 * 8, 7 +  9 * 8,
    4 + 11 * 8, 5 + 11 * 8, 4 + 12 * 8, 5 + 12 * 8,
    6 + 11 * 8, 7 + 11 * 8, 6 + 12 * 8, 7 + 12 * 8,
    4 + 13 * 8, 5 + 13 * 8, 4 + 14 * 8, 5 + 14 * 8,
    6 + 13 * 8, 7 + 13 * 8, 6 + 14 * 8, 7 + 14 * 8,
    0 +  0 * 8, 0 +  5 * 8, 0 + 10 * 8
};
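
/* Illustrative use (a sketch, not decoder code): the per-macroblock caches in
 * H264SliceContext are laid out on the 8-wide grid drawn above, so block n of
 * the current macroblock is addressed as e.g.
 *
 *     sl->non_zero_count_cache[scan8[n]]
 *     sl->mv_cache[list][scan8[n]]
 *
 * with the extra top row and left column holding the neighbouring macroblocks'
 * values for prediction. */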

static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
{
#if HAVE_BIGENDIAN
    return (b & 0xFFFF) + (a << 16);
#else
    return (a & 0xFFFF) + (b << 16);
#endif
}

static av_always_inline uint16_t pack8to16(unsigned a, unsigned b)
{
#if HAVE_BIGENDIAN
    return (b & 0xFF) + (a << 8);
#else
    return (a & 0xFF) + (b << 8);
#endif
}
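
/* Typical use (illustrative sketch): the packed result feeds fill_rectangle(),
 * e.g. packing a motion vector's two 16-bit components into one 32-bit value,
 * roughly
 *
 *     fill_rectangle(sl->mv_cache[list][scan8[0]], 4, 4, 8,
 *                    pack16to32(mx, my), 4);
 *
 * so a whole block of the cache is filled with a single repeated value. */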

/**
 * Get the chroma qp.
 */
static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
{
    return pps->chroma_qp_table[t][qscale];
}
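
/* For example (illustrative), the per-slice chroma QPs would be derived as
 *
 *     sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);  // Cb
 *     sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);  // Cr
 *
 * using the chroma QP lookup tables stored in the PPS. */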

/**
 * Get the predicted intra4x4 prediction mode.
 */
static av_always_inline int pred_intra_mode(const H264Context *h,
                                            H264SliceContext *sl, int n)
{
    const int index8 = scan8[n];
    const int left   = sl->intra4x4_pred_mode_cache[index8 - 1];
    const int top    = sl->intra4x4_pred_mode_cache[index8 - 8];
    const int min    = FFMIN(left, top);

    ff_tlog(h->avctx, "mode:%d %d min:%d\n", left, top, min);

    if (min < 0)
        return DC_PRED;
    else
        return min;
}

static av_always_inline void write_back_intra_pred_mode(const H264Context *h,
                                                        H264SliceContext *sl)
{
    int8_t *i4x4       = sl->intra4x4_pred_mode + h->mb2br_xy[sl->mb_xy];
    int8_t *i4x4_cache = sl->intra4x4_pred_mode_cache;

    AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
    i4x4[4] = i4x4_cache[7 + 8 * 3];
    i4x4[5] = i4x4_cache[7 + 8 * 2];
    i4x4[6] = i4x4_cache[7 + 8 * 1];
}

static av_always_inline void write_back_non_zero_count(const H264Context *h,
                                                       H264SliceContext *sl)
{
    const int mb_xy    = sl->mb_xy;
    uint8_t *nnz       = h->non_zero_count[mb_xy];
    uint8_t *nnz_cache = sl->non_zero_count_cache;

    AV_COPY32(&nnz[ 0], &nnz_cache[4 + 8 *  1]);
    AV_COPY32(&nnz[ 4], &nnz_cache[4 + 8 *  2]);
    AV_COPY32(&nnz[ 8], &nnz_cache[4 + 8 *  3]);
    AV_COPY32(&nnz[12], &nnz_cache[4 + 8 *  4]);
    AV_COPY32(&nnz[16], &nnz_cache[4 + 8 *  6]);
    AV_COPY32(&nnz[20], &nnz_cache[4 + 8 *  7]);
    AV_COPY32(&nnz[32], &nnz_cache[4 + 8 * 11]);
    AV_COPY32(&nnz[36], &nnz_cache[4 + 8 * 12]);

    if (!h->chroma_y_shift) {
        AV_COPY32(&nnz[24], &nnz_cache[4 + 8 *  8]);
        AV_COPY32(&nnz[28], &nnz_cache[4 + 8 *  9]);
        AV_COPY32(&nnz[40], &nnz_cache[4 + 8 * 13]);
        AV_COPY32(&nnz[44], &nnz_cache[4 + 8 * 14]);
    }
}

static av_always_inline void write_back_motion_list(const H264Context *h,
                                                    H264SliceContext *sl,
                                                    int b_stride,
                                                    int b_xy, int b8_xy,
                                                    int mb_type, int list)
{
    int16_t(*mv_dst)[2] = &h->cur_pic.motion_val[list][b_xy];
    int16_t(*mv_src)[2] = &sl->mv_cache[list][scan8[0]];
    AV_COPY128(mv_dst + 0 * b_stride, mv_src + 8 * 0);
    AV_COPY128(mv_dst + 1 * b_stride, mv_src + 8 * 1);
    AV_COPY128(mv_dst + 2 * b_stride, mv_src + 8 * 2);
    AV_COPY128(mv_dst + 3 * b_stride, mv_src + 8 * 3);
    if (CABAC(h)) {
        uint8_t (*mvd_dst)[2] = &sl->mvd_table[list][FMO ? 8 * sl->mb_xy
                                                         : h->mb2br_xy[sl->mb_xy]];
        uint8_t(*mvd_src)[2] = &sl->mvd_cache[list][scan8[0]];
        if (IS_SKIP(mb_type)) {
            AV_ZERO128(mvd_dst);
        } else {
            AV_COPY64(mvd_dst, mvd_src + 8 * 3);
            AV_COPY16(mvd_dst + 3 + 3, mvd_src + 3 + 8 * 0);
            AV_COPY16(mvd_dst + 3 + 2, mvd_src + 3 + 8 * 1);
            AV_COPY16(mvd_dst + 3 + 1, mvd_src + 3 + 8 * 2);
        }
    }

    {
        int8_t *ref_index = &h->cur_pic.ref_index[list][b8_xy];
        int8_t *ref_cache = sl->ref_cache[list];
        ref_index[0 + 0 * 2] = ref_cache[scan8[0]];
        ref_index[1 + 0 * 2] = ref_cache[scan8[4]];
        ref_index[0 + 1 * 2] = ref_cache[scan8[8]];
        ref_index[1 + 1 * 2] = ref_cache[scan8[12]];
    }
}

static av_always_inline void write_back_motion(const H264Context *h,
                                               H264SliceContext *sl,
                                               int mb_type)
{
    const int b_stride = h->b_stride;
    const int b_xy  = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride; // try mb2b(8)_xy
    const int b8_xy = 4 * sl->mb_xy;

    if (USES_LIST(mb_type, 0)) {
        write_back_motion_list(h, sl, b_stride, b_xy, b8_xy, mb_type, 0);
    } else {
        fill_rectangle(&h->cur_pic.ref_index[0][b8_xy],
                       2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
    }
    if (USES_LIST(mb_type, 1))
        write_back_motion_list(h, sl, b_stride, b_xy, b8_xy, mb_type, 1);

    if (sl->slice_type_nos == AV_PICTURE_TYPE_B && CABAC(h)) {
        if (IS_8X8(mb_type)) {
            uint8_t *direct_table = &h->direct_table[4 * sl->mb_xy];
            direct_table[1] = sl->sub_mb_type[1] >> 1;
            direct_table[2] = sl->sub_mb_type[2] >> 1;
            direct_table[3] = sl->sub_mb_type[3] >> 1;
        }
    }
}

static av_always_inline int get_dct8x8_allowed(const H264Context *h, H264SliceContext *sl)
{
    if (h->ps.sps->direct_8x8_inference_flag)
        return !(AV_RN64A(sl->sub_mb_type) &
                 ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8) *
                  0x0001000100010001ULL));
    else
        return !(AV_RN64A(sl->sub_mb_type) &
                 ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8 | MB_TYPE_DIRECT2) *
                  0x0001000100010001ULL));
}

static inline int find_start_code(const uint8_t *buf, int buf_size,
                                  int buf_index, int next_avc)
{
    uint32_t state = -1;

    buf_index = avpriv_find_start_code(buf + buf_index, buf + next_avc + 1, &state) - buf - 1;

    return FFMIN(buf_index, buf_size);
}

int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup);

int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src);
void ff_h264_unref_picture(H264Context *h, H264Picture *pic);

int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl);

void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height);

/**
 * Submit a slice for decoding.
 *
 * Parse the slice header, starting a new field/frame if necessary. If any
 * slices are queued for the previous field, they are decoded.
 */
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal);
int ff_h264_execute_decode_slices(H264Context *h);
int ff_h264_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src);

void ff_h264_flush_change(H264Context *h);

void ff_h264_free_tables(H264Context *h);

void ff_h264_set_erpic(ERPicture *dst, H264Picture *src);

#endif /* AVCODEC_H264DEC_H */