
Lines matching full:s in vp9.c

4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
53 VP9Context *s = avctx->priv_data; in vp9_alloc_entries() local
57 if (s->entries) in vp9_alloc_entries()
58 av_freep(&s->entries); in vp9_alloc_entries()
60 s->entries = av_malloc_array(n, sizeof(atomic_int)); in vp9_alloc_entries()
61 if (!s->entries) in vp9_alloc_entries()
65 atomic_init(&s->entries[i], 0); in vp9_alloc_entries()
70 static void vp9_report_tile_progress(VP9Context *s, int field, int n) { in vp9_report_tile_progress() argument
71 pthread_mutex_lock(&s->progress_mutex); in vp9_report_tile_progress()
72 atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release); in vp9_report_tile_progress()
73 pthread_cond_signal(&s->progress_cond); in vp9_report_tile_progress()
74 pthread_mutex_unlock(&s->progress_mutex); in vp9_report_tile_progress()
77 static void vp9_await_tile_progress(VP9Context *s, int field, int n) { in vp9_await_tile_progress() argument
78 if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n) in vp9_await_tile_progress()
81 pthread_mutex_lock(&s->progress_mutex); in vp9_await_tile_progress()
82 while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n) in vp9_await_tile_progress()
83 pthread_cond_wait(&s->progress_cond, &s->progress_mutex); in vp9_await_tile_progress()
84 pthread_mutex_unlock(&s->progress_mutex); in vp9_await_tile_progress()
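The two helpers above form the producer/consumer handshake used for slice threading: a tile thread publishes finished superblock rows with a release-ordered atomic add plus a condition-variable signal, and the loop-filter thread waits until the per-row counter reaches the number of tile columns (see the vp9_report_tile_progress/vp9_await_tile_progress calls further down). A minimal self-contained sketch of the same pattern, with hypothetical names and assuming only pthreads and C11 <stdatomic.h>:

    #include <pthread.h>
    #include <stdatomic.h>

    typedef struct RowProgress {
        atomic_int      done;    /* tiles finished for this row */
        pthread_mutex_t mutex;
        pthread_cond_t  cond;
    } RowProgress;

    /* producer: one tile thread finished its part of the row */
    static void row_report(RowProgress *p, int n)
    {
        pthread_mutex_lock(&p->mutex);
        atomic_fetch_add_explicit(&p->done, n, memory_order_release);
        pthread_cond_signal(&p->cond);
        pthread_mutex_unlock(&p->mutex);
    }

    /* consumer: block until all n_tiles tiles have reported this row */
    static void row_await(RowProgress *p, int n_tiles)
    {
        if (atomic_load_explicit(&p->done, memory_order_acquire) >= n_tiles)
            return;                          /* lock-free fast path */
        pthread_mutex_lock(&p->mutex);
        while (atomic_load_explicit(&p->done, memory_order_relaxed) < n_tiles)
            pthread_cond_wait(&p->cond, &p->mutex);
        pthread_mutex_unlock(&p->mutex);
    }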
108 VP9Context *s = avctx->priv_data; in vp9_frame_alloc() local
115 sz = 64 * s->sb_cols * s->sb_rows; in vp9_frame_alloc()
116 if (sz != s->frame_extradata_pool_size) { in vp9_frame_alloc()
117 av_buffer_pool_uninit(&s->frame_extradata_pool); in vp9_frame_alloc()
118 s->frame_extradata_pool = av_buffer_pool_init(sz * (1 + sizeof(VP9mvrefPair)), NULL); in vp9_frame_alloc()
119 if (!s->frame_extradata_pool) { in vp9_frame_alloc()
120 s->frame_extradata_pool_size = 0; in vp9_frame_alloc()
123 s->frame_extradata_pool_size = sz; in vp9_frame_alloc()
125 f->extradata = av_buffer_pool_get(s->frame_extradata_pool); in vp9_frame_alloc()
191 VP9Context *s = avctx->priv_data; in update_size() local
193 int bytesperpixel = s->bytesperpixel, ret, cols, rows; in update_size()
198 if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) { in update_size()
202 switch (s->pix_fmt) { in update_size()
238 *fmtp++ = s->pix_fmt; in update_size()
246 s->gf_fmt = s->pix_fmt; in update_size()
247 s->w = w; in update_size()
248 s->h = h; in update_size()
254 if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt) in update_size()
257 s->last_fmt = s->pix_fmt; in update_size()
258 s->sb_cols = (w + 63) >> 6; in update_size()
259 s->sb_rows = (h + 63) >> 6; in update_size()
260 s->cols = (w + 7) >> 3; in update_size()
261 s->rows = (h + 7) >> 3; in update_size()
262 lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1; in update_size()
264 #define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var) in update_size()
265 av_freep(&s->intra_pred_data[0]); in update_size()
268 p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel + in update_size()
269 lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx))); in update_size()
272 assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel); in update_size()
273 assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel); in update_size()
274 assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel); in update_size()
275 assign(s->above_y_nnz_ctx, uint8_t *, 16); in update_size()
276 assign(s->above_mode_ctx, uint8_t *, 16); in update_size()
277 assign(s->above_mv_ctx, VP56mv(*)[2], 16); in update_size()
278 assign(s->above_uv_nnz_ctx[0], uint8_t *, 16); in update_size()
279 assign(s->above_uv_nnz_ctx[1], uint8_t *, 16); in update_size()
280 assign(s->above_partition_ctx, uint8_t *, 8); in update_size()
281 assign(s->above_skip_ctx, uint8_t *, 8); in update_size()
282 assign(s->above_txfm_ctx, uint8_t *, 8); in update_size()
283 assign(s->above_segpred_ctx, uint8_t *, 8); in update_size()
284 assign(s->above_intra_ctx, uint8_t *, 8); in update_size()
285 assign(s->above_comp_ctx, uint8_t *, 8); in update_size()
286 assign(s->above_ref_ctx, uint8_t *, 8); in update_size()
287 assign(s->above_filter_ctx, uint8_t *, 8); in update_size()
288 assign(s->lflvl, VP9Filter *, lflvl_len); in update_size()
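The assign() macro above slices a single av_malloc() block into all of the per-superblock-column "above" context arrays, so one allocation (and one av_freep(&s->intra_pred_data[0])) covers every array. A stripped-down sketch of the same carve-up idiom, with made-up field names and sizes, assuming only the C standard library:

    #include <stdint.h>
    #include <stdlib.h>

    struct Ctx {
        uint8_t *above_a;   /* 16 bytes per column */
        uint8_t *above_b;   /*  8 bytes per column */
    };

    /* hand out consecutive slices of one malloc'ed block */
    #define assign(var, type, n) do { var = (type)p; p += cols * (n) * sizeof(*var); } while (0)

    static int alloc_above(struct Ctx *c, size_t cols)
    {
        uint8_t *p = malloc(cols * (16 + 8));
        if (!p)
            return -1;
        assign(c->above_a, uint8_t *, 16);
        assign(c->above_b, uint8_t *, 8);
        return 0;   /* free(c->above_a) later releases the whole block */
    }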
291 if (s->td) { in update_size()
292 for (i = 0; i < s->active_tile_cols; i++) in update_size()
293 vp9_tile_data_free(&s->td[i]); in update_size()
296 if (s->s.h.bpp != s->last_bpp) { in update_size()
297 ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT); in update_size()
298 ff_videodsp_init(&s->vdsp, s->s.h.bpp); in update_size()
299 s->last_bpp = s->s.h.bpp; in update_size()
308 VP9Context *s = avctx->priv_data; in update_block_buffers() local
309 int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel; in update_block_buffers()
310 VP9TileData *td = &s->td[0]; in update_block_buffers()
312 …if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pas… in update_block_buffers()
316 chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v); in update_block_buffers()
317 chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v); in update_block_buffers()
318 if (s->s.frames[CUR_FRAME].uses_2pass) { in update_block_buffers()
319 int sbs = s->sb_cols * s->sb_rows; in update_block_buffers()
321 td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block)); in update_block_buffers()
333 td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure)); in update_block_buffers()
338 for (i = 1; i < s->active_tile_cols; i++) in update_block_buffers()
339 vp9_tile_data_free(&s->td[i]); in update_block_buffers()
341 for (i = 0; i < s->active_tile_cols; i++) { in update_block_buffers()
342 s->td[i].b_base = av_malloc(sizeof(VP9Block)); in update_block_buffers()
343 s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) + in update_block_buffers()
345 if (!s->td[i].b_base || !s->td[i].block_base) in update_block_buffers()
347 s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel; in update_block_buffers()
348 s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel; in update_block_buffers()
349 s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel); in update_block_buffers()
350 s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16; in update_block_buffers()
351 s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs; in update_block_buffers()
354 s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure)); in update_block_buffers()
355 if (!s->td[i].block_structure) in update_block_buffers()
360 s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass; in update_block_buffers()
446 VP9Context *s = avctx->priv_data; in read_colorspace_details() local
447 int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12 in read_colorspace_details()
449 s->bpp_index = bits; in read_colorspace_details()
450 s->s.h.bpp = 8 + bits * 2; in read_colorspace_details()
451 s->bytesperpixel = (7 + s->s.h.bpp) >> 3; in read_colorspace_details()
452 avctx->colorspace = colorspaces[get_bits(&s->gb, 3)]; in read_colorspace_details()
457 s->ss_h = s->ss_v = 0; in read_colorspace_details()
459 s->pix_fmt = pix_fmt_rgb[bits]; in read_colorspace_details()
461 if (get_bits1(&s->gb)) { in read_colorspace_details()
479 avctx->color_range = get_bits1(&s->gb) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG; in read_colorspace_details()
481 s->ss_h = get_bits1(&s->gb); in read_colorspace_details()
482 s->ss_v = get_bits1(&s->gb); in read_colorspace_details()
483 s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h]; in read_colorspace_details()
484 if (s->pix_fmt == AV_PIX_FMT_YUV420P) { in read_colorspace_details()
488 } else if (get_bits1(&s->gb)) { in read_colorspace_details()
494 s->ss_h = s->ss_v = 1; in read_colorspace_details()
495 s->pix_fmt = pix_fmt_for_ss[bits][1][1]; in read_colorspace_details()
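pix_fmt_for_ss[bits][ss_v][ss_h] used above maps the bit-depth index and the two subsampling flags read from the bitstream to an AVPixelFormat. A hedged sketch of what the 8-bit row of such a table looks like (the real table in vp9.c also carries 10- and 12-bit rows):

    /* indexed [ss_v][ss_h]; a set flag halves resolution in that direction */
    static const enum AVPixelFormat pix_fmt_for_ss_8bit[2][2] = {
        { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P },   /* ss_v = 0 */
        { AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV420P },   /* ss_v = 1 */
    };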
505 VP9Context *s = avctx->priv_data; in decode_frame_header() local
511 if ((ret = init_get_bits8(&s->gb, data, size)) < 0) { in decode_frame_header()
515 if (get_bits(&s->gb, 2) != 0x2) { // frame marker in decode_frame_header()
519 avctx->profile = get_bits1(&s->gb); in decode_frame_header()
520 avctx->profile |= get_bits1(&s->gb) << 1; in decode_frame_header()
521 if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb); in decode_frame_header()
526 s->s.h.profile = avctx->profile; in decode_frame_header()
527 if (get_bits1(&s->gb)) { in decode_frame_header()
528 *ref = get_bits(&s->gb, 3); in decode_frame_header()
532 s->last_keyframe = s->s.h.keyframe; in decode_frame_header()
533 s->s.h.keyframe = !get_bits1(&s->gb); in decode_frame_header()
535 last_invisible = s->s.h.invisible; in decode_frame_header()
536 s->s.h.invisible = !get_bits1(&s->gb); in decode_frame_header()
537 s->s.h.errorres = get_bits1(&s->gb); in decode_frame_header()
538 s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible; in decode_frame_header()
540 if (s->s.h.keyframe) { in decode_frame_header()
541 if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode in decode_frame_header()
548 s->s.h.refreshrefmask = 0xff; in decode_frame_header()
549 w = get_bits(&s->gb, 16) + 1; in decode_frame_header()
550 h = get_bits(&s->gb, 16) + 1; in decode_frame_header()
551 if (get_bits1(&s->gb)) // display size in decode_frame_header()
552 skip_bits(&s->gb, 32); in decode_frame_header()
554 s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0; in decode_frame_header()
555 s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2); in decode_frame_header()
556 if (s->s.h.intraonly) { in decode_frame_header()
557 if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode in decode_frame_header()
565 s->ss_h = s->ss_v = 1; in decode_frame_header()
566 s->s.h.bpp = 8; in decode_frame_header()
567 s->bpp_index = 0; in decode_frame_header()
568 s->bytesperpixel = 1; in decode_frame_header()
569 s->pix_fmt = AV_PIX_FMT_YUV420P; in decode_frame_header()
573 s->s.h.refreshrefmask = get_bits(&s->gb, 8); in decode_frame_header()
574 w = get_bits(&s->gb, 16) + 1; in decode_frame_header()
575 h = get_bits(&s->gb, 16) + 1; in decode_frame_header()
576 if (get_bits1(&s->gb)) // display size in decode_frame_header()
577 skip_bits(&s->gb, 32); in decode_frame_header()
579 s->s.h.refreshrefmask = get_bits(&s->gb, 8); in decode_frame_header()
580 s->s.h.refidx[0] = get_bits(&s->gb, 3); in decode_frame_header()
581 s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres; in decode_frame_header()
582 s->s.h.refidx[1] = get_bits(&s->gb, 3); in decode_frame_header()
583 s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres; in decode_frame_header()
584 s->s.h.refidx[2] = get_bits(&s->gb, 3); in decode_frame_header()
585 s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres; in decode_frame_header()
586 if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] || in decode_frame_header()
587 !s->s.refs[s->s.h.refidx[1]].f->buf[0] || in decode_frame_header()
588 !s->s.refs[s->s.h.refidx[2]].f->buf[0]) { in decode_frame_header()
592 if (get_bits1(&s->gb)) { in decode_frame_header()
593 w = s->s.refs[s->s.h.refidx[0]].f->width; in decode_frame_header()
594 h = s->s.refs[s->s.h.refidx[0]].f->height; in decode_frame_header()
595 } else if (get_bits1(&s->gb)) { in decode_frame_header()
596 w = s->s.refs[s->s.h.refidx[1]].f->width; in decode_frame_header()
597 h = s->s.refs[s->s.h.refidx[1]].f->height; in decode_frame_header()
598 } else if (get_bits1(&s->gb)) { in decode_frame_header()
599 w = s->s.refs[s->s.h.refidx[2]].f->width; in decode_frame_header()
600 h = s->s.refs[s->s.h.refidx[2]].f->height; in decode_frame_header()
602 w = get_bits(&s->gb, 16) + 1; in decode_frame_header()
603 h = get_bits(&s->gb, 16) + 1; in decode_frame_header()
608 s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w && in decode_frame_header()
609 s->s.frames[CUR_FRAME].tf.f->height == h; in decode_frame_header()
610 if (get_bits1(&s->gb)) // display size in decode_frame_header()
611 skip_bits(&s->gb, 32); in decode_frame_header()
612 s->s.h.highprecisionmvs = get_bits1(&s->gb); in decode_frame_header()
613 s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE : in decode_frame_header()
614 get_bits(&s->gb, 2); in decode_frame_header()
615 s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] || in decode_frame_header()
616 s->s.h.signbias[0] != s->s.h.signbias[2]; in decode_frame_header()
617 if (s->s.h.allowcompinter) { in decode_frame_header()
618 if (s->s.h.signbias[0] == s->s.h.signbias[1]) { in decode_frame_header()
619 s->s.h.fixcompref = 2; in decode_frame_header()
620 s->s.h.varcompref[0] = 0; in decode_frame_header()
621 s->s.h.varcompref[1] = 1; in decode_frame_header()
622 } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) { in decode_frame_header()
623 s->s.h.fixcompref = 1; in decode_frame_header()
624 s->s.h.varcompref[0] = 0; in decode_frame_header()
625 s->s.h.varcompref[1] = 2; in decode_frame_header()
627 s->s.h.fixcompref = 0; in decode_frame_header()
628 s->s.h.varcompref[0] = 1; in decode_frame_header()
629 s->s.h.varcompref[1] = 2; in decode_frame_header()
634 s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb); in decode_frame_header()
635 s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb); in decode_frame_header()
636 s->s.h.framectxid = c = get_bits(&s->gb, 2); in decode_frame_header()
637 if (s->s.h.keyframe || s->s.h.intraonly) in decode_frame_header()
638 s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes in decode_frame_header()
641 if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) { in decode_frame_header()
643 s->s.h.lf_delta.ref[0] = 1; in decode_frame_header()
644 s->s.h.lf_delta.ref[1] = 0; in decode_frame_header()
645 s->s.h.lf_delta.ref[2] = -1; in decode_frame_header()
646 s->s.h.lf_delta.ref[3] = -1; in decode_frame_header()
647 s->s.h.lf_delta.mode[0] = 0; in decode_frame_header()
648 s->s.h.lf_delta.mode[1] = 0; in decode_frame_header()
649 memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat)); in decode_frame_header()
651 s->s.h.filter.level = get_bits(&s->gb, 6); in decode_frame_header()
652 sharp = get_bits(&s->gb, 3); in decode_frame_header()
655 if (s->s.h.filter.sharpness != sharp) { in decode_frame_header()
665 s->filter_lut.lim_lut[i] = limit; in decode_frame_header()
666 s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit; in decode_frame_header()
669 s->s.h.filter.sharpness = sharp; in decode_frame_header()
670 if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) { in decode_frame_header()
671 if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) { in decode_frame_header()
673 if (get_bits1(&s->gb)) in decode_frame_header()
674 s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6); in decode_frame_header()
676 if (get_bits1(&s->gb)) in decode_frame_header()
677 s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6); in decode_frame_header()
682 s->s.h.yac_qi = get_bits(&s->gb, 8); in decode_frame_header()
683 s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0; in decode_frame_header()
684 s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0; in decode_frame_header()
685 s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0; in decode_frame_header()
686 s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 && in decode_frame_header()
687 s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0; in decode_frame_header()
688 if (s->s.h.lossless) in decode_frame_header()
692 if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) { in decode_frame_header()
693 if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) { in decode_frame_header()
695 s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ? in decode_frame_header()
696 get_bits(&s->gb, 8) : 255; in decode_frame_header()
697 if ((s->s.h.segmentation.temporal = get_bits1(&s->gb))) in decode_frame_header()
699 s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ? in decode_frame_header()
700 get_bits(&s->gb, 8) : 255; in decode_frame_header()
703 if (get_bits1(&s->gb)) { in decode_frame_header()
704 s->s.h.segmentation.absolute_vals = get_bits1(&s->gb); in decode_frame_header()
706 if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb))) in decode_frame_header()
707 s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8); in decode_frame_header()
708 if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb))) in decode_frame_header()
709 s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6); in decode_frame_header()
710 if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb))) in decode_frame_header()
711 s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2); in decode_frame_header()
712 s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb); in decode_frame_header()
718 for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) { in decode_frame_header()
721 if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) { in decode_frame_header()
722 if (s->s.h.segmentation.absolute_vals) in decode_frame_header()
723 qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8); in decode_frame_header()
725 qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8); in decode_frame_header()
727 qyac = s->s.h.yac_qi; in decode_frame_header()
729 qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8); in decode_frame_header()
730 quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8); in decode_frame_header()
731 quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8); in decode_frame_header()
734 s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc]; in decode_frame_header()
735 s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac]; in decode_frame_header()
736 s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc]; in decode_frame_header()
737 s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac]; in decode_frame_header()
739 sh = s->s.h.filter.level >= 32; in decode_frame_header()
740 if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) { in decode_frame_header()
741 if (s->s.h.segmentation.absolute_vals) in decode_frame_header()
742 lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6); in decode_frame_header()
744 lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6); in decode_frame_header()
746 lflvl = s->s.h.filter.level; in decode_frame_header()
748 if (s->s.h.lf_delta.enabled) { in decode_frame_header()
749 s->s.h.segmentation.feat[i].lflvl[0][0] = in decode_frame_header()
750 s->s.h.segmentation.feat[i].lflvl[0][1] = in decode_frame_header()
751 av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6); in decode_frame_header()
753 s->s.h.segmentation.feat[i].lflvl[j][0] = in decode_frame_header()
754 av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] + in decode_frame_header()
755 s->s.h.lf_delta.mode[0]) * (1 << sh)), 6); in decode_frame_header()
756 s->s.h.segmentation.feat[i].lflvl[j][1] = in decode_frame_header()
757 av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] + in decode_frame_header()
758 s->s.h.lf_delta.mode[1]) * (1 << sh)), 6); in decode_frame_header()
761 memset(s->s.h.segmentation.feat[i].lflvl, lflvl, in decode_frame_header()
762 sizeof(s->s.h.segmentation.feat[i].lflvl)); in decode_frame_header()
769 w, h, s->pix_fmt); in decode_frame_header()
772 for (s->s.h.tiling.log2_tile_cols = 0; in decode_frame_header()
773 s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols); in decode_frame_header()
774 s->s.h.tiling.log2_tile_cols++) ; in decode_frame_header()
775 for (max = 0; (s->sb_cols >> max) >= 4; max++) ; in decode_frame_header()
777 while (max > s->s.h.tiling.log2_tile_cols) { in decode_frame_header()
778 if (get_bits1(&s->gb)) in decode_frame_header()
779 s->s.h.tiling.log2_tile_cols++; in decode_frame_header()
783 s->s.h.tiling.log2_tile_rows = decode012(&s->gb); in decode_frame_header()
784 s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows; in decode_frame_header()
785 if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) { in decode_frame_header()
789 if (s->td) { in decode_frame_header()
790 for (i = 0; i < s->active_tile_cols; i++) in decode_frame_header()
791 vp9_tile_data_free(&s->td[i]); in decode_frame_header()
792 av_freep(&s->td); in decode_frame_header()
795 s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols; in decode_frame_header()
796 s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ? in decode_frame_header()
797 s->s.h.tiling.tile_cols : 1; in decode_frame_header()
798 vp9_alloc_entries(avctx, s->sb_rows); in decode_frame_header()
802 n_range_coders = s->s.h.tiling.tile_cols; in decode_frame_header()
804 s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) + in decode_frame_header()
806 if (!s->td) in decode_frame_header()
808 rc = (VP56RangeCoder *) &s->td[s->active_tile_cols]; in decode_frame_header()
809 for (i = 0; i < s->active_tile_cols; i++) { in decode_frame_header()
810 s->td[i].s = s; in decode_frame_header()
811 s->td[i].c_b = rc; in decode_frame_header()
817 if (!s->s.h.keyframe && !s->s.h.intraonly) { in decode_frame_header()
820 AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f; in decode_frame_header()
825 "Ref pixfmt (%s) did not match current frame (%s)", in decode_frame_header()
830 s->mvscale[i][0] = s->mvscale[i][1] = 0; in decode_frame_header()
838 s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE; in decode_frame_header()
841 s->mvscale[i][0] = (refw << 14) / w; in decode_frame_header()
842 s->mvscale[i][1] = (refh << 14) / h; in decode_frame_header()
843 s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14; in decode_frame_header()
844 s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14; in decode_frame_header()
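mvscale above is a Q14 fixed-point ratio between the reference frame size and the current frame size, and mvstep is that ratio applied to a 16-pixel step. A quick worked example with hypothetical dimensions (not taken from any stream):

    /* reference frame 640 px wide, current frame 1280 px wide */
    mvscale = (640 << 14) / 1280;   /* = 8192, i.e. 0.5 in Q14            */
    mvstep  = 16 * 8192 >> 14;      /* = 8 reference px per 16 current px */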
854 if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) { in decode_frame_header()
855 s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p = in decode_frame_header()
856 s->prob_ctx[3].p = ff_vp9_default_probs; in decode_frame_header()
857 memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs, in decode_frame_header()
859 memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs, in decode_frame_header()
861 memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs, in decode_frame_header()
863 memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs, in decode_frame_header()
865 } else if (s->s.h.intraonly && s->s.h.resetctx == 2) { in decode_frame_header()
866 s->prob_ctx[c].p = ff_vp9_default_probs; in decode_frame_header()
867 memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs, in decode_frame_header()
872 s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16); in decode_frame_header()
873 s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8; in decode_frame_header()
875 data2 = align_get_bits(&s->gb); in decode_frame_header()
880 ret = ff_vp56_init_range_decoder(&s->c, data2, size2); in decode_frame_header()
884 if (vp56_rac_get_prob_branchy(&s->c, 128)) { // marker bit in decode_frame_header()
889 for (i = 0; i < s->active_tile_cols; i++) { in decode_frame_header()
890 if (s->s.h.keyframe || s->s.h.intraonly) { in decode_frame_header()
891 memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef)); in decode_frame_header()
892 memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob)); in decode_frame_header()
894 memset(&s->td[i].counts, 0, sizeof(s->td[0].counts)); in decode_frame_header()
896 s->td[i].nb_block_structure = 0; in decode_frame_header()
902 s->prob.p = s->prob_ctx[c].p; in decode_frame_header()
905 if (s->s.h.lossless) { in decode_frame_header()
906 s->s.h.txfmmode = TX_4X4; in decode_frame_header()
908 s->s.h.txfmmode = vp8_rac_get_uint(&s->c, 2); in decode_frame_header()
909 if (s->s.h.txfmmode == 3) in decode_frame_header()
910 s->s.h.txfmmode += vp8_rac_get(&s->c); in decode_frame_header()
912 if (s->s.h.txfmmode == TX_SWITCHABLE) { in decode_frame_header()
914 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
915 s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]); in decode_frame_header()
918 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
919 s->prob.p.tx16p[i][j] = in decode_frame_header()
920 update_prob(&s->c, s->prob.p.tx16p[i][j]); in decode_frame_header()
923 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
924 s->prob.p.tx32p[i][j] = in decode_frame_header()
925 update_prob(&s->c, s->prob.p.tx32p[i][j]); in decode_frame_header()
931 uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i]; in decode_frame_header()
932 if (vp8_rac_get(&s->c)) { in decode_frame_header()
937 uint8_t *p = s->prob.coef[i][j][k][l][m]; in decode_frame_header()
942 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
943 p[n] = update_prob(&s->c, r[n]); in decode_frame_header()
954 uint8_t *p = s->prob.coef[i][j][k][l][m]; in decode_frame_header()
962 if (s->s.h.txfmmode == i) in decode_frame_header()
968 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
969 s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]); in decode_frame_header()
970 if (!s->s.h.keyframe && !s->s.h.intraonly) { in decode_frame_header()
973 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
974 s->prob.p.mv_mode[i][j] = in decode_frame_header()
975 update_prob(&s->c, s->prob.p.mv_mode[i][j]); in decode_frame_header()
977 if (s->s.h.filtermode == FILTER_SWITCHABLE) in decode_frame_header()
980 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
981 s->prob.p.filter[i][j] = in decode_frame_header()
982 update_prob(&s->c, s->prob.p.filter[i][j]); in decode_frame_header()
985 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
986 s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]); in decode_frame_header()
988 if (s->s.h.allowcompinter) { in decode_frame_header()
989 s->s.h.comppredmode = vp8_rac_get(&s->c); in decode_frame_header()
990 if (s->s.h.comppredmode) in decode_frame_header()
991 s->s.h.comppredmode += vp8_rac_get(&s->c); in decode_frame_header()
992 if (s->s.h.comppredmode == PRED_SWITCHABLE) in decode_frame_header()
994 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
995 s->prob.p.comp[i] = in decode_frame_header()
996 update_prob(&s->c, s->prob.p.comp[i]); in decode_frame_header()
998 s->s.h.comppredmode = PRED_SINGLEREF; in decode_frame_header()
1001 if (s->s.h.comppredmode != PRED_COMPREF) { in decode_frame_header()
1003 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
1004 s->prob.p.single_ref[i][0] = in decode_frame_header()
1005 update_prob(&s->c, s->prob.p.single_ref[i][0]); in decode_frame_header()
1006 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
1007 s->prob.p.single_ref[i][1] = in decode_frame_header()
1008 update_prob(&s->c, s->prob.p.single_ref[i][1]); in decode_frame_header()
1012 if (s->s.h.comppredmode != PRED_SINGLEREF) { in decode_frame_header()
1014 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
1015 s->prob.p.comp_ref[i] = in decode_frame_header()
1016 update_prob(&s->c, s->prob.p.comp_ref[i]); in decode_frame_header()
1021 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
1022 s->prob.p.y_mode[i][j] = in decode_frame_header()
1023 update_prob(&s->c, s->prob.p.y_mode[i][j]); in decode_frame_header()
1028 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
1029 s->prob.p.partition[3 - i][j][k] = in decode_frame_header()
1030 update_prob(&s->c, in decode_frame_header()
1031 s->prob.p.partition[3 - i][j][k]); in decode_frame_header()
1035 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
1036 s->prob.p.mv_joint[i] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1; in decode_frame_header()
1039 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
1040 s->prob.p.mv_comp[i].sign = in decode_frame_header()
1041 (vp8_rac_get_uint(&s->c, 7) << 1) | 1; in decode_frame_header()
1044 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
1045 s->prob.p.mv_comp[i].classes[j] = in decode_frame_header()
1046 (vp8_rac_get_uint(&s->c, 7) << 1) | 1; in decode_frame_header()
1048 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
1049 s->prob.p.mv_comp[i].class0 = in decode_frame_header()
1050 (vp8_rac_get_uint(&s->c, 7) << 1) | 1; in decode_frame_header()
1053 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
1054 s->prob.p.mv_comp[i].bits[j] = in decode_frame_header()
1055 (vp8_rac_get_uint(&s->c, 7) << 1) | 1; in decode_frame_header()
1061 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
1062 s->prob.p.mv_comp[i].class0_fp[j][k] = in decode_frame_header()
1063 (vp8_rac_get_uint(&s->c, 7) << 1) | 1; in decode_frame_header()
1066 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
1067 s->prob.p.mv_comp[i].fp[j] = in decode_frame_header()
1068 (vp8_rac_get_uint(&s->c, 7) << 1) | 1; in decode_frame_header()
1071 if (s->s.h.highprecisionmvs) { in decode_frame_header()
1073 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
1074 s->prob.p.mv_comp[i].class0_hp = in decode_frame_header()
1075 (vp8_rac_get_uint(&s->c, 7) << 1) | 1; in decode_frame_header()
1077 if (vp56_rac_get_prob_branchy(&s->c, 252)) in decode_frame_header()
1078 s->prob.p.mv_comp[i].hp = in decode_frame_header()
1079 (vp8_rac_get_uint(&s->c, 7) << 1) | 1; in decode_frame_header()
1090 const VP9Context *s = td->s; in decode_sb() local
1091 int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) | in decode_sb()
1093 …const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] : in decode_sb()
1094 s->prob.p.partition[bl][c]; in decode_sb()
1097 AVFrame *f = s->s.frames[CUR_FRAME].tf.f; in decode_sb()
1099 int bytesperpixel = s->bytesperpixel; in decode_sb()
1104 } else if (col + hbs < s->cols) { // FIXME why not <=? in decode_sb()
1105 if (row + hbs < s->rows) { // FIXME why not <=? in decode_sb()
1114 uvoff += hbs * 8 * uv_stride >> s->ss_v; in decode_sb()
1120 uvoff += hbs * 8 * bytesperpixel >> s->ss_h; in decode_sb()
1127 uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1); in decode_sb()
1129 uvoff += hbs * 8 * uv_stride >> s->ss_v; in decode_sb()
1133 uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1); in decode_sb()
1143 uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1); in decode_sb()
1148 } else if (row + hbs < s->rows) { // FIXME why not <=? in decode_sb()
1153 uvoff += hbs * 8 * uv_stride >> s->ss_v; in decode_sb()
1169 const VP9Context *s = td->s; in decode_sb_mem() local
1172 AVFrame *f = s->s.frames[CUR_FRAME].tf.f; in decode_sb_mem()
1174 int bytesperpixel = s->bytesperpixel; in decode_sb_mem()
1181 if (b->bp == PARTITION_H && row + hbs < s->rows) { in decode_sb_mem()
1183 uvoff += hbs * 8 * uv_stride >> s->ss_v; in decode_sb_mem()
1185 } else if (b->bp == PARTITION_V && col + hbs < s->cols) { in decode_sb_mem()
1187 uvoff += hbs * 8 * bytesperpixel >> s->ss_h; in decode_sb_mem()
1192 if (col + hbs < s->cols) { // FIXME why not <=? in decode_sb_mem()
1193 if (row + hbs < s->rows) { in decode_sb_mem()
1195 uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1); in decode_sb_mem()
1197 uvoff += hbs * 8 * uv_stride >> s->ss_v; in decode_sb_mem()
1201 uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1); in decode_sb_mem()
1204 uvoff += hbs * 8 * bytesperpixel >> s->ss_h; in decode_sb_mem()
1207 } else if (row + hbs < s->rows) { in decode_sb_mem()
1209 uvoff += hbs * 8 * uv_stride >> s->ss_v; in decode_sb_mem()
1223 static void free_buffers(VP9Context *s) in free_buffers() argument
1227 av_freep(&s->intra_pred_data[0]); in free_buffers()
1228 for (i = 0; i < s->active_tile_cols; i++) in free_buffers()
1229 vp9_tile_data_free(&s->td[i]); in free_buffers()
1234 VP9Context *s = avctx->priv_data; in vp9_decode_free() local
1238 vp9_frame_unref(avctx, &s->s.frames[i]); in vp9_decode_free()
1239 av_frame_free(&s->s.frames[i].tf.f); in vp9_decode_free()
1241 av_buffer_pool_uninit(&s->frame_extradata_pool); in vp9_decode_free()
1243 ff_thread_release_ext_buffer(avctx, &s->s.refs[i]); in vp9_decode_free()
1244 av_frame_free(&s->s.refs[i].f); in vp9_decode_free()
1245 ff_thread_release_ext_buffer(avctx, &s->next_refs[i]); in vp9_decode_free()
1246 av_frame_free(&s->next_refs[i].f); in vp9_decode_free()
1249 free_buffers(s); in vp9_decode_free()
1251 av_freep(&s->entries); in vp9_decode_free()
1252 ff_pthread_free(s, vp9_context_offsets); in vp9_decode_free()
1254 av_freep(&s->td); in vp9_decode_free()
1261 VP9Context *s = avctx->priv_data; in decode_tiles() local
1262 VP9TileData *td = &s->td[0]; in decode_tiles()
1269 f = s->s.frames[CUR_FRAME].tf.f; in decode_tiles()
1272 bytesperpixel = s->bytesperpixel; in decode_tiles()
1275 for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) { in decode_tiles()
1277 tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows); in decode_tiles()
1279 for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) { in decode_tiles()
1282 if (tile_col == s->s.h.tiling.tile_cols - 1 && in decode_tiles()
1283 tile_row == s->s.h.tiling.tile_rows - 1) { in decode_tiles()
1291 ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0); in decode_tiles()
1298 ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0); in decode_tiles()
1306 row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) { in decode_tiles()
1307 VP9Filter *lflvl_ptr = s->lflvl; in decode_tiles()
1310 for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) { in decode_tiles()
1312 tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols); in decode_tiles()
1314 if (s->pass != 2) { in decode_tiles()
1317 if (s->s.h.keyframe || s->s.h.intraonly) { in decode_tiles()
1332 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) { in decode_tiles()
1335 if (s->pass != 1) { in decode_tiles()
1339 if (s->pass == 2) { in decode_tiles()
1352 if (s->pass == 1) in decode_tiles()
1357 if (row + 8 < s->rows) { in decode_tiles()
1358 memcpy(s->intra_pred_data[0], in decode_tiles()
1360 8 * s->cols * bytesperpixel); in decode_tiles()
1361 memcpy(s->intra_pred_data[1], in decode_tiles()
1362 f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv, in decode_tiles()
1363 8 * s->cols * bytesperpixel >> s->ss_h); in decode_tiles()
1364 memcpy(s->intra_pred_data[2], in decode_tiles()
1365 f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv, in decode_tiles()
1366 8 * s->cols * bytesperpixel >> s->ss_h); in decode_tiles()
1370 if (s->s.h.filter.level) { in decode_tiles()
1373 lflvl_ptr = s->lflvl; in decode_tiles()
1374 for (col = 0; col < s->cols; in decode_tiles()
1376 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) { in decode_tiles()
1385 ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0); in decode_tiles()
1396 VP9Context *s = avctx->priv_data; in decode_tiles_mt() local
1397 VP9TileData *td = &s->td[jobnr]; in decode_tiles_mt()
1399 int bytesperpixel = s->bytesperpixel, row, col, tile_row; in decode_tiles_mt()
1405 f = s->s.frames[CUR_FRAME].tf.f; in decode_tiles_mt()
1410 jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols); in decode_tiles_mt()
1412 uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3); in decode_tiles_mt()
1414 lflvl_ptr_base = s->lflvl+(tile_col_start >> 3); in decode_tiles_mt()
1416 for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) { in decode_tiles_mt()
1418 tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows); in decode_tiles_mt()
1422 row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) { in decode_tiles_mt()
1424 VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3); in decode_tiles_mt()
1428 if (s->s.h.keyframe || s->s.h.intraonly) { in decode_tiles_mt()
1440 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) { in decode_tiles_mt()
1451 if (row + 8 < s->rows) { in decode_tiles_mt()
1452 memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel), in decode_tiles_mt()
1455 memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h), in decode_tiles_mt()
1456 f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv, in decode_tiles_mt()
1457 8 * tile_cols_len * bytesperpixel >> s->ss_h); in decode_tiles_mt()
1458 memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h), in decode_tiles_mt()
1459 f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv, in decode_tiles_mt()
1460 8 * tile_cols_len * bytesperpixel >> s->ss_h); in decode_tiles_mt()
1463 vp9_report_tile_progress(s, row >> 3, 1); in decode_tiles_mt()
1472 VP9Context *s = avctx->priv_data; in loopfilter_proc() local
1475 int bytesperpixel = s->bytesperpixel, col, i; in loopfilter_proc()
1478 f = s->s.frames[CUR_FRAME].tf.f; in loopfilter_proc()
1482 for (i = 0; i < s->sb_rows; i++) { in loopfilter_proc()
1483 vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols); in loopfilter_proc()
1485 if (s->s.h.filter.level) { in loopfilter_proc()
1487 uvoff = (ls_uv * 64 >> s->ss_v)*i; in loopfilter_proc()
1488 lflvl_ptr = s->lflvl+s->sb_cols*i; in loopfilter_proc()
1489 for (col = 0; col < s->cols; in loopfilter_proc()
1491 uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) { in loopfilter_proc()
1501 static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame) in vp9_export_enc_params() argument
1506 if (s->s.h.segmentation.enabled) { in vp9_export_enc_params()
1507 for (tile = 0; tile < s->active_tile_cols; tile++) in vp9_export_enc_params()
1508 nb_blocks += s->td[tile].nb_block_structure; in vp9_export_enc_params()
1516 par->qp = s->s.h.yac_qi; in vp9_export_enc_params()
1517 par->delta_qp[0][0] = s->s.h.ydc_qdelta; in vp9_export_enc_params()
1518 par->delta_qp[1][0] = s->s.h.uvdc_qdelta; in vp9_export_enc_params()
1519 par->delta_qp[2][0] = s->s.h.uvdc_qdelta; in vp9_export_enc_params()
1520 par->delta_qp[1][1] = s->s.h.uvac_qdelta; in vp9_export_enc_params()
1521 par->delta_qp[2][1] = s->s.h.uvac_qdelta; in vp9_export_enc_params()
1527 for (tile = 0; tile < s->active_tile_cols; tile++) { in vp9_export_enc_params()
1528 VP9TileData *td = &s->td[tile]; in vp9_export_enc_params()
1534 uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col]; in vp9_export_enc_params()
1541 if (s->s.h.segmentation.feat[seg_id].q_enabled) { in vp9_export_enc_params()
1542 b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val; in vp9_export_enc_params()
1543 if (s->s.h.segmentation.absolute_vals) in vp9_export_enc_params()
1558 VP9Context *s = avctx->priv_data; in vp9_decode_frame() local
1560 int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map && in vp9_decode_frame()
1561 (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map); in vp9_decode_frame()
1567 if (!s->s.refs[ref].f->buf[0]) { in vp9_decode_frame()
1571 if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0) in vp9_decode_frame()
1576 if (s->next_refs[i].f->buf[0]) in vp9_decode_frame()
1577 ff_thread_release_ext_buffer(avctx, &s->next_refs[i]); in vp9_decode_frame()
1578 if (s->s.refs[i].f->buf[0] && in vp9_decode_frame()
1579 (ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0) in vp9_decode_frame()
1588 if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) { in vp9_decode_frame()
1589 if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0]) in vp9_decode_frame()
1590 vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]); in vp9_decode_frame()
1591 …if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[… in vp9_decode_frame()
1592 … (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0) in vp9_decode_frame()
1595 if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0]) in vp9_decode_frame()
1596 vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_MVPAIR]); in vp9_decode_frame()
1597 …if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[… in vp9_decode_frame()
1598 (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0) in vp9_decode_frame()
1600 if (s->s.frames[CUR_FRAME].tf.f->buf[0]) in vp9_decode_frame()
1601 vp9_frame_unref(avctx, &s->s.frames[CUR_FRAME]); in vp9_decode_frame()
1602 if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0) in vp9_decode_frame()
1604 f = s->s.frames[CUR_FRAME].tf.f; in vp9_decode_frame()
1605 f->key_frame = s->s.h.keyframe; in vp9_decode_frame()
1606 f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; in vp9_decode_frame()
1608 if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] && in vp9_decode_frame()
1609 (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width || in vp9_decode_frame()
1610 s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) { in vp9_decode_frame()
1611 vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]); in vp9_decode_frame()
1616 if (s->next_refs[i].f->buf[0]) in vp9_decode_frame()
1617 ff_thread_release_ext_buffer(avctx, &s->next_refs[i]); in vp9_decode_frame()
1618 if (s->s.h.refreshrefmask & (1 << i)) { in vp9_decode_frame()
1619 ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf); in vp9_decode_frame()
1620 } else if (s->s.refs[i].f->buf[0]) { in vp9_decode_frame()
1621 ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]); in vp9_decode_frame()
1641 memset(s->above_partition_ctx, 0, s->cols); in vp9_decode_frame()
1642 memset(s->above_skip_ctx, 0, s->cols); in vp9_decode_frame()
1643 if (s->s.h.keyframe || s->s.h.intraonly) { in vp9_decode_frame()
1644 memset(s->above_mode_ctx, DC_PRED, s->cols * 2); in vp9_decode_frame()
1646 memset(s->above_mode_ctx, NEARESTMV, s->cols); in vp9_decode_frame()
1648 memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16); in vp9_decode_frame()
1649 memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h); in vp9_decode_frame()
1650 memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h); in vp9_decode_frame()
1651 memset(s->above_segpred_ctx, 0, s->cols); in vp9_decode_frame()
1652 s->pass = s->s.frames[CUR_FRAME].uses_2pass = in vp9_decode_frame()
1653 avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode; in vp9_decode_frame()
1659 if (s->s.h.refreshctx && s->s.h.parallelmode) { in vp9_decode_frame()
1667 memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m], in vp9_decode_frame()
1668 s->prob.coef[i][j][k][l][m], 3); in vp9_decode_frame()
1669 if (s->s.h.txfmmode == i) in vp9_decode_frame()
1672 s->prob_ctx[s->s.h.framectxid].p = s->prob.p; in vp9_decode_frame()
1674 } else if (!s->s.h.refreshctx) { in vp9_decode_frame()
1680 for (i = 0; i < s->sb_rows; i++) in vp9_decode_frame()
1681 atomic_store(&s->entries[i], 0); in vp9_decode_frame()
1686 for (i = 0; i < s->active_tile_cols; i++) { in vp9_decode_frame()
1687 s->td[i].b = s->td[i].b_base; in vp9_decode_frame()
1688 s->td[i].block = s->td[i].block_base; in vp9_decode_frame()
1689 s->td[i].uvblock[0] = s->td[i].uvblock_base[0]; in vp9_decode_frame()
1690 s->td[i].uvblock[1] = s->td[i].uvblock_base[1]; in vp9_decode_frame()
1691 s->td[i].eob = s->td[i].eob_base; in vp9_decode_frame()
1692 s->td[i].uveob[0] = s->td[i].uveob_base[0]; in vp9_decode_frame()
1693 s->td[i].uveob[1] = s->td[i].uveob_base[1]; in vp9_decode_frame()
1694 s->td[i].error_info = 0; in vp9_decode_frame()
1701 av_assert1(!s->pass); in vp9_decode_frame()
1703 for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) { in vp9_decode_frame()
1704 for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) { in vp9_decode_frame()
1707 if (tile_col == s->s.h.tiling.tile_cols - 1 && in vp9_decode_frame()
1708 tile_row == s->s.h.tiling.tile_rows - 1) { in vp9_decode_frame()
1717 … ret = ff_vp56_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size); in vp9_decode_frame()
1720 … if (vp56_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit in vp9_decode_frame()
1727 …read_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.til… in vp9_decode_frame()
1733 ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0); in vp9_decode_frame()
1740 for (i = 1; i < s->s.h.tiling.tile_cols; i++) in vp9_decode_frame()
1741 for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++) in vp9_decode_frame()
1742 ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j]; in vp9_decode_frame()
1744 if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) { in vp9_decode_frame()
1745 ff_vp9_adapt_probs(s); in vp9_decode_frame()
1748 } while (s->pass++ == 1); in vp9_decode_frame()
1749 ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0); in vp9_decode_frame()
1751 if (s->td->error_info < 0) { in vp9_decode_frame()
1753 s->td->error_info = 0; in vp9_decode_frame()
1757 ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]); in vp9_decode_frame()
1765 if (s->s.refs[i].f->buf[0]) in vp9_decode_frame()
1766 ff_thread_release_ext_buffer(avctx, &s->s.refs[i]); in vp9_decode_frame()
1767 if (s->next_refs[i].f->buf[0] && in vp9_decode_frame()
1768 (ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0) in vp9_decode_frame()
1772 if (!s->s.h.invisible) { in vp9_decode_frame()
1773 if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0) in vp9_decode_frame()
1783 VP9Context *s = avctx->priv_data; in vp9_decode_flush() local
1787 vp9_frame_unref(avctx, &s->s.frames[i]); in vp9_decode_flush()
1789 ff_thread_release_ext_buffer(avctx, &s->s.refs[i]); in vp9_decode_flush()
1794 VP9Context *s = avctx->priv_data; in vp9_decode_init() local
1797 s->last_bpp = 0; in vp9_decode_init()
1798 s->s.h.filter.sharpness = -1; in vp9_decode_init()
1802 ret = ff_pthread_init(s, vp9_context_offsets); in vp9_decode_init()
1809 s->s.frames[i].tf.f = av_frame_alloc(); in vp9_decode_init()
1810 if (!s->s.frames[i].tf.f) in vp9_decode_init()
1814 s->s.refs[i].f = av_frame_alloc(); in vp9_decode_init()
1815 s->next_refs[i].f = av_frame_alloc(); in vp9_decode_init()
1816 if (!s->s.refs[i].f || !s->next_refs[i].f) in vp9_decode_init()
1826 VP9Context *s = dst->priv_data, *ssrc = src->priv_data; in vp9_decode_update_thread_context() local
1829 if (s->s.frames[i].tf.f->buf[0]) in vp9_decode_update_thread_context()
1830 vp9_frame_unref(dst, &s->s.frames[i]); in vp9_decode_update_thread_context()
1831 if (ssrc->s.frames[i].tf.f->buf[0]) { in vp9_decode_update_thread_context()
1832 if ((ret = vp9_frame_ref(dst, &s->s.frames[i], &ssrc->s.frames[i])) < 0) in vp9_decode_update_thread_context()
1837 if (s->s.refs[i].f->buf[0]) in vp9_decode_update_thread_context()
1838 ff_thread_release_ext_buffer(dst, &s->s.refs[i]); in vp9_decode_update_thread_context()
1840 if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0) in vp9_decode_update_thread_context()
1845 s->s.h.invisible = ssrc->s.h.invisible; in vp9_decode_update_thread_context()
1846 s->s.h.keyframe = ssrc->s.h.keyframe; in vp9_decode_update_thread_context()
1847 s->s.h.intraonly = ssrc->s.h.intraonly; in vp9_decode_update_thread_context()
1848 s->ss_v = ssrc->ss_v; in vp9_decode_update_thread_context()
1849 s->ss_h = ssrc->ss_h; in vp9_decode_update_thread_context()
1850 s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled; in vp9_decode_update_thread_context()
1851 s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map; in vp9_decode_update_thread_context()
1852 s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals; in vp9_decode_update_thread_context()
1853 s->bytesperpixel = ssrc->bytesperpixel; in vp9_decode_update_thread_context()
1854 s->gf_fmt = ssrc->gf_fmt; in vp9_decode_update_thread_context()
1855 s->w = ssrc->w; in vp9_decode_update_thread_context()
1856 s->h = ssrc->h; in vp9_decode_update_thread_context()
1857 s->s.h.bpp = ssrc->s.h.bpp; in vp9_decode_update_thread_context()
1858 s->bpp_index = ssrc->bpp_index; in vp9_decode_update_thread_context()
1859 s->pix_fmt = ssrc->pix_fmt; in vp9_decode_update_thread_context()
1860 memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx)); in vp9_decode_update_thread_context()
1861 memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta)); in vp9_decode_update_thread_context()
1862 memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat, in vp9_decode_update_thread_context()
1863 sizeof(s->s.h.segmentation.feat)); in vp9_decode_update_thread_context()