Lines Matching refs:dec

20 static void ReconstructRow(const VP8Decoder* const dec,
33 static void DoFilter(const VP8Decoder* const dec, int mb_x, int mb_y) { in DoFilter() argument
34 const VP8ThreadContext* const ctx = &dec->thread_ctx_; in DoFilter()
36 const int y_bps = dec->cache_y_stride_; in DoFilter()
38 uint8_t* const y_dst = dec->cache_y_ + cache_id * 16 * y_bps + mb_x * 16; in DoFilter()
45 if (dec->filter_type_ == 1) { // simple in DoFilter()
59 const int uv_bps = dec->cache_uv_stride_; in DoFilter()
60 uint8_t* const u_dst = dec->cache_u_ + cache_id * 8 * uv_bps + mb_x * 8; in DoFilter()
61 uint8_t* const v_dst = dec->cache_v_ + cache_id * 8 * uv_bps + mb_x * 8; in DoFilter()
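The destination pointers above show how DoFilter() addresses one macroblock inside the row cache: cache_id selects the cache line, mb_x the macroblock column, with 16x16 luma and 8x8 chroma blocks. A minimal standalone sketch of that addressing, using placeholder names rather than the libwebp API:

    #include <stdint.h>

    /* Illustrative only: the cache addressing used in DoFilter(). */
    static uint8_t* MBLumaDst(uint8_t* const cache_y, int y_stride,
                              int cache_id, int mb_x) {
      return cache_y + cache_id * 16 * y_stride + mb_x * 16;  /* 16x16 luma */
    }

    static uint8_t* MBChromaDst(uint8_t* const cache_uv, int uv_stride,
                                int cache_id, int mb_x) {
      return cache_uv + cache_id * 8 * uv_stride + mb_x * 8;  /* 8x8 chroma */
    }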
83 static void FilterRow(const VP8Decoder* const dec) { in FilterRow() argument
85 const int mb_y = dec->thread_ctx_.mb_y_; in FilterRow()
86 assert(dec->thread_ctx_.filter_row_); in FilterRow()
87 for (mb_x = dec->tl_mb_x_; mb_x < dec->br_mb_x_; ++mb_x) { in FilterRow()
88 DoFilter(dec, mb_x, mb_y); in FilterRow()
95 static void PrecomputeFilterStrengths(VP8Decoder* const dec) { in PrecomputeFilterStrengths() argument
96 if (dec->filter_type_ > 0) { in PrecomputeFilterStrengths()
98 const VP8FilterHeader* const hdr = &dec->filter_hdr_; in PrecomputeFilterStrengths()
103 if (dec->segment_hdr_.use_segment_) { in PrecomputeFilterStrengths()
104 base_level = dec->segment_hdr_.filter_strength_[s]; in PrecomputeFilterStrengths()
105 if (!dec->segment_hdr_.absolute_delta_) { in PrecomputeFilterStrengths()
112 VP8FInfo* const info = &dec->fstrengths_[s][i4x4]; in PrecomputeFilterStrengths()
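The segment branch above selects a per-segment base filter level: the segment strength either replaces the frame-level strength or, when absolute_delta_ is not set, acts as a delta on it. A rough standalone sketch under that reading (the helper name and the clamp to the VP8 level range [0, 63] are assumptions):

    /* Illustrative only: pick the base loop-filter level for one segment. */
    static int BaseFilterLevel(int frame_level, int seg_strength,
                               int use_segment, int absolute_delta) {
      int level = frame_level;
      if (use_segment) {
        level = absolute_delta ? seg_strength : frame_level + seg_strength;
      }
      if (level < 0) level = 0;
      if (level > 63) level = 63;   /* VP8 filter levels fit in 6 bits */
      return level;
    }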
157 VP8Decoder* const dec) { in VP8InitDithering() argument
158 assert(dec != NULL); in VP8InitDithering()
167 VP8QuantMatrix* const dqm = &dec->dqm_[s]; in VP8InitDithering()
176 VP8InitRandom(&dec->dithering_rg_, 1.0f); in VP8InitDithering()
177 dec->dither_ = 1; in VP8InitDithering()
182 dec->alpha_dithering_ = options->alpha_dithering_strength; in VP8InitDithering()
183 if (dec->alpha_dithering_ > 100) { in VP8InitDithering()
184 dec->alpha_dithering_ = 100; in VP8InitDithering()
185 } else if (dec->alpha_dithering_ < 0) { in VP8InitDithering()
186 dec->alpha_dithering_ = 0; in VP8InitDithering()
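Lines 183-186 simply clamp the caller-supplied alpha dithering strength to [0, 100]. A trivial standalone equivalent (hypothetical helper name):

    /* Illustrative clamp of options->alpha_dithering_strength. */
    static int ClampAlphaDithering(int strength) {
      if (strength > 100) return 100;
      if (strength < 0) return 0;
      return strength;
    }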
215 static void DitherRow(VP8Decoder* const dec) { in DitherRow() argument
217 assert(dec->dither_); in DitherRow()
218 for (mb_x = dec->tl_mb_x_; mb_x < dec->br_mb_x_; ++mb_x) { in DitherRow()
219 const VP8ThreadContext* const ctx = &dec->thread_ctx_; in DitherRow()
222 const int uv_bps = dec->cache_uv_stride_; in DitherRow()
224 uint8_t* const u_dst = dec->cache_u_ + cache_id * 8 * uv_bps + mb_x * 8; in DitherRow()
225 uint8_t* const v_dst = dec->cache_v_ + cache_id * 8 * uv_bps + mb_x * 8; in DitherRow()
226 Dither8x8(&dec->dithering_rg_, u_dst, uv_bps, data->dither_); in DitherRow()
227 Dither8x8(&dec->dithering_rg_, v_dst, uv_bps, data->dither_); in DitherRow()
246 static int FinishRow(VP8Decoder* const dec, VP8Io* const io) { in FinishRow() argument
248 const VP8ThreadContext* const ctx = &dec->thread_ctx_; in FinishRow()
250 const int extra_y_rows = kFilterExtraRows[dec->filter_type_]; in FinishRow()
251 const int ysize = extra_y_rows * dec->cache_y_stride_; in FinishRow()
252 const int uvsize = (extra_y_rows / 2) * dec->cache_uv_stride_; in FinishRow()
253 const int y_offset = cache_id * 16 * dec->cache_y_stride_; in FinishRow()
254 const int uv_offset = cache_id * 8 * dec->cache_uv_stride_; in FinishRow()
255 uint8_t* const ydst = dec->cache_y_ - ysize + y_offset; in FinishRow()
256 uint8_t* const udst = dec->cache_u_ - uvsize + uv_offset; in FinishRow()
257 uint8_t* const vdst = dec->cache_v_ - uvsize + uv_offset; in FinishRow()
260 const int is_last_row = (mb_y >= dec->br_mb_y_ - 1); in FinishRow()
262 if (dec->mt_method_ == 2) { in FinishRow()
263 ReconstructRow(dec, ctx); in FinishRow()
267 FilterRow(dec); in FinishRow()
270 if (dec->dither_) { in FinishRow()
271 DitherRow(dec); in FinishRow()
283 io->y = dec->cache_y_ + y_offset; in FinishRow()
284 io->u = dec->cache_u_ + uv_offset; in FinishRow()
285 io->v = dec->cache_v_ + uv_offset; in FinishRow()
295 if (dec->alpha_data_ != NULL && y_start < y_end) { in FinishRow()
298 io->a = VP8DecompressAlphaRows(dec, y_start, y_end - y_start); in FinishRow()
300 return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR, in FinishRow()
308 io->y += dec->cache_y_stride_ * delta_y; in FinishRow()
309 io->u += dec->cache_uv_stride_ * (delta_y >> 1); in FinishRow()
310 io->v += dec->cache_uv_stride_ * (delta_y >> 1); in FinishRow()
329 if (cache_id + 1 == dec->num_caches_) { in FinishRow()
331 memcpy(dec->cache_y_ - ysize, ydst + 16 * dec->cache_y_stride_, ysize); in FinishRow()
332 memcpy(dec->cache_u_ - uvsize, udst + 8 * dec->cache_uv_stride_, uvsize); in FinishRow()
333 memcpy(dec->cache_v_ - uvsize, vdst + 8 * dec->cache_uv_stride_, uvsize); in FinishRow()
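The memcpy() calls at lines 331-333 preserve filtering context: once the last cache line of a batch has been emitted, the bottom extra_y_rows of luma (and half as many chroma rows) are copied to just above the cache so the next batch can be loop-filtered across the row boundary. A standalone sketch of that pattern, assuming the cache pointers have that much headroom before them (placeholder names, not the libwebp API):

    #include <stdint.h>
    #include <string.h>

    /* Illustrative only: save the bottom 'extra_rows' luma rows and
     * extra_rows/2 chroma rows of the current batch just above the caches,
     * mirroring the memcpy() calls at the end of FinishRow(). */
    static void SaveFilterContext(uint8_t* const cache_y, int y_stride,
                                  uint8_t* const cache_u,
                                  uint8_t* const cache_v, int uv_stride,
                                  int num_caches, int extra_rows) {
      const int ysize = extra_rows * y_stride;
      const int uvsize = (extra_rows / 2) * uv_stride;
      memcpy(cache_y - ysize, cache_y + num_caches * 16 * y_stride - ysize,
             (size_t)ysize);
      memcpy(cache_u - uvsize, cache_u + num_caches * 8 * uv_stride - uvsize,
             (size_t)uvsize);
      memcpy(cache_v - uvsize, cache_v + num_caches * 8 * uv_stride - uvsize,
             (size_t)uvsize);
    }

Copying only the boundary rows keeps the cache small while still giving the in-loop filter the rows it needs from the previous batch.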
344 int VP8ProcessRow(VP8Decoder* const dec, VP8Io* const io) { in VP8ProcessRow() argument
346 VP8ThreadContext* const ctx = &dec->thread_ctx_; in VP8ProcessRow()
348 (dec->filter_type_ > 0) && in VP8ProcessRow()
349 (dec->mb_y_ >= dec->tl_mb_y_) && (dec->mb_y_ <= dec->br_mb_y_); in VP8ProcessRow()
350 if (dec->mt_method_ == 0) { in VP8ProcessRow()
352 ctx->mb_y_ = dec->mb_y_; in VP8ProcessRow()
354 ReconstructRow(dec, ctx); in VP8ProcessRow()
355 ok = FinishRow(dec, io); in VP8ProcessRow()
357 WebPWorker* const worker = &dec->worker_; in VP8ProcessRow()
363 ctx->id_ = dec->cache_id_; in VP8ProcessRow()
364 ctx->mb_y_ = dec->mb_y_; in VP8ProcessRow()
366 if (dec->mt_method_ == 2) { // swap macroblock data in VP8ProcessRow()
368 ctx->mb_data_ = dec->mb_data_; in VP8ProcessRow()
369 dec->mb_data_ = tmp; in VP8ProcessRow()
372 ReconstructRow(dec, ctx); in VP8ProcessRow()
376 ctx->f_info_ = dec->f_info_; in VP8ProcessRow()
377 dec->f_info_ = tmp; in VP8ProcessRow()
381 if (++dec->cache_id_ == dec->num_caches_) { in VP8ProcessRow()
382 dec->cache_id_ = 0; in VP8ProcessRow()
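Lines 366-377 hand a finished row to the worker by swapping pointers rather than copying buffers: the thread context keeps one row's macroblock data and filter info while the main thread refills the other. A minimal sketch of that double-buffer swap (illustrative types, not libwebp's):

    /* Illustrative only: the pointer exchange done in VP8ProcessRow()
     * when two rows are in flight (mt_method_ == 2). */
    struct RowBuffers {
      void* main_row;    /* row being parsed by the main thread */
      void* worker_row;  /* row being reconstructed/filtered by the worker */
    };

    static void SwapRowBuffers(struct RowBuffers* const b) {
      void* const tmp = b->worker_row;
      b->worker_row = b->main_row;
      b->main_row = tmp;
    }

Swapping pointers lets the main thread start parsing the next row immediately, without copying the data handed to the worker.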
392 VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io) { in VP8EnterCritical() argument
396 VP8SetError(dec, VP8_STATUS_USER_ABORT, "Frame setup failed"); in VP8EnterCritical()
397 return dec->status_; in VP8EnterCritical()
402 dec->filter_type_ = 0; in VP8EnterCritical()
418 const int extra_pixels = kFilterExtraRows[dec->filter_type_]; in VP8EnterCritical()
419 if (dec->filter_type_ == 2) { in VP8EnterCritical()
421 dec->tl_mb_x_ = 0; in VP8EnterCritical()
422 dec->tl_mb_y_ = 0; in VP8EnterCritical()
428 dec->tl_mb_x_ = (io->crop_left - extra_pixels) >> 4; in VP8EnterCritical()
429 dec->tl_mb_y_ = (io->crop_top - extra_pixels) >> 4; in VP8EnterCritical()
430 if (dec->tl_mb_x_ < 0) dec->tl_mb_x_ = 0; in VP8EnterCritical()
431 if (dec->tl_mb_y_ < 0) dec->tl_mb_y_ = 0; in VP8EnterCritical()
434 dec->br_mb_y_ = (io->crop_bottom + 15 + extra_pixels) >> 4; in VP8EnterCritical()
435 dec->br_mb_x_ = (io->crop_right + 15 + extra_pixels) >> 4; in VP8EnterCritical()
436 if (dec->br_mb_x_ > dec->mb_w_) { in VP8EnterCritical()
437 dec->br_mb_x_ = dec->mb_w_; in VP8EnterCritical()
439 if (dec->br_mb_y_ > dec->mb_h_) { in VP8EnterCritical()
440 dec->br_mb_y_ = dec->mb_h_; in VP8EnterCritical()
443 PrecomputeFilterStrengths(dec); in VP8EnterCritical()
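Lines 421-440 derive the macroblock range to process from the cropping rectangle: the rectangle is expanded by the filter margin, converted from pixels to 16x16 macroblock units, and clamped to the frame dimensions. A standalone sketch of the same arithmetic (placeholder names, not the libwebp API):

    /* Illustrative only: the macroblock-range computation in
     * VP8EnterCritical(). */
    typedef struct {
      int tl_mb_x, tl_mb_y;  /* first macroblock (top-left) to process */
      int br_mb_x, br_mb_y;  /* one past the last macroblock (bottom-right) */
    } MBRange;

    static MBRange ComputeMBRange(int crop_left, int crop_top,
                                  int crop_right, int crop_bottom,
                                  int extra_pixels, int mb_w, int mb_h) {
      MBRange r;
      r.tl_mb_x = (crop_left - extra_pixels) >> 4;
      r.tl_mb_y = (crop_top - extra_pixels) >> 4;
      if (r.tl_mb_x < 0) r.tl_mb_x = 0;
      if (r.tl_mb_y < 0) r.tl_mb_y = 0;
      r.br_mb_x = (crop_right + 15 + extra_pixels) >> 4;   /* round up to MB */
      r.br_mb_y = (crop_bottom + 15 + extra_pixels) >> 4;
      if (r.br_mb_x > mb_w) r.br_mb_x = mb_w;
      if (r.br_mb_y > mb_h) r.br_mb_y = mb_h;
      return r;
    }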
447 int VP8ExitCritical(VP8Decoder* const dec, VP8Io* const io) { in VP8ExitCritical() argument
449 if (dec->mt_method_ > 0) { in VP8ExitCritical()
450 ok = WebPGetWorkerInterface()->Sync(&dec->worker_); in VP8ExitCritical()
487 static int InitThreadContext(VP8Decoder* const dec) { in InitThreadContext() argument
488 dec->cache_id_ = 0; in InitThreadContext()
489 if (dec->mt_method_ > 0) { in InitThreadContext()
490 WebPWorker* const worker = &dec->worker_; in InitThreadContext()
492 return VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY, in InitThreadContext()
495 worker->data1 = dec; in InitThreadContext()
496 worker->data2 = (void*)&dec->thread_ctx_.io_; in InitThreadContext()
498 dec->num_caches_ = in InitThreadContext()
499 (dec->filter_type_ > 0) ? MT_CACHE_LINES : MT_CACHE_LINES - 1; in InitThreadContext()
501 dec->num_caches_ = ST_CACHE_LINES; in InitThreadContext()
534 static int AllocateMemory(VP8Decoder* const dec) { in AllocateMemory() argument
535 const int num_caches = dec->num_caches_; in AllocateMemory()
536 const int mb_w = dec->mb_w_; in AllocateMemory()
542 (dec->filter_type_ > 0) ? in AllocateMemory()
543 mb_w * (dec->mt_method_ > 0 ? 2 : 1) * sizeof(VP8FInfo) in AllocateMemory()
545 const size_t yuv_size = YUV_SIZE * sizeof(*dec->yuv_b_); in AllocateMemory()
547 (dec->mt_method_ == 2 ? 2 : 1) * mb_w * sizeof(*dec->mb_data_); in AllocateMemory()
549 + kFilterExtraRows[dec->filter_type_]) * 3 / 2; in AllocateMemory()
552 const uint64_t alpha_size = (dec->alpha_data_ != NULL) ? in AllocateMemory()
553 (uint64_t)dec->pic_hdr_.width_ * dec->pic_hdr_.height_ : 0ULL; in AllocateMemory()
561 if (needed > dec->mem_size_) { in AllocateMemory()
562 WebPSafeFree(dec->mem_); in AllocateMemory()
563 dec->mem_size_ = 0; in AllocateMemory()
564 dec->mem_ = WebPSafeMalloc(needed, sizeof(uint8_t)); in AllocateMemory()
565 if (dec->mem_ == NULL) { in AllocateMemory()
566 return VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY, in AllocateMemory()
570 dec->mem_size_ = (size_t)needed; in AllocateMemory()
573 mem = (uint8_t*)dec->mem_; in AllocateMemory()
574 dec->intra_t_ = (uint8_t*)mem; in AllocateMemory()
577 dec->yuv_t_ = (VP8TopSamples*)mem; in AllocateMemory()
580 dec->mb_info_ = ((VP8MB*)mem) + 1; in AllocateMemory()
583 dec->f_info_ = f_info_size ? (VP8FInfo*)mem : NULL; in AllocateMemory()
585 dec->thread_ctx_.id_ = 0; in AllocateMemory()
586 dec->thread_ctx_.f_info_ = dec->f_info_; in AllocateMemory()
587 if (dec->mt_method_ > 0) { in AllocateMemory()
591 dec->thread_ctx_.f_info_ += mb_w; in AllocateMemory()
596 dec->yuv_b_ = (uint8_t*)mem; in AllocateMemory()
599 dec->mb_data_ = (VP8MBData*)mem; in AllocateMemory()
600 dec->thread_ctx_.mb_data_ = (VP8MBData*)mem; in AllocateMemory()
601 if (dec->mt_method_ == 2) { in AllocateMemory()
602 dec->thread_ctx_.mb_data_ += mb_w; in AllocateMemory()
606 dec->cache_y_stride_ = 16 * mb_w; in AllocateMemory()
607 dec->cache_uv_stride_ = 8 * mb_w; in AllocateMemory()
609 const int extra_rows = kFilterExtraRows[dec->filter_type_]; in AllocateMemory()
610 const int extra_y = extra_rows * dec->cache_y_stride_; in AllocateMemory()
611 const int extra_uv = (extra_rows / 2) * dec->cache_uv_stride_; in AllocateMemory()
612 dec->cache_y_ = ((uint8_t*)mem) + extra_y; in AllocateMemory()
613 dec->cache_u_ = dec->cache_y_ in AllocateMemory()
614 + 16 * num_caches * dec->cache_y_stride_ + extra_uv; in AllocateMemory()
615 dec->cache_v_ = dec->cache_u_ in AllocateMemory()
616 + 8 * num_caches * dec->cache_uv_stride_ + extra_uv; in AllocateMemory()
617 dec->cache_id_ = 0; in AllocateMemory()
622 dec->alpha_plane_ = alpha_size ? (uint8_t*)mem : NULL; in AllocateMemory()
624 assert(mem <= (uint8_t*)dec->mem_ + dec->mem_size_); in AllocateMemory()
627 memset(dec->mb_info_ - 1, 0, mb_info_size); in AllocateMemory()
628 VP8InitScanline(dec); // initialize left too. in AllocateMemory()
631 memset(dec->intra_t_, B_DC_PRED, intra_pred_mode_size); in AllocateMemory()
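The layout at lines 606-617 places the row caches inside a single arena, leaving extra_rows of luma (and extra_rows/2 of chroma) headroom above each plane so the loop filter can read rows saved from the previous batch. A simplified carving of such an arena, following the same stride and offset arithmetic (the names and the reduced, caches-only layout are assumptions):

    #include <stdint.h>
    #include <stdlib.h>

    /* Illustrative only: allocate and carve a caches-only arena with the
     * strides and headroom used at the end of AllocateMemory(). */
    typedef struct {
      uint8_t* mem;
      uint8_t *cache_y, *cache_u, *cache_v;
      int y_stride, uv_stride;
    } RowCache;

    static int RowCacheInit(RowCache* const c, int mb_w, int num_caches,
                            int extra_rows) {
      const int y_stride = 16 * mb_w;
      const int uv_stride = 8 * mb_w;
      const size_t extra_y = (size_t)extra_rows * y_stride;
      const size_t extra_uv = (size_t)(extra_rows / 2) * uv_stride;
      const size_t total = extra_y + (size_t)16 * num_caches * y_stride +
                           2 * (extra_uv + (size_t)8 * num_caches * uv_stride);
      c->mem = (uint8_t*)malloc(total);
      if (c->mem == NULL) return 0;
      c->y_stride = y_stride;
      c->uv_stride = uv_stride;
      c->cache_y = c->mem + extra_y;
      c->cache_u = c->cache_y + 16 * num_caches * y_stride + extra_uv;
      c->cache_v = c->cache_u + 8 * num_caches * uv_stride + extra_uv;
      return 1;
    }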
636 static void InitIo(VP8Decoder* const dec, VP8Io* io) { in InitIo() argument
639 io->y = dec->cache_y_; in InitIo()
640 io->u = dec->cache_u_; in InitIo()
641 io->v = dec->cache_v_; in InitIo()
642 io->y_stride = dec->cache_y_stride_; in InitIo()
643 io->uv_stride = dec->cache_uv_stride_; in InitIo()
647 int VP8InitFrame(VP8Decoder* const dec, VP8Io* io) { in VP8InitFrame() argument
648 if (!InitThreadContext(dec)) return 0; // call first. Sets dec->num_caches_. in VP8InitFrame()
649 if (!AllocateMemory(dec)) return 0; in VP8InitFrame()
650 InitIo(dec, io); in VP8InitFrame()
708 static void ReconstructRow(const VP8Decoder* const dec, in ReconstructRow() argument
714 uint8_t* const y_dst = dec->yuv_b_ + Y_OFF; in ReconstructRow()
715 uint8_t* const u_dst = dec->yuv_b_ + U_OFF; in ReconstructRow()
716 uint8_t* const v_dst = dec->yuv_b_ + V_OFF; in ReconstructRow()
717 for (mb_x = 0; mb_x < dec->mb_w_; ++mb_x) { in ReconstructRow()
745 VP8TopSamples* const top_yuv = dec->yuv_t_ + mb_x; in ReconstructRow()
767 if (mb_x >= dec->mb_w_ - 1) { // on rightmost border in ReconstructRow()
803 if (mb_y < dec->mb_h_ - 1) { in ReconstructRow()
811 const int y_offset = cache_id * 16 * dec->cache_y_stride_; in ReconstructRow()
812 const int uv_offset = cache_id * 8 * dec->cache_uv_stride_; in ReconstructRow()
813 uint8_t* const y_out = dec->cache_y_ + mb_x * 16 + y_offset; in ReconstructRow()
814 uint8_t* const u_out = dec->cache_u_ + mb_x * 8 + uv_offset; in ReconstructRow()
815 uint8_t* const v_out = dec->cache_v_ + mb_x * 8 + uv_offset; in ReconstructRow()
817 memcpy(y_out + j * dec->cache_y_stride_, y_dst + j * BPS, 16); in ReconstructRow()
820 memcpy(u_out + j * dec->cache_uv_stride_, u_dst + j * BPS, 8); in ReconstructRow()
821 memcpy(v_out + j * dec->cache_uv_stride_, v_dst + j * BPS, 8); in ReconstructRow()
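Lines 811-821 copy the reconstructed macroblock from the BPS-strided work buffer yuv_b_ into the row cache. A standalone sketch of that copy (placeholder names, with bps standing in for the work-buffer stride):

    #include <stdint.h>
    #include <string.h>

    /* Illustrative only: store one reconstructed macroblock into the row
     * cache, mirroring the memcpy() loops at the end of ReconstructRow(). */
    static void StoreMacroblock(const uint8_t* y_src, const uint8_t* u_src,
                                const uint8_t* v_src, int bps,
                                uint8_t* y_out, int y_stride,
                                uint8_t* u_out, uint8_t* v_out, int uv_stride) {
      int j;
      for (j = 0; j < 16; ++j) {                      /* 16x16 luma block */
        memcpy(y_out + j * y_stride, y_src + j * bps, 16);
      }
      for (j = 0; j < 8; ++j) {                       /* 8x8 chroma blocks */
        memcpy(u_out + j * uv_stride, u_src + j * bps, 8);
        memcpy(v_out + j * uv_stride, v_src + j * bps, 8);
      }
    }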