1 /*
2 * Error resilience / concealment
3 *
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 /**
24 * @file
25 * Error resilience / concealment.
26 */
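
/*
 * Rough usage sketch (the exact call sites vary per decoder; this assumes the
 * caller keeps an ERContext named "er" in its private context "ctx"):
 *
 *     ff_er_frame_start(&ctx->er);
 *     // decode the frame; after each slice report what was decoded:
 *     ff_er_add_slice(&ctx->er, startx, starty, endx, endy, ER_MB_END);
 *     // once the whole frame has been processed:
 *     ff_er_frame_end(&ctx->er);
 */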
27
28 #include <limits.h>
29
30 #include "libavutil/internal.h"
31 #include "avcodec.h"
32 #include "error_resilience.h"
33 #include "me_cmp.h"
34 #include "mpegutils.h"
35 #include "mpegvideo.h"
36 #include "rectangle.h"
37 #include "thread.h"
38 #include "version.h"
39
40 /**
41 * @param stride the number of MVs to get to the next row
42 * @param mv_step the number of MVs per row or column in a macroblock
43 */
44 static void set_mv_strides(ERContext *s, ptrdiff_t *mv_step, ptrdiff_t *stride)
45 {
46 if (s->avctx->codec_id == AV_CODEC_ID_H264) {
47 av_assert0(s->quarter_sample);
48 *mv_step = 4;
49 *stride = s->mb_width * 4;
50 } else {
51 *mv_step = 2;
52 *stride = s->b8_stride;
53 }
54 }
55
56 /**
57 * Replace the current MB with a flat DC-only version.
58 */
59 static void put_dc(ERContext *s, uint8_t *dest_y, uint8_t *dest_cb,
60 uint8_t *dest_cr, int mb_x, int mb_y)
61 {
62 int *linesize = s->cur_pic.f->linesize;
63 int dc, dcu, dcv, y, i;
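/* dc_val[] stores 8x the average of each 8x8 block (see the DC fill loop in
 * ff_er_frame_end()), hence the clamp to 8 * 255 = 2040 and the /8 below */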
64 for (i = 0; i < 4; i++) {
65 dc = s->dc_val[0][mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * s->b8_stride];
66 if (dc < 0)
67 dc = 0;
68 else if (dc > 2040)
69 dc = 2040;
70 for (y = 0; y < 8; y++) {
71 int x;
72 for (x = 0; x < 8; x++)
73 dest_y[x + (i & 1) * 8 + (y + (i >> 1) * 8) * linesize[0]] = dc / 8;
74 }
75 }
76 dcu = s->dc_val[1][mb_x + mb_y * s->mb_stride];
77 dcv = s->dc_val[2][mb_x + mb_y * s->mb_stride];
78 if (dcu < 0)
79 dcu = 0;
80 else if (dcu > 2040)
81 dcu = 2040;
82 if (dcv < 0)
83 dcv = 0;
84 else if (dcv > 2040)
85 dcv = 2040;
86
87 if (dest_cr)
88 for (y = 0; y < 8; y++) {
89 int x;
90 for (x = 0; x < 8; x++) {
91 dest_cb[x + y * linesize[1]] = dcu / 8;
92 dest_cr[x + y * linesize[2]] = dcv / 8;
93 }
94 }
95 }
96
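/**
 * Simple 3-tap filtering of the DC values: out = (8 * cur - prev - next) / 6
 * (the multiply by 10923 and >> 16 approximate the division by 6), applied
 * first along rows and then along columns; the outermost samples are kept.
 */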
97 static void filter181(int16_t *data, int width, int height, ptrdiff_t stride)
98 {
99 int x, y;
100
101 /* horizontal filter */
102 for (y = 1; y < height - 1; y++) {
103 int prev_dc = data[0 + y * stride];
104
105 for (x = 1; x < width - 1; x++) {
106 int dc;
107 dc = -prev_dc +
108 data[x + y * stride] * 8 -
109 data[x + 1 + y * stride];
110 dc = (av_clip(dc, INT_MIN/10923, INT_MAX/10923 - 32768) * 10923 + 32768) >> 16;
111 prev_dc = data[x + y * stride];
112 data[x + y * stride] = dc;
113 }
114 }
115
116 /* vertical filter */
117 for (x = 1; x < width - 1; x++) {
118 int prev_dc = data[x];
119
120 for (y = 1; y < height - 1; y++) {
121 int dc;
122
123 dc = -prev_dc +
124 data[x + y * stride] * 8 -
125 data[x + (y + 1) * stride];
126 dc = (av_clip(dc, INT_MIN/10923, INT_MAX/10923 - 32768) * 10923 + 32768) >> 16;
127 prev_dc = data[x + y * stride];
128 data[x + y * stride] = dc;
129 }
130 }
131 }
132
133 /**
134 * Guess the DC of blocks which do not have an undamaged DC.
135 * @param w width in 8 pixel blocks
136 * @param h height in 8 pixel blocks
137 */
138 static void guess_dc(ERContext *s, int16_t *dc, int w,
139 int h, ptrdiff_t stride, int is_luma)
140 {
141 int b_x, b_y;
142 int16_t (*col )[4] = av_malloc_array(stride, h*sizeof( int16_t)*4);
143 uint32_t (*dist)[4] = av_malloc_array(stride, h*sizeof(uint32_t)*4);
144
145 if(!col || !dist) {
146 av_log(s->avctx, AV_LOG_ERROR, "guess_dc() is out of memory\n");
147 goto fail;
148 }
149
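/* For each block, scan in all four directions for the nearest block whose DC
 * is usable (the block is inter coded or its DC is undamaged) and remember
 * that DC value and its distance (1024 / 9999 when no such block exists). */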
150 for(b_y=0; b_y<h; b_y++){
151 int color= 1024;
152 int distance= -1;
153 for(b_x=0; b_x<w; b_x++){
154 int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
155 int error_j= s->error_status_table[mb_index_j];
156 int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
157 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
158 color= dc[b_x + b_y*stride];
159 distance= b_x;
160 }
161 col [b_x + b_y*stride][1]= color;
162 dist[b_x + b_y*stride][1]= distance >= 0 ? b_x-distance : 9999;
163 }
164 color= 1024;
165 distance= -1;
166 for(b_x=w-1; b_x>=0; b_x--){
167 int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
168 int error_j= s->error_status_table[mb_index_j];
169 int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
170 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
171 color= dc[b_x + b_y*stride];
172 distance= b_x;
173 }
174 col [b_x + b_y*stride][0]= color;
175 dist[b_x + b_y*stride][0]= distance >= 0 ? distance-b_x : 9999;
176 }
177 }
178 for(b_x=0; b_x<w; b_x++){
179 int color= 1024;
180 int distance= -1;
181 for(b_y=0; b_y<h; b_y++){
182 int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
183 int error_j= s->error_status_table[mb_index_j];
184 int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
185 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
186 color= dc[b_x + b_y*stride];
187 distance= b_y;
188 }
189 col [b_x + b_y*stride][3]= color;
190 dist[b_x + b_y*stride][3]= distance >= 0 ? b_y-distance : 9999;
191 }
192 color= 1024;
193 distance= -1;
194 for(b_y=h-1; b_y>=0; b_y--){
195 int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
196 int error_j= s->error_status_table[mb_index_j];
197 int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
198 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
199 color= dc[b_x + b_y*stride];
200 distance= b_y;
201 }
202 col [b_x + b_y*stride][2]= color;
203 dist[b_x + b_y*stride][2]= distance >= 0 ? distance-b_y : 9999;
204 }
205 }
206
207 for (b_y = 0; b_y < h; b_y++) {
208 for (b_x = 0; b_x < w; b_x++) {
209 int mb_index, error, j;
210 int64_t guess, weight_sum;
211 mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
212 error = s->error_status_table[mb_index];
213
214 if (IS_INTER(s->cur_pic.mb_type[mb_index]))
215 continue; // inter
216 if (!(error & ER_DC_ERROR))
217 continue; // dc-ok
218
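/* interpolate the missing DC as an inverse-distance weighted average of the
 * four candidates found above */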
219 weight_sum = 0;
220 guess = 0;
221 for (j = 0; j < 4; j++) {
222 int64_t weight = 256 * 256 * 256 * 16 / FFMAX(dist[b_x + b_y*stride][j], 1);
223 guess += weight*(int64_t)col[b_x + b_y*stride][j];
224 weight_sum += weight;
225 }
226 guess = (guess + weight_sum / 2) / weight_sum;
227 dc[b_x + b_y * stride] = guess;
228 }
229 }
230
231 fail:
232 av_freep(&col);
233 av_freep(&dist);
234 }
235
236 /**
237 * Simple horizontal deblocking filter used for error resilience.
238 * @param w width in 8 pixel blocks
239 * @param h height in 8 pixel blocks
240 */
241 static void h_block_filter(ERContext *s, uint8_t *dst, int w,
242 int h, ptrdiff_t stride, int is_luma)
243 {
244 int b_x, b_y;
245 ptrdiff_t mvx_stride, mvy_stride;
246 const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
247 set_mv_strides(s, &mvx_stride, &mvy_stride);
248 mvx_stride >>= is_luma;
249 mvy_stride *= mvx_stride;
250
251 for (b_y = 0; b_y < h; b_y++) {
252 for (b_x = 0; b_x < w - 1; b_x++) {
253 int y;
254 int left_status = s->error_status_table[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride];
255 int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride];
256 int left_intra = IS_INTRA(s->cur_pic.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
257 int right_intra = IS_INTRA(s->cur_pic.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
258 int left_damage = left_status & ER_MB_ERROR;
259 int right_damage = right_status & ER_MB_ERROR;
260 int offset = b_x * 8 + b_y * stride * 8;
261 int16_t *left_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
262 int16_t *right_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
263 if (!(left_damage || right_damage))
264 continue; // both undamaged
265 if ((!left_intra) && (!right_intra) &&
266 FFABS(left_mv[0] - right_mv[0]) +
267 FFABS(left_mv[1] + right_mv[1]) < 2)
268 continue;
269
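/* d is the step across the block boundary, reduced by the average of the two
 * gradients next to it so that genuine image edges are largely left alone;
 * it is then spread over the four pixels on each damaged side with weights
 * 7/16, 5/16, 3/16 and 1/16 */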
270 for (y = 0; y < 8; y++) {
271 int a, b, c, d;
272
273 a = dst[offset + 7 + y * stride] - dst[offset + 6 + y * stride];
274 b = dst[offset + 8 + y * stride] - dst[offset + 7 + y * stride];
275 c = dst[offset + 9 + y * stride] - dst[offset + 8 + y * stride];
276
277 d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
278 d = FFMAX(d, 0);
279 if (b < 0)
280 d = -d;
281
282 if (d == 0)
283 continue;
284
285 if (!(left_damage && right_damage))
286 d = d * 16 / 9;
287
288 if (left_damage) {
289 dst[offset + 7 + y * stride] = cm[dst[offset + 7 + y * stride] + ((d * 7) >> 4)];
290 dst[offset + 6 + y * stride] = cm[dst[offset + 6 + y * stride] + ((d * 5) >> 4)];
291 dst[offset + 5 + y * stride] = cm[dst[offset + 5 + y * stride] + ((d * 3) >> 4)];
292 dst[offset + 4 + y * stride] = cm[dst[offset + 4 + y * stride] + ((d * 1) >> 4)];
293 }
294 if (right_damage) {
295 dst[offset + 8 + y * stride] = cm[dst[offset + 8 + y * stride] - ((d * 7) >> 4)];
296 dst[offset + 9 + y * stride] = cm[dst[offset + 9 + y * stride] - ((d * 5) >> 4)];
297 dst[offset + 10+ y * stride] = cm[dst[offset + 10 + y * stride] - ((d * 3) >> 4)];
298 dst[offset + 11+ y * stride] = cm[dst[offset + 11 + y * stride] - ((d * 1) >> 4)];
299 }
300 }
301 }
302 }
303 }
304
305 /**
306 * Simple vertical deblocking filter used for error resilience.
307 * @param w width in 8 pixel blocks
308 * @param h height in 8 pixel blocks
309 */
310 static void v_block_filter(ERContext *s, uint8_t *dst, int w, int h,
311 ptrdiff_t stride, int is_luma)
312 {
313 int b_x, b_y;
314 ptrdiff_t mvx_stride, mvy_stride;
315 const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
316 set_mv_strides(s, &mvx_stride, &mvy_stride);
317 mvx_stride >>= is_luma;
318 mvy_stride *= mvx_stride;
319
320 for (b_y = 0; b_y < h - 1; b_y++) {
321 for (b_x = 0; b_x < w; b_x++) {
322 int x;
323 int top_status = s->error_status_table[(b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride];
324 int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride];
325 int top_intra = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
326 int bottom_intra = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
327 int top_damage = top_status & ER_MB_ERROR;
328 int bottom_damage = bottom_status & ER_MB_ERROR;
329 int offset = b_x * 8 + b_y * stride * 8;
330
331 int16_t *top_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
332 int16_t *bottom_mv = s->cur_pic.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
333
334 if (!(top_damage || bottom_damage))
335 continue; // both undamaged
336
337 if ((!top_intra) && (!bottom_intra) &&
338 FFABS(top_mv[0] - bottom_mv[0]) +
339 FFABS(top_mv[1] + bottom_mv[1]) < 2)
340 continue;
341
342 for (x = 0; x < 8; x++) {
343 int a, b, c, d;
344
345 a = dst[offset + x + 7 * stride] - dst[offset + x + 6 * stride];
346 b = dst[offset + x + 8 * stride] - dst[offset + x + 7 * stride];
347 c = dst[offset + x + 9 * stride] - dst[offset + x + 8 * stride];
348
349 d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
350 d = FFMAX(d, 0);
351 if (b < 0)
352 d = -d;
353
354 if (d == 0)
355 continue;
356
357 if (!(top_damage && bottom_damage))
358 d = d * 16 / 9;
359
360 if (top_damage) {
361 dst[offset + x + 7 * stride] = cm[dst[offset + x + 7 * stride] + ((d * 7) >> 4)];
362 dst[offset + x + 6 * stride] = cm[dst[offset + x + 6 * stride] + ((d * 5) >> 4)];
363 dst[offset + x + 5 * stride] = cm[dst[offset + x + 5 * stride] + ((d * 3) >> 4)];
364 dst[offset + x + 4 * stride] = cm[dst[offset + x + 4 * stride] + ((d * 1) >> 4)];
365 }
366 if (bottom_damage) {
367 dst[offset + x + 8 * stride] = cm[dst[offset + x + 8 * stride] - ((d * 7) >> 4)];
368 dst[offset + x + 9 * stride] = cm[dst[offset + x + 9 * stride] - ((d * 5) >> 4)];
369 dst[offset + x + 10 * stride] = cm[dst[offset + x + 10 * stride] - ((d * 3) >> 4)];
370 dst[offset + x + 11 * stride] = cm[dst[offset + x + 11 * stride] - ((d * 1) >> 4)];
371 }
372 }
373 }
374 }
375 }
376
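/* per-MB state used by guess_mv():
 * MV_FROZEN    - MV is known good (intra / undamaged) or has been finalized
 * MV_CHANGED   - MV was updated during the current pass
 * MV_UNCHANGED - MV was re-evaluated but kept during the current pass
 * MV_LISTED    - MB is queued in the blocklist for processing */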
377 #define MV_FROZEN 8
378 #define MV_CHANGED 4
379 #define MV_UNCHANGED 2
380 #define MV_LISTED 1
381 static av_always_inline void add_blocklist(int (*blocklist)[2], int *blocklist_length, uint8_t *fixed, int mb_x, int mb_y, int mb_xy)
382 {
383 if (fixed[mb_xy])
384 return;
385 fixed[mb_xy] = MV_LISTED;
386 blocklist[ *blocklist_length ][0] = mb_x;
387 blocklist[(*blocklist_length)++][1] = mb_y;
388 }
389
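/**
 * Guess the motion vectors of macroblocks with damaged MVs.
 *
 * Starting from MBs next to undamaged ones, each damaged MB is assigned the
 * candidate MV (neighbouring MVs, their mean and median, the zero MV, the
 * previous MV) that minimizes the error along its borders with already fixed
 * neighbours; the result is propagated outwards pass by pass. If too few MBs
 * are undamaged (or FF_EC_GUESS_MVS is disabled), damaged MVs are simply set
 * to zero.
 */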
390 static void guess_mv(ERContext *s)
391 {
392 int (*blocklist)[2], (*next_blocklist)[2];
393 uint8_t *fixed;
394 const ptrdiff_t mb_stride = s->mb_stride;
395 const int mb_width = s->mb_width;
396 int mb_height = s->mb_height;
397 int i, depth, num_avail;
398 int mb_x, mb_y;
399 ptrdiff_t mot_step, mot_stride;
400 int blocklist_length, next_blocklist_length;
401
402 if (s->last_pic.f && s->last_pic.f->data[0])
403 mb_height = FFMIN(mb_height, (s->last_pic.f->height+15)>>4);
404 if (s->next_pic.f && s->next_pic.f->data[0])
405 mb_height = FFMIN(mb_height, (s->next_pic.f->height+15)>>4);
406
407 blocklist = (int (*)[2])s->er_temp_buffer;
408 next_blocklist = blocklist + s->mb_stride * s->mb_height;
409 fixed = (uint8_t *)(next_blocklist + s->mb_stride * s->mb_height);
410
411 set_mv_strides(s, &mot_step, &mot_stride);
412
413 num_avail = 0;
414 if (s->last_pic.motion_val[0])
415 ff_thread_await_progress(s->last_pic.tf, mb_height-1, 0);
416 for (i = 0; i < mb_width * mb_height; i++) {
417 const int mb_xy = s->mb_index2xy[i];
418 int f = 0;
419 int error = s->error_status_table[mb_xy];
420
421 if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
422 f = MV_FROZEN; // intra // FIXME check
423 if (!(error & ER_MV_ERROR))
424 f = MV_FROZEN; // inter with undamaged MV
425
426 fixed[mb_xy] = f;
427 if (f == MV_FROZEN)
428 num_avail++;
429 else if(s->last_pic.f->data[0] && s->last_pic.motion_val[0]){
430 const int mb_y= mb_xy / s->mb_stride;
431 const int mb_x= mb_xy % s->mb_stride;
432 const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
433 s->cur_pic.motion_val[0][mot_index][0]= s->last_pic.motion_val[0][mot_index][0];
434 s->cur_pic.motion_val[0][mot_index][1]= s->last_pic.motion_val[0][mot_index][1];
435 s->cur_pic.ref_index[0][4*mb_xy] = s->last_pic.ref_index[0][4*mb_xy];
436 }
437 }
438
439 if ((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) ||
440 num_avail <= FFMAX(mb_width, mb_height) / 2) {
441 for (mb_y = 0; mb_y < mb_height; mb_y++) {
442 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
443 const int mb_xy = mb_x + mb_y * s->mb_stride;
444 int mv_dir = (s->last_pic.f && s->last_pic.f->data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
445
446 if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
447 continue;
448 if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
449 continue;
450
451 s->mv[0][0][0] = 0;
452 s->mv[0][0][1] = 0;
453 s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv,
454 mb_x, mb_y, 0, 0);
455 }
456 }
457 return;
458 }
459
460 blocklist_length = 0;
461 for (mb_y = 0; mb_y < mb_height; mb_y++) {
462 for (mb_x = 0; mb_x < mb_width; mb_x++) {
463 const int mb_xy = mb_x + mb_y * mb_stride;
464 if (fixed[mb_xy] == MV_FROZEN) {
465 if (mb_x) add_blocklist(blocklist, &blocklist_length, fixed, mb_x - 1, mb_y, mb_xy - 1);
466 if (mb_y) add_blocklist(blocklist, &blocklist_length, fixed, mb_x, mb_y - 1, mb_xy - mb_stride);
467 if (mb_x+1 < mb_width) add_blocklist(blocklist, &blocklist_length, fixed, mb_x + 1, mb_y, mb_xy + 1);
468 if (mb_y+1 < mb_height) add_blocklist(blocklist, &blocklist_length, fixed, mb_x, mb_y + 1, mb_xy + mb_stride);
469 }
470 }
471 }
472
473 for (depth = 0; ; depth++) {
474 int changed, pass, none_left;
475 int blocklist_index;
476
477 none_left = 1;
478 changed = 1;
479 for (pass = 0; (changed || pass < 2) && pass < 10; pass++) {
480 int score_sum = 0;
481
482 changed = 0;
483 for (blocklist_index = 0; blocklist_index < blocklist_length; blocklist_index++) {
484 const int mb_x = blocklist[blocklist_index][0];
485 const int mb_y = blocklist[blocklist_index][1];
486 const int mb_xy = mb_x + mb_y * mb_stride;
487 int mv_predictor[8][2];
488 int ref[8];
489 int pred_count;
490 int j;
491 int best_score;
492 int best_pred;
493 int mot_index;
494 int prev_x, prev_y, prev_ref;
495
496 if ((mb_x ^ mb_y ^ pass) & 1)
497 continue;
498 av_assert2(fixed[mb_xy] != MV_FROZEN);
499
500
501 av_assert1(!IS_INTRA(s->cur_pic.mb_type[mb_xy]));
502 av_assert1(s->last_pic.f && s->last_pic.f->data[0]);
503
504 j = 0;
505 if (mb_x > 0)
506 j |= fixed[mb_xy - 1];
507 if (mb_x + 1 < mb_width)
508 j |= fixed[mb_xy + 1];
509 if (mb_y > 0)
510 j |= fixed[mb_xy - mb_stride];
511 if (mb_y + 1 < mb_height)
512 j |= fixed[mb_xy + mb_stride];
513
514 av_assert2(j & MV_FROZEN);
515
516 if (!(j & MV_CHANGED) && pass > 1)
517 continue;
518
519 none_left = 0;
520 pred_count = 0;
521 mot_index = (mb_x + mb_y * mot_stride) * mot_step;
522
523 if (mb_x > 0 && fixed[mb_xy - 1] > 1) {
524 mv_predictor[pred_count][0] =
525 s->cur_pic.motion_val[0][mot_index - mot_step][0];
526 mv_predictor[pred_count][1] =
527 s->cur_pic.motion_val[0][mot_index - mot_step][1];
528 ref[pred_count] =
529 s->cur_pic.ref_index[0][4 * (mb_xy - 1)];
530 pred_count++;
531 }
532 if (mb_x + 1 < mb_width && fixed[mb_xy + 1] > 1) {
533 mv_predictor[pred_count][0] =
534 s->cur_pic.motion_val[0][mot_index + mot_step][0];
535 mv_predictor[pred_count][1] =
536 s->cur_pic.motion_val[0][mot_index + mot_step][1];
537 ref[pred_count] =
538 s->cur_pic.ref_index[0][4 * (mb_xy + 1)];
539 pred_count++;
540 }
541 if (mb_y > 0 && fixed[mb_xy - mb_stride] > 1) {
542 mv_predictor[pred_count][0] =
543 s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][0];
544 mv_predictor[pred_count][1] =
545 s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][1];
546 ref[pred_count] =
547 s->cur_pic.ref_index[0][4 * (mb_xy - s->mb_stride)];
548 pred_count++;
549 }
550 if (mb_y + 1<mb_height && fixed[mb_xy + mb_stride] > 1) {
551 mv_predictor[pred_count][0] =
552 s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][0];
553 mv_predictor[pred_count][1] =
554 s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][1];
555 ref[pred_count] =
556 s->cur_pic.ref_index[0][4 * (mb_xy + s->mb_stride)];
557 pred_count++;
558 }
559 if (pred_count == 0)
560 continue;
561
562 if (pred_count > 1) {
563 int sum_x = 0, sum_y = 0, sum_r = 0;
564 int max_x, max_y, min_x, min_y, max_r, min_r;
565
566 for (j = 0; j < pred_count; j++) {
567 sum_x += mv_predictor[j][0];
568 sum_y += mv_predictor[j][1];
569 sum_r += ref[j];
570 if (j && ref[j] != ref[j - 1])
571 goto skip_mean_and_median;
572 }
573
574 /* mean */
575 mv_predictor[pred_count][0] = sum_x / j;
576 mv_predictor[pred_count][1] = sum_y / j;
577 ref[pred_count] = sum_r / j;
578
579 /* median */
580 if (pred_count >= 3) {
581 min_y = min_x = min_r = 99999;
582 max_y = max_x = max_r = -99999;
583 } else {
584 min_x = min_y = max_x = max_y = min_r = max_r = 0;
585 }
586 for (j = 0; j < pred_count; j++) {
587 max_x = FFMAX(max_x, mv_predictor[j][0]);
588 max_y = FFMAX(max_y, mv_predictor[j][1]);
589 max_r = FFMAX(max_r, ref[j]);
590 min_x = FFMIN(min_x, mv_predictor[j][0]);
591 min_y = FFMIN(min_y, mv_predictor[j][1]);
592 min_r = FFMIN(min_r, ref[j]);
593 }
594 mv_predictor[pred_count + 1][0] = sum_x - max_x - min_x;
595 mv_predictor[pred_count + 1][1] = sum_y - max_y - min_y;
596 ref[pred_count + 1] = sum_r - max_r - min_r;
597
598 if (pred_count == 4) {
599 mv_predictor[pred_count + 1][0] /= 2;
600 mv_predictor[pred_count + 1][1] /= 2;
601 ref[pred_count + 1] /= 2;
602 }
603 pred_count += 2;
604 }
605
606 skip_mean_and_median:
607 /* zero MV */
608 mv_predictor[pred_count][0] =
609 mv_predictor[pred_count][1] =
610 ref[pred_count] = 0;
611 pred_count++;
612
613 prev_x = s->cur_pic.motion_val[0][mot_index][0];
614 prev_y = s->cur_pic.motion_val[0][mot_index][1];
615 prev_ref = s->cur_pic.ref_index[0][4 * mb_xy];
616
617 /* last MV */
618 mv_predictor[pred_count][0] = prev_x;
619 mv_predictor[pred_count][1] = prev_y;
620 ref[pred_count] = prev_ref;
621 pred_count++;
622
623 best_pred = 0;
624 best_score = 256 * 256 * 256 * 64;
625 for (j = 0; j < pred_count; j++) {
626 int *linesize = s->cur_pic.f->linesize;
627 int score = 0;
628 uint8_t *src = s->cur_pic.f->data[0] +
629 mb_x * 16 + mb_y * 16 * linesize[0];
630
631 s->cur_pic.motion_val[0][mot_index][0] =
632 s->mv[0][0][0] = mv_predictor[j][0];
633 s->cur_pic.motion_val[0][mot_index][1] =
634 s->mv[0][0][1] = mv_predictor[j][1];
635
636 // predictor intra or otherwise not available
637 if (ref[j] < 0)
638 continue;
639
640 s->decode_mb(s->opaque, ref[j], MV_DIR_FORWARD,
641 MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0);
642
643 if (mb_x > 0 && fixed[mb_xy - 1] > 1) {
644 int k;
645 for (k = 0; k < 16; k++)
646 score += FFABS(src[k * linesize[0] - 1] -
647 src[k * linesize[0]]);
648 }
649 if (mb_x + 1 < mb_width && fixed[mb_xy + 1] > 1) {
650 int k;
651 for (k = 0; k < 16; k++)
652 score += FFABS(src[k * linesize[0] + 15] -
653 src[k * linesize[0] + 16]);
654 }
655 if (mb_y > 0 && fixed[mb_xy - mb_stride] > 1) {
656 int k;
657 for (k = 0; k < 16; k++)
658 score += FFABS(src[k - linesize[0]] - src[k]);
659 }
660 if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] > 1) {
661 int k;
662 for (k = 0; k < 16; k++)
663 score += FFABS(src[k + linesize[0] * 15] -
664 src[k + linesize[0] * 16]);
665 }
666
667 if (score <= best_score) { // <= will favor the last MV
668 best_score = score;
669 best_pred = j;
670 }
671 }
672 score_sum += best_score;
673 s->mv[0][0][0] = mv_predictor[best_pred][0];
674 s->mv[0][0][1] = mv_predictor[best_pred][1];
675
676 for (i = 0; i < mot_step; i++)
677 for (j = 0; j < mot_step; j++) {
678 s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
679 s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
680 }
681
682 s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD,
683 MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0);
684
685
686 if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) {
687 fixed[mb_xy] = MV_CHANGED;
688 changed++;
689 } else
690 fixed[mb_xy] = MV_UNCHANGED;
691 }
692 }
693
694 if (none_left)
695 return;
696
697 next_blocklist_length = 0;
698
699 for (blocklist_index = 0; blocklist_index < blocklist_length; blocklist_index++) {
700 const int mb_x = blocklist[blocklist_index][0];
701 const int mb_y = blocklist[blocklist_index][1];
702 const int mb_xy = mb_x + mb_y * mb_stride;
703
704 if (fixed[mb_xy] & (MV_CHANGED|MV_UNCHANGED|MV_FROZEN)) {
705 fixed[mb_xy] = MV_FROZEN;
706 if (mb_x > 0)
707 add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x - 1, mb_y, mb_xy - 1);
708 if (mb_y > 0)
709 add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x, mb_y - 1, mb_xy - mb_stride);
710 if (mb_x + 1 < mb_width)
711 add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x + 1, mb_y, mb_xy + 1);
712 if (mb_y + 1 < mb_height)
713 add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x, mb_y + 1, mb_xy + mb_stride);
714 }
715 }
716 av_assert0(next_blocklist_length <= mb_height * mb_width);
717 FFSWAP(int , blocklist_length, next_blocklist_length);
718 FFSWAP(void*, blocklist, next_blocklist);
719 }
720 }
721
722 static int is_intra_more_likely(ERContext *s)
723 {
724 int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
725
726 if (!s->last_pic.f || !s->last_pic.f->data[0])
727 return 1; // no previous frame available -> use spatial prediction
728
729 if (s->avctx->error_concealment & FF_EC_FAVOR_INTER)
730 return 0;
731
732 undamaged_count = 0;
733 for (i = 0; i < s->mb_num; i++) {
734 const int mb_xy = s->mb_index2xy[i];
735 const int error = s->error_status_table[mb_xy];
736 if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
737 undamaged_count++;
738 }
739
740 if (undamaged_count < 5)
741 return 0; // almost all MBs damaged -> use temporal prediction
742
743 // avoid the sad() check below, as it requires access to the picture data
744 if (CONFIG_XVMC &&
745 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb &&
746 s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I)
747 return 1;
748
749 skip_amount = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs
750 is_intra_likely = 0;
751
752 j = 0;
753 for (mb_y = 0; mb_y < s->mb_height - 1; mb_y++) {
754 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
755 int error;
756 const int mb_xy = mb_x + mb_y * s->mb_stride;
757
758 error = s->error_status_table[mb_xy];
759 if ((error & ER_DC_ERROR) && (error & ER_MV_ERROR))
760 continue; // skip damaged
761
762 j++;
763 // skip a few to speed things up
764 if ((j % skip_amount) != 0)
765 continue;
766
767 if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I) {
768 int *linesize = s->cur_pic.f->linesize;
769 uint8_t *mb_ptr = s->cur_pic.f->data[0] +
770 mb_x * 16 + mb_y * 16 * linesize[0];
771 uint8_t *last_mb_ptr = s->last_pic.f->data[0] +
772 mb_x * 16 + mb_y * 16 * linesize[0];
773
774 if (s->avctx->codec_id == AV_CODEC_ID_H264) {
775 // FIXME
776 } else {
777 ff_thread_await_progress(s->last_pic.tf, mb_y, 0);
778 }
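// vote: SAD between the current MB and the co-located MB in the last picture
// (temporal change), minus the SAD between that MB and the one a MB row below
// it (local spatial variation); a large temporal change relative to the
// spatial variation favours intra concealment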
779 is_intra_likely += s->mecc.sad[0](NULL, last_mb_ptr, mb_ptr,
780 linesize[0], 16);
781 // FIXME need await_progress() here
782 is_intra_likely -= s->mecc.sad[0](NULL, last_mb_ptr,
783 last_mb_ptr + linesize[0] * 16,
784 linesize[0], 16);
785 } else {
786 if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
787 is_intra_likely++;
788 else
789 is_intra_likely--;
790 }
791 }
792 }
793 // av_log(NULL, AV_LOG_ERROR, "is_intra_likely: %d type:%d\n", is_intra_likely, s->pict_type);
794 return is_intra_likely > 0;
795 }
796
797 void ff_er_frame_start(ERContext *s)
798 {
799 if (!s->avctx->error_concealment)
800 return;
801
802 if (!s->mecc_inited) {
803 ff_me_cmp_init(&s->mecc, s->avctx);
804 s->mecc_inited = 1;
805 }
806
807 memset(s->error_status_table, ER_MB_ERROR | VP_START | ER_MB_END,
808 s->mb_stride * s->mb_height * sizeof(uint8_t));
809 atomic_init(&s->error_count, 3 * s->mb_num);
810 s->error_occurred = 0;
811 }
812
813 static int er_supported(ERContext *s)
814 {
815 if(s->avctx->hwaccel && s->avctx->hwaccel->decode_slice ||
816 !s->cur_pic.f ||
817 s->cur_pic.field_picture
818 )
819 return 0;
820 return 1;
821 }
822
823 /**
824 * Add a slice.
825 * @param endx x component of the last macroblock, can be -1
826 * for the last of the previous line
827 * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is
828 * assumed that no earlier end or error of the same type occurred
829 */
830 void ff_er_add_slice(ERContext *s, int startx, int starty,
831 int endx, int endy, int status)
832 {
833 const int start_i = av_clip(startx + starty * s->mb_width, 0, s->mb_num - 1);
834 const int end_i = av_clip(endx + endy * s->mb_width, 0, s->mb_num);
835 const int start_xy = s->mb_index2xy[start_i];
836 const int end_xy = s->mb_index2xy[end_i];
837 int mask = -1;
838
839 if (s->avctx->hwaccel && s->avctx->hwaccel->decode_slice)
840 return;
841
842 if (start_i > end_i || start_xy > end_xy) {
843 av_log(s->avctx, AV_LOG_ERROR,
844 "internal error, slice end before start\n");
845 return;
846 }
847
848 if (!s->avctx->error_concealment)
849 return;
850
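/* error_count was initialized to 3 * mb_num in ff_er_frame_start() (one unit
 * per MB for each of the AC/DC/MV partitions); every end or error reported
 * below subtracts the slice's MB count for that partition, so a completely
 * and cleanly decoded frame ends at 0 and ff_er_frame_end() does nothing */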
851 mask &= ~VP_START;
852 if (status & (ER_AC_ERROR | ER_AC_END)) {
853 mask &= ~(ER_AC_ERROR | ER_AC_END);
854 atomic_fetch_add(&s->error_count, start_i - end_i - 1);
855 }
856 if (status & (ER_DC_ERROR | ER_DC_END)) {
857 mask &= ~(ER_DC_ERROR | ER_DC_END);
858 atomic_fetch_add(&s->error_count, start_i - end_i - 1);
859 }
860 if (status & (ER_MV_ERROR | ER_MV_END)) {
861 mask &= ~(ER_MV_ERROR | ER_MV_END);
862 atomic_fetch_add(&s->error_count, start_i - end_i - 1);
863 }
864
865 if (status & ER_MB_ERROR) {
866 s->error_occurred = 1;
867 atomic_store(&s->error_count, INT_MAX);
868 }
869
870 if (mask == ~0x7F) {
871 memset(&s->error_status_table[start_xy], 0,
872 (end_xy - start_xy) * sizeof(uint8_t));
873 } else {
874 int i;
875 for (i = start_xy; i < end_xy; i++)
876 s->error_status_table[i] &= mask;
877 }
878
879 if (end_i == s->mb_num)
880 atomic_store(&s->error_count, INT_MAX);
881 else {
882 s->error_status_table[end_xy] &= mask;
883 s->error_status_table[end_xy] |= status;
884 }
885
886 s->error_status_table[start_xy] |= VP_START;
887
888 if (start_xy > 0 && !(s->avctx->active_thread_type & FF_THREAD_SLICE) &&
889 er_supported(s) && s->avctx->skip_top * s->mb_width < start_i) {
890 int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];
891
892 prev_status &= ~ VP_START;
893 if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END)) {
894 s->error_occurred = 1;
895 atomic_store(&s->error_count, INT_MAX);
896 }
897 }
898 }
899
900 void ff_er_frame_end(ERContext *s)
901 {
902 int *linesize = NULL;
903 int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
904 int distance;
905 int threshold_part[4] = { 100, 100, 100 };
906 int threshold = 50;
907 int is_intra_likely;
908 int size = s->b8_stride * 2 * s->mb_height;
909
910 /* We do not support ER of field pictures yet,
911 * though it should not crash if enabled. */
912 if (!s->avctx->error_concealment || !atomic_load(&s->error_count) ||
913 s->avctx->lowres ||
914 !er_supported(s) ||
915 atomic_load(&s->error_count) == 3 * s->mb_width *
916 (s->avctx->skip_top + s->avctx->skip_bottom)) {
917 return;
918 }
919 linesize = s->cur_pic.f->linesize;
920 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
921 int status = s->error_status_table[mb_x + (s->mb_height - 1) * s->mb_stride];
922 if (status != 0x7F)
923 break;
924 }
925
926 if ( mb_x == s->mb_width
927 && s->avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO
928 && (FFALIGN(s->avctx->height, 16)&16)
929 && atomic_load(&s->error_count) == 3 * s->mb_width * (s->avctx->skip_top + s->avctx->skip_bottom + 1)
930 ) {
931 av_log(s->avctx, AV_LOG_DEBUG, "ignoring last missing slice\n");
932 return;
933 }
934
935 if (s->last_pic.f) {
936 if (s->last_pic.f->width != s->cur_pic.f->width ||
937 s->last_pic.f->height != s->cur_pic.f->height ||
938 s->last_pic.f->format != s->cur_pic.f->format) {
939 av_log(s->avctx, AV_LOG_WARNING, "Cannot use previous picture in error concealment\n");
940 memset(&s->last_pic, 0, sizeof(s->last_pic));
941 }
942 }
943 if (s->next_pic.f) {
944 if (s->next_pic.f->width != s->cur_pic.f->width ||
945 s->next_pic.f->height != s->cur_pic.f->height ||
946 s->next_pic.f->format != s->cur_pic.f->format) {
947 av_log(s->avctx, AV_LOG_WARNING, "Cannot use next picture in error concealment\n");
948 memset(&s->next_pic, 0, sizeof(s->next_pic));
949 }
950 }
951
952 if (!s->cur_pic.motion_val[0] || !s->cur_pic.ref_index[0]) {
953 av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
954
955 for (i = 0; i < 2; i++) {
956 s->ref_index_buf[i] = av_buffer_allocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
957 s->motion_val_buf[i] = av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t));
958 if (!s->ref_index_buf[i] || !s->motion_val_buf[i])
959 break;
960 s->cur_pic.ref_index[i] = s->ref_index_buf[i]->data;
961 s->cur_pic.motion_val[i] = (int16_t (*)[2])s->motion_val_buf[i]->data + 4;
962 }
963 if (i < 2) {
964 for (i = 0; i < 2; i++) {
965 av_buffer_unref(&s->ref_index_buf[i]);
966 av_buffer_unref(&s->motion_val_buf[i]);
967 s->cur_pic.ref_index[i] = NULL;
968 s->cur_pic.motion_val[i] = NULL;
969 }
970 return;
971 }
972 }
973
974 if (s->avctx->debug & FF_DEBUG_ER) {
975 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
976 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
977 int status = s->error_status_table[mb_x + mb_y * s->mb_stride];
978
979 av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status);
980 }
981 av_log(s->avctx, AV_LOG_DEBUG, "\n");
982 }
983 }
984
985 #if 1
986 /* handle overlapping slices */
987 for (error_type = 1; error_type <= 3; error_type++) {
988 int end_ok = 0;
989
990 for (i = s->mb_num - 1; i >= 0; i--) {
991 const int mb_xy = s->mb_index2xy[i];
992 int error = s->error_status_table[mb_xy];
993
994 if (error & (1 << error_type))
995 end_ok = 1;
996 if (error & (8 << error_type))
997 end_ok = 1;
998
999 if (!end_ok)
1000 s->error_status_table[mb_xy] |= 1 << error_type;
1001
1002 if (error & VP_START)
1003 end_ok = 0;
1004 }
1005 }
1006 #endif
1007 #if 1
1008 /* handle slices with partitions of different length */
1009 if (s->partitioned_frame) {
1010 int end_ok = 0;
1011
1012 for (i = s->mb_num - 1; i >= 0; i--) {
1013 const int mb_xy = s->mb_index2xy[i];
1014 int error = s->error_status_table[mb_xy];
1015
1016 if (error & ER_AC_END)
1017 end_ok = 0;
1018 if ((error & ER_MV_END) ||
1019 (error & ER_DC_END) ||
1020 (error & ER_AC_ERROR))
1021 end_ok = 1;
1022
1023 if (!end_ok)
1024 s->error_status_table[mb_xy]|= ER_AC_ERROR;
1025
1026 if (error & VP_START)
1027 end_ok = 0;
1028 }
1029 }
1030 #endif
1031 /* handle missing slices */
1032 if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1033 int end_ok = 1;
1034
1035 // FIXME + 100 hack
1036 for (i = s->mb_num - 2; i >= s->mb_width + 100; i--) {
1037 const int mb_xy = s->mb_index2xy[i];
1038 int error1 = s->error_status_table[mb_xy];
1039 int error2 = s->error_status_table[s->mb_index2xy[i + 1]];
1040
1041 if (error1 & VP_START)
1042 end_ok = 1;
1043
1044 if (error2 == (VP_START | ER_MB_ERROR | ER_MB_END) &&
1045 error1 != (VP_START | ER_MB_ERROR | ER_MB_END) &&
1046 ((error1 & ER_AC_END) || (error1 & ER_DC_END) ||
1047 (error1 & ER_MV_END))) {
1048 // end & uninit
1049 end_ok = 0;
1050 }
1051
1052 if (!end_ok)
1053 s->error_status_table[mb_xy] |= ER_MB_ERROR;
1054 }
1055 }
1056
1057 #if 1
1058 /* backward mark errors */
1059 distance = 9999999;
1060 for (error_type = 1; error_type <= 3; error_type++) {
1061 for (i = s->mb_num - 1; i >= 0; i--) {
1062 const int mb_xy = s->mb_index2xy[i];
1063 int error = s->error_status_table[mb_xy];
1064
1065 if (!s->mbskip_table || !s->mbskip_table[mb_xy]) // FIXME partition specific
1066 distance++;
1067 if (error & (1 << error_type))
1068 distance = 0;
1069
1070 if (s->partitioned_frame) {
1071 if (distance < threshold_part[error_type - 1])
1072 s->error_status_table[mb_xy] |= 1 << error_type;
1073 } else {
1074 if (distance < threshold)
1075 s->error_status_table[mb_xy] |= 1 << error_type;
1076 }
1077
1078 if (error & VP_START)
1079 distance = 9999999;
1080 }
1081 }
1082 #endif
1083
1084 /* forward mark errors */
1085 error = 0;
1086 for (i = 0; i < s->mb_num; i++) {
1087 const int mb_xy = s->mb_index2xy[i];
1088 int old_error = s->error_status_table[mb_xy];
1089
1090 if (old_error & VP_START) {
1091 error = old_error & ER_MB_ERROR;
1092 } else {
1093 error |= old_error & ER_MB_ERROR;
1094 s->error_status_table[mb_xy] |= error;
1095 }
1096 }
1097 #if 1
1098 /* handle the non-partitioned case */
1099 if (!s->partitioned_frame) {
1100 for (i = 0; i < s->mb_num; i++) {
1101 const int mb_xy = s->mb_index2xy[i];
1102 int error = s->error_status_table[mb_xy];
1103 if (error & ER_MB_ERROR)
1104 error |= ER_MB_ERROR;
1105 s->error_status_table[mb_xy] = error;
1106 }
1107 }
1108 #endif
1109
1110 dc_error = ac_error = mv_error = 0;
1111 for (i = 0; i < s->mb_num; i++) {
1112 const int mb_xy = s->mb_index2xy[i];
1113 int error = s->error_status_table[mb_xy];
1114 if (error & ER_DC_ERROR)
1115 dc_error++;
1116 if (error & ER_AC_ERROR)
1117 ac_error++;
1118 if (error & ER_MV_ERROR)
1119 mv_error++;
1120 }
1121 av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors in %c frame\n",
1122 dc_error, ac_error, mv_error, av_get_picture_type_char(s->cur_pic.f->pict_type));
1123
1124 s->cur_pic.f->decode_error_flags |= FF_DECODE_ERROR_CONCEALMENT_ACTIVE;
1125
1126 is_intra_likely = is_intra_more_likely(s);
1127
1128 /* set unknown mb-type to most likely */
1129 for (i = 0; i < s->mb_num; i++) {
1130 const int mb_xy = s->mb_index2xy[i];
1131 int error = s->error_status_table[mb_xy];
1132 if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
1133 continue;
1134
1135 if (is_intra_likely)
1136 s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
1137 else
1138 s->cur_pic.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
1139 }
1140
1141 // change inter to intra blocks if no reference frames are available
1142 if (!(s->last_pic.f && s->last_pic.f->data[0]) &&
1143 !(s->next_pic.f && s->next_pic.f->data[0]))
1144 for (i = 0; i < s->mb_num; i++) {
1145 const int mb_xy = s->mb_index2xy[i];
1146 if (!IS_INTRA(s->cur_pic.mb_type[mb_xy]))
1147 s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
1148 }
1149
1150 /* handle inter blocks with damaged AC */
1151 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1152 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1153 const int mb_xy = mb_x + mb_y * s->mb_stride;
1154 const int mb_type = s->cur_pic.mb_type[mb_xy];
1155 const int dir = !(s->last_pic.f && s->last_pic.f->data[0]);
1156 const int mv_dir = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
1157 int mv_type;
1158
1159 int error = s->error_status_table[mb_xy];
1160
1161 if (IS_INTRA(mb_type))
1162 continue; // intra
1163 if (error & ER_MV_ERROR)
1164 continue; // inter with damaged MV
1165 if (!(error & ER_AC_ERROR))
1166 continue; // undamaged inter
1167
1168 if (IS_8X8(mb_type)) {
1169 int mb_index = mb_x * 2 + mb_y * 2 * s->b8_stride;
1170 int j;
1171 mv_type = MV_TYPE_8X8;
1172 for (j = 0; j < 4; j++) {
1173 s->mv[0][j][0] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
1174 s->mv[0][j][1] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
1175 }
1176 } else {
1177 mv_type = MV_TYPE_16X16;
1178 s->mv[0][0][0] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
1179 s->mv[0][0][1] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
1180 }
1181
1182 s->decode_mb(s->opaque, 0 /* FIXME H.264 partitioned slices need this set */,
1183 mv_dir, mv_type, &s->mv, mb_x, mb_y, 0, 0);
1184 }
1185 }
1186
1187 /* guess MVs */
1188 if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_B) {
1189 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1190 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1191 int xy = mb_x * 2 + mb_y * 2 * s->b8_stride;
1192 const int mb_xy = mb_x + mb_y * s->mb_stride;
1193 const int mb_type = s->cur_pic.mb_type[mb_xy];
1194 int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
1195
1196 int error = s->error_status_table[mb_xy];
1197
1198 if (IS_INTRA(mb_type))
1199 continue;
1200 if (!(error & ER_MV_ERROR))
1201 continue; // inter with undamaged MV
1202 if (!(error & ER_AC_ERROR))
1203 continue; // undamaged inter
1204
1205 if (!(s->last_pic.f && s->last_pic.f->data[0]))
1206 mv_dir &= ~MV_DIR_FORWARD;
1207 if (!(s->next_pic.f && s->next_pic.f->data[0]))
1208 mv_dir &= ~MV_DIR_BACKWARD;
1209
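/* derive both MVs from the co-located MV of the next picture, scaled by the
 * relative temporal distances as in MPEG-4 direct mode: forward by pb/pp,
 * backward by (pb - pp)/pp */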
1210 if (s->pp_time) {
1211 int time_pp = s->pp_time;
1212 int time_pb = s->pb_time;
1213
1214 av_assert0(s->avctx->codec_id != AV_CODEC_ID_H264);
1215 ff_thread_await_progress(s->next_pic.tf, mb_y, 0);
1216
1217 s->mv[0][0][0] = s->next_pic.motion_val[0][xy][0] * time_pb / time_pp;
1218 s->mv[0][0][1] = s->next_pic.motion_val[0][xy][1] * time_pb / time_pp;
1219 s->mv[1][0][0] = s->next_pic.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
1220 s->mv[1][0][1] = s->next_pic.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
1221 } else {
1222 s->mv[0][0][0] = 0;
1223 s->mv[0][0][1] = 0;
1224 s->mv[1][0][0] = 0;
1225 s->mv[1][0][1] = 0;
1226 }
1227
1228 s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv,
1229 mb_x, mb_y, 0, 0);
1230 }
1231 }
1232 } else
1233 guess_mv(s);
1234
1235 /* the filters below manipulate raw image, skip them */
1236 if (CONFIG_XVMC && s->avctx->hwaccel && s->avctx->hwaccel->decode_mb)
1237 goto ec_clean;
1238 /* fill DC for inter blocks */
1239 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1240 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1241 int dc, dcu, dcv, y, n;
1242 int16_t *dc_ptr;
1243 uint8_t *dest_y, *dest_cb, *dest_cr;
1244 const int mb_xy = mb_x + mb_y * s->mb_stride;
1245 const int mb_type = s->cur_pic.mb_type[mb_xy];
1246
1247 // error = s->error_status_table[mb_xy];
1248
1249 if (IS_INTRA(mb_type) && s->partitioned_frame)
1250 continue;
1251 // if (error & ER_MV_ERROR)
1252 // continue; // inter data damaged FIXME is this good?
1253
1254 dest_y = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
1255 dest_cb = s->cur_pic.f->data[1] + mb_x * 8 + mb_y * 8 * linesize[1];
1256 dest_cr = s->cur_pic.f->data[2] + mb_x * 8 + mb_y * 8 * linesize[2];
1257
1258 dc_ptr = &s->dc_val[0][mb_x * 2 + mb_y * 2 * s->b8_stride];
1259 for (n = 0; n < 4; n++) {
1260 dc = 0;
1261 for (y = 0; y < 8; y++) {
1262 int x;
1263 for (x = 0; x < 8; x++)
1264 dc += dest_y[x + (n & 1) * 8 +
1265 (y + (n >> 1) * 8) * linesize[0]];
1266 }
1267 dc_ptr[(n & 1) + (n >> 1) * s->b8_stride] = (dc + 4) >> 3;
1268 }
1269
1270 if (!s->cur_pic.f->data[2])
1271 continue;
1272
1273 dcu = dcv = 0;
1274 for (y = 0; y < 8; y++) {
1275 int x;
1276 for (x = 0; x < 8; x++) {
1277 dcu += dest_cb[x + y * linesize[1]];
1278 dcv += dest_cr[x + y * linesize[2]];
1279 }
1280 }
1281 s->dc_val[1][mb_x + mb_y * s->mb_stride] = (dcu + 4) >> 3;
1282 s->dc_val[2][mb_x + mb_y * s->mb_stride] = (dcv + 4) >> 3;
1283 }
1284 }
1285 #if 1
1286 /* guess DC for damaged blocks */
1287 guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1);
1288 guess_dc(s, s->dc_val[1], s->mb_width , s->mb_height , s->mb_stride, 0);
1289 guess_dc(s, s->dc_val[2], s->mb_width , s->mb_height , s->mb_stride, 0);
1290 #endif
1291
1292 /* filter luma DC */
1293 filter181(s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride);
1294
1295 #if 1
1296 /* render DC only intra */
1297 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1298 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1299 uint8_t *dest_y, *dest_cb, *dest_cr;
1300 const int mb_xy = mb_x + mb_y * s->mb_stride;
1301 const int mb_type = s->cur_pic.mb_type[mb_xy];
1302
1303 int error = s->error_status_table[mb_xy];
1304
1305 if (IS_INTER(mb_type))
1306 continue;
1307 if (!(error & ER_AC_ERROR))
1308 continue; // undamaged
1309
1310 dest_y = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
1311 dest_cb = s->cur_pic.f->data[1] + mb_x * 8 + mb_y * 8 * linesize[1];
1312 dest_cr = s->cur_pic.f->data[2] + mb_x * 8 + mb_y * 8 * linesize[2];
1313 if (!s->cur_pic.f->data[2])
1314 dest_cb = dest_cr = NULL;
1315
1316 put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
1317 }
1318 }
1319 #endif
1320
1321 if (s->avctx->error_concealment & FF_EC_DEBLOCK) {
1322 /* filter horizontal block boundaries */
1323 h_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2,
1324 s->mb_height * 2, linesize[0], 1);
1325
1326 /* filter vertical block boundaries */
1327 v_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2,
1328 s->mb_height * 2, linesize[0], 1);
1329
1330 if (s->cur_pic.f->data[2]) {
1331 h_block_filter(s, s->cur_pic.f->data[1], s->mb_width,
1332 s->mb_height, linesize[1], 0);
1333 h_block_filter(s, s->cur_pic.f->data[2], s->mb_width,
1334 s->mb_height, linesize[2], 0);
1335 v_block_filter(s, s->cur_pic.f->data[1], s->mb_width,
1336 s->mb_height, linesize[1], 0);
1337 v_block_filter(s, s->cur_pic.f->data[2], s->mb_width,
1338 s->mb_height, linesize[2], 0);
1339 }
1340 }
1341
1342 ec_clean:
1343 /* clean a few tables */
1344 for (i = 0; i < s->mb_num; i++) {
1345 const int mb_xy = s->mb_index2xy[i];
1346 int error = s->error_status_table[mb_xy];
1347
1348 if (s->mbskip_table && s->cur_pic.f->pict_type != AV_PICTURE_TYPE_B &&
1349 (error & (ER_DC_ERROR | ER_MV_ERROR | ER_AC_ERROR))) {
1350 s->mbskip_table[mb_xy] = 0;
1351 }
1352 if (s->mbintra_table)
1353 s->mbintra_table[mb_xy] = 1;
1354 }
1355
1356 for (i = 0; i < 2; i++) {
1357 av_buffer_unref(&s->ref_index_buf[i]);
1358 av_buffer_unref(&s->motion_val_buf[i]);
1359 s->cur_pic.ref_index[i] = NULL;
1360 s->cur_pic.motion_val[i] = NULL;
1361 }
1362
1363 memset(&s->cur_pic, 0, sizeof(ERPicture));
1364 memset(&s->last_pic, 0, sizeof(ERPicture));
1365 memset(&s->next_pic, 0, sizeof(ERPicture));
1366 }
1367