/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "vpx_config.h"
#include "vp8_rtcd.h"
#if !defined(WIN32) && CONFIG_OS_SUPPORT == 1
# include <unistd.h>
#endif
#include "onyxd_int.h"
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/threading.h"

#include "vp8/common/loopfilter.h"
#include "vp8/common/extend.h"
#include "vpx_ports/vpx_timer.h"
#include "detokenize.h"
#include "vp8/common/reconintra4x4.h"
#include "vp8/common/reconinter.h"
#include "vp8/common/setupintrarecon.h"
#if CONFIG_ERROR_CONCEALMENT
#include "error_concealment.h"
#endif

#define CALLOC_ARRAY(p, n) CHECK_MEM_ERROR((p), vpx_calloc(sizeof(*(p)), (n)))
#define CALLOC_ARRAY_ALIGNED(p, n, algn) do {                       \
    CHECK_MEM_ERROR((p), vpx_memalign((algn), sizeof(*(p)) * (n))); \
    memset((p), 0, (n) * sizeof(*(p)));                             \
} while (0)


void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd);

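/* Copy the decode state the worker threads need from the main thread's
 * MACROBLOCKD into each per-thread MB_ROW_DEC, and reset the per-row
 * progress markers (mt_current_mb_col) used by the row-sync protocol.
 * Called once per frame, before the worker threads are kicked off.
 */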
static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd, MB_ROW_DEC *mbrd, int count)
{
    VP8_COMMON *const pc = &pbi->common;
    int i;

    for (i = 0; i < count; i++)
    {
        MACROBLOCKD *mbd = &mbrd[i].mbd;
        mbd->subpixel_predict = xd->subpixel_predict;
        mbd->subpixel_predict8x4 = xd->subpixel_predict8x4;
        mbd->subpixel_predict8x8 = xd->subpixel_predict8x8;
        mbd->subpixel_predict16x16 = xd->subpixel_predict16x16;

        mbd->mode_info_context = pc->mi + pc->mode_info_stride * (i + 1);
        mbd->mode_info_stride = pc->mode_info_stride;

        mbd->frame_type = pc->frame_type;
        mbd->pre = xd->pre;
        mbd->dst = xd->dst;

        mbd->segmentation_enabled = xd->segmentation_enabled;
        mbd->mb_segement_abs_delta = xd->mb_segement_abs_delta;
        memcpy(mbd->segment_feature_data, xd->segment_feature_data, sizeof(xd->segment_feature_data));

        /*signed char ref_lf_deltas[MAX_REF_LF_DELTAS];*/
        memcpy(mbd->ref_lf_deltas, xd->ref_lf_deltas, sizeof(xd->ref_lf_deltas));
        /*signed char mode_lf_deltas[MAX_MODE_LF_DELTAS];*/
        memcpy(mbd->mode_lf_deltas, xd->mode_lf_deltas, sizeof(xd->mode_lf_deltas));
        /*unsigned char mode_ref_lf_delta_enabled;
        unsigned char mode_ref_lf_delta_update;*/
        mbd->mode_ref_lf_delta_enabled = xd->mode_ref_lf_delta_enabled;
        mbd->mode_ref_lf_delta_update = xd->mode_ref_lf_delta_update;

        mbd->current_bc = &pbi->mbc[0];

        memcpy(mbd->dequant_y1_dc, xd->dequant_y1_dc, sizeof(xd->dequant_y1_dc));
        memcpy(mbd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1));
        memcpy(mbd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2));
        memcpy(mbd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv));

        mbd->fullpixel_mask = 0xffffffff;

        if (pc->full_pixel)
            mbd->fullpixel_mask = 0xfffffff8;
    }

    for (i = 0; i < pc->mb_rows; i++)
        pbi->mt_current_mb_col[i] = -1;
}

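/* Decode and reconstruct a single macroblock: read (or skip) the residual
 * tokens, form the intra or inter prediction, then add the dequantized,
 * inverse-transformed residual. With CONFIG_ERROR_CONCEALMENT, corrupt
 * macroblocks fall back to prediction-only reconstruction.
 */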
static void mt_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
                                 unsigned int mb_idx)
{
    MB_PREDICTION_MODE mode;
    int i;
#if CONFIG_ERROR_CONCEALMENT
    int corruption_detected = 0;
#else
    (void)mb_idx;
#endif

    if (xd->mode_info_context->mbmi.mb_skip_coeff)
    {
        vp8_reset_mb_tokens_context(xd);
    }
    else if (!vp8dx_bool_error(xd->current_bc))
    {
        int eobtotal;
        eobtotal = vp8_decode_mb_tokens(pbi, xd);

        /* Special case: Force the loopfilter to skip when eobtotal is zero */
        xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal == 0);
    }

    mode = xd->mode_info_context->mbmi.mode;

    if (xd->segmentation_enabled)
        vp8_mb_init_dequantizer(pbi, xd);

#if CONFIG_ERROR_CONCEALMENT

    if (pbi->ec_active)
    {
        int throw_residual;
        /* When we have independent partitions we can apply residual even
         * though other partitions within the frame are corrupt.
         */
        throw_residual = (!pbi->independent_partitions &&
                          pbi->frame_corrupt_residual);
        throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));

        if (mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual)
        {
            /* MB with corrupt residuals or corrupt mode/motion vectors.
             * Better to use the predictor as reconstruction.
             */
            pbi->frame_corrupt_residual = 1;
            memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
            vp8_conceal_corrupt_mb(xd);

            corruption_detected = 1;

            /* force idct to be skipped for B_PRED and use the
             * prediction only for reconstruction
             */
            memset(xd->eobs, 0, 25);
        }
    }
#endif

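    /* When the loop filter is enabled, intra prediction must not read the
     * already-filtered pixels of neighboring macroblocks, so it sources its
     * above/left samples from the unfiltered copies kept per row in
     * xd->recon_above[] / xd->recon_left[] (filled from pbi->mt_*above_row
     * and pbi->mt_*left_col by mt_decode_mb_rows below).
     */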
    /* do prediction */
    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
    {
        vp8_build_intra_predictors_mbuv_s(xd,
                                          xd->recon_above[1],
                                          xd->recon_above[2],
                                          xd->recon_left[1],
                                          xd->recon_left[2],
                                          xd->recon_left_stride[1],
                                          xd->dst.u_buffer, xd->dst.v_buffer,
                                          xd->dst.uv_stride);

        if (mode != B_PRED)
        {
            vp8_build_intra_predictors_mby_s(xd,
                                             xd->recon_above[0],
                                             xd->recon_left[0],
                                             xd->recon_left_stride[0],
                                             xd->dst.y_buffer,
                                             xd->dst.y_stride);
        }
        else
        {
            short *DQC = xd->dequant_y1;
            int dst_stride = xd->dst.y_stride;

            /* clear out residual eob info */
            if (xd->mode_info_context->mbmi.mb_skip_coeff)
                memset(xd->eobs, 0, 25);

            intra_prediction_down_copy(xd, xd->recon_above[0] + 16);

            for (i = 0; i < 16; i++)
            {
                BLOCKD *b = &xd->block[i];
                unsigned char *dst = xd->dst.y_buffer + b->offset;
                B_PREDICTION_MODE b_mode =
                    xd->mode_info_context->bmi[i].as_mode;
                unsigned char *Above;
                unsigned char *yleft;
                int left_stride;
                unsigned char top_left;

                /* Caution: For some b_mode, it needs 8 pixels (4 above + 4 above-right). */
                if (i < 4 && pbi->common.filter_level)
                    Above = xd->recon_above[0] + b->offset;
                else
                    Above = dst - dst_stride;

                if (i % 4 == 0 && pbi->common.filter_level)
                {
                    yleft = xd->recon_left[0] + i;
                    left_stride = 1;
                }
                else
                {
                    yleft = dst - 1;
                    left_stride = dst_stride;
                }

                if ((i == 4 || i == 8 || i == 12) && pbi->common.filter_level)
                    top_left = *(xd->recon_left[0] + i - 1);
                else
                    top_left = Above[-1];

                vp8_intra4x4_predict(Above, yleft, left_stride,
                                     b_mode, dst, dst_stride, top_left);

                if (xd->eobs[i])
                {
                    if (xd->eobs[i] > 1)
                    {
                        vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride);
                    }
                    else
                    {
                        vp8_dc_only_idct_add(b->qcoeff[0] * DQC[0],
                                             dst, dst_stride, dst, dst_stride);
                        memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
                    }
                }
            }
        }
    }
    else
    {
        vp8_build_inter_predictors_mb(xd);
    }

#if CONFIG_ERROR_CONCEALMENT
    if (corruption_detected)
    {
        return;
    }
#endif

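    /* For non-B_PRED/non-SPLITMV modes the DC coefficients of the 16 Y
     * subblocks are coded separately in the Y2 block (block 24), so a 4x4
     * inverse Walsh-Hadamard transform scatters them back into xd->qcoeff
     * before the per-block IDCTs run.
     */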
    if (!xd->mode_info_context->mbmi.mb_skip_coeff)
    {
        /* dequantization and idct */
        if (mode != B_PRED)
        {
            short *DQC = xd->dequant_y1;

            if (mode != SPLITMV)
            {
                BLOCKD *b = &xd->block[24];

                /* do 2nd order transform on the dc block */
                if (xd->eobs[24] > 1)
                {
                    vp8_dequantize_b(b, xd->dequant_y2);

                    vp8_short_inv_walsh4x4(&b->dqcoeff[0],
                                           xd->qcoeff);
                    memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
                }
                else
                {
                    b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0];
                    vp8_short_inv_walsh4x4_1(&b->dqcoeff[0],
                                             xd->qcoeff);
                    memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
                }

                /* override the dc dequant constant in order to preserve the
                 * dc components
                 */
                DQC = xd->dequant_y1_dc;
            }

            vp8_dequant_idct_add_y_block
                (xd->qcoeff, DQC,
                 xd->dst.y_buffer,
                 xd->dst.y_stride, xd->eobs);
        }

        vp8_dequant_idct_add_uv_block
            (xd->qcoeff + 16 * 16, xd->dequant_uv,
             xd->dst.u_buffer, xd->dst.v_buffer,
             xd->dst.uv_stride, xd->eobs + 16);
    }
}

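/* Decode the macroblock rows assigned to one thread. Rows are interleaved
 * across the pool: the thread that starts at row r processes rows r,
 * r + (decoding_thread_count + 1), and so on, with the main thread taking
 * start_mb_row == 0. Each row waits on the row above via
 * pbi->mt_current_mb_col so that a macroblock never runs ahead of the
 * neighbors its prediction and loop filtering depend on.
 */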
static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd, int start_mb_row)
{
    volatile const int *last_row_current_mb_col;
    volatile int *current_mb_col;
    int mb_row;
    VP8_COMMON *pc = &pbi->common;
    const int nsync = pbi->sync_range;
    const int first_row_no_sync_above = pc->mb_cols + nsync;
    int num_part = 1 << pbi->common.multi_token_partition;
    int last_mb_row = start_mb_row;

    YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME];
    YV12_BUFFER_CONFIG *yv12_fb_lst = pbi->dec_fb_ref[LAST_FRAME];

    int recon_y_stride = yv12_fb_new->y_stride;
    int recon_uv_stride = yv12_fb_new->uv_stride;

    unsigned char *ref_buffer[MAX_REF_FRAMES][3];
    unsigned char *dst_buffer[3];
    int i;
    int ref_fb_corrupted[MAX_REF_FRAMES];

    ref_fb_corrupted[INTRA_FRAME] = 0;

    for (i = 1; i < MAX_REF_FRAMES; i++)
    {
        YV12_BUFFER_CONFIG *this_fb = pbi->dec_fb_ref[i];

        ref_buffer[i][0] = this_fb->y_buffer;
        ref_buffer[i][1] = this_fb->u_buffer;
        ref_buffer[i][2] = this_fb->v_buffer;

        ref_fb_corrupted[i] = this_fb->corrupted;
    }

    dst_buffer[0] = yv12_fb_new->y_buffer;
    dst_buffer[1] = yv12_fb_new->u_buffer;
    dst_buffer[2] = yv12_fb_new->v_buffer;

    xd->up_available = (start_mb_row != 0);

    for (mb_row = start_mb_row; mb_row < pc->mb_rows; mb_row += (pbi->decoding_thread_count + 1))
    {
        int recon_yoffset, recon_uvoffset;
        int mb_col;
        int filter_level;
        loop_filter_info_n *lfi_n = &pc->lf_info;

        /* save last row processed by this thread */
        last_mb_row = mb_row;
        /* select bool coder for current partition */
        xd->current_bc = &pbi->mbc[mb_row % num_part];

        if (mb_row > 0)
            last_row_current_mb_col = &pbi->mt_current_mb_col[mb_row - 1];
        else
            last_row_current_mb_col = &first_row_no_sync_above;

        current_mb_col = &pbi->mt_current_mb_col[mb_row];

        recon_yoffset = mb_row * recon_y_stride * 16;
        recon_uvoffset = mb_row * recon_uv_stride * 8;

        /* reset contexts */
        xd->above_context = pc->above_context;
        memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));

        xd->left_available = 0;

        xd->mb_to_top_edge = -((mb_row * 16) << 3);
        xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;

        if (pbi->common.filter_level)
        {
            xd->recon_above[0] = pbi->mt_yabove_row[mb_row] + 0 * 16 + 32;
            xd->recon_above[1] = pbi->mt_uabove_row[mb_row] + 0 * 8 + 16;
            xd->recon_above[2] = pbi->mt_vabove_row[mb_row] + 0 * 8 + 16;

            xd->recon_left[0] = pbi->mt_yleft_col[mb_row];
            xd->recon_left[1] = pbi->mt_uleft_col[mb_row];
            xd->recon_left[2] = pbi->mt_vleft_col[mb_row];

            /* TODO: move to outside row loop */
            xd->recon_left_stride[0] = 1;
            xd->recon_left_stride[1] = 1;
        }
        else
        {
            xd->recon_above[0] = dst_buffer[0] + recon_yoffset;
            xd->recon_above[1] = dst_buffer[1] + recon_uvoffset;
            xd->recon_above[2] = dst_buffer[2] + recon_uvoffset;

            xd->recon_left[0] = xd->recon_above[0] - 1;
            xd->recon_left[1] = xd->recon_above[1] - 1;
            xd->recon_left[2] = xd->recon_above[2] - 1;

            xd->recon_above[0] -= xd->dst.y_stride;
            xd->recon_above[1] -= xd->dst.uv_stride;
            xd->recon_above[2] -= xd->dst.uv_stride;

            /* TODO: move to outside row loop */
            xd->recon_left_stride[0] = xd->dst.y_stride;
            xd->recon_left_stride[1] = xd->dst.uv_stride;

            setup_intra_recon_left(xd->recon_left[0], xd->recon_left[1],
                                   xd->recon_left[2], xd->dst.y_stride,
                                   xd->dst.uv_stride);
        }

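        /* Top-sync: each row publishes its progress through *current_mb_col,
         * and before decoding a column (checked every nsync columns) this row
         * spins until the row above is at least nsync macroblocks ahead.
         * Progress is shared through plain volatile ints rather than C11
         * atomics; the start/end semaphores provide the heavier
         * synchronization at frame boundaries.
         */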
        for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
        {
            *current_mb_col = mb_col - 1;

            if ((mb_col & (nsync - 1)) == 0)
            {
                while (mb_col > (*last_row_current_mb_col - nsync))
                {
                    x86_pause_hint();
                    thread_sleep(0);
                }
            }

            /* Distance of MB to the various image edges.
             * These are specified to 8th pel as they are always
             * compared to values that are in 1/8th pel units.
             */
            xd->mb_to_left_edge = -((mb_col * 16) << 3);
            xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;

#if CONFIG_ERROR_CONCEALMENT
            {
                int corrupt_residual =
                            (!pbi->independent_partitions &&
                             pbi->frame_corrupt_residual) ||
                            vp8dx_bool_error(xd->current_bc);
                if (pbi->ec_active &&
                    (xd->mode_info_context->mbmi.ref_frame ==
                     INTRA_FRAME) &&
                    corrupt_residual)
                {
                    /* We have an intra block with corrupt
                     * coefficients, better to conceal with an inter
                     * block.
                     * Interpolate MVs from neighboring MBs
                     *
                     * Note that for the first mb with corrupt
                     * residual in a frame, we might not discover
                     * that before decoding the residual. That
                     * happens after this check, and therefore no
                     * inter concealment will be done.
                     */
                    vp8_interpolate_motion(xd,
                                           mb_row, mb_col,
                                           pc->mb_rows, pc->mb_cols,
                                           pc->mode_info_stride);
                }
            }
#endif

            xd->dst.y_buffer = dst_buffer[0] + recon_yoffset;
            xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset;
            xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset;

            xd->pre.y_buffer = ref_buffer[xd->mode_info_context->mbmi.ref_frame][0] + recon_yoffset;
            xd->pre.u_buffer = ref_buffer[xd->mode_info_context->mbmi.ref_frame][1] + recon_uvoffset;
            xd->pre.v_buffer = ref_buffer[xd->mode_info_context->mbmi.ref_frame][2] + recon_uvoffset;

            /* propagate errors from reference frames */
            xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame];

            mt_decode_macroblock(pbi, xd, 0);

            xd->left_available = 1;

            /* check if the boolean decoder has suffered an error */
            xd->corrupted |= vp8dx_bool_error(xd->current_bc);

            xd->recon_above[0] += 16;
            xd->recon_above[1] += 8;
            xd->recon_above[2] += 8;

            if (!pbi->common.filter_level)
            {
                xd->recon_left[0] += 16;
                xd->recon_left[1] += 8;
                xd->recon_left[2] += 8;
            }

            if (pbi->common.filter_level)
            {
                int skip_lf = (xd->mode_info_context->mbmi.mode != B_PRED &&
                               xd->mode_info_context->mbmi.mode != SPLITMV &&
                               xd->mode_info_context->mbmi.mb_skip_coeff);

                const int mode_index = lfi_n->mode_lf_lut[xd->mode_info_context->mbmi.mode];
                const int seg = xd->mode_info_context->mbmi.segment_id;
                const int ref_frame = xd->mode_info_context->mbmi.ref_frame;

                filter_level = lfi_n->lvl[seg][ref_frame][mode_index];

                if (mb_row != pc->mb_rows - 1)
                {
                    /* Save decoded MB last row data for next-row decoding */
                    memcpy((pbi->mt_yabove_row[mb_row + 1] + 32 + mb_col * 16), (xd->dst.y_buffer + 15 * recon_y_stride), 16);
                    memcpy((pbi->mt_uabove_row[mb_row + 1] + 16 + mb_col * 8), (xd->dst.u_buffer + 7 * recon_uv_stride), 8);
                    memcpy((pbi->mt_vabove_row[mb_row + 1] + 16 + mb_col * 8), (xd->dst.v_buffer + 7 * recon_uv_stride), 8);
                }

                /* save left_col for next MB decoding */
                if (mb_col != pc->mb_cols - 1)
                {
                    MODE_INFO *next = xd->mode_info_context + 1;

                    if (next->mbmi.ref_frame == INTRA_FRAME)
                    {
                        for (i = 0; i < 16; i++)
                            pbi->mt_yleft_col[mb_row][i] = xd->dst.y_buffer[i * recon_y_stride + 15];
                        for (i = 0; i < 8; i++)
                        {
                            pbi->mt_uleft_col[mb_row][i] = xd->dst.u_buffer[i * recon_uv_stride + 7];
                            pbi->mt_vleft_col[mb_row][i] = xd->dst.v_buffer[i * recon_uv_stride + 7];
                        }
                    }
                }

                /* loopfilter on this macroblock. */
                if (filter_level)
                {
                    if (pc->filter_type == NORMAL_LOOPFILTER)
                    {
                        loop_filter_info lfi;
                        FRAME_TYPE frame_type = pc->frame_type;
                        const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level];
                        lfi.mblim = lfi_n->mblim[filter_level];
                        lfi.blim = lfi_n->blim[filter_level];
                        lfi.lim = lfi_n->lim[filter_level];
                        lfi.hev_thr = lfi_n->hev_thr[hev_index];

                        if (mb_col > 0)
                            vp8_loop_filter_mbv
                            (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi);

                        if (!skip_lf)
                            vp8_loop_filter_bv
                            (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi);

                        /* don't apply across umv border */
                        if (mb_row > 0)
                            vp8_loop_filter_mbh
                            (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi);

                        if (!skip_lf)
                            vp8_loop_filter_bh
                            (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi);
                    }
                    else
                    {
                        if (mb_col > 0)
                            vp8_loop_filter_simple_mbv
                            (xd->dst.y_buffer, recon_y_stride, lfi_n->mblim[filter_level]);

                        if (!skip_lf)
                            vp8_loop_filter_simple_bv
                            (xd->dst.y_buffer, recon_y_stride, lfi_n->blim[filter_level]);

                        /* don't apply across umv border */
                        if (mb_row > 0)
                            vp8_loop_filter_simple_mbh
                            (xd->dst.y_buffer, recon_y_stride, lfi_n->mblim[filter_level]);

                        if (!skip_lf)
                            vp8_loop_filter_simple_bh
                            (xd->dst.y_buffer, recon_y_stride, lfi_n->blim[filter_level]);
                    }
                }
            }

            recon_yoffset += 16;
            recon_uvoffset += 8;

            ++xd->mode_info_context;  /* next mb */

            xd->above_context++;
        }

        /* adjust to the next row of mbs */
        if (pbi->common.filter_level)
        {
            if (mb_row != pc->mb_rows - 1)
            {
                int lasty = yv12_fb_lst->y_width + VP8BORDERINPIXELS;
                int lastuv = (yv12_fb_lst->y_width >> 1) + (VP8BORDERINPIXELS >> 1);

                for (i = 0; i < 4; i++)
                {
                    pbi->mt_yabove_row[mb_row + 1][lasty + i] = pbi->mt_yabove_row[mb_row + 1][lasty - 1];
                    pbi->mt_uabove_row[mb_row + 1][lastuv + i] = pbi->mt_uabove_row[mb_row + 1][lastuv - 1];
                    pbi->mt_vabove_row[mb_row + 1][lastuv + i] = pbi->mt_vabove_row[mb_row + 1][lastuv - 1];
                }
            }
        }
        else
            vp8_extend_mb_row(yv12_fb_new, xd->dst.y_buffer + 16,
                              xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);

        /* last MB of row is ready just after extension is done */
        *current_mb_col = mb_col + nsync;

        ++xd->mode_info_context;  /* skip prediction column */
        xd->up_available = 1;

        /* since we have multithread */
        xd->mode_info_context += xd->mode_info_stride * pbi->decoding_thread_count;
    }

    /* signal end of frame decoding if this thread processed the last mb_row */
    if (last_mb_row == (pc->mb_rows - 1))
        sem_post(&pbi->h_event_end_decoding);
}


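/* Worker thread entry point: block on the per-thread start semaphore, decode
 * this thread's share of macroblock rows, and loop until b_multithreaded_rd
 * is cleared by vp8_decoder_remove_threads().
 */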
static THREAD_FUNCTION thread_decoding_proc(void *p_data)
{
    int ithread = ((DECODETHREAD_DATA *)p_data)->ithread;
    VP8D_COMP *pbi = (VP8D_COMP *)(((DECODETHREAD_DATA *)p_data)->ptr1);
    MB_ROW_DEC *mbrd = (MB_ROW_DEC *)(((DECODETHREAD_DATA *)p_data)->ptr2);
    ENTROPY_CONTEXT_PLANES mb_row_left_context;

    while (1)
    {
        if (pbi->b_multithreaded_rd == 0)
            break;

        if (sem_wait(&pbi->h_event_start_decoding[ithread]) == 0)
        {
            if (pbi->b_multithreaded_rd == 0)
                break;
            else
            {
                MACROBLOCKD *xd = &mbrd->mbd;
                xd->left_context = &mb_row_left_context;

                mt_decode_mb_rows(pbi, xd, ithread + 1);
            }
        }
    }

    return 0;
}


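/* Spawn the decoding thread pool. The thread count is capped at eight (the
 * maximum number of token partitions) and at the detected core count; one
 * of those cores is taken by the calling thread itself, so core_count - 1
 * workers are created.
 */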
void vp8_decoder_create_threads(VP8D_COMP *pbi)
{
    int core_count = 0;
    unsigned int ithread;

    pbi->b_multithreaded_rd = 0;
    pbi->allocated_decoding_thread_count = 0;

    /* limit decoding threads to the max number of token partitions */
    core_count = (pbi->max_threads > 8) ? 8 : pbi->max_threads;

    /* limit decoding threads to the available cores */
    if (core_count > pbi->common.processor_core_count)
        core_count = pbi->common.processor_core_count;

    if (core_count > 1)
    {
        pbi->b_multithreaded_rd = 1;
        pbi->decoding_thread_count = core_count - 1;

        CALLOC_ARRAY(pbi->h_decoding_thread, pbi->decoding_thread_count);
        CALLOC_ARRAY(pbi->h_event_start_decoding, pbi->decoding_thread_count);
        CALLOC_ARRAY_ALIGNED(pbi->mb_row_di, pbi->decoding_thread_count, 32);
        CALLOC_ARRAY(pbi->de_thread_data, pbi->decoding_thread_count);

        for (ithread = 0; ithread < pbi->decoding_thread_count; ithread++)
        {
            sem_init(&pbi->h_event_start_decoding[ithread], 0, 0);

            vp8_setup_block_dptrs(&pbi->mb_row_di[ithread].mbd);

            pbi->de_thread_data[ithread].ithread = ithread;
            pbi->de_thread_data[ithread].ptr1 = (void *)pbi;
            pbi->de_thread_data[ithread].ptr2 = (void *)&pbi->mb_row_di[ithread];

            pthread_create(&pbi->h_decoding_thread[ithread], 0, thread_decoding_proc, (&pbi->de_thread_data[ithread]));
        }

        sem_init(&pbi->h_event_end_decoding, 0, 0);

        pbi->allocated_decoding_thread_count = pbi->decoding_thread_count;
    }
}
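
/* Rough lifecycle of this module, as a sketch only (the actual wiring lives
 * in the decoder's create/decode/remove paths elsewhere in the codebase):
 *
 *     vp8_decoder_create_threads(pbi);               // once, at init
 *     vp8mt_alloc_temp_buffers(pbi, width, 0);       // on (re)size
 *     ...
 *     vp8mt_decode_mb_rows(pbi, xd);                 // per frame
 *     ...
 *     vp8mt_de_alloc_temp_buffers(pbi, pc->mb_rows); // on resize/teardown
 *     vp8_decoder_remove_threads(pbi);               // once, at shutdown
 */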
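/* Free the per-row sync and above/left-pixel buffers. mb_rows must be the
 * row count the buffers were allocated with (the previous frame's size),
 * not the current frame's.
 */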
void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows)
{
    int i;

    if (pbi->b_multithreaded_rd)
    {
        vpx_free(pbi->mt_current_mb_col);
        pbi->mt_current_mb_col = NULL;

        /* Free above_row buffers. */
        if (pbi->mt_yabove_row)
        {
            for (i = 0; i < mb_rows; i++)
            {
                vpx_free(pbi->mt_yabove_row[i]);
                pbi->mt_yabove_row[i] = NULL;
            }
            vpx_free(pbi->mt_yabove_row);
            pbi->mt_yabove_row = NULL;
        }

        if (pbi->mt_uabove_row)
        {
            for (i = 0; i < mb_rows; i++)
            {
                vpx_free(pbi->mt_uabove_row[i]);
                pbi->mt_uabove_row[i] = NULL;
            }
            vpx_free(pbi->mt_uabove_row);
            pbi->mt_uabove_row = NULL;
        }

        if (pbi->mt_vabove_row)
        {
            for (i = 0; i < mb_rows; i++)
            {
                vpx_free(pbi->mt_vabove_row[i]);
                pbi->mt_vabove_row[i] = NULL;
            }
            vpx_free(pbi->mt_vabove_row);
            pbi->mt_vabove_row = NULL;
        }

        /* Free left_col buffers. */
        if (pbi->mt_yleft_col)
        {
            for (i = 0; i < mb_rows; i++)
            {
                vpx_free(pbi->mt_yleft_col[i]);
                pbi->mt_yleft_col[i] = NULL;
            }
            vpx_free(pbi->mt_yleft_col);
            pbi->mt_yleft_col = NULL;
        }

        if (pbi->mt_uleft_col)
        {
            for (i = 0; i < mb_rows; i++)
            {
                vpx_free(pbi->mt_uleft_col[i]);
                pbi->mt_uleft_col[i] = NULL;
            }
            vpx_free(pbi->mt_uleft_col);
            pbi->mt_uleft_col = NULL;
        }

        if (pbi->mt_vleft_col)
        {
            for (i = 0; i < mb_rows; i++)
            {
                vpx_free(pbi->mt_vleft_col[i]);
                pbi->mt_vleft_col[i] = NULL;
            }
            vpx_free(pbi->mt_vleft_col);
            pbi->mt_vleft_col = NULL;
        }
    }
}

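/* (Re)allocate the per-row buffers for the current frame size. sync_range
 * (how far ahead the row above must be before this row proceeds) is chosen
 * from the frame width and must be a power of two, since the sync check in
 * mt_decode_mb_rows() masks mb_col with (sync_range - 1).
 */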
void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows)
{
    VP8_COMMON *const pc = &pbi->common;
    int i;
    int uv_width;

    if (pbi->b_multithreaded_rd)
    {
        vp8mt_de_alloc_temp_buffers(pbi, prev_mb_rows);

        /* our internal buffers are always multiples of 16 */
        if ((width & 0xf) != 0)
            width += 16 - (width & 0xf);

        if (width < 640) pbi->sync_range = 1;
        else if (width <= 1280) pbi->sync_range = 8;
        else if (width <= 2560) pbi->sync_range = 16;
        else pbi->sync_range = 32;

        uv_width = width >> 1;

        /* Allocate an int for each mb row. */
        CALLOC_ARRAY(pbi->mt_current_mb_col, pc->mb_rows);

        /* Allocate memory for above_row buffers. */
        CALLOC_ARRAY(pbi->mt_yabove_row, pc->mb_rows);
        for (i = 0; i < pc->mb_rows; i++)
            CHECK_MEM_ERROR(pbi->mt_yabove_row[i], vpx_memalign(16, sizeof(unsigned char) * (width + (VP8BORDERINPIXELS << 1))));

        CALLOC_ARRAY(pbi->mt_uabove_row, pc->mb_rows);
        for (i = 0; i < pc->mb_rows; i++)
            CHECK_MEM_ERROR(pbi->mt_uabove_row[i], vpx_memalign(16, sizeof(unsigned char) * (uv_width + VP8BORDERINPIXELS)));

        CALLOC_ARRAY(pbi->mt_vabove_row, pc->mb_rows);
        for (i = 0; i < pc->mb_rows; i++)
            CHECK_MEM_ERROR(pbi->mt_vabove_row[i], vpx_memalign(16, sizeof(unsigned char) * (uv_width + VP8BORDERINPIXELS)));

        /* Allocate memory for left_col buffers. */
        CALLOC_ARRAY(pbi->mt_yleft_col, pc->mb_rows);
        for (i = 0; i < pc->mb_rows; i++)
            CHECK_MEM_ERROR(pbi->mt_yleft_col[i], vpx_calloc(sizeof(unsigned char) * 16, 1));

        CALLOC_ARRAY(pbi->mt_uleft_col, pc->mb_rows);
        for (i = 0; i < pc->mb_rows; i++)
            CHECK_MEM_ERROR(pbi->mt_uleft_col[i], vpx_calloc(sizeof(unsigned char) * 8, 1));

        CALLOC_ARRAY(pbi->mt_vleft_col, pc->mb_rows);
        for (i = 0; i < pc->mb_rows; i++)
            CHECK_MEM_ERROR(pbi->mt_vleft_col[i], vpx_calloc(sizeof(unsigned char) * 8, 1));
    }
}

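/* Shut down the thread pool: clear b_multithreaded_rd first, then post each
 * start semaphore so blocked workers wake, observe the cleared flag, and
 * exit before being joined.
 */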
void vp8_decoder_remove_threads(VP8D_COMP *pbi)
{
    /* shutdown MB Decoding thread; */
    if (pbi->b_multithreaded_rd)
    {
        int i;

        pbi->b_multithreaded_rd = 0;

        /* allow all threads to exit */
        for (i = 0; i < pbi->allocated_decoding_thread_count; i++)
        {
            sem_post(&pbi->h_event_start_decoding[i]);
            pthread_join(pbi->h_decoding_thread[i], NULL);
        }

        for (i = 0; i < pbi->allocated_decoding_thread_count; i++)
        {
            sem_destroy(&pbi->h_event_start_decoding[i]);
        }

        sem_destroy(&pbi->h_event_end_decoding);

        vpx_free(pbi->h_decoding_thread);
        pbi->h_decoding_thread = NULL;

        vpx_free(pbi->h_event_start_decoding);
        pbi->h_event_start_decoding = NULL;

        vpx_free(pbi->mb_row_di);
        pbi->mb_row_di = NULL;

        vpx_free(pbi->de_thread_data);
        pbi->de_thread_data = NULL;
    }
}

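/* Top-level multithreaded row decode for one frame. When the loop filter is
 * on, the saved above-row/left-column buffers are seeded with the standard
 * VP8 intra-prediction edge values (127 above, 129 left/top-left, matching
 * the border convention of the single-threaded path), the workers are
 * started, and the main thread decodes row 0's interleaved share before
 * waiting on the end-of-decoding semaphore.
 */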
vp8mt_decode_mb_rows(VP8D_COMP * pbi,MACROBLOCKD * xd)867 void vp8mt_decode_mb_rows( VP8D_COMP *pbi, MACROBLOCKD *xd)
868 {
869 VP8_COMMON *pc = &pbi->common;
870 unsigned int i;
871 int j;
872
873 int filter_level = pc->filter_level;
874 YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME];
875
876 if (filter_level)
877 {
878 /* Set above_row buffer to 127 for decoding first MB row */
879 memset(pbi->mt_yabove_row[0] + VP8BORDERINPIXELS-1, 127, yv12_fb_new->y_width + 5);
880 memset(pbi->mt_uabove_row[0] + (VP8BORDERINPIXELS>>1)-1, 127, (yv12_fb_new->y_width>>1) +5);
881 memset(pbi->mt_vabove_row[0] + (VP8BORDERINPIXELS>>1)-1, 127, (yv12_fb_new->y_width>>1) +5);
882
883 for (j=1; j<pc->mb_rows; j++)
884 {
885 memset(pbi->mt_yabove_row[j] + VP8BORDERINPIXELS-1, (unsigned char)129, 1);
886 memset(pbi->mt_uabove_row[j] + (VP8BORDERINPIXELS>>1)-1, (unsigned char)129, 1);
887 memset(pbi->mt_vabove_row[j] + (VP8BORDERINPIXELS>>1)-1, (unsigned char)129, 1);
888 }
889
890 /* Set left_col to 129 initially */
891 for (j=0; j<pc->mb_rows; j++)
892 {
893 memset(pbi->mt_yleft_col[j], (unsigned char)129, 16);
894 memset(pbi->mt_uleft_col[j], (unsigned char)129, 8);
895 memset(pbi->mt_vleft_col[j], (unsigned char)129, 8);
896 }
897
898 /* Initialize the loop filter for this frame. */
899 vp8_loop_filter_frame_init(pc, &pbi->mb, filter_level);
900 }
901 else
902 vp8_setup_intra_recon_top_line(yv12_fb_new);
903
904 setup_decoding_thread_data(pbi, xd, pbi->mb_row_di, pbi->decoding_thread_count);
905
906 for (i = 0; i < pbi->decoding_thread_count; i++)
907 sem_post(&pbi->h_event_start_decoding[i]);
908
909 mt_decode_mb_rows(pbi, xd, 0);
910
911 sem_wait(&pbi->h_event_end_decoding); /* add back for each frame */
912 }
913