1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11
12 #include "vpx_ports/config.h"
13 #include "recon.h"
14 #include "reconintra.h"
15 #include "vpx_mem/vpx_mem.h"
16 #include "onyxd_int.h"
17
18 /* For skip_recon_mb(), add vp8_build_intra_predictors_mby_s(MACROBLOCKD *x) and
19 * vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x).
20 */
21
/* Build the 16x16 luma intra prediction for one macroblock into
 * x->predictor (16-byte row stride).  Multithread variant: when the
 * loop filter is enabled, the above-row / left-column reference pixels
 * are taken from per-row scratch buffers (pbi->mt_yabove_row /
 * pbi->mt_yleft_col) holding unfiltered copies, because the
 * neighbouring frame-buffer pixels may already have been filtered by
 * another thread.
 */
void vp8mt_build_intra_predictors_mby(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
{
#if CONFIG_MULTITHREAD
    unsigned char *yabove_row;  /* 16 reference pixels above this MB */
    unsigned char *yleft_col;   /* 16 reference pixels left of this MB */
    unsigned char yleft_buf[16];
    unsigned char ytop_left;    /* reference pixel above-left of this MB */
    unsigned char *ypred_ptr = x->predictor;
    int r, c, i;

    if (pbi->common.filter_level)
    {
        yabove_row = pbi->mt_yabove_row[mb_row] + mb_col*16 +32;
        yleft_col = pbi->mt_yleft_col[mb_row];
    }
    else
    {
        yabove_row = x->dst.y_buffer - x->dst.y_stride;

        /* Gather the left column directly from the reconstructed frame. */
        for (i = 0; i < 16; i++)
            yleft_buf[i] = x->dst.y_buffer[i * x->dst.y_stride - 1];
        yleft_col = yleft_buf;
    }

    ytop_left = yabove_row[-1];

    /* for Y */
    switch (x->mode_info_context->mbmi.mode)
    {
    case DC_PRED:
    {
        int expected_dc;
        int shift;
        int average = 0;

        if (x->up_available || x->left_available)
        {
            if (x->up_available)
            {
                for (i = 0; i < 16; i++)
                {
                    average += yabove_row[i];
                }
            }

            if (x->left_available)
            {
                for (i = 0; i < 16; i++)
                {
                    average += yleft_col[i];
                }
            }

            /* Round-to-nearest mean of the available border pixels. */
            shift = 3 + x->up_available + x->left_available;
            expected_dc = (average + (1 << (shift - 1))) >> shift;
        }
        else
        {
            expected_dc = 128;  /* no neighbours available: mid-grey */
        }

        vpx_memset(ypred_ptr, expected_dc, 256);
    }
    break;
    case V_PRED:
    {
        /* Replicate the above row into all 16 predictor rows.  Use
         * vpx_memcpy instead of the previous 32-bit loads/stores:
         * casting unsigned char* to int* violated strict aliasing and
         * assumed 4-byte alignment that is not guaranteed here.
         */
        for (r = 0; r < 16; r++)
        {
            vpx_memcpy(ypred_ptr, yabove_row, 16);
            ypred_ptr += 16;
        }
    }
    break;
    case H_PRED:
    {
        /* Replicate each left-column pixel across its predictor row. */
        for (r = 0; r < 16; r++)
        {
            vpx_memset(ypred_ptr, yleft_col[r], 16);
            ypred_ptr += 16;
        }
    }
    break;
    case TM_PRED:
    {
        /* True-motion: left + above - top_left, clamped to [0,255]. */
        for (r = 0; r < 16; r++)
        {
            for (c = 0; c < 16; c++)
            {
                int pred = yleft_col[r] + yabove_row[c] - ytop_left;

                if (pred < 0)
                    pred = 0;

                if (pred > 255)
                    pred = 255;

                ypred_ptr[c] = pred;
            }

            ypred_ptr += 16;
        }
    }
    break;
    case B_PRED:
    case NEARESTMV:
    case NEARMV:
    case ZEROMV:
    case NEWMV:
    case SPLITMV:
    case MB_MODE_COUNT:
        /* Not whole-MB intra modes: nothing to build here. */
        break;
    }
#else
    (void) pbi;
    (void) x;
    (void) mb_row;
    (void) mb_col;
#endif
}
156
/* Same as vp8mt_build_intra_predictors_mby(), but writes the 16x16
 * luma prediction straight into the destination frame buffer
 * (x->dst.y_buffer, stride x->dst.y_stride) instead of x->predictor.
 * Used on the skip-recon path where no residual is added.
 */
void vp8mt_build_intra_predictors_mby_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
{
#if CONFIG_MULTITHREAD
    unsigned char *yabove_row;  /* 16 reference pixels above this MB */
    unsigned char *yleft_col;   /* 16 reference pixels left of this MB */
    unsigned char yleft_buf[16];
    unsigned char ytop_left;    /* reference pixel above-left of this MB */
    /* Write directly to the frame buffer (was redundantly initialized
     * to x->predictor and then reassigned). */
    unsigned char *ypred_ptr = x->dst.y_buffer;
    int y_stride = x->dst.y_stride;
    int r, c, i;

    if (pbi->common.filter_level)
    {
        /* Loop filter enabled: use the saved unfiltered border pixels. */
        yabove_row = pbi->mt_yabove_row[mb_row] + mb_col*16 +32;
        yleft_col = pbi->mt_yleft_col[mb_row];
    }
    else
    {
        yabove_row = x->dst.y_buffer - x->dst.y_stride;

        for (i = 0; i < 16; i++)
            yleft_buf[i] = x->dst.y_buffer[i * x->dst.y_stride - 1];
        yleft_col = yleft_buf;
    }

    ytop_left = yabove_row[-1];

    /* for Y */
    switch (x->mode_info_context->mbmi.mode)
    {
    case DC_PRED:
    {
        int expected_dc;
        int shift;
        int average = 0;

        if (x->up_available || x->left_available)
        {
            if (x->up_available)
            {
                for (i = 0; i < 16; i++)
                {
                    average += yabove_row[i];
                }
            }

            if (x->left_available)
            {
                for (i = 0; i < 16; i++)
                {
                    average += yleft_col[i];
                }
            }

            /* Round-to-nearest mean of the available border pixels. */
            shift = 3 + x->up_available + x->left_available;
            expected_dc = (average + (1 << (shift - 1))) >> shift;
        }
        else
        {
            expected_dc = 128;  /* no neighbours available: mid-grey */
        }

        for (r = 0; r < 16; r++)
        {
            vpx_memset(ypred_ptr, expected_dc, 16);
            ypred_ptr += y_stride;
        }
    }
    break;
    case V_PRED:
    {
        /* Replicate the above row into each destination row.  Use
         * vpx_memcpy instead of 32-bit loads/stores: the int* casts
         * violated strict aliasing, and frame-buffer rows at an
         * arbitrary stride offset are not guaranteed 4-byte aligned.
         */
        for (r = 0; r < 16; r++)
        {
            vpx_memcpy(ypred_ptr, yabove_row, 16);
            ypred_ptr += y_stride;
        }
    }
    break;
    case H_PRED:
    {
        /* Replicate each left-column pixel across its destination row. */
        for (r = 0; r < 16; r++)
        {
            vpx_memset(ypred_ptr, yleft_col[r], 16);
            ypred_ptr += y_stride;
        }
    }
    break;
    case TM_PRED:
    {
        /* True-motion: left + above - top_left, clamped to [0,255]. */
        for (r = 0; r < 16; r++)
        {
            for (c = 0; c < 16; c++)
            {
                int pred = yleft_col[r] + yabove_row[c] - ytop_left;

                if (pred < 0)
                    pred = 0;

                if (pred > 255)
                    pred = 255;

                ypred_ptr[c] = pred;
            }

            ypred_ptr += y_stride;
        }
    }
    break;
    case B_PRED:
    case NEARESTMV:
    case NEARMV:
    case ZEROMV:
    case NEWMV:
    case SPLITMV:
    case MB_MODE_COUNT:
        /* Not whole-MB intra modes: nothing to build here. */
        break;
    }
#else
    (void) pbi;
    (void) x;
    (void) mb_row;
    (void) mb_col;
#endif
}
299
/* Build the two 8x8 chroma intra predictions for one macroblock into
 * x->predictor[256] (U) and x->predictor[320] (V), each with an 8-byte
 * row stride.  Multithread variant: with the loop filter enabled, the
 * above/left reference pixels are read from the per-row scratch
 * buffers (pbi->mt_*above_row / pbi->mt_*left_col) that hold
 * unfiltered copies of the neighbouring rows.
 */
void vp8mt_build_intra_predictors_mbuv(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
{
#if CONFIG_MULTITHREAD
    unsigned char *uabove_row;      /* 8 U pixels above this MB */
    unsigned char *vabove_row;      /* 8 V pixels above this MB */
    unsigned char *uleft_col;       /* 8 U pixels left of this MB */
    unsigned char *vleft_col;       /* 8 V pixels left of this MB */
    unsigned char uleft_buf[8];
    unsigned char vleft_buf[8];
    unsigned char utop_left;        /* U pixel above-left of this MB */
    unsigned char vtop_left;        /* V pixel above-left of this MB */
    unsigned char *upred_ptr = &x->predictor[256];
    unsigned char *vpred_ptr = &x->predictor[320];
    int row, col;

    if (pbi->common.filter_level)
    {
        /* Loop filter enabled: use the saved unfiltered border pixels. */
        uabove_row = pbi->mt_uabove_row[mb_row] + mb_col*8 +16;
        vabove_row = pbi->mt_vabove_row[mb_row] + mb_col*8 +16;
        uleft_col = pbi->mt_uleft_col[mb_row];
        vleft_col = pbi->mt_vleft_col[mb_row];
    }
    else
    {
        uabove_row = x->dst.u_buffer - x->dst.uv_stride;
        vabove_row = x->dst.v_buffer - x->dst.uv_stride;

        /* Gather both left columns from the reconstructed frame. */
        for (row = 0; row < 8; row++)
        {
            uleft_buf[row] = x->dst.u_buffer[row * x->dst.uv_stride - 1];
            vleft_buf[row] = x->dst.v_buffer[row * x->dst.uv_stride - 1];
        }

        uleft_col = uleft_buf;
        vleft_col = vleft_buf;
    }

    utop_left = uabove_row[-1];
    vtop_left = vabove_row[-1];

    switch (x->mode_info_context->mbmi.uv_mode)
    {
    case DC_PRED:
    {
        int sum_u = 0;
        int sum_v = 0;
        int dc_u = 128;     /* default when no neighbours available */
        int dc_v = 128;

        if (x->up_available || x->left_available)
        {
            int shift = 2 + x->up_available + x->left_available;

            if (x->up_available)
            {
                for (col = 0; col < 8; col++)
                {
                    sum_u += uabove_row[col];
                    sum_v += vabove_row[col];
                }
            }

            if (x->left_available)
            {
                for (row = 0; row < 8; row++)
                {
                    sum_u += uleft_col[row];
                    sum_v += vleft_col[row];
                }
            }

            /* Round-to-nearest mean of the available border pixels. */
            dc_u = (sum_u + (1 << (shift - 1))) >> shift;
            dc_v = (sum_v + (1 << (shift - 1))) >> shift;
        }

        vpx_memset(upred_ptr, dc_u, 64);
        vpx_memset(vpred_ptr, dc_v, 64);
    }
    break;
    case V_PRED:
    {
        /* Replicate the above rows down all 8 predictor rows. */
        for (row = 0; row < 8; row++)
        {
            vpx_memcpy(upred_ptr, uabove_row, 8);
            vpx_memcpy(vpred_ptr, vabove_row, 8);
            upred_ptr += 8;
            vpred_ptr += 8;
        }
    }
    break;
    case H_PRED:
    {
        /* Replicate each left pixel across its predictor row. */
        for (row = 0; row < 8; row++)
        {
            vpx_memset(upred_ptr, uleft_col[row], 8);
            vpx_memset(vpred_ptr, vleft_col[row], 8);
            upred_ptr += 8;
            vpred_ptr += 8;
        }
    }
    break;
    case TM_PRED:
    {
        /* True-motion: left + above - top_left, clamped to [0,255]. */
        for (row = 0; row < 8; row++)
        {
            for (col = 0; col < 8; col++)
            {
                int predu = uleft_col[row] + uabove_row[col] - utop_left;
                int predv = vleft_col[row] + vabove_row[col] - vtop_left;

                if (predu < 0)
                    predu = 0;
                if (predu > 255)
                    predu = 255;

                if (predv < 0)
                    predv = 0;
                if (predv > 255)
                    predv = 255;

                upred_ptr[col] = predu;
                vpred_ptr[col] = predv;
            }

            upred_ptr += 8;
            vpred_ptr += 8;
        }
    }
    break;
    case B_PRED:
    case NEARESTMV:
    case NEARMV:
    case ZEROMV:
    case NEWMV:
    case SPLITMV:
    case MB_MODE_COUNT:
        /* Not chroma intra modes: nothing to build here. */
        break;
    }
#else
    (void) pbi;
    (void) x;
    (void) mb_row;
    (void) mb_col;
#endif
}
462
/* Same as vp8mt_build_intra_predictors_mbuv(), but writes the 8x8
 * chroma predictions straight into the destination frame buffers
 * (x->dst.u_buffer / x->dst.v_buffer, stride x->dst.uv_stride) instead
 * of x->predictor.  Used on the skip-recon path.
 */
void vp8mt_build_intra_predictors_mbuv_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
{
#if CONFIG_MULTITHREAD
    unsigned char *uabove_row;      /* 8 U pixels above this MB */
    unsigned char *vabove_row;      /* 8 V pixels above this MB */
    unsigned char *uleft_col;       /* 8 U pixels left of this MB */
    unsigned char *vleft_col;       /* 8 V pixels left of this MB */
    unsigned char uleft_buf[8];
    unsigned char vleft_buf[8];
    unsigned char utop_left;        /* U pixel above-left of this MB */
    unsigned char vtop_left;        /* V pixel above-left of this MB */
    unsigned char *upred_ptr = x->dst.u_buffer;
    unsigned char *vpred_ptr = x->dst.v_buffer;
    int uv_stride = x->dst.uv_stride;
    int row, col;

    if (pbi->common.filter_level)
    {
        /* Loop filter enabled: use the saved unfiltered border pixels. */
        uabove_row = pbi->mt_uabove_row[mb_row] + mb_col*8 +16;
        vabove_row = pbi->mt_vabove_row[mb_row] + mb_col*8 +16;
        uleft_col = pbi->mt_uleft_col[mb_row];
        vleft_col = pbi->mt_vleft_col[mb_row];
    }
    else
    {
        uabove_row = x->dst.u_buffer - x->dst.uv_stride;
        vabove_row = x->dst.v_buffer - x->dst.uv_stride;

        /* Gather both left columns from the reconstructed frame. */
        for (row = 0; row < 8; row++)
        {
            uleft_buf[row] = x->dst.u_buffer[row * x->dst.uv_stride - 1];
            vleft_buf[row] = x->dst.v_buffer[row * x->dst.uv_stride - 1];
        }

        uleft_col = uleft_buf;
        vleft_col = vleft_buf;
    }

    utop_left = uabove_row[-1];
    vtop_left = vabove_row[-1];

    switch (x->mode_info_context->mbmi.uv_mode)
    {
    case DC_PRED:
    {
        int sum_u = 0;
        int sum_v = 0;
        int dc_u = 128;     /* default when no neighbours available */
        int dc_v = 128;

        if (x->up_available || x->left_available)
        {
            int shift = 2 + x->up_available + x->left_available;

            if (x->up_available)
            {
                for (col = 0; col < 8; col++)
                {
                    sum_u += uabove_row[col];
                    sum_v += vabove_row[col];
                }
            }

            if (x->left_available)
            {
                for (row = 0; row < 8; row++)
                {
                    sum_u += uleft_col[row];
                    sum_v += vleft_col[row];
                }
            }

            /* Round-to-nearest mean of the available border pixels. */
            dc_u = (sum_u + (1 << (shift - 1))) >> shift;
            dc_v = (sum_v + (1 << (shift - 1))) >> shift;
        }

        for (row = 0; row < 8; row++)
        {
            vpx_memset(upred_ptr, dc_u, 8);
            vpx_memset(vpred_ptr, dc_v, 8);
            upred_ptr += uv_stride;
            vpred_ptr += uv_stride;
        }
    }
    break;
    case V_PRED:
    {
        /* Replicate the above rows down all 8 destination rows. */
        for (row = 0; row < 8; row++)
        {
            vpx_memcpy(upred_ptr, uabove_row, 8);
            vpx_memcpy(vpred_ptr, vabove_row, 8);
            upred_ptr += uv_stride;
            vpred_ptr += uv_stride;
        }
    }
    break;
    case H_PRED:
    {
        /* Replicate each left pixel across its destination row. */
        for (row = 0; row < 8; row++)
        {
            vpx_memset(upred_ptr, uleft_col[row], 8);
            vpx_memset(vpred_ptr, vleft_col[row], 8);
            upred_ptr += uv_stride;
            vpred_ptr += uv_stride;
        }
    }
    break;
    case TM_PRED:
    {
        /* True-motion: left + above - top_left, clamped to [0,255]. */
        for (row = 0; row < 8; row++)
        {
            for (col = 0; col < 8; col++)
            {
                int predu = uleft_col[row] + uabove_row[col] - utop_left;
                int predv = vleft_col[row] + vabove_row[col] - vtop_left;

                if (predu < 0)
                    predu = 0;
                if (predu > 255)
                    predu = 255;

                if (predv < 0)
                    predv = 0;
                if (predv > 255)
                    predv = 255;

                upred_ptr[col] = predu;
                vpred_ptr[col] = predv;
            }

            upred_ptr += uv_stride;
            vpred_ptr += uv_stride;
        }
    }
    break;
    case B_PRED:
    case NEARESTMV:
    case NEARMV:
    case ZEROMV:
    case NEWMV:
    case SPLITMV:
    case MB_MODE_COUNT:
        /* Not chroma intra modes: nothing to build here. */
        break;
    }
#else
    (void) pbi;
    (void) x;
    (void) mb_row;
    (void) mb_col;
#endif
}
631
632
/* Build one 4x4 luma sub-block intra prediction into `predictor`
 * (16-byte row stride).  `num` is the sub-block index 0..15 in raster
 * order within the macroblock; `b_mode` is the B_PREDICTION_MODE.
 * Multithread variant: sub-blocks on the MB's top row / left column
 * read their reference pixels from the per-row scratch buffers
 * (pbi->mt_yabove_row / pbi->mt_yleft_col) when loop filtering is
 * enabled, since the neighbouring frame pixels may already be filtered.
 */
void vp8mt_predict_intra4x4(VP8D_COMP *pbi,
                            MACROBLOCKD *xd,
                            int b_mode,
                            unsigned char *predictor,
                            int mb_row,
                            int mb_col,
                            int num)
{
#if CONFIG_MULTITHREAD
    int i, r, c;

    unsigned char *Above; /* = *(x->base_dst) + x->dst - x->dst_stride; */
    unsigned char Left[4];
    unsigned char top_left; /* = Above[-1]; */

    BLOCKD *x = &xd->block[num];

    /*Caution: For some b_mode, it needs 8 pixels (4 above + 4 above-right).*/
    /* Top-row sub-blocks (num < 4) use the saved unfiltered above row. */
    if (num < 4 && pbi->common.filter_level)
        Above = pbi->mt_yabove_row[mb_row] + mb_col*16 + num*4 + 32;
    else
        Above = *(x->base_dst) + x->dst - x->dst_stride;

    /* Left-column sub-blocks (num % 4 == 0) use the saved unfiltered
     * left column. */
    if (num%4==0 && pbi->common.filter_level)
    {
        for (i=0; i<4; i++)
            Left[i] = pbi->mt_yleft_col[mb_row][num + i];
    }else
    {
        Left[0] = (*(x->base_dst))[x->dst - 1];
        Left[1] = (*(x->base_dst))[x->dst - 1 + x->dst_stride];
        Left[2] = (*(x->base_dst))[x->dst - 1 + 2 * x->dst_stride];
        Left[3] = (*(x->base_dst))[x->dst - 1 + 3 * x->dst_stride];
    }

    /* For left-column sub-blocks below the top row, the above-left
     * pixel is the saved column entry just above this block. */
    if ((num==4 || num==8 || num==12) && pbi->common.filter_level)
        top_left = pbi->mt_yleft_col[mb_row][num-1];
    else
        top_left = Above[-1];

    switch (b_mode)
    {
    case B_DC_PRED:
    {
        /* DC: fill with the rounded mean of the 4 above + 4 left pixels. */
        int expected_dc = 0;

        for (i = 0; i < 4; i++)
        {
            expected_dc += Above[i];
            expected_dc += Left[i];
        }

        expected_dc = (expected_dc + 4) >> 3;

        for (r = 0; r < 4; r++)
        {
            for (c = 0; c < 4; c++)
            {
                predictor[c] = expected_dc;
            }

            predictor += 16;
        }
    }
    break;
    case B_TM_PRED:
    {
        /* prediction similar to true_motion prediction */
        /* above + left - top_left, clamped to [0,255]. */
        for (r = 0; r < 4; r++)
        {
            for (c = 0; c < 4; c++)
            {
                int pred = Above[c] - top_left + Left[r];

                if (pred < 0)
                    pred = 0;

                if (pred > 255)
                    pred = 255;

                predictor[c] = pred;
            }

            predictor += 16;
        }
    }
    break;

    case B_VE_PRED:
    {
        /* Vertical: each column filled with a 1-2-1 smoothed above
         * pixel.  ap[3] reads Above[4] -- the above-right pixel
         * provided by vp8mt_intra_prediction_down_copy(). */
        unsigned int ap[4];
        ap[0] = (top_left + 2 * Above[0] + Above[1] + 2) >> 2;
        ap[1] = (Above[0] + 2 * Above[1] + Above[2] + 2) >> 2;
        ap[2] = (Above[1] + 2 * Above[2] + Above[3] + 2) >> 2;
        ap[3] = (Above[2] + 2 * Above[3] + Above[4] + 2) >> 2;

        for (r = 0; r < 4; r++)
        {
            for (c = 0; c < 4; c++)
            {

                predictor[c] = ap[c];
            }

            predictor += 16;
        }

    }
    break;


    case B_HE_PRED:
    {
        /* Horizontal: each row filled with a 1-2-1 smoothed left pixel
         * (last tap reuses Left[3], the bottom-most available pixel). */
        unsigned int lp[4];
        lp[0] = (top_left + 2 * Left[0] + Left[1] + 2) >> 2;
        lp[1] = (Left[0] + 2 * Left[1] + Left[2] + 2) >> 2;
        lp[2] = (Left[1] + 2 * Left[2] + Left[3] + 2) >> 2;
        lp[3] = (Left[2] + 2 * Left[3] + Left[3] + 2) >> 2;

        for (r = 0; r < 4; r++)
        {
            for (c = 0; c < 4; c++)
            {
                predictor[c] = lp[r];
            }

            predictor += 16;
        }
    }
    break;
    case B_LD_PRED:
    {
        /* Down-left diagonal from the 8 above/above-right pixels;
         * anti-diagonals share one 1-2-1 filtered value. */
        unsigned char *ptr = Above;
        predictor[0 * 16 + 0] = (ptr[0] + ptr[1] * 2 + ptr[2] + 2) >> 2;
        predictor[0 * 16 + 1] =
            predictor[1 * 16 + 0] = (ptr[1] + ptr[2] * 2 + ptr[3] + 2) >> 2;
        predictor[0 * 16 + 2] =
            predictor[1 * 16 + 1] =
                predictor[2 * 16 + 0] = (ptr[2] + ptr[3] * 2 + ptr[4] + 2) >> 2;
        predictor[0 * 16 + 3] =
            predictor[1 * 16 + 2] =
                predictor[2 * 16 + 1] =
                    predictor[3 * 16 + 0] = (ptr[3] + ptr[4] * 2 + ptr[5] + 2) >> 2;
        predictor[1 * 16 + 3] =
            predictor[2 * 16 + 2] =
                predictor[3 * 16 + 1] = (ptr[4] + ptr[5] * 2 + ptr[6] + 2) >> 2;
        predictor[2 * 16 + 3] =
            predictor[3 * 16 + 2] = (ptr[5] + ptr[6] * 2 + ptr[7] + 2) >> 2;
        predictor[3 * 16 + 3] = (ptr[6] + ptr[7] * 2 + ptr[7] + 2) >> 2;

    }
    break;
    case B_RD_PRED:
    {
        /* Down-right diagonal: pp[] is the border walked bottom-left
         * to top-right (Left reversed, top_left, then Above). */
        unsigned char pp[9];

        pp[0] = Left[3];
        pp[1] = Left[2];
        pp[2] = Left[1];
        pp[3] = Left[0];
        pp[4] = top_left;
        pp[5] = Above[0];
        pp[6] = Above[1];
        pp[7] = Above[2];
        pp[8] = Above[3];

        predictor[3 * 16 + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
        predictor[3 * 16 + 1] =
            predictor[2 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
        predictor[3 * 16 + 2] =
            predictor[2 * 16 + 1] =
                predictor[1 * 16 + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
        predictor[3 * 16 + 3] =
            predictor[2 * 16 + 2] =
                predictor[1 * 16 + 1] =
                    predictor[0 * 16 + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
        predictor[2 * 16 + 3] =
            predictor[1 * 16 + 2] =
                predictor[0 * 16 + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
        predictor[1 * 16 + 3] =
            predictor[0 * 16 + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
        predictor[0 * 16 + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;

    }
    break;
    case B_VR_PRED:
    {
        /* Vertical-right: mixes 2-tap averages and 3-tap filters over
         * the same border array as B_RD_PRED. */
        unsigned char pp[9];

        pp[0] = Left[3];
        pp[1] = Left[2];
        pp[2] = Left[1];
        pp[3] = Left[0];
        pp[4] = top_left;
        pp[5] = Above[0];
        pp[6] = Above[1];
        pp[7] = Above[2];
        pp[8] = Above[3];


        predictor[3 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
        predictor[2 * 16 + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
        predictor[3 * 16 + 1] =
            predictor[1 * 16 + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
        predictor[2 * 16 + 1] =
            predictor[0 * 16 + 0] = (pp[4] + pp[5] + 1) >> 1;
        predictor[3 * 16 + 2] =
            predictor[1 * 16 + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
        predictor[2 * 16 + 2] =
            predictor[0 * 16 + 1] = (pp[5] + pp[6] + 1) >> 1;
        predictor[3 * 16 + 3] =
            predictor[1 * 16 + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
        predictor[2 * 16 + 3] =
            predictor[0 * 16 + 2] = (pp[6] + pp[7] + 1) >> 1;
        predictor[1 * 16 + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
        predictor[0 * 16 + 3] = (pp[7] + pp[8] + 1) >> 1;

    }
    break;
    case B_VL_PRED:
    {
        /* Vertical-left: works directly on the above/above-right run. */
        unsigned char *pp = Above;

        predictor[0 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
        predictor[1 * 16 + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
        predictor[2 * 16 + 0] =
            predictor[0 * 16 + 1] = (pp[1] + pp[2] + 1) >> 1;
        predictor[1 * 16 + 1] =
            predictor[3 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
        predictor[2 * 16 + 1] =
            predictor[0 * 16 + 2] = (pp[2] + pp[3] + 1) >> 1;
        predictor[3 * 16 + 1] =
            predictor[1 * 16 + 2] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
        predictor[0 * 16 + 3] =
            predictor[2 * 16 + 2] = (pp[3] + pp[4] + 1) >> 1;
        predictor[1 * 16 + 3] =
            predictor[3 * 16 + 2] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
        predictor[2 * 16 + 3] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
        predictor[3 * 16 + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
    }
    break;

    case B_HD_PRED:
    {
        /* Horizontal-down: same reversed border array as B_RD_PRED. */
        unsigned char pp[9];
        pp[0] = Left[3];
        pp[1] = Left[2];
        pp[2] = Left[1];
        pp[3] = Left[0];
        pp[4] = top_left;
        pp[5] = Above[0];
        pp[6] = Above[1];
        pp[7] = Above[2];
        pp[8] = Above[3];


        predictor[3 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
        predictor[3 * 16 + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
        predictor[2 * 16 + 0] =
            predictor[3 * 16 + 2] = (pp[1] + pp[2] + 1) >> 1;
        predictor[2 * 16 + 1] =
            predictor[3 * 16 + 3] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
        predictor[2 * 16 + 2] =
            predictor[1 * 16 + 0] = (pp[2] + pp[3] + 1) >> 1;
        predictor[2 * 16 + 3] =
            predictor[1 * 16 + 1] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
        predictor[1 * 16 + 2] =
            predictor[0 * 16 + 0] = (pp[3] + pp[4] + 1) >> 1;
        predictor[1 * 16 + 3] =
            predictor[0 * 16 + 1] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
        predictor[0 * 16 + 2] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
        predictor[0 * 16 + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
    }
    break;


    case B_HU_PRED:
    {
        /* Horizontal-up: only the left column is used; the tail rows
         * saturate to the bottom-most pixel Left[3]. */
        unsigned char *pp = Left;
        predictor[0 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
        predictor[0 * 16 + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
        predictor[0 * 16 + 2] =
            predictor[1 * 16 + 0] = (pp[1] + pp[2] + 1) >> 1;
        predictor[0 * 16 + 3] =
            predictor[1 * 16 + 1] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
        predictor[1 * 16 + 2] =
            predictor[2 * 16 + 0] = (pp[2] + pp[3] + 1) >> 1;
        predictor[1 * 16 + 3] =
            predictor[2 * 16 + 1] = (pp[2] + pp[3] * 2 + pp[3] + 2) >> 2;
        predictor[2 * 16 + 2] =
            predictor[2 * 16 + 3] =
                predictor[3 * 16 + 0] =
                    predictor[3 * 16 + 1] =
                        predictor[3 * 16 + 2] =
                            predictor[3 * 16 + 3] = pp[3];
    }
    break;


    }
#else
    (void) pbi;
    (void) xd;
    (void) b_mode;
    (void) predictor;
    (void) mb_row;
    (void) mb_col;
    (void) num;
#endif
}
948
949 /* copy 4 bytes from the above right down so that the 4x4 prediction modes using pixels above and
950 * to the right prediction have filled in pixels to use.
951 */
/* Copy the 4 above-right pixels of the macroblock down the frame
 * buffer (rows 3, 7 and 11, just past the MB's right edge) so that
 * 4x4 prediction modes needing above-right pixels (e.g. B_LD/B_VL)
 * always find valid data for the lower sub-block rows.
 */
void vp8mt_intra_prediction_down_copy(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
{
#if CONFIG_MULTITHREAD
    unsigned char *above_right; /* 4 source pixels above-right of the MB */
    unsigned char *dst_ptr0;
    unsigned char *dst_ptr1;
    unsigned char *dst_ptr2;

    /* With the loop filter on, the unfiltered above row lives in the
     * per-row scratch buffer; otherwise read it from the frame. */
    if (pbi->common.filter_level)
        above_right = pbi->mt_yabove_row[mb_row] + mb_col*16 + 32 +16;
    else
        above_right = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16;

    dst_ptr0 = *(x->block[0].base_dst) + x->block[0].dst + 16 + 3 * x->block[0].dst_stride;
    dst_ptr1 = *(x->block[0].base_dst) + x->block[0].dst + 16 + 7 * x->block[0].dst_stride;
    dst_ptr2 = *(x->block[0].base_dst) + x->block[0].dst + 16 + 11 * x->block[0].dst_stride;

    /* Copy with vpx_memcpy rather than the previous unsigned int
     * loads/stores: the frame-buffer addresses (offset by an odd
     * number of strides) are not guaranteed 4-byte aligned, and the
     * pointer casts violated strict aliasing. */
    vpx_memcpy(dst_ptr0, above_right, 4);
    vpx_memcpy(dst_ptr1, above_right, 4);
    vpx_memcpy(dst_ptr2, above_right, 4);
#else
    (void) pbi;
    (void) x;
    (void) mb_row;
    (void) mb_col;
#endif
}
983