/*
 * HEVC video decoder
 *
 * Copyright (C) 2012 - 2013 Guillaume Martres
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/pixdesc.h"

#include "bit_depth_template.c"
#include "hevcpred.h"

#define POS(x, y) src[(x) + stride * (y)]

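/* Perform intra prediction for one transform block of size (1 << log2_size)
 * at luma position (x0, y0) in plane c_idx: gather the neighbouring reference
 * samples, substitute the unavailable ones, optionally smooth them, then call
 * the planar, DC or angular predictor for the signalled intra mode. */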
static av_always_inline void FUNC(intra_pred)(HEVCContext *s, int x0, int y0,
                                              int log2_size, int c_idx)
{
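/* Helper macros: PU() converts a luma coordinate into a prediction-unit
 * index, MVF()/MVF_PU() look up the motion-vector field of the current frame,
 * IS_INTRA() tests whether a neighbouring PU was intra-coded,
 * MIN_TB_ADDR_ZS() gives the z-scan address of a minimum transform block, and
 * EXTEND() splats one pixel value over a run of samples, four at a time. */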
#define PU(x) \
    ((x) >> s->ps.sps->log2_min_pu_size)
#define MVF(x, y) \
    (s->ref->tab_mvf[(x) + (y) * min_pu_width])
#define MVF_PU(x, y) \
    MVF(PU(x0 + ((x) * (1 << hshift))), PU(y0 + ((y) * (1 << vshift))))
#define IS_INTRA(x, y) \
    (MVF_PU(x, y).pred_flag == PF_INTRA)
#define MIN_TB_ADDR_ZS(x, y) \
    s->ps.pps->min_tb_addr_zs[(y) * (s->ps.sps->tb_mask+2) + (x)]
#define EXTEND(ptr, val, len)         \
do {                                  \
    pixel4 pix = PIXEL_SPLAT_X4(val); \
    for (i = 0; i < (len); i += 4)    \
        AV_WN4P(ptr + i, pix);        \
} while (0)

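/* The *_CIP macros walk along the reference sample rows/columns under
 * constrained intra prediction and replace samples that come from inter-coded
 * neighbours with the last value seen from an intra-coded one. */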
#define EXTEND_RIGHT_CIP(ptr, start, length)                \
        for (i = start; i < (start) + (length); i += 4)     \
            if (!IS_INTRA(i, -1))                           \
                AV_WN4P(&ptr[i], a);                        \
            else                                            \
                a = PIXEL_SPLAT_X4(ptr[i+3])
#define EXTEND_LEFT_CIP(ptr, start, length)                 \
        for (i = start; i > (start) - (length); i--)        \
            if (!IS_INTRA(i - 1, -1))                       \
                ptr[i - 1] = ptr[i]
#define EXTEND_UP_CIP(ptr, start, length)                   \
        for (i = (start); i > (start) - (length); i -= 4)   \
            if (!IS_INTRA(-1, i - 3))                       \
                AV_WN4P(&ptr[i - 3], a);                    \
            else                                            \
                a = PIXEL_SPLAT_X4(ptr[i - 3])
#define EXTEND_DOWN_CIP(ptr, start, length)                 \
        for (i = start; i < (start) + (length); i += 4)     \
            if (!IS_INTRA(-1, i))                           \
                AV_WN4P(&ptr[i], a);                        \
            else                                            \
                a = PIXEL_SPLAT_X4(ptr[i + 3])

    HEVCLocalContext *lc = s->HEVClc;
    int i;
    int hshift = s->ps.sps->hshift[c_idx];
    int vshift = s->ps.sps->vshift[c_idx];
    int size = (1 << log2_size);
    int size_in_luma_h = size << hshift;
    int size_in_tbs_h  = size_in_luma_h >> s->ps.sps->log2_min_tb_size;
    int size_in_luma_v = size << vshift;
    int size_in_tbs_v  = size_in_luma_v >> s->ps.sps->log2_min_tb_size;
    int x = x0 >> hshift;
    int y = y0 >> vshift;
    int x_tb = (x0 >> s->ps.sps->log2_min_tb_size) & s->ps.sps->tb_mask;
    int y_tb = (y0 >> s->ps.sps->log2_min_tb_size) & s->ps.sps->tb_mask;
    int spin = c_idx && !size_in_tbs_v && ((2 * y0) & (1 << s->ps.sps->log2_min_tb_size));

    int cur_tb_addr = MIN_TB_ADDR_ZS(x_tb, y_tb);

    ptrdiff_t stride = s->frame->linesize[c_idx] / sizeof(pixel);
    pixel *src = (pixel*)s->frame->data[c_idx] + x + y * stride;

    int min_pu_width = s->ps.sps->min_pu_width;

    enum IntraPredMode mode = c_idx ? lc->tu.intra_pred_mode_c :
                              lc->tu.intra_pred_mode;
    pixel4 a;
    pixel  left_array[2 * MAX_TB_SIZE + 1];
    pixel  filtered_left_array[2 * MAX_TB_SIZE + 1];
    pixel  top_array[2 * MAX_TB_SIZE + 1];
    pixel  filtered_top_array[2 * MAX_TB_SIZE + 1];

    pixel  *left          = left_array + 1;
    pixel  *top           = top_array  + 1;
    pixel  *filtered_left = filtered_left_array + 1;
    pixel  *filtered_top  = filtered_top_array  + 1;
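    /* Neighbour availability: besides the flags computed during parsing, the
     * bottom-left and top-right neighbours are only usable if their transform
     * blocks come earlier in z-scan order, i.e. are already reconstructed. */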
    int cand_bottom_left = lc->na.cand_bottom_left && cur_tb_addr > MIN_TB_ADDR_ZS( x_tb - 1, (y_tb + size_in_tbs_v + spin) & s->ps.sps->tb_mask);
    int cand_left        = lc->na.cand_left;
    int cand_up_left     = lc->na.cand_up_left;
    int cand_up          = lc->na.cand_up;
    int cand_up_right    = lc->na.cand_up_right && !spin && cur_tb_addr > MIN_TB_ADDR_ZS((x_tb + size_in_tbs_h) & s->ps.sps->tb_mask, y_tb - 1);

    int bottom_left_size = (FFMIN(y0 + 2 * size_in_luma_v, s->ps.sps->height) -
                            (y0 + size_in_luma_v)) >> vshift;
    int top_right_size   = (FFMIN(x0 + 2 * size_in_luma_h, s->ps.sps->width) -
                            (x0 + size_in_luma_h)) >> hshift;

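    /* Constrained intra prediction: samples from inter-coded neighbours must
     * not be used as references, so re-check each candidate edge PU by PU. */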
    if (s->ps.pps->constrained_intra_pred_flag == 1) {
        int size_in_luma_pu_v = PU(size_in_luma_v);
        int size_in_luma_pu_h = PU(size_in_luma_h);
        int on_pu_edge_x = !av_mod_uintp2(x0, s->ps.sps->log2_min_pu_size);
        int on_pu_edge_y = !av_mod_uintp2(y0, s->ps.sps->log2_min_pu_size);
        if (!size_in_luma_pu_h)
            size_in_luma_pu_h++;
        if (cand_bottom_left == 1 && on_pu_edge_x) {
            int x_left_pu   = PU(x0 - 1);
            int y_bottom_pu = PU(y0 + size_in_luma_v);
            int max = FFMIN(size_in_luma_pu_v, s->ps.sps->min_pu_height - y_bottom_pu);
            cand_bottom_left = 0;
            for (i = 0; i < max; i += 2)
                cand_bottom_left |= (MVF(x_left_pu, y_bottom_pu + i).pred_flag == PF_INTRA);
        }
        if (cand_left == 1 && on_pu_edge_x) {
            int x_left_pu = PU(x0 - 1);
            int y_left_pu = PU(y0);
            int max = FFMIN(size_in_luma_pu_v, s->ps.sps->min_pu_height - y_left_pu);
            cand_left = 0;
            for (i = 0; i < max; i += 2)
                cand_left |= (MVF(x_left_pu, y_left_pu + i).pred_flag == PF_INTRA);
        }
        if (cand_up_left == 1) {
            int x_left_pu = PU(x0 - 1);
            int y_top_pu  = PU(y0 - 1);
            cand_up_left = MVF(x_left_pu, y_top_pu).pred_flag == PF_INTRA;
        }
        if (cand_up == 1 && on_pu_edge_y) {
            int x_top_pu = PU(x0);
            int y_top_pu = PU(y0 - 1);
            int max = FFMIN(size_in_luma_pu_h, s->ps.sps->min_pu_width - x_top_pu);
            cand_up = 0;
            for (i = 0; i < max; i += 2)
                cand_up |= (MVF(x_top_pu + i, y_top_pu).pred_flag == PF_INTRA);
        }
        if (cand_up_right == 1 && on_pu_edge_y) {
            int y_top_pu   = PU(y0 - 1);
            int x_right_pu = PU(x0 + size_in_luma_h);
            int max = FFMIN(size_in_luma_pu_h, s->ps.sps->min_pu_width - x_right_pu);
            cand_up_right = 0;
            for (i = 0; i < max; i += 2)
                cand_up_right |= (MVF(x_right_pu + i, y_top_pu).pred_flag == PF_INTRA);
        }
        memset(left, 128, 2 * MAX_TB_SIZE * sizeof(pixel));
        memset(top , 128, 2 * MAX_TB_SIZE * sizeof(pixel));
        top[-1] = 128;
    }
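    /* Copy the available reference samples from the reconstructed frame into
     * the left[] and top[] arrays, padding the bottom-left and top-right
     * segments that fall outside the picture. */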
    if (cand_up_left) {
        left[-1] = POS(-1, -1);
        top[-1]  = left[-1];
    }
    if (cand_up)
        memcpy(top, src - stride, size * sizeof(pixel));
    if (cand_up_right) {
        memcpy(top + size, src - stride + size, size * sizeof(pixel));
        EXTEND(top + size + top_right_size, POS(size + top_right_size - 1, -1),
               size - top_right_size);
    }
    if (cand_left)
        for (i = 0; i < size; i++)
            left[i] = POS(-1, i);
    if (cand_bottom_left) {
        for (i = size; i < size + bottom_left_size; i++)
            left[i] = POS(-1, i);
        EXTEND(left + size + bottom_left_size, POS(-1, size + bottom_left_size - 1),
               size - bottom_left_size);
    }

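    /* Second constrained-intra-pred pass over the gathered samples: any
     * reference that originated from an inter-coded PU is overwritten with a
     * value propagated from the nearest intra-coded sample along the left
     * column and the top row. */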
    if (s->ps.pps->constrained_intra_pred_flag == 1) {
        if (cand_bottom_left || cand_left || cand_up_left || cand_up || cand_up_right) {
            int size_max_x = x0 + ((2 * size) << hshift) < s->ps.sps->width ?
                                    2 * size : (s->ps.sps->width - x0) >> hshift;
            int size_max_y = y0 + ((2 * size) << vshift) < s->ps.sps->height ?
                                    2 * size : (s->ps.sps->height - y0) >> vshift;
            int j = size + (cand_bottom_left? bottom_left_size: 0) -1;
            if (!cand_up_right) {
                size_max_x = x0 + ((size) << hshift) < s->ps.sps->width ?
                                        size : (s->ps.sps->width - x0) >> hshift;
            }
            if (!cand_bottom_left) {
                size_max_y = y0 + (( size) << vshift) < s->ps.sps->height ?
                                        size : (s->ps.sps->height - y0) >> vshift;
            }
            if (cand_bottom_left || cand_left || cand_up_left) {
                while (j > -1 && !IS_INTRA(-1, j))
                    j--;
                if (!IS_INTRA(-1, j)) {
                    j = 0;
                    while (j < size_max_x && !IS_INTRA(j, -1))
                        j++;
                    EXTEND_LEFT_CIP(top, j, j + 1);
                    left[-1] = top[-1];
                }
            } else {
                j = 0;
                while (j < size_max_x && !IS_INTRA(j, -1))
                    j++;
                if (j > 0)
                    if (cand_up_left) {
                        EXTEND_LEFT_CIP(top, j, j + 1);
                    } else {
                        EXTEND_LEFT_CIP(top, j, j);
                        top[-1] = top[0];
                    }
                left[-1] = top[-1];
            }
            left[-1] = top[-1];
            if (cand_bottom_left || cand_left) {
                a = PIXEL_SPLAT_X4(left[-1]);
                EXTEND_DOWN_CIP(left, 0, size_max_y);
            }
            if (!cand_left)
                EXTEND(left, left[-1], size);
            if (!cand_bottom_left)
                EXTEND(left + size, left[size - 1], size);
            if (x0 != 0 && y0 != 0) {
                a = PIXEL_SPLAT_X4(left[size_max_y - 1]);
                EXTEND_UP_CIP(left, size_max_y - 1, size_max_y);
                if (!IS_INTRA(-1, - 1))
                    left[-1] = left[0];
            } else if (x0 == 0) {
                EXTEND(left, 0, size_max_y);
            } else {
                a = PIXEL_SPLAT_X4(left[size_max_y - 1]);
                EXTEND_UP_CIP(left, size_max_y - 1, size_max_y);
            }
            top[-1] = left[-1];
            if (y0 != 0) {
                a = PIXEL_SPLAT_X4(left[-1]);
                EXTEND_RIGHT_CIP(top, 0, size_max_x);
            }
        }
    }
    // Infer the unavailable samples
    if (!cand_bottom_left) {
        if (cand_left) {
            EXTEND(left + size, left[size - 1], size);
        } else if (cand_up_left) {
            EXTEND(left, left[-1], 2 * size);
            cand_left = 1;
        } else if (cand_up) {
            left[-1] = top[0];
            EXTEND(left, left[-1], 2 * size);
            cand_up_left = 1;
            cand_left    = 1;
        } else if (cand_up_right) {
            EXTEND(top, top[size], size);
            left[-1] = top[size];
            EXTEND(left, left[-1], 2 * size);
            cand_up      = 1;
            cand_up_left = 1;
            cand_left    = 1;
        } else { // No samples available
            left[-1] = (1 << (BIT_DEPTH - 1));
            EXTEND(top,  left[-1], 2 * size);
            EXTEND(left, left[-1], 2 * size);
        }
    }

    if (!cand_left)
        EXTEND(left, left[size], size);
    if (!cand_up_left) {
        left[-1] = left[0];
    }
    if (!cand_up)
        EXTEND(top, left[-1], size);
    if (!cand_up_right)
        EXTEND(top + size, top[size - 1], size);

    top[-1] = left[-1];

    // Filtering process
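    // Mode-dependent reference smoothing: when the intra mode is far enough
    // from horizontal/vertical the references get a [1 2 1] filter; for
    // 32x32 luma blocks with nearly linear edges and strong smoothing
    // enabled, bilinear interpolation between the corner samples is used
    // instead.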
    if (!s->ps.sps->intra_smoothing_disabled_flag && (c_idx == 0 || s->ps.sps->chroma_format_idc == 3)) {
        if (mode != INTRA_DC && size != 4){
            int intra_hor_ver_dist_thresh[] = { 7, 1, 0 };
            int min_dist_vert_hor = FFMIN(FFABS((int)(mode - 26U)),
                                          FFABS((int)(mode - 10U)));
            if (min_dist_vert_hor > intra_hor_ver_dist_thresh[log2_size - 3]) {
                int threshold = 1 << (BIT_DEPTH - 5);
                if (s->ps.sps->sps_strong_intra_smoothing_enable_flag && c_idx == 0 &&
                    log2_size == 5 &&
                    FFABS(top[-1]  + top[63]  - 2 * top[31])  < threshold &&
                    FFABS(left[-1] + left[63] - 2 * left[31]) < threshold) {
                    // We can't just overwrite values in top because it could be
                    // a pointer into src
                    filtered_top[-1] = top[-1];
                    filtered_top[63] = top[63];
                    for (i = 0; i < 63; i++)
                        filtered_top[i] = ((64 - (i + 1)) * top[-1] +
                                           (i + 1) * top[63] + 32) >> 6;
                    for (i = 0; i < 63; i++)
                        left[i] = ((64 - (i + 1)) * left[-1] +
                                   (i + 1) * left[63] + 32) >> 6;
                    top = filtered_top;
                } else {
                    filtered_left[2 * size - 1] = left[2 * size - 1];
                    filtered_top[2 * size - 1]  = top[2 * size - 1];
                    for (i = 2 * size - 2; i >= 0; i--)
                        filtered_left[i] = (left[i + 1] + 2 * left[i] +
                                            left[i - 1] + 2) >> 2;
                    filtered_top[-1]  =
                    filtered_left[-1] = (left[0] + 2 * left[-1] + top[0] + 2) >> 2;
                    for (i = 2 * size - 2; i >= 0; i--)
                        filtered_top[i] = (top[i + 1] + 2 * top[i] +
                                           top[i - 1] + 2) >> 2;
                    left = filtered_left;
                    top  = filtered_top;
                }
            }
        }
    }

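    // Dispatch to the prediction function for the selected mode.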
    switch (mode) {
    case INTRA_PLANAR:
        s->hpc.pred_planar[log2_size - 2]((uint8_t *)src, (uint8_t *)top,
                                          (uint8_t *)left, stride);
        break;
    case INTRA_DC:
        s->hpc.pred_dc((uint8_t *)src, (uint8_t *)top,
                       (uint8_t *)left, stride, log2_size, c_idx);
        break;
    default:
        s->hpc.pred_angular[log2_size - 2]((uint8_t *)src, (uint8_t *)top,
                                           (uint8_t *)left, stride, c_idx,
                                           mode);
        break;
    }
}

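/* Per-size entry points (log2_size 2..5, i.e. 4x4 to 32x32) registered as the
 * intra prediction function pointers in the s->hpc table. */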
#define INTRA_PRED(size)                                                           \
static void FUNC(intra_pred_ ## size)(HEVCContext *s, int x0, int y0, int c_idx)  \
{                                                                                  \
    FUNC(intra_pred)(s, x0, y0, size, c_idx);                                      \
}

INTRA_PRED(2)
INTRA_PRED(3)
INTRA_PRED(4)
INTRA_PRED(5)

#undef INTRA_PRED

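/* Planar prediction: every sample is a bilinear blend of the left and top
 * reference arrays with the top-right and bottom-left reference samples. */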
static av_always_inline void FUNC(pred_planar)(uint8_t *_src, const uint8_t *_top,
                                               const uint8_t *_left, ptrdiff_t stride,
                                               int trafo_size)
{
    int x, y;
    pixel *src        = (pixel *)_src;
    const pixel *top  = (const pixel *)_top;
    const pixel *left = (const pixel *)_left;
    int size = 1 << trafo_size;
    for (y = 0; y < size; y++)
        for (x = 0; x < size; x++)
            POS(x, y) = ((size - 1 - x) * left[y] + (x + 1) * top[size]  +
                         (size - 1 - y) * top[x]  + (y + 1) * left[size] + size) >> (trafo_size + 1);
}

#define PRED_PLANAR(size)\
static void FUNC(pred_planar_ ## size)(uint8_t *src, const uint8_t *top,       \
                                       const uint8_t *left, ptrdiff_t stride)  \
{                                                                               \
    FUNC(pred_planar)(src, top, left, stride, size + 2);                        \
}

PRED_PLANAR(0)
PRED_PLANAR(1)
PRED_PLANAR(2)
PRED_PLANAR(3)

#undef PRED_PLANAR

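/* DC prediction: fill the block with the mean of the 2 * size reference
 * samples; for luma blocks smaller than 32x32 the first row and column are
 * additionally blended towards the neighbouring references. */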
static void FUNC(pred_dc)(uint8_t *_src, const uint8_t *_top,
                          const uint8_t *_left,
                          ptrdiff_t stride, int log2_size, int c_idx)
{
    int i, j, x, y;
    int size          = (1 << log2_size);
    pixel *src        = (pixel *)_src;
    const pixel *top  = (const pixel *)_top;
    const pixel *left = (const pixel *)_left;
    int dc            = size;
    pixel4 a;
    for (i = 0; i < size; i++)
        dc += left[i] + top[i];

    dc >>= log2_size + 1;

    a = PIXEL_SPLAT_X4(dc);

    for (i = 0; i < size; i++)
        for (j = 0; j < size; j += 4)
            AV_WN4P(&POS(j, i), a);

    if (c_idx == 0 && size < 32) {
        POS(0, 0) = (left[0] + 2 * dc + top[0] + 2) >> 2;
        for (x = 1; x < size; x++)
            POS(x, 0) = (top[x] + 3 * dc + 2) >> 2;
        for (y = 1; y < size; y++)
            POS(0, y) = (left[y] + 3 * dc + 2) >> 2;
    }
}

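/* Angular prediction: project each row (modes 18..34) or column (modes 2..17)
 * onto a main reference array using the per-mode displacement from
 * intra_pred_angle[]; for negative angles the reference is first extended
 * with samples from the other edge via inv_angle[]. */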
static av_always_inline void FUNC(pred_angular)(uint8_t *_src,
                                                const uint8_t *_top,
                                                const uint8_t *_left,
                                                ptrdiff_t stride, int c_idx,
                                                int mode, int size)
{
    int x, y;
    pixel *src        = (pixel *)_src;
    const pixel *top  = (const pixel *)_top;
    const pixel *left = (const pixel *)_left;

    static const int intra_pred_angle[] = {
         32,  26,  21,  17, 13,  9,  5, 2, 0, -2, -5, -9, -13, -17, -21, -26, -32,
        -26, -21, -17, -13, -9, -5, -2, 0, 2,  5,  9, 13,  17,  21,  26,  32
    };
    static const int inv_angle[] = {
        -4096, -1638, -910, -630, -482, -390, -315, -256, -315, -390, -482,
        -630, -910, -1638, -4096
    };

    int angle = intra_pred_angle[mode - 2];
    pixel ref_array[3 * MAX_TB_SIZE + 4];
    pixel *ref_tmp = ref_array + size;
    const pixel *ref;
    int last = (size * angle) >> 5;

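    /* Modes 18..34 are mostly vertical and read from the top reference row;
     * modes 2..17 are mostly horizontal and read from the left column. */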
    if (mode >= 18) {
        ref = top - 1;
        if (angle < 0 && last < -1) {
            for (x = 0; x <= size; x += 4)
                AV_WN4P(&ref_tmp[x], AV_RN4P(&top[x - 1]));
            for (x = last; x <= -1; x++)
                ref_tmp[x] = left[-1 + ((x * inv_angle[mode - 11] + 128) >> 8)];
            ref = ref_tmp;
        }

        for (y = 0; y < size; y++) {
            int idx  = ((y + 1) * angle) >> 5;
            int fact = ((y + 1) * angle) & 31;
            if (fact) {
                for (x = 0; x < size; x += 4) {
                    POS(x    , y) = ((32 - fact) * ref[x + idx + 1] +
                                           fact  * ref[x + idx + 2] + 16) >> 5;
                    POS(x + 1, y) = ((32 - fact) * ref[x + 1 + idx + 1] +
                                           fact  * ref[x + 1 + idx + 2] + 16) >> 5;
                    POS(x + 2, y) = ((32 - fact) * ref[x + 2 + idx + 1] +
                                           fact  * ref[x + 2 + idx + 2] + 16) >> 5;
                    POS(x + 3, y) = ((32 - fact) * ref[x + 3 + idx + 1] +
                                           fact  * ref[x + 3 + idx + 2] + 16) >> 5;
                }
            } else {
                for (x = 0; x < size; x += 4)
                    AV_WN4P(&POS(x, y), AV_RN4P(&ref[x + idx + 1]));
            }
        }
        if (mode == 26 && c_idx == 0 && size < 32) {
            for (y = 0; y < size; y++)
                POS(0, y) = av_clip_pixel(top[0] + ((left[y] - left[-1]) >> 1));
        }
    } else {
        ref = left - 1;
        if (angle < 0 && last < -1) {
            for (x = 0; x <= size; x += 4)
                AV_WN4P(&ref_tmp[x], AV_RN4P(&left[x - 1]));
            for (x = last; x <= -1; x++)
                ref_tmp[x] = top[-1 + ((x * inv_angle[mode - 11] + 128) >> 8)];
            ref = ref_tmp;
        }

        for (x = 0; x < size; x++) {
            int idx  = ((x + 1) * angle) >> 5;
            int fact = ((x + 1) * angle) & 31;
            if (fact) {
                for (y = 0; y < size; y++) {
                    POS(x, y) = ((32 - fact) * ref[y + idx + 1] +
                                       fact  * ref[y + idx + 2] + 16) >> 5;
                }
            } else {
                for (y = 0; y < size; y++)
                    POS(x, y) = ref[y + idx + 1];
            }
        }
        if (mode == 10 && c_idx == 0 && size < 32) {
            for (x = 0; x < size; x += 4) {
                POS(x,     0) = av_clip_pixel(left[0] + ((top[x    ] - top[-1]) >> 1));
                POS(x + 1, 0) = av_clip_pixel(left[0] + ((top[x + 1] - top[-1]) >> 1));
                POS(x + 2, 0) = av_clip_pixel(left[0] + ((top[x + 2] - top[-1]) >> 1));
                POS(x + 3, 0) = av_clip_pixel(left[0] + ((top[x + 3] - top[-1]) >> 1));
            }
        }
    }
}

static void FUNC(pred_angular_0)(uint8_t *src, const uint8_t *top,
                                 const uint8_t *left,
                                 ptrdiff_t stride, int c_idx, int mode)
{
    FUNC(pred_angular)(src, top, left, stride, c_idx, mode, 1 << 2);
}

static void FUNC(pred_angular_1)(uint8_t *src, const uint8_t *top,
                                 const uint8_t *left,
                                 ptrdiff_t stride, int c_idx, int mode)
{
    FUNC(pred_angular)(src, top, left, stride, c_idx, mode, 1 << 3);
}

static void FUNC(pred_angular_2)(uint8_t *src, const uint8_t *top,
                                 const uint8_t *left,
                                 ptrdiff_t stride, int c_idx, int mode)
{
    FUNC(pred_angular)(src, top, left, stride, c_idx, mode, 1 << 4);
}

static void FUNC(pred_angular_3)(uint8_t *src, const uint8_t *top,
                                 const uint8_t *left,
                                 ptrdiff_t stride, int c_idx, int mode)
{
    FUNC(pred_angular)(src, top, left, stride, c_idx, mode, 1 << 5);
}

#undef EXTEND_LEFT_CIP
#undef EXTEND_RIGHT_CIP
#undef EXTEND_UP_CIP
#undef EXTEND_DOWN_CIP
#undef IS_INTRA
#undef MVF_PU
#undef MVF
#undef PU
#undef EXTEND
#undef MIN_TB_ADDR_ZS
#undef POS
553