/*
 * DSP functions for Indeo Video Interactive codecs (Indeo4 and Indeo5)
 *
 * Copyright (c) 2009-2011 Maxim Poliakovski
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DSP functions (inverse transforms, motion compensation, wavelet recompositions)
 * for Indeo Video Interactive codecs.
 */

#include <string.h>
#include "libavutil/common.h"
#include "ivi.h"
#include "ivi_dsp.h"

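/*
 * Note on the recomposition below (all of this can be read off the code; the
 * "53" in the name presumably refers to the 5/3 filter pair of the wavelet):
 * each step of the x/y loops combines the four wavelet bands (LL, HL, LH, HH,
 * stored in plane->bands[0..3]) into a 2x2 block of output pixels. In the
 * low-pass directions an output sample is either the band coefficient itself
 * (even position) or the average of its two neighbours (odd position); in the
 * high-pass directions the (1, -6, 1) kernel is applied. The accumulated sums
 * p0..p3 are shifted down by 6 bits, biased by +128 and clipped to 8 bits.
 */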
void ff_ivi_recompose53(const IVIPlaneDesc *plane, uint8_t *dst,
                        const ptrdiff_t dst_pitch)
{
    int             x, y, indx;
    int32_t         p0, p1, p2, p3, tmp0, tmp1, tmp2;
    int32_t         b0_1, b0_2, b1_1, b1_2, b1_3, b2_1, b2_2, b2_3, b2_4, b2_5, b2_6;
    int32_t         b3_1, b3_2, b3_3, b3_4, b3_5, b3_6, b3_7, b3_8, b3_9;
    ptrdiff_t       pitch, back_pitch;
    const short     *b0_ptr, *b1_ptr, *b2_ptr, *b3_ptr;
    const int       num_bands = 4;

    /* all bands should have the same pitch */
    pitch = plane->bands[0].pitch;

    /* on the first iteration the row at "y-1" does not exist yet, so it is read from row "y" (back_pitch = 0) */
    back_pitch = 0;

    /* get pointers to the wavelet bands */
    b0_ptr = plane->bands[0].buf;
    b1_ptr = plane->bands[1].buf;
    b2_ptr = plane->bands[2].buf;
    b3_ptr = plane->bands[3].buf;

    for (y = 0; y < plane->height; y += 2) {

        if (y+2 >= plane->height)
            pitch = 0;
        /* load storage variables with values */
        if (num_bands > 0) {
            b0_1 = b0_ptr[0];
            b0_2 = b0_ptr[pitch];
        }

        if (num_bands > 1) {
            b1_1 = b1_ptr[back_pitch];
            b1_2 = b1_ptr[0];
            b1_3 = b1_1 - b1_2*6 + b1_ptr[pitch];
        }

        if (num_bands > 2) {
            b2_2 = b2_ptr[0];     // b2[x,  y  ]
            b2_3 = b2_2;          // b2[x+1,y  ] = b2[x,y]
            b2_5 = b2_ptr[pitch]; // b2[x  ,y+1]
            b2_6 = b2_5;          // b2[x+1,y+1] = b2[x,y+1]
        }

        if (num_bands > 3) {
            b3_2 = b3_ptr[back_pitch]; // b3[x  ,y-1]
            b3_3 = b3_2;               // b3[x+1,y-1] = b3[x  ,y-1]
            b3_5 = b3_ptr[0];          // b3[x  ,y  ]
            b3_6 = b3_5;               // b3[x+1,y  ] = b3[x  ,y  ]
            b3_8 = b3_2 - b3_5*6 + b3_ptr[pitch];
            b3_9 = b3_8;
        }

        for (x = 0, indx = 0; x < plane->width; x+=2, indx++) {
            if (x+2 >= plane->width) {
                b0_ptr --;
                b1_ptr --;
                b2_ptr --;
                b3_ptr --;
            }

            /* some values calculated in the previous iterations can */
            /* be reused in the next ones, so do appropriate copying */
            b2_1 = b2_2; // b2[x-1,y  ] = b2[x,  y  ]
            b2_2 = b2_3; // b2[x  ,y  ] = b2[x+1,y  ]
            b2_4 = b2_5; // b2[x-1,y+1] = b2[x  ,y+1]
            b2_5 = b2_6; // b2[x  ,y+1] = b2[x+1,y+1]
            b3_1 = b3_2; // b3[x-1,y-1] = b3[x  ,y-1]
            b3_2 = b3_3; // b3[x  ,y-1] = b3[x+1,y-1]
            b3_4 = b3_5; // b3[x-1,y  ] = b3[x  ,y  ]
            b3_5 = b3_6; // b3[x  ,y  ] = b3[x+1,y  ]
            b3_7 = b3_8; // vert_HPF(x-1)
            b3_8 = b3_9; // vert_HPF(x  )

            p0 = p1 = p2 = p3 = 0;

            /* process the LL-band by applying LPF both vertically and horizontally */
            if (num_bands > 0) {
                tmp0 = b0_1;
                tmp2 = b0_2;
                b0_1 = b0_ptr[indx+1];
                b0_2 = b0_ptr[pitch+indx+1];
                tmp1 = tmp0 + b0_1;

                p0 =  tmp0 * 16;
                p1 =  tmp1 * 8;
                p2 = (tmp0 + tmp2) * 8;
                p3 = (tmp1 + tmp2 + b0_2) * 4;
            }

            /* process the HL-band by applying HPF vertically and LPF horizontally */
            if (num_bands > 1) {
                tmp0 = b1_2;
                tmp1 = b1_1;
                b1_2 = b1_ptr[indx+1];
                b1_1 = b1_ptr[back_pitch+indx+1];

                tmp2 = tmp1 - tmp0*6 + b1_3;
                b1_3 = b1_1 - b1_2*6 + b1_ptr[pitch+indx+1];

                p0 += (tmp0 + tmp1) * 8;
                p1 += (tmp0 + tmp1 + b1_1 + b1_2) * 4;
                p2 +=  tmp2 * 4;
                p3 += (tmp2 + b1_3) * 2;
            }

            /* process the LH-band by applying LPF vertically and HPF horizontally */
            if (num_bands > 2) {
                b2_3 = b2_ptr[indx+1];
                b2_6 = b2_ptr[pitch+indx+1];

                tmp0 = b2_1 + b2_2;
                tmp1 = b2_1 - b2_2*6 + b2_3;

                p0 += tmp0 * 8;
                p1 += tmp1 * 4;
                p2 += (tmp0 + b2_4 + b2_5) * 4;
                p3 += (tmp1 + b2_4 - b2_5*6 + b2_6) * 2;
            }

            /* process the HH-band by applying HPF both vertically and horizontally */
            if (num_bands > 3) {
                b3_6 = b3_ptr[indx+1];            // b3[x+1,y  ]
                b3_3 = b3_ptr[back_pitch+indx+1]; // b3[x+1,y-1]

                tmp0 = b3_1 + b3_4;
                tmp1 = b3_2 + b3_5;
                tmp2 = b3_3 + b3_6;

                b3_9 = b3_3 - b3_6*6 + b3_ptr[pitch+indx+1];

                p0 += (tmp0 + tmp1) * 4;
                p1 += (tmp0 - tmp1*6 + tmp2) * 2;
                p2 += (b3_7 + b3_8) * 2;
                p3 +=  b3_7 - b3_8*6 + b3_9;
            }

            /* output four pixels */
            dst[x]             = av_clip_uint8((p0 >> 6) + 128);
            dst[x+1]           = av_clip_uint8((p1 >> 6) + 128);
            dst[dst_pitch+x]   = av_clip_uint8((p2 >> 6) + 128);
            dst[dst_pitch+x+1] = av_clip_uint8((p3 >> 6) + 128);
        }// for x

        dst += dst_pitch << 1;

        back_pitch = -pitch;

        b0_ptr += pitch + 1;
        b1_ptr += pitch + 1;
        b2_ptr += pitch + 1;
        b3_ptr += pitch + 1;
    }
}

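/*
 * The recomposition below is the 2x2 inverse Haar step written out directly:
 * for every output 2x2 block the co-located coefficients b0..b3 of the four
 * bands are combined with a +/-1 butterfly in each direction, which amounts
 * to multiplying (b0, b1, b2, b3) by the matrix with rows (+ + + +),
 * (+ + - -), (+ - + -), (+ - - +) and then rounding and dividing by 4
 * ("+ 2 >> 2"). The results are biased by +128 and clipped to 8 bits, as in
 * ff_ivi_recompose53().
 */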
void ff_ivi_recompose_haar(const IVIPlaneDesc *plane, uint8_t *dst,
                           const ptrdiff_t dst_pitch)
{
    int             x, y, indx, b0, b1, b2, b3, p0, p1, p2, p3;
    const short     *b0_ptr, *b1_ptr, *b2_ptr, *b3_ptr;
    ptrdiff_t       pitch;

    /* all bands should have the same pitch */
    pitch = plane->bands[0].pitch;

    /* get pointers to the wavelet bands */
    b0_ptr = plane->bands[0].buf;
    b1_ptr = plane->bands[1].buf;
    b2_ptr = plane->bands[2].buf;
    b3_ptr = plane->bands[3].buf;

    for (y = 0; y < plane->height; y += 2) {
        for (x = 0, indx = 0; x < plane->width; x += 2, indx++) {
            /* load coefficients */
            b0 = b0_ptr[indx]; //should be: b0 = (num_bands > 0) ? b0_ptr[indx] : 0;
            b1 = b1_ptr[indx]; //should be: b1 = (num_bands > 1) ? b1_ptr[indx] : 0;
            b2 = b2_ptr[indx]; //should be: b2 = (num_bands > 2) ? b2_ptr[indx] : 0;
            b3 = b3_ptr[indx]; //should be: b3 = (num_bands > 3) ? b3_ptr[indx] : 0;

            /* haar wavelet recomposition */
            p0 = (b0 + b1 + b2 + b3 + 2) >> 2;
            p1 = (b0 + b1 - b2 - b3 + 2) >> 2;
            p2 = (b0 - b1 + b2 - b3 + 2) >> 2;
            p3 = (b0 - b1 - b2 + b3 + 2) >> 2;

            /* bias, convert and output four pixels */
            dst[x]                 = av_clip_uint8(p0 + 128);
            dst[x + 1]             = av_clip_uint8(p1 + 128);
            dst[dst_pitch + x]     = av_clip_uint8(p2 + 128);
            dst[dst_pitch + x + 1] = av_clip_uint8(p3 + 128);
        }// for x

        dst += dst_pitch << 1;

        b0_ptr += pitch;
        b1_ptr += pitch;
        b2_ptr += pitch;
        b3_ptr += pitch;
    }// for y
}

/** butterfly operation for the inverse Haar transform */
#define IVI_HAAR_BFLY(s1, s2, o1, o2, t) \
    t  = ((s1) - (s2)) >> 1;\
    o1 = ((s1) + (s2)) >> 1;\
    o2 = (t);\

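/*
 * IVI_HAAR_BFLY computes the normalized sum/difference pair
 *     o1 = ((s1) + (s2)) >> 1,   o2 = ((s1) - (s2)) >> 1
 * with the difference stashed in t first, so an output may reuse one of the
 * input variables, e.g. IVI_HAAR_BFLY(t1, s3, t1, t3, t0) in INV_HAAR8 below.
 */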
/** inverse 8-point Haar transform */
#define INV_HAAR8(s1, s5, s3, s7, s2, s4, s6, s8,\
                  d1, d2, d3, d4, d5, d6, d7, d8,\
                  t0, t1, t2, t3, t4, t5, t6, t7, t8) {\
    t1 = (s1) * 2; t5 = (s5) * 2;\
    IVI_HAAR_BFLY(t1, t5, t1, t5, t0); IVI_HAAR_BFLY(t1, s3, t1, t3, t0);\
    IVI_HAAR_BFLY(t5, s7, t5, t7, t0); IVI_HAAR_BFLY(t1, s2, t1, t2, t0);\
    IVI_HAAR_BFLY(t3, s4, t3, t4, t0); IVI_HAAR_BFLY(t5, s6, t5, t6, t0);\
    IVI_HAAR_BFLY(t7, s8, t7, t8, t0);\
    d1 = COMPENSATE(t1);\
    d2 = COMPENSATE(t2);\
    d3 = COMPENSATE(t3);\
    d4 = COMPENSATE(t4);\
    d5 = COMPENSATE(t5);\
    d6 = COMPENSATE(t6);\
    d7 = COMPENSATE(t7);\
    d8 = COMPENSATE(t8); }

/** inverse 4-point Haar transform */
#define INV_HAAR4(s1, s3, s5, s7, d1, d2, d3, d4, t0, t1, t2, t3, t4) {\
    IVI_HAAR_BFLY(s1, s3, t0, t1, t4);\
    IVI_HAAR_BFLY(t0, s5, t2, t3, t4);\
    d1 = COMPENSATE(t2);\
    d2 = COMPENSATE(t3);\
    IVI_HAAR_BFLY(t1, s7, t2, t3, t4);\
    d3 = COMPENSATE(t2);\
    d4 = COMPENSATE(t3); }

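/*
 * ff_ivi_inverse_haar_8x8() applies the 8-point inverse Haar transform in two
 * passes: first down the columns into the temporary block tmp[], then along
 * the rows into the output. flags[i] indicates whether column i contains any
 * nonzero coefficients; empty columns are zeroed and all-zero rows are
 * memset. The "pre-scaling" doubles src[0], src[8], src[16] and src[24] of
 * the first four columns (shift = !(i & 4)), presumably to equalize the
 * scale of the coarser subbands before the butterflies. COMPENSATE() is the
 * identity in both passes, so no extra rounding is applied in between.
 */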
void ff_ivi_inverse_haar_8x8(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                             const uint8_t *flags)
{
    int     i, shift, sp1, sp2, sp3, sp4;
    const int32_t *src;
    int32_t *dst;
    int     tmp[64];
    int     t0, t1, t2, t3, t4, t5, t6, t7, t8;

    /* apply the InvHaar8 to all columns */
#define COMPENSATE(x) (x)
    src = in;
    dst = tmp;
    for (i = 0; i < 8; i++) {
        if (flags[i]) {
            /* pre-scaling */
            shift = !(i & 4);
            sp1 = src[ 0] * (1 << shift);
            sp2 = src[ 8] * (1 << shift);
            sp3 = src[16] * (1 << shift);
            sp4 = src[24] * (1 << shift);
            INV_HAAR8(    sp1,     sp2,     sp3,     sp4,
                      src[32], src[40], src[48], src[56],
                      dst[ 0], dst[ 8], dst[16], dst[24],
                      dst[32], dst[40], dst[48], dst[56],
                      t0, t1, t2, t3, t4, t5, t6, t7, t8);
        } else
            dst[ 0] = dst[ 8] = dst[16] = dst[24] =
            dst[32] = dst[40] = dst[48] = dst[56] = 0;

        src++;
        dst++;
    }
#undef  COMPENSATE

    /* apply the InvHaar8 to all rows */
#define COMPENSATE(x) (x)
    src = tmp;
    for (i = 0; i < 8; i++) {
        if (   !src[0] && !src[1] && !src[2] && !src[3]
            && !src[4] && !src[5] && !src[6] && !src[7]) {
            memset(out, 0, 8 * sizeof(out[0]));
        } else {
            INV_HAAR8(src[0], src[1], src[2], src[3],
                      src[4], src[5], src[6], src[7],
                      out[0], out[1], out[2], out[3],
                      out[4], out[5], out[6], out[7],
                      t0, t1, t2, t3, t4, t5, t6, t7, t8);
        }
        src += 8;
        out += pitch;
    }
#undef  COMPENSATE
}

void ff_ivi_row_haar8(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                      const uint8_t *flags)
{
    int     i;
    int     t0, t1, t2, t3, t4, t5, t6, t7, t8;

    /* apply the InvHaar8 to all rows */
#define COMPENSATE(x) (x)
    for (i = 0; i < 8; i++) {
        if (   !in[0] && !in[1] && !in[2] && !in[3]
            && !in[4] && !in[5] && !in[6] && !in[7]) {
            memset(out, 0, 8 * sizeof(out[0]));
        } else {
            INV_HAAR8(in[0],  in[1],  in[2],  in[3],
                      in[4],  in[5],  in[6],  in[7],
                      out[0], out[1], out[2], out[3],
                      out[4], out[5], out[6], out[7],
                      t0, t1, t2, t3, t4, t5, t6, t7, t8);
        }
        in  += 8;
        out += pitch;
    }
#undef  COMPENSATE
}

void ff_ivi_col_haar8(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                      const uint8_t *flags)
{
    int     i;
    int     t0, t1, t2, t3, t4, t5, t6, t7, t8;

    /* apply the InvHaar8 to all columns */
#define COMPENSATE(x) (x)
    for (i = 0; i < 8; i++) {
        if (flags[i]) {
            INV_HAAR8(in[ 0], in[ 8], in[16], in[24],
                      in[32], in[40], in[48], in[56],
                      out[0 * pitch], out[1 * pitch],
                      out[2 * pitch], out[3 * pitch],
                      out[4 * pitch], out[5 * pitch],
                      out[6 * pitch], out[7 * pitch],
                      t0, t1, t2, t3, t4, t5, t6, t7, t8);
        } else
            out[0 * pitch] = out[1 * pitch] =
            out[2 * pitch] = out[3 * pitch] =
            out[4 * pitch] = out[5 * pitch] =
            out[6 * pitch] = out[7 * pitch] = 0;

        in++;
        out++;
    }
#undef  COMPENSATE
}

void ff_ivi_inverse_haar_4x4(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                             const uint8_t *flags)
{
    int     i, shift, sp1, sp2;
    const int32_t *src;
    int32_t *dst;
    int     tmp[16];
    int     t0, t1, t2, t3, t4;

    /* apply the InvHaar4 to all columns */
#define COMPENSATE(x) (x)
    src = in;
    dst = tmp;
    for (i = 0; i < 4; i++) {
        if (flags[i]) {
            /* pre-scaling */
            shift = !(i & 2);
            sp1 = src[0] * (1 << shift);
            sp2 = src[4] * (1 << shift);
            INV_HAAR4(   sp1,    sp2, src[8], src[12],
                      dst[0], dst[4], dst[8], dst[12],
                      t0, t1, t2, t3, t4);
        } else
            dst[0] = dst[4] = dst[8] = dst[12] = 0;

        src++;
        dst++;
    }
#undef  COMPENSATE

    /* apply the InvHaar4 to all rows */
#define COMPENSATE(x) (x)
    src = tmp;
    for (i = 0; i < 4; i++) {
        if (!src[0] && !src[1] && !src[2] && !src[3]) {
            memset(out, 0, 4 * sizeof(out[0]));
        } else {
            INV_HAAR4(src[0], src[1], src[2], src[3],
                      out[0], out[1], out[2], out[3],
                      t0, t1, t2, t3, t4);
        }
        src += 4;
        out += pitch;
    }
#undef  COMPENSATE
}

void ff_ivi_row_haar4(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                      const uint8_t *flags)
{
    int     i;
    int     t0, t1, t2, t3, t4;

    /* apply the InvHaar4 to all rows */
#define COMPENSATE(x) (x)
    for (i = 0; i < 4; i++) {
        if (!in[0] && !in[1] && !in[2] && !in[3]) {
            memset(out, 0, 4 * sizeof(out[0]));
        } else {
            INV_HAAR4(in[0], in[1], in[2], in[3],
                      out[0], out[1], out[2], out[3],
                      t0, t1, t2, t3, t4);
        }
        in  += 4;
        out += pitch;
    }
#undef  COMPENSATE
}

void ff_ivi_col_haar4(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                      const uint8_t *flags)
{
    int     i;
    int     t0, t1, t2, t3, t4;

    /* apply the InvHaar4 to all columns */
#define COMPENSATE(x) (x)
    for (i = 0; i < 4; i++) {
        if (flags[i]) {
            INV_HAAR4(in[0], in[4], in[8], in[12],
                      out[0 * pitch], out[1 * pitch],
                      out[2 * pitch], out[3 * pitch],
                      t0, t1, t2, t3, t4);
        } else
            out[0 * pitch] = out[1 * pitch] =
            out[2 * pitch] = out[3 * pitch] = 0;

        in++;
        out++;
    }
#undef  COMPENSATE
}

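/*
 * DC-only fast path: when only the DC coefficient of the block is coded, the
 * inverse transform output is flat, so every sample of the blk_size x
 * blk_size block is simply set to *in >> 3 (presumably the DC gain of the
 * inverse Haar used here; the slant DC helpers further down use
 * (*in + 1) >> 1 instead).
 */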
void ff_ivi_dc_haar_2d(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                       int blk_size)
{
    int     x, y;
    int16_t dc_coeff;

    dc_coeff = (*in + 0) >> 3;

    for (y = 0; y < blk_size; out += pitch, y++) {
        for (x = 0; x < blk_size; x++)
            out[x] = dc_coeff;
    }
}

/** butterfly operation for the inverse slant transform */
#define IVI_SLANT_BFLY(s1, s2, o1, o2, t) \
    t  = (s1) - (s2);\
    o1 = (s1) + (s2);\
    o2 = (t);\

/** This is a reflection with a = 1/2, b = 5/4 for the inverse slant transform */
#define IVI_IREFLECT(s1, s2, o1, o2, t) \
    t  = (((s1) + (s2)*2 + 2) >> 2) + (s1);\
    o2 = (((s1)*2 - (s2) + 2) >> 2) - (s2);\
    o1 = (t);\

/** This is a reflection with a = 1/2, b = 7/8 for the inverse slant transform */
#define IVI_SLANT_PART4(s1, s2, o1, o2, t) \
    t  = (s2) + (((s1)*4  - (s2) + 4) >> 3);\
    o2 = (s1) + ((-(s1) - (s2)*4 + 4) >> 3);\
    o1 = (t);\

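/*
 * Written out, the two lifting-style reflections above compute (ignoring the
 * rounding terms):
 *     IVI_IREFLECT:    o1 = 5/4*s1 + 1/2*s2,   o2 = 1/2*s1 - 5/4*s2
 *     IVI_SLANT_PART4: o1 = 1/2*s1 + 7/8*s2,   o2 = 7/8*s1 - 1/2*s2
 * which is where the "a = 1/2, b = 5/4" and "a = 1/2, b = 7/8" in the
 * comments come from; the "+ 2" and "+ 4" before the shifts implement
 * rounding.
 */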
/** inverse slant8 transform */
#define IVI_INV_SLANT8(s1, s4, s8, s5, s2, s6, s3, s7,\
                       d1, d2, d3, d4, d5, d6, d7, d8,\
                       t0, t1, t2, t3, t4, t5, t6, t7, t8) {\
    IVI_SLANT_PART4(s4, s5, t4, t5, t0);\
\
    IVI_SLANT_BFLY(s1, t5, t1, t5, t0); IVI_SLANT_BFLY(s2, s6, t2, t6, t0);\
    IVI_SLANT_BFLY(s7, s3, t7, t3, t0); IVI_SLANT_BFLY(t4, s8, t4, t8, t0);\
\
    IVI_SLANT_BFLY(t1, t2, t1, t2, t0); IVI_IREFLECT  (t4, t3, t4, t3, t0);\
    IVI_SLANT_BFLY(t5, t6, t5, t6, t0); IVI_IREFLECT  (t8, t7, t8, t7, t0);\
    IVI_SLANT_BFLY(t1, t4, t1, t4, t0); IVI_SLANT_BFLY(t2, t3, t2, t3, t0);\
    IVI_SLANT_BFLY(t5, t8, t5, t8, t0); IVI_SLANT_BFLY(t6, t7, t6, t7, t0);\
    d1 = COMPENSATE(t1);\
    d2 = COMPENSATE(t2);\
    d3 = COMPENSATE(t3);\
    d4 = COMPENSATE(t4);\
    d5 = COMPENSATE(t5);\
    d6 = COMPENSATE(t6);\
    d7 = COMPENSATE(t7);\
    d8 = COMPENSATE(t8);}

/** inverse slant4 transform */
#define IVI_INV_SLANT4(s1, s4, s2, s3, d1, d2, d3, d4, t0, t1, t2, t3, t4) {\
    IVI_SLANT_BFLY(s1, s2, t1, t2, t0); IVI_IREFLECT  (s4, s3, t4, t3, t0);\
\
    IVI_SLANT_BFLY(t1, t4, t1, t4, t0); IVI_SLANT_BFLY(t2, t3, t2, t3, t0);\
    d1 = COMPENSATE(t1);\
    d2 = COMPENSATE(t2);\
    d3 = COMPENSATE(t3);\
    d4 = COMPENSATE(t4);}

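/*
 * Like the Haar case, the 2-D inverse slant transform below is applied as a
 * column pass into tmp[] followed by a row pass into the output. Note the two
 * different COMPENSATE() definitions: the column pass keeps full precision
 * (identity), while the row pass rounds with (x + 1) >> 1, i.e. the final
 * halving is folded into the second pass.
 */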
void ff_ivi_inverse_slant_8x8(const int32_t *in, int16_t *out, ptrdiff_t pitch, const uint8_t *flags)
{
    int     i;
    const int32_t *src;
    int32_t *dst;
    int     tmp[64];
    int     t0, t1, t2, t3, t4, t5, t6, t7, t8;

#define COMPENSATE(x) (x)
    src = in;
    dst = tmp;
    for (i = 0; i < 8; i++) {
        if (flags[i]) {
            IVI_INV_SLANT8(src[0], src[8], src[16], src[24], src[32], src[40], src[48], src[56],
                           dst[0], dst[8], dst[16], dst[24], dst[32], dst[40], dst[48], dst[56],
                           t0, t1, t2, t3, t4, t5, t6, t7, t8);
        } else
            dst[0] = dst[8] = dst[16] = dst[24] = dst[32] = dst[40] = dst[48] = dst[56] = 0;

        src++;
        dst++;
    }
#undef COMPENSATE

#define COMPENSATE(x) (((x) + 1)>>1)
    src = tmp;
    for (i = 0; i < 8; i++) {
        if (!src[0] && !src[1] && !src[2] && !src[3] && !src[4] && !src[5] && !src[6] && !src[7]) {
            memset(out, 0, 8*sizeof(out[0]));
        } else {
            IVI_INV_SLANT8(src[0], src[1], src[2], src[3], src[4], src[5], src[6], src[7],
                           out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7],
                           t0, t1, t2, t3, t4, t5, t6, t7, t8);
        }
        src += 8;
        out += pitch;
    }
#undef COMPENSATE
}

void ff_ivi_inverse_slant_4x4(const int32_t *in, int16_t *out, ptrdiff_t pitch, const uint8_t *flags)
{
    int     i;
    const int32_t *src;
    int32_t *dst;
    int     tmp[16];
    int     t0, t1, t2, t3, t4;

#define COMPENSATE(x) (x)
    src = in;
    dst = tmp;
    for (i = 0; i < 4; i++) {
        if (flags[i]) {
            IVI_INV_SLANT4(src[0], src[4], src[8], src[12],
                           dst[0], dst[4], dst[8], dst[12],
                           t0, t1, t2, t3, t4);
        } else
            dst[0] = dst[4] = dst[8] = dst[12] = 0;

        src++;
        dst++;
    }
#undef COMPENSATE

#define COMPENSATE(x) (((x) + 1)>>1)
    src = tmp;
    for (i = 0; i < 4; i++) {
        if (!src[0] && !src[1] && !src[2] && !src[3]) {
            out[0] = out[1] = out[2] = out[3] = 0;
        } else {
            IVI_INV_SLANT4(src[0], src[1], src[2], src[3],
                           out[0], out[1], out[2], out[3],
                           t0, t1, t2, t3, t4);
        }
        src += 4;
        out += pitch;
    }
#undef COMPENSATE
}

void ff_ivi_dc_slant_2d(const int32_t *in, int16_t *out, ptrdiff_t pitch, int blk_size)
{
    int     x, y;
    int16_t dc_coeff;

    dc_coeff = (*in + 1) >> 1;

    for (y = 0; y < blk_size; out += pitch, y++) {
        for (x = 0; x < blk_size; x++)
            out[x] = dc_coeff;
    }
}

void ff_ivi_row_slant8(const int32_t *in, int16_t *out, ptrdiff_t pitch, const uint8_t *flags)
{
    int     i;
    int     t0, t1, t2, t3, t4, t5, t6, t7, t8;

#define COMPENSATE(x) (((x) + 1)>>1)
    for (i = 0; i < 8; i++) {
        if (!in[0] && !in[1] && !in[2] && !in[3] && !in[4] && !in[5] && !in[6] && !in[7]) {
            memset(out, 0, 8*sizeof(out[0]));
        } else {
            IVI_INV_SLANT8( in[0],  in[1],  in[2],  in[3],  in[4],  in[5],  in[6],  in[7],
                           out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7],
                           t0, t1, t2, t3, t4, t5, t6, t7, t8);
        }
        in += 8;
        out += pitch;
    }
#undef COMPENSATE
}

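/*
 * DC-only helpers for the 1-D slant transforms: ff_ivi_dc_row_slant() fills
 * only the first row with the scaled DC value and zeroes the remaining rows,
 * which is effectively what the full row transform would produce for a block
 * whose only nonzero coefficient is the DC; ff_ivi_dc_col_slant() further
 * below is the column counterpart.
 */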
void ff_ivi_dc_row_slant(const int32_t *in, int16_t *out, ptrdiff_t pitch, int blk_size)
{
    int     x, y;
    int16_t dc_coeff;

    dc_coeff = (*in + 1) >> 1;

    for (x = 0; x < blk_size; x++)
        out[x] = dc_coeff;

    out += pitch;

    for (y = 1; y < blk_size; out += pitch, y++) {
        for (x = 0; x < blk_size; x++)
            out[x] = 0;
    }
}

void ff_ivi_col_slant8(const int32_t *in, int16_t *out, ptrdiff_t pitch, const uint8_t *flags)
{
    int     i, row2, row4, row8;
    int     t0, t1, t2, t3, t4, t5, t6, t7, t8;

    row2 = pitch << 1;
    row4 = pitch << 2;
    row8 = pitch << 3;

#define COMPENSATE(x) (((x) + 1)>>1)
    for (i = 0; i < 8; i++) {
        if (flags[i]) {
            IVI_INV_SLANT8(in[0], in[8], in[16], in[24], in[32], in[40], in[48], in[56],
                           out[0], out[pitch], out[row2], out[row2 + pitch], out[row4],
                           out[row4 + pitch],  out[row4 + row2], out[row8 - pitch],
                           t0, t1, t2, t3, t4, t5, t6, t7, t8);
        } else {
            out[0] = out[pitch] = out[row2] = out[row2 + pitch] = out[row4] =
            out[row4 + pitch] =  out[row4 + row2] = out[row8 - pitch] = 0;
        }

        in++;
        out++;
    }
#undef COMPENSATE
}

void ff_ivi_dc_col_slant(const int32_t *in, int16_t *out, ptrdiff_t pitch, int blk_size)
{
    int     x, y;
    int16_t dc_coeff;

    dc_coeff = (*in + 1) >> 1;

    for (y = 0; y < blk_size; out += pitch, y++) {
        out[0] = dc_coeff;
        for (x = 1; x < blk_size; x++)
            out[x] = 0;
    }
}

void ff_ivi_row_slant4(const int32_t *in, int16_t *out, ptrdiff_t pitch, const uint8_t *flags)
{
    int     i;
    int     t0, t1, t2, t3, t4;

#define COMPENSATE(x) (((x) + 1)>>1)
    for (i = 0; i < 4; i++) {
        if (!in[0] && !in[1] && !in[2] && !in[3]) {
            memset(out, 0, 4*sizeof(out[0]));
        } else {
            IVI_INV_SLANT4( in[0],  in[1],  in[2],  in[3],
                           out[0], out[1], out[2], out[3],
                           t0, t1, t2, t3, t4);
        }
        in  += 4;
        out += pitch;
    }
#undef COMPENSATE
}

void ff_ivi_col_slant4(const int32_t *in, int16_t *out, ptrdiff_t pitch, const uint8_t *flags)
{
    int     i, row2;
    int     t0, t1, t2, t3, t4;

    row2 = pitch << 1;

#define COMPENSATE(x) (((x) + 1)>>1)
    for (i = 0; i < 4; i++) {
        if (flags[i]) {
            IVI_INV_SLANT4(in[0], in[4], in[8], in[12],
                           out[0], out[pitch], out[row2], out[row2 + pitch],
                           t0, t1, t2, t3, t4);
        } else {
            out[0] = out[pitch] = out[row2] = out[row2 + pitch] = 0;
        }

        in++;
        out++;
    }
#undef COMPENSATE
}

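/*
 * The two helpers below are presumably used when the transform is bypassed:
 * ff_ivi_put_pixels_8x8() copies the 8x8 block of coefficients to the output
 * as-is (flags is unused), and ff_ivi_put_dc_pixel_8x8() handles the DC-only
 * case by writing in[0] to the top-left sample and zeroing the rest of the
 * block.
 */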
void ff_ivi_put_pixels_8x8(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                           const uint8_t *flags)
{
    int     x, y;

    for (y = 0; y < 8; out += pitch, in += 8, y++)
        for (x = 0; x < 8; x++)
            out[x] = in[x];
}

void ff_ivi_put_dc_pixel_8x8(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                             int blk_size)
{
    int     y;

    out[0] = in[0];
    memset(out + 1, 0, 7*sizeof(out[0]));
    out += pitch;

    for (y = 1; y < 8; out += pitch, y++)
        memset(out, 0, 8*sizeof(out[0]));
}

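/*
 * IVI_MC_TEMPLATE() expands to a pair of motion compensation routines for one
 * block size: a static ivi_mc_<size>x<size><suffix>() working with separate
 * destination/source pitches and a public ff_ivi_mc_<size>x<size><suffix>()
 * wrapper that uses a common pitch. mc_type selects the interpolation:
 *     0: fullpel copy               buf[j] OP ref[j]
 *     1: horizontal halfpel         buf[j] OP (ref[j] + ref[j+1]) >> 1
 *     2: vertical halfpel           buf[j] OP (ref[j] + ref[j+pitch]) >> 1
 *     3: both                       buf[j] OP (sum of the 4 neighbours) >> 2
 * OP is OP_PUT ((a) = (b)) for the "_no_delta" variants and OP_ADD
 * ((a) += (b)) for the "_delta" variants instantiated at the end of the file.
 */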
#define IVI_MC_TEMPLATE(size, suffix, OP) \
static void ivi_mc_ ## size ##x## size ## suffix(int16_t *buf, \
                                                 ptrdiff_t dpitch, \
                                                 const int16_t *ref_buf, \
                                                 ptrdiff_t pitch, int mc_type) \
{ \
    int     i, j; \
    const int16_t *wptr; \
\
    switch (mc_type) { \
    case 0: /* fullpel (no interpolation) */ \
        for (i = 0; i < size; i++, buf += dpitch, ref_buf += pitch) { \
            for (j = 0; j < size; j++) {\
                OP(buf[j], ref_buf[j]); \
            } \
        } \
        break; \
    case 1: /* horizontal halfpel interpolation */ \
        for (i = 0; i < size; i++, buf += dpitch, ref_buf += pitch) \
            for (j = 0; j < size; j++) \
                OP(buf[j], (ref_buf[j] + ref_buf[j+1]) >> 1); \
        break; \
    case 2: /* vertical halfpel interpolation */ \
        wptr = ref_buf + pitch; \
        for (i = 0; i < size; i++, buf += dpitch, wptr += pitch, ref_buf += pitch) \
            for (j = 0; j < size; j++) \
                OP(buf[j], (ref_buf[j] + wptr[j]) >> 1); \
        break; \
    case 3: /* vertical and horizontal halfpel interpolation */ \
        wptr = ref_buf + pitch; \
        for (i = 0; i < size; i++, buf += dpitch, wptr += pitch, ref_buf += pitch) \
            for (j = 0; j < size; j++) \
                OP(buf[j], (ref_buf[j] + ref_buf[j+1] + wptr[j] + wptr[j+1]) >> 2); \
        break; \
    } \
} \
\
void ff_ivi_mc_ ## size ##x## size ## suffix(int16_t *buf, const int16_t *ref_buf, \
                                             ptrdiff_t pitch, int mc_type) \
{ \
    ivi_mc_ ## size ##x## size ## suffix(buf, pitch, ref_buf, pitch, mc_type); \
} \

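/*
 * IVI_MC_AVG_TEMPLATE() builds the two-reference averaging variant: the
 * prediction from ref_buf is written into the temporary block (via the
 * _no_delta routine), the prediction from ref_buf2 is added on top (via the
 * _delta routine), and the sum is halved before OP stores or accumulates it
 * into buf, i.e. buf[j] OP (pred1[j] + pred2[j]) >> 1.
 */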
#define IVI_MC_AVG_TEMPLATE(size, suffix, OP) \
void ff_ivi_mc_avg_ ## size ##x## size ## suffix(int16_t *buf, \
                                                 const int16_t *ref_buf, \
                                                 const int16_t *ref_buf2, \
                                                 ptrdiff_t pitch, \
                                                 int mc_type, int mc_type2) \
{ \
    int16_t tmp[size * size]; \
    int i, j; \
\
    ivi_mc_ ## size ##x## size ## _no_delta(tmp, size, ref_buf, pitch, mc_type); \
    ivi_mc_ ## size ##x## size ## _delta(tmp, size, ref_buf2, pitch, mc_type2); \
    for (i = 0; i < size; i++, buf += pitch) { \
        for (j = 0; j < size; j++) {\
            OP(buf[j], tmp[i * size + j] >> 1); \
        } \
    } \
} \

#define OP_PUT(a, b)  (a) = (b)
#define OP_ADD(a, b)  (a) += (b)

IVI_MC_TEMPLATE(8, _no_delta, OP_PUT)
IVI_MC_TEMPLATE(8, _delta,    OP_ADD)
IVI_MC_TEMPLATE(4, _no_delta, OP_PUT)
IVI_MC_TEMPLATE(4, _delta,    OP_ADD)
IVI_MC_AVG_TEMPLATE(8, _no_delta, OP_PUT)
IVI_MC_AVG_TEMPLATE(8, _delta,    OP_ADD)
IVI_MC_AVG_TEMPLATE(4, _no_delta, OP_PUT)
IVI_MC_AVG_TEMPLATE(4, _delta,    OP_ADD)