/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/mpegvideodsp.h"
#include "libavcodec/videodsp.h"

#if HAVE_INLINE_ASM

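/* MMX implementation of global motion compensation: interpolate one 8xh
 * block from src using the affine transform (ox, oy, dxx, dxy, dyx, dyy)
 * with bilinear sub-pel filtering, rounding constant r and sub-pel
 * precision 1 << shift. Cases this path cannot handle fall back to
 * ff_gmc_c(). */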
static void gmc_mmx(uint8_t *dst, uint8_t *src,
                    int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy,
                    int shift, int r, int width, int height)
{
    const int w    = 8;
    const int ix   = ox  >> (16 + shift);  // fullpel part of the source offset
    const int iy   = oy  >> (16 + shift);
    const int oxs  = ox  >> 4;             // reduced-precision offsets and
    const int oys  = oy  >> 4;             // increments used by the MMX code
    const int dxxs = dxx >> 4;
    const int dxys = dxy >> 4;
    const int dyxs = dyx >> 4;
    const int dyys = dyy >> 4;
    const uint16_t r4[4]   = { r, r, r, r };
    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
    const uint64_t shift2  = 2 * shift;
#define MAX_STRIDE 4096U
#define MAX_H 8U
    uint8_t edge_buf[(MAX_H + 1) * MAX_STRIDE];
    int x, y;

    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);
    // edge emulation is needed if the source block is not fully inside the picture
    int need_emu  = (unsigned) ix >= width  - w || width  < w ||
                    (unsigned) iy >= height - h || height < h;

    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift) ||
        // uses more than 16 bits of subpel mv (only at huge resolution)
        (dxx | dxy | dyx | dyy) & 15 ||
        (need_emu && (h > MAX_H || stride > MAX_STRIDE))) {
        // FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    src += ix + iy * stride;
    if (need_emu) {
        // copy the source area into a padded buffer, replicating edge pixels
        ff_emulated_edge_mc_8(edge_buf, src, stride, stride, w + 1, h + 1, ix, iy, width, height);
        src = edge_buf;
    }

    // mm6 = 1 << shift replicated into four words, mm7 = 0 (for byte unpacking)
    __asm__ volatile (
        "movd         %0, %%mm6         \n\t"
        "pxor      %%mm7, %%mm7         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        :: "r" (1 << shift));

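    /* Process the 8-wide block as two groups of 4 columns. dx4/dy4 hold the
     * running sub-pel source coordinates of the 4 pixels in the group; they
     * start one row step early because the per-row asm advances them before
     * use. */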
    for (x = 0; x < w; x += 4) {
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };

        for (y = 0; y < h; y++) {
            // step dx4/dy4 one row ahead and extract the sub-pel fractions
            // (the bilinear weights dx and dy) into mm4/mm5
            __asm__ volatile (
                "movq      %0, %%mm4    \n\t"
                "movq      %1, %%mm5    \n\t"
                "paddw     %2, %%mm4    \n\t"
                "paddw     %3, %%mm5    \n\t"
                "movq   %%mm4, %0       \n\t"
                "movq   %%mm5, %1       \n\t"
                "psrlw    $12, %%mm4    \n\t"
                "psrlw    $12, %%mm5    \n\t"
                : "+m" (*dx4), "+m" (*dy4)
                : "m" (*dxy4), "m" (*dyy4));

            // bilinearly interpolate 4 pixels from their 2x2 neighbourhoods,
            // add the rounding constant, shift down and store the result
            __asm__ volatile (
                "movq      %%mm6, %%mm2 \n\t"
                "movq      %%mm6, %%mm1 \n\t"
                "psubw     %%mm4, %%mm2 \n\t"
                "psubw     %%mm5, %%mm1 \n\t"
                "movq      %%mm2, %%mm0 \n\t"
                "movq      %%mm4, %%mm3 \n\t"
                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)

                "movd         %4, %%mm5 \n\t"
                "movd         %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy

                "movd         %2, %%mm5 \n\t"
                "movd         %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
                "paddw        %5, %%mm1 \n\t"
                "paddw     %%mm3, %%mm2 \n\t"
                "paddw     %%mm1, %%mm0 \n\t"
                "paddw     %%mm2, %%mm0 \n\t"

                "psrlw        %6, %%mm0 \n\t"
                "packuswb  %%mm0, %%mm0 \n\t"
                "movd      %%mm0, %0    \n\t"

                : "=m" (dst[x + y * stride])
                : "m" (src[0]), "m" (src[1]),
                  "m" (src[stride]), "m" (src[stride + 1]),
                  "m" (*r4), "m" (shift2));
            src += stride;
        }
        src += 4 - h * stride;
    }
}

#endif /* HAVE_INLINE_ASM */

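/* Install the MMX GMC implementation when inline asm is compiled in and the
 * CPU reports MMX support. */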
av_cold void ff_mpegvideodsp_init_x86(MpegVideoDSPContext *c)
{
#if HAVE_INLINE_ASM
    int cpu_flags = av_get_cpu_flags();

    if (INLINE_MMX(cpu_flags))
        c->gmc = gmc_mmx;
#endif /* HAVE_INLINE_ASM */
}