/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/vc1dsp.h"
#include "fpel.h"
#include "vc1dsp.h"
#include "config.h"

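/* Prototype the external-asm loop filters; the 16-pixel variants are
 * synthesized in C from two calls to the corresponding 8-pixel filter. */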
#define LOOP_FILTER4(EXT) \
void ff_vc1_v_loop_filter4_ ## EXT(uint8_t *src, ptrdiff_t stride, int pq); \
void ff_vc1_h_loop_filter4_ ## EXT(uint8_t *src, ptrdiff_t stride, int pq);
#define LOOP_FILTER816(EXT) \
void ff_vc1_v_loop_filter8_ ## EXT(uint8_t *src, ptrdiff_t stride, int pq); \
void ff_vc1_h_loop_filter8_ ## EXT(uint8_t *src, ptrdiff_t stride, int pq); \
\
static void vc1_v_loop_filter16_ ## EXT(uint8_t *src, ptrdiff_t stride, int pq) \
{ \
    ff_vc1_v_loop_filter8_ ## EXT(src,   stride, pq); \
    ff_vc1_v_loop_filter8_ ## EXT(src+8, stride, pq); \
} \
\
static void vc1_h_loop_filter16_ ## EXT(uint8_t *src, ptrdiff_t stride, int pq) \
{ \
    ff_vc1_h_loop_filter8_ ## EXT(src,          stride, pq); \
    ff_vc1_h_loop_filter8_ ## EXT(src+8*stride, stride, pq); \
}

#if HAVE_X86ASM
LOOP_FILTER4(mmxext)
LOOP_FILTER816(sse2)
LOOP_FILTER4(ssse3)
LOOP_FILTER816(ssse3)

void ff_vc1_h_loop_filter8_sse4(uint8_t *src, ptrdiff_t stride, int pq);

static void vc1_h_loop_filter16_sse4(uint8_t *src, ptrdiff_t stride, int pq)
{
    ff_vc1_h_loop_filter8_sse4(src,          stride, pq);
    ff_vc1_h_loop_filter8_sse4(src+8*stride, stride, pq);
}

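/* Wrap the fullpel put/avg copy routines from fpel.h so they match the
 * mspel function-pointer signature; they serve the (0,0) MC case. */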
#define DECLARE_FUNCTION(OP, DEPTH, INSN) \
static void OP##vc1_mspel_mc00_##DEPTH##INSN(uint8_t *dst, \
        const uint8_t *src, ptrdiff_t stride, int rnd) \
{ \
    ff_ ## OP ## pixels ## DEPTH ## INSN(dst, src, stride, DEPTH); \
}

DECLARE_FUNCTION(put_, 8, _mmx)
DECLARE_FUNCTION(avg_, 8, _mmxext)
DECLARE_FUNCTION(put_, 16, _sse2)
DECLARE_FUNCTION(avg_, 16, _sse2)

#endif /* HAVE_X86ASM */

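/* Prototypes for routines implemented in external assembly: the no-rounding
 * 8x8 chroma MC and the DC-only inverse transforms. */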
void ff_put_vc1_chroma_mc8_nornd_mmx   (uint8_t *dst, uint8_t *src,
                                        ptrdiff_t stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_nornd_mmxext(uint8_t *dst, uint8_t *src,
                                        ptrdiff_t stride, int h, int x, int y);
void ff_put_vc1_chroma_mc8_nornd_ssse3(uint8_t *dst, uint8_t *src,
                                       ptrdiff_t stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_nornd_ssse3(uint8_t *dst, uint8_t *src,
                                       ptrdiff_t stride, int h, int x, int y);
void ff_vc1_inv_trans_4x4_dc_mmxext(uint8_t *dest, ptrdiff_t linesize,
                                    int16_t *block);
void ff_vc1_inv_trans_4x8_dc_mmxext(uint8_t *dest, ptrdiff_t linesize,
                                    int16_t *block);
void ff_vc1_inv_trans_8x4_dc_mmxext(uint8_t *dest, ptrdiff_t linesize,
                                    int16_t *block);
void ff_vc1_inv_trans_8x8_dc_mmxext(uint8_t *dest, ptrdiff_t linesize,
                                    int16_t *block);


av_cold void ff_vc1dsp_init_x86(VC1DSPContext *dsp)
{
    int cpu_flags = av_get_cpu_flags();

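    /* The MMX/MMXEXT init routines set up both inline-asm and external-asm
     * function pointers, hence the double capability check. */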
    if (HAVE_6REGS && INLINE_MMX(cpu_flags))
        if (EXTERNAL_MMX(cpu_flags))
            ff_vc1dsp_init_mmx(dsp);

    if (HAVE_6REGS && INLINE_MMXEXT(cpu_flags))
        if (EXTERNAL_MMXEXT(cpu_flags))
            ff_vc1dsp_init_mmxext(dsp);

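/* Helper macros that wire up the loop-filter pointers for one instruction-set
 * extension at a time. */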
#define ASSIGN_LF4(EXT) \
        dsp->vc1_v_loop_filter4  = ff_vc1_v_loop_filter4_ ## EXT; \
        dsp->vc1_h_loop_filter4  = ff_vc1_h_loop_filter4_ ## EXT
#define ASSIGN_LF816(EXT) \
        dsp->vc1_v_loop_filter8  = ff_vc1_v_loop_filter8_ ## EXT; \
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_ ## EXT; \
        dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_ ## EXT; \
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_ ## EXT

#if HAVE_X86ASM
    if (EXTERNAL_MMX(cpu_flags)) {
        dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_nornd_mmx;

        dsp->put_vc1_mspel_pixels_tab[1][0] = put_vc1_mspel_mc00_8_mmx;
    }
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        ASSIGN_LF4(mmxext);
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_mmxext;

        dsp->avg_vc1_mspel_pixels_tab[1][0] = avg_vc1_mspel_mc00_8_mmxext;

        dsp->vc1_inv_trans_8x8_dc = ff_vc1_inv_trans_8x8_dc_mmxext;
        dsp->vc1_inv_trans_4x8_dc = ff_vc1_inv_trans_4x8_dc_mmxext;
        dsp->vc1_inv_trans_8x4_dc = ff_vc1_inv_trans_8x4_dc_mmxext;
        dsp->vc1_inv_trans_4x4_dc = ff_vc1_inv_trans_4x4_dc_mmxext;
    }
    if (EXTERNAL_SSE2(cpu_flags)) {
        ASSIGN_LF816(sse2);

        dsp->put_vc1_mspel_pixels_tab[0][0] = put_vc1_mspel_mc00_16_sse2;
        dsp->avg_vc1_mspel_pixels_tab[0][0] = avg_vc1_mspel_mc00_16_sse2;
    }
    if (EXTERNAL_SSSE3(cpu_flags)) {
        ASSIGN_LF4(ssse3);
        ASSIGN_LF816(ssse3);
        dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_nornd_ssse3;
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_ssse3;
    }
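    /* SSE4.1 has its own horizontal 8-pixel filter; it overrides the
     * horizontal loop-filter pointers selected above. */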
    if (EXTERNAL_SSE4(cpu_flags)) {
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_sse4;
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse4;
    }
#endif /* HAVE_X86ASM */
}