1 /*
2 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #include "config.h"
22
23 #define _DEFAULT_SOURCE
24 #define _SVID_SOURCE // needed for MAP_ANONYMOUS
25 #define _DARWIN_C_SOURCE // needed for MAP_ANON
26 #include <inttypes.h>
27 #include <math.h>
28 #include <stdio.h>
29 #include <string.h>
30 #if HAVE_MMAP
31 #include <sys/mman.h>
32 #if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
33 #define MAP_ANONYMOUS MAP_ANON
34 #endif
35 #endif
36 #if HAVE_VIRTUALALLOC
37 #define WIN32_LEAN_AND_MEAN
38 #include <windows.h>
39 #endif
40
41 #include "libavutil/attributes.h"
42 #include "libavutil/avassert.h"
43 #include "libavutil/cpu.h"
44 #include "libavutil/imgutils.h"
45 #include "libavutil/intreadwrite.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/mathematics.h"
48 #include "libavutil/opt.h"
49 #include "libavutil/pixdesc.h"
50 #include "libavutil/slicethread.h"
51 #include "libavutil/thread.h"
52 #include "libavutil/aarch64/cpu.h"
53 #include "libavutil/ppc/cpu.h"
54 #include "libavutil/x86/asm.h"
55 #include "libavutil/x86/cpu.h"
56
57 #include "rgb2rgb.h"
58 #include "swscale.h"
59 #include "swscale_internal.h"
60
61 static SwsVector *sws_getIdentityVec(void);
62 static void sws_addVec(SwsVector *a, SwsVector *b);
63 static void sws_shiftVec(SwsVector *a, int shift);
64 static void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level);
65
66 static void handle_formats(SwsContext *c);
67
68 typedef struct FormatEntry {
69 uint8_t is_supported_in :1;
70 uint8_t is_supported_out :1;
71 uint8_t is_supported_endianness :1;
72 } FormatEntry;
73
74 static const FormatEntry format_entries[] = {
75 [AV_PIX_FMT_YUV420P] = { 1, 1 },
76 [AV_PIX_FMT_YUYV422] = { 1, 1 },
77 [AV_PIX_FMT_RGB24] = { 1, 1 },
78 [AV_PIX_FMT_BGR24] = { 1, 1 },
79 [AV_PIX_FMT_YUV422P] = { 1, 1 },
80 [AV_PIX_FMT_YUV444P] = { 1, 1 },
81 [AV_PIX_FMT_YUV410P] = { 1, 1 },
82 [AV_PIX_FMT_YUV411P] = { 1, 1 },
83 [AV_PIX_FMT_GRAY8] = { 1, 1 },
84 [AV_PIX_FMT_MONOWHITE] = { 1, 1 },
85 [AV_PIX_FMT_MONOBLACK] = { 1, 1 },
86 [AV_PIX_FMT_PAL8] = { 1, 0 },
87 [AV_PIX_FMT_YUVJ420P] = { 1, 1 },
88 [AV_PIX_FMT_YUVJ411P] = { 1, 1 },
89 [AV_PIX_FMT_YUVJ422P] = { 1, 1 },
90 [AV_PIX_FMT_YUVJ444P] = { 1, 1 },
91 [AV_PIX_FMT_YVYU422] = { 1, 1 },
92 [AV_PIX_FMT_UYVY422] = { 1, 1 },
93 [AV_PIX_FMT_UYYVYY411] = { 0, 0 },
94 [AV_PIX_FMT_BGR8] = { 1, 1 },
95 [AV_PIX_FMT_BGR4] = { 0, 1 },
96 [AV_PIX_FMT_BGR4_BYTE] = { 1, 1 },
97 [AV_PIX_FMT_RGB8] = { 1, 1 },
98 [AV_PIX_FMT_RGB4] = { 0, 1 },
99 [AV_PIX_FMT_RGB4_BYTE] = { 1, 1 },
100 [AV_PIX_FMT_NV12] = { 1, 1 },
101 [AV_PIX_FMT_NV21] = { 1, 1 },
102 [AV_PIX_FMT_ARGB] = { 1, 1 },
103 [AV_PIX_FMT_RGBA] = { 1, 1 },
104 [AV_PIX_FMT_ABGR] = { 1, 1 },
105 [AV_PIX_FMT_BGRA] = { 1, 1 },
106 [AV_PIX_FMT_0RGB] = { 1, 1 },
107 [AV_PIX_FMT_RGB0] = { 1, 1 },
108 [AV_PIX_FMT_0BGR] = { 1, 1 },
109 [AV_PIX_FMT_BGR0] = { 1, 1 },
110 [AV_PIX_FMT_GRAY9BE] = { 1, 1 },
111 [AV_PIX_FMT_GRAY9LE] = { 1, 1 },
112 [AV_PIX_FMT_GRAY10BE] = { 1, 1 },
113 [AV_PIX_FMT_GRAY10LE] = { 1, 1 },
114 [AV_PIX_FMT_GRAY12BE] = { 1, 1 },
115 [AV_PIX_FMT_GRAY12LE] = { 1, 1 },
116 [AV_PIX_FMT_GRAY14BE] = { 1, 1 },
117 [AV_PIX_FMT_GRAY14LE] = { 1, 1 },
118 [AV_PIX_FMT_GRAY16BE] = { 1, 1 },
119 [AV_PIX_FMT_GRAY16LE] = { 1, 1 },
120 [AV_PIX_FMT_YUV440P] = { 1, 1 },
121 [AV_PIX_FMT_YUVJ440P] = { 1, 1 },
122 [AV_PIX_FMT_YUV440P10LE] = { 1, 1 },
123 [AV_PIX_FMT_YUV440P10BE] = { 1, 1 },
124 [AV_PIX_FMT_YUV440P12LE] = { 1, 1 },
125 [AV_PIX_FMT_YUV440P12BE] = { 1, 1 },
126 [AV_PIX_FMT_YUVA420P] = { 1, 1 },
127 [AV_PIX_FMT_YUVA422P] = { 1, 1 },
128 [AV_PIX_FMT_YUVA444P] = { 1, 1 },
129 [AV_PIX_FMT_YUVA420P9BE] = { 1, 1 },
130 [AV_PIX_FMT_YUVA420P9LE] = { 1, 1 },
131 [AV_PIX_FMT_YUVA422P9BE] = { 1, 1 },
132 [AV_PIX_FMT_YUVA422P9LE] = { 1, 1 },
133 [AV_PIX_FMT_YUVA444P9BE] = { 1, 1 },
134 [AV_PIX_FMT_YUVA444P9LE] = { 1, 1 },
135 [AV_PIX_FMT_YUVA420P10BE]= { 1, 1 },
136 [AV_PIX_FMT_YUVA420P10LE]= { 1, 1 },
137 [AV_PIX_FMT_YUVA422P10BE]= { 1, 1 },
138 [AV_PIX_FMT_YUVA422P10LE]= { 1, 1 },
139 [AV_PIX_FMT_YUVA444P10BE]= { 1, 1 },
140 [AV_PIX_FMT_YUVA444P10LE]= { 1, 1 },
141 [AV_PIX_FMT_YUVA420P16BE]= { 1, 1 },
142 [AV_PIX_FMT_YUVA420P16LE]= { 1, 1 },
143 [AV_PIX_FMT_YUVA422P16BE]= { 1, 1 },
144 [AV_PIX_FMT_YUVA422P16LE]= { 1, 1 },
145 [AV_PIX_FMT_YUVA444P16BE]= { 1, 1 },
146 [AV_PIX_FMT_YUVA444P16LE]= { 1, 1 },
147 [AV_PIX_FMT_RGB48BE] = { 1, 1 },
148 [AV_PIX_FMT_RGB48LE] = { 1, 1 },
149 [AV_PIX_FMT_RGBA64BE] = { 1, 1, 1 },
150 [AV_PIX_FMT_RGBA64LE] = { 1, 1, 1 },
151 [AV_PIX_FMT_RGB565BE] = { 1, 1 },
152 [AV_PIX_FMT_RGB565LE] = { 1, 1 },
153 [AV_PIX_FMT_RGB555BE] = { 1, 1 },
154 [AV_PIX_FMT_RGB555LE] = { 1, 1 },
155 [AV_PIX_FMT_BGR565BE] = { 1, 1 },
156 [AV_PIX_FMT_BGR565LE] = { 1, 1 },
157 [AV_PIX_FMT_BGR555BE] = { 1, 1 },
158 [AV_PIX_FMT_BGR555LE] = { 1, 1 },
159 [AV_PIX_FMT_YUV420P16LE] = { 1, 1 },
160 [AV_PIX_FMT_YUV420P16BE] = { 1, 1 },
161 [AV_PIX_FMT_YUV422P16LE] = { 1, 1 },
162 [AV_PIX_FMT_YUV422P16BE] = { 1, 1 },
163 [AV_PIX_FMT_YUV444P16LE] = { 1, 1 },
164 [AV_PIX_FMT_YUV444P16BE] = { 1, 1 },
165 [AV_PIX_FMT_RGB444LE] = { 1, 1 },
166 [AV_PIX_FMT_RGB444BE] = { 1, 1 },
167 [AV_PIX_FMT_BGR444LE] = { 1, 1 },
168 [AV_PIX_FMT_BGR444BE] = { 1, 1 },
169 [AV_PIX_FMT_YA8] = { 1, 1 },
170 [AV_PIX_FMT_YA16BE] = { 1, 1 },
171 [AV_PIX_FMT_YA16LE] = { 1, 1 },
172 [AV_PIX_FMT_BGR48BE] = { 1, 1 },
173 [AV_PIX_FMT_BGR48LE] = { 1, 1 },
174 [AV_PIX_FMT_BGRA64BE] = { 1, 1, 1 },
175 [AV_PIX_FMT_BGRA64LE] = { 1, 1, 1 },
176 [AV_PIX_FMT_YUV420P9BE] = { 1, 1 },
177 [AV_PIX_FMT_YUV420P9LE] = { 1, 1 },
178 [AV_PIX_FMT_YUV420P10BE] = { 1, 1 },
179 [AV_PIX_FMT_YUV420P10LE] = { 1, 1 },
180 [AV_PIX_FMT_YUV420P12BE] = { 1, 1 },
181 [AV_PIX_FMT_YUV420P12LE] = { 1, 1 },
182 [AV_PIX_FMT_YUV420P14BE] = { 1, 1 },
183 [AV_PIX_FMT_YUV420P14LE] = { 1, 1 },
184 [AV_PIX_FMT_YUV422P9BE] = { 1, 1 },
185 [AV_PIX_FMT_YUV422P9LE] = { 1, 1 },
186 [AV_PIX_FMT_YUV422P10BE] = { 1, 1 },
187 [AV_PIX_FMT_YUV422P10LE] = { 1, 1 },
188 [AV_PIX_FMT_YUV422P12BE] = { 1, 1 },
189 [AV_PIX_FMT_YUV422P12LE] = { 1, 1 },
190 [AV_PIX_FMT_YUV422P14BE] = { 1, 1 },
191 [AV_PIX_FMT_YUV422P14LE] = { 1, 1 },
192 [AV_PIX_FMT_YUV444P9BE] = { 1, 1 },
193 [AV_PIX_FMT_YUV444P9LE] = { 1, 1 },
194 [AV_PIX_FMT_YUV444P10BE] = { 1, 1 },
195 [AV_PIX_FMT_YUV444P10LE] = { 1, 1 },
196 [AV_PIX_FMT_YUV444P12BE] = { 1, 1 },
197 [AV_PIX_FMT_YUV444P12LE] = { 1, 1 },
198 [AV_PIX_FMT_YUV444P14BE] = { 1, 1 },
199 [AV_PIX_FMT_YUV444P14LE] = { 1, 1 },
200 [AV_PIX_FMT_GBRP] = { 1, 1 },
201 [AV_PIX_FMT_GBRP9LE] = { 1, 1 },
202 [AV_PIX_FMT_GBRP9BE] = { 1, 1 },
203 [AV_PIX_FMT_GBRP10LE] = { 1, 1 },
204 [AV_PIX_FMT_GBRP10BE] = { 1, 1 },
205 [AV_PIX_FMT_GBRAP10LE] = { 1, 1 },
206 [AV_PIX_FMT_GBRAP10BE] = { 1, 1 },
207 [AV_PIX_FMT_GBRP12LE] = { 1, 1 },
208 [AV_PIX_FMT_GBRP12BE] = { 1, 1 },
209 [AV_PIX_FMT_GBRAP12LE] = { 1, 1 },
210 [AV_PIX_FMT_GBRAP12BE] = { 1, 1 },
211 [AV_PIX_FMT_GBRP14LE] = { 1, 1 },
212 [AV_PIX_FMT_GBRP14BE] = { 1, 1 },
213 [AV_PIX_FMT_GBRP16LE] = { 1, 1 },
214 [AV_PIX_FMT_GBRP16BE] = { 1, 1 },
215 [AV_PIX_FMT_GBRPF32LE] = { 1, 1 },
216 [AV_PIX_FMT_GBRPF32BE] = { 1, 1 },
217 [AV_PIX_FMT_GBRAPF32LE] = { 1, 1 },
218 [AV_PIX_FMT_GBRAPF32BE] = { 1, 1 },
219 [AV_PIX_FMT_GBRAP] = { 1, 1 },
220 [AV_PIX_FMT_GBRAP16LE] = { 1, 1 },
221 [AV_PIX_FMT_GBRAP16BE] = { 1, 1 },
222 [AV_PIX_FMT_BAYER_BGGR8] = { 1, 0 },
223 [AV_PIX_FMT_BAYER_RGGB8] = { 1, 0 },
224 [AV_PIX_FMT_BAYER_GBRG8] = { 1, 0 },
225 [AV_PIX_FMT_BAYER_GRBG8] = { 1, 0 },
226 [AV_PIX_FMT_BAYER_BGGR16LE] = { 1, 0 },
227 [AV_PIX_FMT_BAYER_BGGR16BE] = { 1, 0 },
228 [AV_PIX_FMT_BAYER_RGGB16LE] = { 1, 0 },
229 [AV_PIX_FMT_BAYER_RGGB16BE] = { 1, 0 },
230 [AV_PIX_FMT_BAYER_GBRG16LE] = { 1, 0 },
231 [AV_PIX_FMT_BAYER_GBRG16BE] = { 1, 0 },
232 [AV_PIX_FMT_BAYER_GRBG16LE] = { 1, 0 },
233 [AV_PIX_FMT_BAYER_GRBG16BE] = { 1, 0 },
234 [AV_PIX_FMT_XYZ12BE] = { 1, 1, 1 },
235 [AV_PIX_FMT_XYZ12LE] = { 1, 1, 1 },
236 [AV_PIX_FMT_AYUV64LE] = { 1, 1},
237 [AV_PIX_FMT_P010LE] = { 1, 1 },
238 [AV_PIX_FMT_P010BE] = { 1, 1 },
239 [AV_PIX_FMT_P016LE] = { 1, 1 },
240 [AV_PIX_FMT_P016BE] = { 1, 1 },
241 [AV_PIX_FMT_GRAYF32LE] = { 1, 1 },
242 [AV_PIX_FMT_GRAYF32BE] = { 1, 1 },
243 [AV_PIX_FMT_YUVA422P12BE] = { 1, 1 },
244 [AV_PIX_FMT_YUVA422P12LE] = { 1, 1 },
245 [AV_PIX_FMT_YUVA444P12BE] = { 1, 1 },
246 [AV_PIX_FMT_YUVA444P12LE] = { 1, 1 },
247 [AV_PIX_FMT_NV24] = { 1, 1 },
248 [AV_PIX_FMT_NV42] = { 1, 1 },
249 [AV_PIX_FMT_Y210LE] = { 1, 0 },
250 [AV_PIX_FMT_X2RGB10LE] = { 1, 1 },
251 [AV_PIX_FMT_X2BGR10LE] = { 1, 1 },
252 [AV_PIX_FMT_P210BE] = { 1, 1 },
253 [AV_PIX_FMT_P210LE] = { 1, 1 },
254 [AV_PIX_FMT_P410BE] = { 1, 1 },
255 [AV_PIX_FMT_P410LE] = { 1, 1 },
256 [AV_PIX_FMT_P216BE] = { 1, 1 },
257 [AV_PIX_FMT_P216LE] = { 1, 1 },
258 [AV_PIX_FMT_P416BE] = { 1, 1 },
259 [AV_PIX_FMT_P416LE] = { 1, 1 },
260 };
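/* Explanatory note (added): pixel formats that do not appear in this table are
 * zero-initialized by the designated initializer above, so they are reported
 * as unsupported for input, output and endianness conversion alike. */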
261
262 int ff_shuffle_filter_coefficients(SwsContext *c, int *filterPos,
263 int filterSize, int16_t *filter,
264 int dstW)
265 {
266 #if ARCH_X86_64
267 int i, j, k;
268 int cpu_flags = av_get_cpu_flags();
269 // avx2 hscale filter processes 16 pixel blocks.
270 if (!filter || dstW % 16 != 0)
271 return 0;
272 if (EXTERNAL_AVX2_FAST(cpu_flags) && !(cpu_flags & AV_CPU_FLAG_SLOW_GATHER)) {
273 if ((c->srcBpc == 8) && (c->dstBpc <= 14)) {
274 int16_t *filterCopy = NULL;
275 if (filterSize > 4) {
276 if (!FF_ALLOC_TYPED_ARRAY(filterCopy, dstW * filterSize))
277 return AVERROR(ENOMEM);
278 memcpy(filterCopy, filter, dstW * filterSize * sizeof(int16_t));
279 }
280 // Do not swap filterPos for pixels which won't be processed by
281 // the main loop.
282 for (i = 0; i + 8 <= dstW; i += 8) {
283 FFSWAP(int, filterPos[i + 2], filterPos[i + 4]);
284 FFSWAP(int, filterPos[i + 3], filterPos[i + 5]);
285 }
286 if (filterSize > 4) {
287 // 16 pixels are processed at a time.
288 for (i = 0; i + 16 <= dstW; i += 16) {
289 // 4 filter coeffs are processed at a time.
290 for (k = 0; k + 4 <= filterSize; k += 4) {
291 for (j = 0; j < 16; ++j) {
292 int from = (i + j) * filterSize + k;
293 int to = i * filterSize + j * 4 + k * 16;
294 memcpy(&filter[to], &filterCopy[from], 4 * sizeof(int16_t));
295 }
296 }
297 }
298 }
299 av_free(filterCopy);
300 }
301 }
302 #endif
303 return 0;
304 }
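/* Explanatory note (added, my reading of the shuffle above): for each block of
 * 16 output pixels the coefficients are regrouped so that every 4-tap group of
 * all 16 pixels ends up in one contiguous 64-coefficient run
 * (to = i * filterSize + j * 4 + k * 16), which is the layout the AVX2 hscale
 * kernel consumes in one pass; the preceding FFSWAP of filterPos entries
 * matches the lane order expected by that kernel. */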
305
306 int sws_isSupportedInput(enum AVPixelFormat pix_fmt)
307 {
308 return (unsigned)pix_fmt < FF_ARRAY_ELEMS(format_entries) ?
309 format_entries[pix_fmt].is_supported_in : 0;
310 }
311
312 int sws_isSupportedOutput(enum AVPixelFormat pix_fmt)
313 {
314 return (unsigned)pix_fmt < FF_ARRAY_ELEMS(format_entries) ?
315 format_entries[pix_fmt].is_supported_out : 0;
316 }
317
318 int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt)
319 {
320 return (unsigned)pix_fmt < FF_ARRAY_ELEMS(format_entries) ?
321 format_entries[pix_fmt].is_supported_endianness : 0;
322 }
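/* Usage sketch (illustrative only; src_fmt and dst_fmt are hypothetical
 * variables): callers can probe these tables before building a context:
 *
 *     if (!sws_isSupportedInput(src_fmt) || !sws_isSupportedOutput(dst_fmt)) {
 *         av_log(NULL, AV_LOG_ERROR, "cannot convert %s -> %s\n",
 *                av_get_pix_fmt_name(src_fmt), av_get_pix_fmt_name(dst_fmt));
 *         return AVERROR(EINVAL);
 *     }
 */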
323
324 static double getSplineCoeff(double a, double b, double c, double d,
325 double dist)
326 {
327 if (dist <= 1.0)
328 return ((d * dist + c) * dist + b) * dist + a;
329 else
330 return getSplineCoeff(0.0,
331 b + 2.0 * c + 3.0 * d,
332 c + 3.0 * d,
333 -b - 3.0 * c - 6.0 * d,
334 dist - 1.0);
335 }
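/* Explanatory note (added): for dist <= 1.0 this evaluates the cubic
 * a + b*dist + c*dist^2 + d*dist^3 by Horner's rule. For dist > 1.0 the
 * recursion starts a new segment at dist - 1.0 whose slope (b + 2c + 3d) and
 * half-curvature (c + 3d) match the previous segment at dist == 1.0; with the
 * coefficients used by the SWS_SPLINE case below the value there is 0, so the
 * kernel tail continues smoothly. */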
336
337 static av_cold int get_local_pos(SwsContext *s, int chr_subsample, int pos, int dir)
338 {
339 if (pos == -1 || pos <= -513) {
340 pos = (128 << chr_subsample) - 128;
341 }
342 pos += 128; // relative to ideal left edge
343 return pos >> chr_subsample;
344 }
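/* Worked example (added; assuming positions are in 1/256 luma-pixel units as
 * elsewhere in swscale): for horizontally subsampled 4:2:0 chroma
 * (chr_subsample == 1) with the default position, pos becomes
 * (128 << 1) - 128 = 128, i.e. the chroma sample sits halfway between two luma
 * samples, and the function returns (128 + 128) >> 1 = 128. */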
345
346 typedef struct {
347 int flag; ///< flag associated to the algorithm
348 const char *description; ///< human-readable description
349 int size_factor; ///< size factor used when initing the filters
350 } ScaleAlgorithm;
351
352 static const ScaleAlgorithm scale_algorithms[] = {
353 { SWS_AREA, "area averaging", 1 /* downscale only, for upscale it is bilinear */ },
354 { SWS_BICUBIC, "bicubic", 4 },
355 { SWS_BICUBLIN, "luma bicubic / chroma bilinear", -1 },
356 { SWS_BILINEAR, "bilinear", 2 },
357 { SWS_FAST_BILINEAR, "fast bilinear", -1 },
358 { SWS_GAUSS, "Gaussian", 8 /* infinite ;) */ },
359 { SWS_LANCZOS, "Lanczos", -1 /* custom */ },
360 { SWS_POINT, "nearest neighbor / point", -1 },
361 { SWS_SINC, "sinc", 20 /* infinite ;) */ },
362 { SWS_SPLINE, "bicubic spline", 20 /* infinite :)*/ },
363 { SWS_X, "experimental", 8 },
364 };
365
366 static av_cold int initFilter(int16_t **outFilter, int32_t **filterPos,
367 int *outFilterSize, int xInc, int srcW,
368 int dstW, int filterAlign, int one,
369 int flags, int cpu_flags,
370 SwsVector *srcFilter, SwsVector *dstFilter,
371 double param[2], int srcPos, int dstPos)
372 {
373 int i;
374 int filterSize;
375 int filter2Size;
376 int minFilterSize;
377 int64_t *filter = NULL;
378 int64_t *filter2 = NULL;
379 const int64_t fone = 1LL << (54 - FFMIN(av_log2(srcW/dstW), 8));
380 int ret = -1;
381
382 emms_c(); // FIXME should not be required but IS (even for non-MMX versions)
383
384 // NOTE: the +3 is for the MMX(+1) / SSE(+3) scaler which reads over the end
385 if (!FF_ALLOC_TYPED_ARRAY(*filterPos, dstW + 3))
386 goto nomem;
387
388 if (FFABS(xInc - 0x10000) < 10 && srcPos == dstPos) { // unscaled
389 int i;
390 filterSize = 1;
391 if (!FF_ALLOCZ_TYPED_ARRAY(filter, dstW * filterSize))
392 goto nomem;
393
394 for (i = 0; i < dstW; i++) {
395 filter[i * filterSize] = fone;
396 (*filterPos)[i] = i;
397 }
398 } else if (flags & SWS_POINT) { // lame looking point sampling mode
399 int i;
400 int64_t xDstInSrc;
401 filterSize = 1;
402 if (!FF_ALLOC_TYPED_ARRAY(filter, dstW * filterSize))
403 goto nomem;
404
405 xDstInSrc = ((dstPos*(int64_t)xInc)>>8) - ((srcPos*0x8000LL)>>7);
406 for (i = 0; i < dstW; i++) {
407 int xx = (xDstInSrc - ((filterSize - 1) << 15) + (1 << 15)) >> 16;
408
409 (*filterPos)[i] = xx;
410 filter[i] = fone;
411 xDstInSrc += xInc;
412 }
413 } else if ((xInc <= (1 << 16) && (flags & SWS_AREA)) ||
414 (flags & SWS_FAST_BILINEAR)) { // bilinear upscale
415 int i;
416 int64_t xDstInSrc;
417 filterSize = 2;
418 if (!FF_ALLOC_TYPED_ARRAY(filter, dstW * filterSize))
419 goto nomem;
420
421 xDstInSrc = ((dstPos*(int64_t)xInc)>>8) - ((srcPos*0x8000LL)>>7);
422 for (i = 0; i < dstW; i++) {
423 int xx = (xDstInSrc - ((filterSize - 1) << 15) + (1 << 15)) >> 16;
424 int j;
425
426 (*filterPos)[i] = xx;
427 // bilinear upscale / linear interpolate / area averaging
428 for (j = 0; j < filterSize; j++) {
429 int64_t coeff = fone - FFABS((int64_t)xx * (1 << 16) - xDstInSrc) * (fone >> 16);
430 if (coeff < 0)
431 coeff = 0;
432 filter[i * filterSize + j] = coeff;
433 xx++;
434 }
435 xDstInSrc += xInc;
436 }
437 } else {
438 int64_t xDstInSrc;
439 int sizeFactor = -1;
440
441 for (i = 0; i < FF_ARRAY_ELEMS(scale_algorithms); i++) {
442 if (flags & scale_algorithms[i].flag && scale_algorithms[i].size_factor > 0) {
443 sizeFactor = scale_algorithms[i].size_factor;
444 break;
445 }
446 }
447 if (flags & SWS_LANCZOS)
448 sizeFactor = param[0] != SWS_PARAM_DEFAULT ? ceil(2 * param[0]) : 6;
449 av_assert0(sizeFactor > 0);
450
451 if (xInc <= 1 << 16)
452 filterSize = 1 + sizeFactor; // upscale
453 else
454 filterSize = 1 + (sizeFactor * srcW + dstW - 1) / dstW;
455
456 filterSize = FFMIN(filterSize, srcW - 2);
457 filterSize = FFMAX(filterSize, 1);
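/* Worked example (added): a bicubic downscale (sizeFactor == 4) from
 * srcW = 1920 to dstW = 1280 gives
 *     filterSize = 1 + (4 * 1920 + 1279) / 1280 = 1 + 6 = 7
 * taps per output pixel, before the later alignment to filterAlign. */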
458
459 if (!FF_ALLOC_TYPED_ARRAY(filter, dstW * filterSize))
460 goto nomem;
461 xDstInSrc = ((dstPos*(int64_t)xInc)>>7) - ((srcPos*0x10000LL)>>7);
462 for (i = 0; i < dstW; i++) {
463 int xx = (xDstInSrc - (filterSize - 2) * (1LL<<16)) / (1 << 17);
464 int j;
465 (*filterPos)[i] = xx;
466 for (j = 0; j < filterSize; j++) {
467 int64_t d = (FFABS(((int64_t)xx * (1 << 17)) - xDstInSrc)) << 13;
468 double floatd;
469 int64_t coeff;
470
471 if (xInc > 1 << 16)
472 d = d * dstW / srcW;
473 floatd = d * (1.0 / (1 << 30));
474
475 if (flags & SWS_BICUBIC) {
476 int64_t B = (param[0] != SWS_PARAM_DEFAULT ? param[0] : 0) * (1 << 24);
477 int64_t C = (param[1] != SWS_PARAM_DEFAULT ? param[1] : 0.6) * (1 << 24);
478
479 if (d >= 1LL << 31) {
480 coeff = 0.0;
481 } else {
482 int64_t dd = (d * d) >> 30;
483 int64_t ddd = (dd * d) >> 30;
484
485 if (d < 1LL << 30)
486 coeff = (12 * (1 << 24) - 9 * B - 6 * C) * ddd +
487 (-18 * (1 << 24) + 12 * B + 6 * C) * dd +
488 (6 * (1 << 24) - 2 * B) * (1 << 30);
489 else
490 coeff = (-B - 6 * C) * ddd +
491 (6 * B + 30 * C) * dd +
492 (-12 * B - 48 * C) * d +
493 (8 * B + 24 * C) * (1 << 30);
494 }
495 coeff /= (1LL<<54)/fone;
496 } else if (flags & SWS_X) {
497 double A = param[0] != SWS_PARAM_DEFAULT ? param[0] : 1.0;
498 double c;
499
500 if (floatd < 1.0)
501 c = cos(floatd * M_PI);
502 else
503 c = -1.0;
504 if (c < 0.0)
505 c = -pow(-c, A);
506 else
507 c = pow(c, A);
508 coeff = (c * 0.5 + 0.5) * fone;
509 } else if (flags & SWS_AREA) {
510 int64_t d2 = d - (1 << 29);
511 if (d2 * xInc < -(1LL << (29 + 16)))
512 coeff = 1.0 * (1LL << (30 + 16));
513 else if (d2 * xInc < (1LL << (29 + 16)))
514 coeff = -d2 * xInc + (1LL << (29 + 16));
515 else
516 coeff = 0.0;
517 coeff *= fone >> (30 + 16);
518 } else if (flags & SWS_GAUSS) {
519 double p = param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
520 coeff = exp2(-p * floatd * floatd) * fone;
521 } else if (flags & SWS_SINC) {
522 coeff = (d ? sin(floatd * M_PI) / (floatd * M_PI) : 1.0) * fone;
523 } else if (flags & SWS_LANCZOS) {
524 double p = param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
525 coeff = (d ? sin(floatd * M_PI) * sin(floatd * M_PI / p) /
526 (floatd * floatd * M_PI * M_PI / p) : 1.0) * fone;
527 if (floatd > p)
528 coeff = 0;
529 } else if (flags & SWS_BILINEAR) {
530 coeff = (1 << 30) - d;
531 if (coeff < 0)
532 coeff = 0;
533 coeff *= fone >> 30;
534 } else if (flags & SWS_SPLINE) {
535 double p = -2.196152422706632;
536 coeff = getSplineCoeff(1.0, 0.0, p, -p - 1.0, floatd) * fone;
537 } else {
538 av_assert0(0);
539 }
540
541 filter[i * filterSize + j] = coeff;
542 xx++;
543 }
544 xDstInSrc += 2 * xInc;
545 }
546 }
547
548 /* apply src & dst Filter to filter -> filter2
549 * av_free(filter);
550 */
551 av_assert0(filterSize > 0);
552 filter2Size = filterSize;
553 if (srcFilter)
554 filter2Size += srcFilter->length - 1;
555 if (dstFilter)
556 filter2Size += dstFilter->length - 1;
557 av_assert0(filter2Size > 0);
558 if (!FF_ALLOCZ_TYPED_ARRAY(filter2, dstW * filter2Size))
559 goto nomem;
560 for (i = 0; i < dstW; i++) {
561 int j, k;
562
563 if (srcFilter) {
564 for (k = 0; k < srcFilter->length; k++) {
565 for (j = 0; j < filterSize; j++)
566 filter2[i * filter2Size + k + j] +=
567 srcFilter->coeff[k] * filter[i * filterSize + j];
568 }
569 } else {
570 for (j = 0; j < filterSize; j++)
571 filter2[i * filter2Size + j] = filter[i * filterSize + j];
572 }
573 // FIXME dstFilter
574
575 (*filterPos)[i] += (filterSize - 1) / 2 - (filter2Size - 1) / 2;
576 }
577 av_freep(&filter);
578
579 /* try to reduce the filter-size (step1 find size and shift left) */
580 // Assume it is near normalized (*0.5 or *2.0 is OK but * 0.001 is not).
581 minFilterSize = 0;
582 for (i = dstW - 1; i >= 0; i--) {
583 int min = filter2Size;
584 int j;
585 int64_t cutOff = 0.0;
586
587 /* get rid of near zero elements on the left by shifting left */
588 for (j = 0; j < filter2Size; j++) {
589 int k;
590 cutOff += FFABS(filter2[i * filter2Size]);
591
592 if (cutOff > SWS_MAX_REDUCE_CUTOFF * fone)
593 break;
594
595 /* preserve monotonicity because the core can't handle the
596 * filter otherwise */
597 if (i < dstW - 1 && (*filterPos)[i] >= (*filterPos)[i + 1])
598 break;
599
600 // move filter coefficients left
601 for (k = 1; k < filter2Size; k++)
602 filter2[i * filter2Size + k - 1] = filter2[i * filter2Size + k];
603 filter2[i * filter2Size + k - 1] = 0;
604 (*filterPos)[i]++;
605 }
606
607 cutOff = 0;
608 /* count near zeros on the right */
609 for (j = filter2Size - 1; j > 0; j--) {
610 cutOff += FFABS(filter2[i * filter2Size + j]);
611
612 if (cutOff > SWS_MAX_REDUCE_CUTOFF * fone)
613 break;
614 min--;
615 }
616
617 if (min > minFilterSize)
618 minFilterSize = min;
619 }
620
621 if (PPC_ALTIVEC(cpu_flags)) {
622 // we can handle the special case 4, so we don't want to go the full 8
623 if (minFilterSize < 5)
624 filterAlign = 4;
625
626 /* We really don't want to waste our time doing useless computation, so
627 * fall back on the scalar C code for very small filters.
628 * Vectorizing is worth it only if you have a decent-sized vector. */
629 if (minFilterSize < 3)
630 filterAlign = 1;
631 }
632
633 if (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX) {
634 // special case for unscaled vertical filtering
635 if (minFilterSize == 1 && filterAlign == 2)
636 filterAlign = 1;
637 }
638
639 av_assert0(minFilterSize > 0);
640 filterSize = (minFilterSize + (filterAlign - 1)) & (~(filterAlign - 1));
641 av_assert0(filterSize > 0);
642 filter = av_malloc_array(dstW, filterSize * sizeof(*filter));
643 if (!filter)
644 goto nomem;
645 if (filterSize >= MAX_FILTER_SIZE * 16 /
646 ((flags & SWS_ACCURATE_RND) ? APCK_SIZE : 16)) {
647 ret = RETCODE_USE_CASCADE;
648 goto fail;
649 }
650 *outFilterSize = filterSize;
651
652 if (flags & SWS_PRINT_INFO)
653 av_log(NULL, AV_LOG_VERBOSE,
654 "SwScaler: reducing / aligning filtersize %d -> %d\n",
655 filter2Size, filterSize);
656 /* try to reduce the filter-size (step2 reduce it) */
657 for (i = 0; i < dstW; i++) {
658 int j;
659
660 for (j = 0; j < filterSize; j++) {
661 if (j >= filter2Size)
662 filter[i * filterSize + j] = 0;
663 else
664 filter[i * filterSize + j] = filter2[i * filter2Size + j];
665 if ((flags & SWS_BITEXACT) && j >= minFilterSize)
666 filter[i * filterSize + j] = 0;
667 }
668 }
669
670 // FIXME try to align filterPos if possible
671
672 // fix borders
673 for (i = 0; i < dstW; i++) {
674 int j;
675 if ((*filterPos)[i] < 0) {
676 // move filter coefficients left to compensate for filterPos
677 for (j = 1; j < filterSize; j++) {
678 int left = FFMAX(j + (*filterPos)[i], 0);
679 filter[i * filterSize + left] += filter[i * filterSize + j];
680 filter[i * filterSize + j] = 0;
681 }
682 (*filterPos)[i]= 0;
683 }
684
685 if ((*filterPos)[i] + filterSize > srcW) {
686 int shift = (*filterPos)[i] + FFMIN(filterSize - srcW, 0);
687 int64_t acc = 0;
688
689 for (j = filterSize - 1; j >= 0; j--) {
690 if ((*filterPos)[i] + j >= srcW) {
691 acc += filter[i * filterSize + j];
692 filter[i * filterSize + j] = 0;
693 }
694 }
695 for (j = filterSize - 1; j >= 0; j--) {
696 if (j < shift) {
697 filter[i * filterSize + j] = 0;
698 } else {
699 filter[i * filterSize + j] = filter[i * filterSize + j - shift];
700 }
701 }
702
703 (*filterPos)[i]-= shift;
704 filter[i * filterSize + srcW - 1 - (*filterPos)[i]] += acc;
705 }
706 av_assert0((*filterPos)[i] >= 0);
707 av_assert0((*filterPos)[i] < srcW);
708 if ((*filterPos)[i] + filterSize > srcW) {
709 for (j = 0; j < filterSize; j++) {
710 av_assert0((*filterPos)[i] + j < srcW || !filter[i * filterSize + j]);
711 }
712 }
713 }
714
715 // Note the +1 is for the MMX scaler which reads over the end
716 /* align at 16 for AltiVec (needed by hScale_altivec_real) */
717 if (!FF_ALLOCZ_TYPED_ARRAY(*outFilter, *outFilterSize * (dstW + 3)))
718 goto nomem;
719
720 /* normalize & store in outFilter */
721 for (i = 0; i < dstW; i++) {
722 int j;
723 int64_t error = 0;
724 int64_t sum = 0;
725
726 for (j = 0; j < filterSize; j++) {
727 sum += filter[i * filterSize + j];
728 }
729 sum = (sum + one / 2) / one;
730 if (!sum) {
731 av_log(NULL, AV_LOG_WARNING, "SwScaler: zero vector in scaling\n");
732 sum = 1;
733 }
734 for (j = 0; j < *outFilterSize; j++) {
735 int64_t v = filter[i * filterSize + j] + error;
736 int intV = ROUNDED_DIV(v, sum);
737 (*outFilter)[i * (*outFilterSize) + j] = intV;
738 error = v - intV * sum;
739 }
740 }
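/* Explanatory note (added): the error term above carries the rounding
 * remainder from one coefficient to the next, so each row of quantized
 * coefficients sums to the intended fixed-point scale (the "one" argument),
 * keeping flat input flat after filtering. */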
741
742 (*filterPos)[dstW + 0] =
743 (*filterPos)[dstW + 1] =
744 (*filterPos)[dstW + 2] = (*filterPos)[dstW - 1]; /* the MMX/SSE scaler will
745 * read over the end */
746 for (i = 0; i < *outFilterSize; i++) {
747 int k = (dstW - 1) * (*outFilterSize) + i;
748 (*outFilter)[k + 1 * (*outFilterSize)] =
749 (*outFilter)[k + 2 * (*outFilterSize)] =
750 (*outFilter)[k + 3 * (*outFilterSize)] = (*outFilter)[k];
751 }
752
753 ret = 0;
754 goto done;
755 nomem:
756 ret = AVERROR(ENOMEM);
757 fail:
758 if(ret < 0)
759 av_log(NULL, ret == RETCODE_USE_CASCADE ? AV_LOG_DEBUG : AV_LOG_ERROR, "sws: initFilter failed\n");
760 done:
761 av_free(filter);
762 av_free(filter2);
763 return ret;
764 }
765
766 static void fill_rgb2yuv_table(SwsContext *c, const int table[4], int dstRange)
767 {
768 int64_t W, V, Z, Cy, Cu, Cv;
769 int64_t vr = table[0];
770 int64_t ub = table[1];
771 int64_t ug = -table[2];
772 int64_t vg = -table[3];
773 int64_t ONE = 65536;
774 int64_t cy = ONE;
775 uint8_t *p = (uint8_t*)c->input_rgb2yuv_table;
776 int i;
777 static const int8_t map[] = {
778 BY_IDX, GY_IDX, -1 , BY_IDX, BY_IDX, GY_IDX, -1 , BY_IDX,
779 RY_IDX, -1 , GY_IDX, RY_IDX, RY_IDX, -1 , GY_IDX, RY_IDX,
780 RY_IDX, GY_IDX, -1 , RY_IDX, RY_IDX, GY_IDX, -1 , RY_IDX,
781 BY_IDX, -1 , GY_IDX, BY_IDX, BY_IDX, -1 , GY_IDX, BY_IDX,
782 BU_IDX, GU_IDX, -1 , BU_IDX, BU_IDX, GU_IDX, -1 , BU_IDX,
783 RU_IDX, -1 , GU_IDX, RU_IDX, RU_IDX, -1 , GU_IDX, RU_IDX,
784 RU_IDX, GU_IDX, -1 , RU_IDX, RU_IDX, GU_IDX, -1 , RU_IDX,
785 BU_IDX, -1 , GU_IDX, BU_IDX, BU_IDX, -1 , GU_IDX, BU_IDX,
786 BV_IDX, GV_IDX, -1 , BV_IDX, BV_IDX, GV_IDX, -1 , BV_IDX,
787 RV_IDX, -1 , GV_IDX, RV_IDX, RV_IDX, -1 , GV_IDX, RV_IDX,
788 RV_IDX, GV_IDX, -1 , RV_IDX, RV_IDX, GV_IDX, -1 , RV_IDX,
789 BV_IDX, -1 , GV_IDX, BV_IDX, BV_IDX, -1 , GV_IDX, BV_IDX,
790 RY_IDX, BY_IDX, RY_IDX, BY_IDX, RY_IDX, BY_IDX, RY_IDX, BY_IDX,
791 BY_IDX, RY_IDX, BY_IDX, RY_IDX, BY_IDX, RY_IDX, BY_IDX, RY_IDX,
792 GY_IDX, -1 , GY_IDX, -1 , GY_IDX, -1 , GY_IDX, -1 ,
793 -1 , GY_IDX, -1 , GY_IDX, -1 , GY_IDX, -1 , GY_IDX,
794 RU_IDX, BU_IDX, RU_IDX, BU_IDX, RU_IDX, BU_IDX, RU_IDX, BU_IDX,
795 BU_IDX, RU_IDX, BU_IDX, RU_IDX, BU_IDX, RU_IDX, BU_IDX, RU_IDX,
796 GU_IDX, -1 , GU_IDX, -1 , GU_IDX, -1 , GU_IDX, -1 ,
797 -1 , GU_IDX, -1 , GU_IDX, -1 , GU_IDX, -1 , GU_IDX,
798 RV_IDX, BV_IDX, RV_IDX, BV_IDX, RV_IDX, BV_IDX, RV_IDX, BV_IDX,
799 BV_IDX, RV_IDX, BV_IDX, RV_IDX, BV_IDX, RV_IDX, BV_IDX, RV_IDX,
800 GV_IDX, -1 , GV_IDX, -1 , GV_IDX, -1 , GV_IDX, -1 ,
801 -1 , GV_IDX, -1 , GV_IDX, -1 , GV_IDX, -1 , GV_IDX, //23
802 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , //24
803 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , //25
804 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , //26
805 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , //27
806 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , //28
807 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , //29
808 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , //30
809 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , //31
810 BY_IDX, GY_IDX, RY_IDX, -1 , -1 , -1 , -1 , -1 , //32
811 BU_IDX, GU_IDX, RU_IDX, -1 , -1 , -1 , -1 , -1 , //33
812 BV_IDX, GV_IDX, RV_IDX, -1 , -1 , -1 , -1 , -1 , //34
813 };
814
815 dstRange = 0; //FIXME range = 1 is handled elsewhere
816
817 if (!dstRange) {
818 cy = cy * 255 / 219;
819 } else {
820 vr = vr * 224 / 255;
821 ub = ub * 224 / 255;
822 ug = ug * 224 / 255;
823 vg = vg * 224 / 255;
824 }
825 W = ROUNDED_DIV(ONE*ONE*ug, ub);
826 V = ROUNDED_DIV(ONE*ONE*vg, vr);
827 Z = ONE*ONE-W-V;
828
829 Cy = ROUNDED_DIV(cy*Z, ONE);
830 Cu = ROUNDED_DIV(ub*Z, ONE);
831 Cv = ROUNDED_DIV(vr*Z, ONE);
832
833 c->input_rgb2yuv_table[RY_IDX] = -ROUNDED_DIV((1 << RGB2YUV_SHIFT)*V , Cy);
834 c->input_rgb2yuv_table[GY_IDX] = ROUNDED_DIV((1 << RGB2YUV_SHIFT)*ONE*ONE , Cy);
835 c->input_rgb2yuv_table[BY_IDX] = -ROUNDED_DIV((1 << RGB2YUV_SHIFT)*W , Cy);
836
837 c->input_rgb2yuv_table[RU_IDX] = ROUNDED_DIV((1 << RGB2YUV_SHIFT)*V , Cu);
838 c->input_rgb2yuv_table[GU_IDX] = -ROUNDED_DIV((1 << RGB2YUV_SHIFT)*ONE*ONE , Cu);
839 c->input_rgb2yuv_table[BU_IDX] = ROUNDED_DIV((1 << RGB2YUV_SHIFT)*(Z+W) , Cu);
840
841 c->input_rgb2yuv_table[RV_IDX] = ROUNDED_DIV((1 << RGB2YUV_SHIFT)*(V+Z) , Cv);
842 c->input_rgb2yuv_table[GV_IDX] = -ROUNDED_DIV((1 << RGB2YUV_SHIFT)*ONE*ONE , Cv);
843 c->input_rgb2yuv_table[BV_IDX] = ROUNDED_DIV((1 << RGB2YUV_SHIFT)*W , Cv);
844
845 if(/*!dstRange && */!memcmp(table, ff_yuv2rgb_coeffs[SWS_CS_DEFAULT], sizeof(ff_yuv2rgb_coeffs[SWS_CS_DEFAULT]))) {
846 c->input_rgb2yuv_table[BY_IDX] = ((int)(0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5));
847 c->input_rgb2yuv_table[BV_IDX] = (-(int)(0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5));
848 c->input_rgb2yuv_table[BU_IDX] = ((int)(0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5));
849 c->input_rgb2yuv_table[GY_IDX] = ((int)(0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5));
850 c->input_rgb2yuv_table[GV_IDX] = (-(int)(0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5));
851 c->input_rgb2yuv_table[GU_IDX] = (-(int)(0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5));
852 c->input_rgb2yuv_table[RY_IDX] = ((int)(0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5));
853 c->input_rgb2yuv_table[RV_IDX] = ((int)(0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5));
854 c->input_rgb2yuv_table[RU_IDX] = (-(int)(0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5));
855 }
856 for(i=0; i<FF_ARRAY_ELEMS(map); i++)
857 AV_WL16(p + 16*4 + 2*i, map[i] >= 0 ? c->input_rgb2yuv_table[map[i]] : 0);
858 }
859
860 static void fill_xyztables(struct SwsContext *c)
861 {
862 int i;
863 double xyzgamma = XYZ_GAMMA;
864 double rgbgamma = 1.0 / RGB_GAMMA;
865 double xyzgammainv = 1.0 / XYZ_GAMMA;
866 double rgbgammainv = RGB_GAMMA;
867 static const int16_t xyz2rgb_matrix[3][4] = {
868 {13270, -6295, -2041},
869 {-3969, 7682, 170},
870 { 228, -835, 4329} };
871 static const int16_t rgb2xyz_matrix[3][4] = {
872 {1689, 1464, 739},
873 { 871, 2929, 296},
874 { 79, 488, 3891} };
875 static int16_t xyzgamma_tab[4096], rgbgamma_tab[4096], xyzgammainv_tab[4096], rgbgammainv_tab[4096];
876
877 memcpy(c->xyz2rgb_matrix, xyz2rgb_matrix, sizeof(c->xyz2rgb_matrix));
878 memcpy(c->rgb2xyz_matrix, rgb2xyz_matrix, sizeof(c->rgb2xyz_matrix));
879 c->xyzgamma = xyzgamma_tab;
880 c->rgbgamma = rgbgamma_tab;
881 c->xyzgammainv = xyzgammainv_tab;
882 c->rgbgammainv = rgbgammainv_tab;
883
884 if (rgbgamma_tab[4095])
885 return;
886
887 /* set gamma vectors */
888 for (i = 0; i < 4096; i++) {
889 xyzgamma_tab[i] = lrint(pow(i / 4095.0, xyzgamma) * 4095.0);
890 rgbgamma_tab[i] = lrint(pow(i / 4095.0, rgbgamma) * 4095.0);
891 xyzgammainv_tab[i] = lrint(pow(i / 4095.0, xyzgammainv) * 4095.0);
892 rgbgammainv_tab[i] = lrint(pow(i / 4095.0, rgbgammainv) * 4095.0);
893 }
894 }
895
896 static int range_override_needed(enum AVPixelFormat format)
897 {
898 return !isYUV(format) && !isGray(format);
899 }
900
901 int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4],
902 int srcRange, const int table[4], int dstRange,
903 int brightness, int contrast, int saturation)
904 {
905 const AVPixFmtDescriptor *desc_dst;
906 const AVPixFmtDescriptor *desc_src;
907 int need_reinit = 0;
908
909 if (c->nb_slice_ctx) {
910 int parent_ret = 0;
911 for (int i = 0; i < c->nb_slice_ctx; i++) {
912 int ret = sws_setColorspaceDetails(c->slice_ctx[i], inv_table,
913 srcRange, table, dstRange,
914 brightness, contrast, saturation);
915 if (ret < 0)
916 parent_ret = ret;
917 }
918
919 return parent_ret;
920 }
921
922 handle_formats(c);
923 desc_dst = av_pix_fmt_desc_get(c->dstFormat);
924 desc_src = av_pix_fmt_desc_get(c->srcFormat);
925
926 if(range_override_needed(c->dstFormat))
927 dstRange = 0;
928 if(range_override_needed(c->srcFormat))
929 srcRange = 0;
930
931 if (c->srcRange != srcRange ||
932 c->dstRange != dstRange ||
933 c->brightness != brightness ||
934 c->contrast != contrast ||
935 c->saturation != saturation ||
936 memcmp(c->srcColorspaceTable, inv_table, sizeof(int) * 4) ||
937 memcmp(c->dstColorspaceTable, table, sizeof(int) * 4)
938 )
939 need_reinit = 1;
940
941 memmove(c->srcColorspaceTable, inv_table, sizeof(int) * 4);
942 memmove(c->dstColorspaceTable, table, sizeof(int) * 4);
943
944
945
946 c->brightness = brightness;
947 c->contrast = contrast;
948 c->saturation = saturation;
949 c->srcRange = srcRange;
950 c->dstRange = dstRange;
951
952 //The srcBpc check is possibly wrong but we seem to lack a definitive reference to test this
953 //and what we have in ticket 2939 looks better with this check
954 if (need_reinit && (c->srcBpc == 8 || !isYUV(c->srcFormat)))
955 ff_sws_init_range_convert(c);
956
957 c->dstFormatBpp = av_get_bits_per_pixel(desc_dst);
958 c->srcFormatBpp = av_get_bits_per_pixel(desc_src);
959
960 if (c->cascaded_context[c->cascaded_mainindex])
961 return sws_setColorspaceDetails(c->cascaded_context[c->cascaded_mainindex],inv_table, srcRange,table, dstRange, brightness, contrast, saturation);
962
963 if (!need_reinit)
964 return 0;
965
966 if ((isYUV(c->dstFormat) || isGray(c->dstFormat)) && (isYUV(c->srcFormat) || isGray(c->srcFormat))) {
967 if (!c->cascaded_context[0] &&
968 memcmp(c->dstColorspaceTable, c->srcColorspaceTable, sizeof(int) * 4) &&
969 c->srcW && c->srcH && c->dstW && c->dstH) {
970 enum AVPixelFormat tmp_format;
971 int tmp_width, tmp_height;
972 int srcW = c->srcW;
973 int srcH = c->srcH;
974 int dstW = c->dstW;
975 int dstH = c->dstH;
976 int ret;
977 av_log(c, AV_LOG_VERBOSE, "YUV color matrix differs for YUV->YUV, using intermediate RGB to convert\n");
978
979 if (isNBPS(c->dstFormat) || is16BPS(c->dstFormat)) {
980 if (isALPHA(c->srcFormat) && isALPHA(c->dstFormat)) {
981 tmp_format = AV_PIX_FMT_BGRA64;
982 } else {
983 tmp_format = AV_PIX_FMT_BGR48;
984 }
985 } else {
986 if (isALPHA(c->srcFormat) && isALPHA(c->dstFormat)) {
987 tmp_format = AV_PIX_FMT_BGRA;
988 } else {
989 tmp_format = AV_PIX_FMT_BGR24;
990 }
991 }
992
993 if (srcW*srcH > dstW*dstH) {
994 tmp_width = dstW;
995 tmp_height = dstH;
996 } else {
997 tmp_width = srcW;
998 tmp_height = srcH;
999 }
1000
1001 ret = av_image_alloc(c->cascaded_tmp, c->cascaded_tmpStride,
1002 tmp_width, tmp_height, tmp_format, 64);
1003 if (ret < 0)
1004 return ret;
1005
1006 c->cascaded_context[0] = sws_alloc_set_opts(srcW, srcH, c->srcFormat,
1007 tmp_width, tmp_height, tmp_format,
1008 c->flags, c->param);
1009 if (!c->cascaded_context[0])
1010 return -1;
1011
1012 c->cascaded_context[0]->alphablend = c->alphablend;
1013 ret = sws_init_context(c->cascaded_context[0], NULL , NULL);
1014 if (ret < 0)
1015 return ret;
1016 // we set both src and dst colorspace details here, since the RGB intermediate side will be ignored
1017 sws_setColorspaceDetails(c->cascaded_context[0], inv_table,
1018 srcRange, table, dstRange,
1019 brightness, contrast, saturation);
1020
1021 c->cascaded_context[1] = sws_alloc_set_opts(tmp_width, tmp_height, tmp_format,
1022 dstW, dstH, c->dstFormat,
1023 c->flags, c->param);
1024 if (!c->cascaded_context[1])
1025 return -1;
1026 c->cascaded_context[1]->srcRange = srcRange;
1027 c->cascaded_context[1]->dstRange = dstRange;
1028 ret = sws_init_context(c->cascaded_context[1], NULL , NULL);
1029 if (ret < 0)
1030 return ret;
1031 sws_setColorspaceDetails(c->cascaded_context[1], inv_table,
1032 srcRange, table, dstRange,
1033 0, 1 << 16, 1 << 16);
1034 return 0;
1035 }
1036 //We do not support this combination currently, we need to cascade more contexts to compensate
1037 if (c->cascaded_context[0] && memcmp(c->dstColorspaceTable, c->srcColorspaceTable, sizeof(int) * 4))
1038 return -1; //AVERROR_PATCHWELCOME;
1039 return 0;
1040 }
1041
1042 if (!isYUV(c->dstFormat) && !isGray(c->dstFormat)) {
1043 ff_yuv2rgb_c_init_tables(c, inv_table, srcRange, brightness,
1044 contrast, saturation);
1045 // FIXME factorize
1046
1047 #if ARCH_PPC
1048 ff_yuv2rgb_init_tables_ppc(c, inv_table, brightness,
1049 contrast, saturation);
1050 #endif
1051 }
1052
1053 fill_rgb2yuv_table(c, table, dstRange);
1054
1055 return 0;
1056 }
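/* Usage sketch (illustrative; sws_ctx is a hypothetical, already-configured
 * context): switch to BT.709 matrices with full-range input, limited-range
 * output and neutral brightness/contrast/saturation (16.16 fixed point,
 * 1 << 16 == 1.0):
 *
 *     const int *coefs = sws_getCoefficients(SWS_CS_ITU709);
 *     sws_setColorspaceDetails(sws_ctx, coefs, 1, coefs, 0,
 *                              0, 1 << 16, 1 << 16);
 */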
1057
1058 int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table,
1059 int *srcRange, int **table, int *dstRange,
1060 int *brightness, int *contrast, int *saturation)
1061 {
1062 if (!c )
1063 return -1;
1064
1065 if (c->nb_slice_ctx) {
1066 return sws_getColorspaceDetails(c->slice_ctx[0], inv_table, srcRange,
1067 table, dstRange, brightness, contrast,
1068 saturation);
1069 }
1070
1071 *inv_table = c->srcColorspaceTable;
1072 *table = c->dstColorspaceTable;
1073 *srcRange = range_override_needed(c->srcFormat) ? 1 : c->srcRange;
1074 *dstRange = range_override_needed(c->dstFormat) ? 1 : c->dstRange;
1075 *brightness = c->brightness;
1076 *contrast = c->contrast;
1077 *saturation = c->saturation;
1078
1079 return 0;
1080 }
1081
1082 static int handle_jpeg(enum AVPixelFormat *format)
1083 {
1084 switch (*format) {
1085 case AV_PIX_FMT_YUVJ420P:
1086 *format = AV_PIX_FMT_YUV420P;
1087 return 1;
1088 case AV_PIX_FMT_YUVJ411P:
1089 *format = AV_PIX_FMT_YUV411P;
1090 return 1;
1091 case AV_PIX_FMT_YUVJ422P:
1092 *format = AV_PIX_FMT_YUV422P;
1093 return 1;
1094 case AV_PIX_FMT_YUVJ444P:
1095 *format = AV_PIX_FMT_YUV444P;
1096 return 1;
1097 case AV_PIX_FMT_YUVJ440P:
1098 *format = AV_PIX_FMT_YUV440P;
1099 return 1;
1100 case AV_PIX_FMT_GRAY8:
1101 case AV_PIX_FMT_YA8:
1102 case AV_PIX_FMT_GRAY9LE:
1103 case AV_PIX_FMT_GRAY9BE:
1104 case AV_PIX_FMT_GRAY10LE:
1105 case AV_PIX_FMT_GRAY10BE:
1106 case AV_PIX_FMT_GRAY12LE:
1107 case AV_PIX_FMT_GRAY12BE:
1108 case AV_PIX_FMT_GRAY14LE:
1109 case AV_PIX_FMT_GRAY14BE:
1110 case AV_PIX_FMT_GRAY16LE:
1111 case AV_PIX_FMT_GRAY16BE:
1112 case AV_PIX_FMT_YA16BE:
1113 case AV_PIX_FMT_YA16LE:
1114 return 1;
1115 default:
1116 return 0;
1117 }
1118 }
1119
1120 static int handle_0alpha(enum AVPixelFormat *format)
1121 {
1122 switch (*format) {
1123 case AV_PIX_FMT_0BGR : *format = AV_PIX_FMT_ABGR ; return 1;
1124 case AV_PIX_FMT_BGR0 : *format = AV_PIX_FMT_BGRA ; return 4;
1125 case AV_PIX_FMT_0RGB : *format = AV_PIX_FMT_ARGB ; return 1;
1126 case AV_PIX_FMT_RGB0 : *format = AV_PIX_FMT_RGBA ; return 4;
1127 default: return 0;
1128 }
1129 }
1130
1131 static int handle_xyz(enum AVPixelFormat *format)
1132 {
1133 switch (*format) {
1134 case AV_PIX_FMT_XYZ12BE : *format = AV_PIX_FMT_RGB48BE; return 1;
1135 case AV_PIX_FMT_XYZ12LE : *format = AV_PIX_FMT_RGB48LE; return 1;
1136 default: return 0;
1137 }
1138 }
1139
1140 static void handle_formats(SwsContext *c)
1141 {
1142 c->src0Alpha |= handle_0alpha(&c->srcFormat);
1143 c->dst0Alpha |= handle_0alpha(&c->dstFormat);
1144 c->srcXYZ |= handle_xyz(&c->srcFormat);
1145 c->dstXYZ |= handle_xyz(&c->dstFormat);
1146 if (c->srcXYZ || c->dstXYZ)
1147 fill_xyztables(c);
1148 }
1149
1150 SwsContext *sws_alloc_context(void)
1151 {
1152 SwsContext *c = av_mallocz(sizeof(SwsContext));
1153
1154 av_assert0(offsetof(SwsContext, redDither) + DITHER32_INT == offsetof(SwsContext, dither32));
1155
1156 if (c) {
1157 c->av_class = &ff_sws_context_class;
1158 av_opt_set_defaults(c);
1159 atomic_init(&c->stride_unaligned_warned, 0);
1160 atomic_init(&c->data_unaligned_warned, 0);
1161 }
1162
1163 return c;
1164 }
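/* Allocation sketch (illustrative; option names assumed to be those defined in
 * libswscale/options.c):
 *
 *     SwsContext *sws = sws_alloc_context();
 *     if (!sws)
 *         return AVERROR(ENOMEM);
 *     av_opt_set_int(sws, "srcw", 1920, 0);
 *     av_opt_set_int(sws, "srch", 1080, 0);
 *     av_opt_set_pixel_fmt(sws, "src_format", AV_PIX_FMT_YUV420P, 0);
 *     av_opt_set_int(sws, "dstw", 1280, 0);
 *     av_opt_set_int(sws, "dsth", 720, 0);
 *     av_opt_set_pixel_fmt(sws, "dst_format", AV_PIX_FMT_RGB24, 0);
 *     av_opt_set_int(sws, "sws_flags", SWS_BICUBIC, 0);
 *     if (sws_init_context(sws, NULL, NULL) < 0)
 *         sws_freeContext(sws);
 */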
1165
1166 static uint16_t * alloc_gamma_tbl(double e)
1167 {
1168 int i = 0;
1169 uint16_t * tbl;
1170 tbl = (uint16_t*)av_malloc(sizeof(uint16_t) * 1 << 16);
1171 if (!tbl)
1172 return NULL;
1173
1174 for (i = 0; i < 65536; ++i) {
1175 tbl[i] = pow(i / 65535.0, e) * 65535.0;
1176 }
1177 return tbl;
1178 }
1179
1180 static enum AVPixelFormat alphaless_fmt(enum AVPixelFormat fmt)
1181 {
1182 switch(fmt) {
1183 case AV_PIX_FMT_ARGB: return AV_PIX_FMT_RGB24;
1184 case AV_PIX_FMT_RGBA: return AV_PIX_FMT_RGB24;
1185 case AV_PIX_FMT_ABGR: return AV_PIX_FMT_BGR24;
1186 case AV_PIX_FMT_BGRA: return AV_PIX_FMT_BGR24;
1187 case AV_PIX_FMT_YA8: return AV_PIX_FMT_GRAY8;
1188
1189 case AV_PIX_FMT_YUVA420P: return AV_PIX_FMT_YUV420P;
1190 case AV_PIX_FMT_YUVA422P: return AV_PIX_FMT_YUV422P;
1191 case AV_PIX_FMT_YUVA444P: return AV_PIX_FMT_YUV444P;
1192
1193 case AV_PIX_FMT_GBRAP: return AV_PIX_FMT_GBRP;
1194
1195 case AV_PIX_FMT_GBRAP10LE: return AV_PIX_FMT_GBRP10;
1196 case AV_PIX_FMT_GBRAP10BE: return AV_PIX_FMT_GBRP10;
1197
1198 case AV_PIX_FMT_GBRAP12LE: return AV_PIX_FMT_GBRP12;
1199 case AV_PIX_FMT_GBRAP12BE: return AV_PIX_FMT_GBRP12;
1200
1201 case AV_PIX_FMT_GBRAP16LE: return AV_PIX_FMT_GBRP16;
1202 case AV_PIX_FMT_GBRAP16BE: return AV_PIX_FMT_GBRP16;
1203
1204 case AV_PIX_FMT_RGBA64LE: return AV_PIX_FMT_RGB48;
1205 case AV_PIX_FMT_RGBA64BE: return AV_PIX_FMT_RGB48;
1206 case AV_PIX_FMT_BGRA64LE: return AV_PIX_FMT_BGR48;
1207 case AV_PIX_FMT_BGRA64BE: return AV_PIX_FMT_BGR48;
1208
1209 case AV_PIX_FMT_YA16BE: return AV_PIX_FMT_GRAY16;
1210 case AV_PIX_FMT_YA16LE: return AV_PIX_FMT_GRAY16;
1211
1212 case AV_PIX_FMT_YUVA420P9BE: return AV_PIX_FMT_YUV420P9;
1213 case AV_PIX_FMT_YUVA422P9BE: return AV_PIX_FMT_YUV422P9;
1214 case AV_PIX_FMT_YUVA444P9BE: return AV_PIX_FMT_YUV444P9;
1215 case AV_PIX_FMT_YUVA420P9LE: return AV_PIX_FMT_YUV420P9;
1216 case AV_PIX_FMT_YUVA422P9LE: return AV_PIX_FMT_YUV422P9;
1217 case AV_PIX_FMT_YUVA444P9LE: return AV_PIX_FMT_YUV444P9;
1218 case AV_PIX_FMT_YUVA420P10BE: return AV_PIX_FMT_YUV420P10;
1219 case AV_PIX_FMT_YUVA422P10BE: return AV_PIX_FMT_YUV422P10;
1220 case AV_PIX_FMT_YUVA444P10BE: return AV_PIX_FMT_YUV444P10;
1221 case AV_PIX_FMT_YUVA420P10LE: return AV_PIX_FMT_YUV420P10;
1222 case AV_PIX_FMT_YUVA422P10LE: return AV_PIX_FMT_YUV422P10;
1223 case AV_PIX_FMT_YUVA444P10LE: return AV_PIX_FMT_YUV444P10;
1224 case AV_PIX_FMT_YUVA420P16BE: return AV_PIX_FMT_YUV420P16;
1225 case AV_PIX_FMT_YUVA422P16BE: return AV_PIX_FMT_YUV422P16;
1226 case AV_PIX_FMT_YUVA444P16BE: return AV_PIX_FMT_YUV444P16;
1227 case AV_PIX_FMT_YUVA420P16LE: return AV_PIX_FMT_YUV420P16;
1228 case AV_PIX_FMT_YUVA422P16LE: return AV_PIX_FMT_YUV422P16;
1229 case AV_PIX_FMT_YUVA444P16LE: return AV_PIX_FMT_YUV444P16;
1230
1231 // case AV_PIX_FMT_AYUV64LE:
1232 // case AV_PIX_FMT_AYUV64BE:
1233 // case AV_PIX_FMT_PAL8:
1234 default: return AV_PIX_FMT_NONE;
1235 }
1236 }
1237
1238 static int context_init_threaded(SwsContext *c,
1239 SwsFilter *src_filter, SwsFilter *dst_filter)
1240 {
1241 int ret;
1242
1243 ret = avpriv_slicethread_create(&c->slicethread, (void*)c,
1244 ff_sws_slice_worker, NULL, c->nb_threads);
1245 if (ret == AVERROR(ENOSYS)) {
1246 c->nb_threads = 1;
1247 return 0;
1248 } else if (ret < 0)
1249 return ret;
1250
1251 c->nb_threads = ret;
1252
1253 c->slice_ctx = av_calloc(c->nb_threads, sizeof(*c->slice_ctx));
1254 c->slice_err = av_calloc(c->nb_threads, sizeof(*c->slice_err));
1255 if (!c->slice_ctx || !c->slice_err)
1256 return AVERROR(ENOMEM);
1257
1258 for (int i = 0; i < c->nb_threads; i++) {
1259 c->slice_ctx[i] = sws_alloc_context();
1260 if (!c->slice_ctx[i])
1261 return AVERROR(ENOMEM);
1262
1263 c->slice_ctx[i]->parent = c;
1264
1265 ret = av_opt_copy((void*)c->slice_ctx[i], (void*)c);
1266 if (ret < 0)
1267 return ret;
1268
1269 c->slice_ctx[i]->nb_threads = 1;
1270
1271 ret = sws_init_context(c->slice_ctx[i], src_filter, dst_filter);
1272 if (ret < 0)
1273 return ret;
1274
1275 c->nb_slice_ctx++;
1276
1277 if (c->slice_ctx[i]->dither == SWS_DITHER_ED) {
1278 av_log(c, AV_LOG_VERBOSE,
1279 "Error-diffusion dither is in use, scaling will be single-threaded.");
1280 break;
1281 }
1282 }
1283
1284 c->frame_src = av_frame_alloc();
1285 c->frame_dst = av_frame_alloc();
1286 if (!c->frame_src || !c->frame_dst)
1287 return AVERROR(ENOMEM);
1288
1289 return 0;
1290 }
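/* Note (added, assumption: the "threads" AVOption maps to nb_threads as in
 * current libswscale): setting it to 0 before sws_init_context() requests
 * auto-detection and routes initialization through this function, e.g.
 *
 *     av_opt_set_int(sws, "threads", 0, 0);
 */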
1291
1292 av_cold int sws_init_context(SwsContext *c, SwsFilter *srcFilter,
1293 SwsFilter *dstFilter)
1294 {
1295 int i;
1296 int usesVFilter, usesHFilter;
1297 int unscaled;
1298 SwsFilter dummyFilter = { NULL, NULL, NULL, NULL };
1299 int srcW = c->srcW;
1300 int srcH = c->srcH;
1301 int dstW = c->dstW;
1302 int dstH = c->dstH;
1303 int dst_stride = FFALIGN(dstW * sizeof(int16_t) + 66, 16);
1304 int flags, cpu_flags;
1305 enum AVPixelFormat srcFormat = c->srcFormat;
1306 enum AVPixelFormat dstFormat = c->dstFormat;
1307 const AVPixFmtDescriptor *desc_src;
1308 const AVPixFmtDescriptor *desc_dst;
1309 int ret = 0;
1310 enum AVPixelFormat tmpFmt;
1311 static const float float_mult = 1.0f / 255.0f;
1312 static AVOnce rgb2rgb_once = AV_ONCE_INIT;
1313
1314 if (c->nb_threads != 1) {
1315 ret = context_init_threaded(c, srcFilter, dstFilter);
1316 if (ret < 0 || c->nb_threads > 1)
1317 return ret;
1318 // threading disabled in this build, init as single-threaded
1319 }
1320
1321 cpu_flags = av_get_cpu_flags();
1322 flags = c->flags;
1323 emms_c();
1324 if (ff_thread_once(&rgb2rgb_once, ff_sws_rgb2rgb_init) != 0)
1325 return AVERROR_UNKNOWN;
1326
1327 unscaled = (srcW == dstW && srcH == dstH);
1328
1329 c->srcRange |= handle_jpeg(&c->srcFormat);
1330 c->dstRange |= handle_jpeg(&c->dstFormat);
1331
1332 if(srcFormat!=c->srcFormat || dstFormat!=c->dstFormat)
1333 av_log(c, AV_LOG_WARNING, "deprecated pixel format used, make sure you did set range correctly\n");
1334
1335 if (!c->contrast && !c->saturation && !c->dstFormatBpp)
1336 sws_setColorspaceDetails(c, ff_yuv2rgb_coeffs[SWS_CS_DEFAULT], c->srcRange,
1337 ff_yuv2rgb_coeffs[SWS_CS_DEFAULT],
1338 c->dstRange, 0, 1 << 16, 1 << 16);
1339
1340 handle_formats(c);
1341 srcFormat = c->srcFormat;
1342 dstFormat = c->dstFormat;
1343 desc_src = av_pix_fmt_desc_get(srcFormat);
1344 desc_dst = av_pix_fmt_desc_get(dstFormat);
1345
1346 // If the source has no alpha then disable alpha blendaway
1347 if (c->src0Alpha)
1348 c->alphablend = SWS_ALPHA_BLEND_NONE;
1349
1350 if (!(unscaled && sws_isSupportedEndiannessConversion(srcFormat) &&
1351 av_pix_fmt_swap_endianness(srcFormat) == dstFormat)) {
1352 if (!sws_isSupportedInput(srcFormat)) {
1353 av_log(c, AV_LOG_ERROR, "%s is not supported as input pixel format\n",
1354 av_get_pix_fmt_name(srcFormat));
1355 return AVERROR(EINVAL);
1356 }
1357 if (!sws_isSupportedOutput(dstFormat)) {
1358 av_log(c, AV_LOG_ERROR, "%s is not supported as output pixel format\n",
1359 av_get_pix_fmt_name(dstFormat));
1360 return AVERROR(EINVAL);
1361 }
1362 }
1363 av_assert2(desc_src && desc_dst);
1364
1365 i = flags & (SWS_POINT |
1366 SWS_AREA |
1367 SWS_BILINEAR |
1368 SWS_FAST_BILINEAR |
1369 SWS_BICUBIC |
1370 SWS_X |
1371 SWS_GAUSS |
1372 SWS_LANCZOS |
1373 SWS_SINC |
1374 SWS_SPLINE |
1375 SWS_BICUBLIN);
1376
1377 /* provide a default scaler if not set by caller */
1378 if (!i) {
1379 if (dstW < srcW && dstH < srcH)
1380 flags |= SWS_BICUBIC;
1381 else if (dstW > srcW && dstH > srcH)
1382 flags |= SWS_BICUBIC;
1383 else
1384 flags |= SWS_BICUBIC;
1385 c->flags = flags;
1386 } else if (i & (i - 1)) {
1387 av_log(c, AV_LOG_ERROR,
1388 "Exactly one scaler algorithm must be chosen, got %X\n", i);
1389 return AVERROR(EINVAL);
1390 }
1391 /* sanity check */
1392 if (srcW < 1 || srcH < 1 || dstW < 1 || dstH < 1) {
1393 /* FIXME check if these are enough and try to lower them after
1394 * fixing the relevant parts of the code */
1395 av_log(c, AV_LOG_ERROR, "%dx%d -> %dx%d is invalid scaling dimension\n",
1396 srcW, srcH, dstW, dstH);
1397 return AVERROR(EINVAL);
1398 }
1399 if (flags & SWS_FAST_BILINEAR) {
1400 if (srcW < 8 || dstW < 8) {
1401 flags ^= SWS_FAST_BILINEAR | SWS_BILINEAR;
1402 c->flags = flags;
1403 }
1404 }
1405
1406 if (!dstFilter)
1407 dstFilter = &dummyFilter;
1408 if (!srcFilter)
1409 srcFilter = &dummyFilter;
1410
1411 c->lumXInc = (((int64_t)srcW << 16) + (dstW >> 1)) / dstW;
1412 c->lumYInc = (((int64_t)srcH << 16) + (dstH >> 1)) / dstH;
1413 c->dstFormatBpp = av_get_bits_per_pixel(desc_dst);
1414 c->srcFormatBpp = av_get_bits_per_pixel(desc_src);
1415 c->vRounder = 4 * 0x0001000100010001ULL;
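/* Worked example (added) for the 16.16 fixed-point increments above:
 * srcW = 1920, dstW = 1280 gives
 *     lumXInc = ((1920 << 16) + 640) / 1280 = 98304, i.e. 1.5 in 16.16,
 * so the horizontal scaler advances 1.5 source pixels per destination pixel. */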
1416
1417 usesVFilter = (srcFilter->lumV && srcFilter->lumV->length > 1) ||
1418 (srcFilter->chrV && srcFilter->chrV->length > 1) ||
1419 (dstFilter->lumV && dstFilter->lumV->length > 1) ||
1420 (dstFilter->chrV && dstFilter->chrV->length > 1);
1421 usesHFilter = (srcFilter->lumH && srcFilter->lumH->length > 1) ||
1422 (srcFilter->chrH && srcFilter->chrH->length > 1) ||
1423 (dstFilter->lumH && dstFilter->lumH->length > 1) ||
1424 (dstFilter->chrH && dstFilter->chrH->length > 1);
1425
1426 av_pix_fmt_get_chroma_sub_sample(srcFormat, &c->chrSrcHSubSample, &c->chrSrcVSubSample);
1427 av_pix_fmt_get_chroma_sub_sample(dstFormat, &c->chrDstHSubSample, &c->chrDstVSubSample);
1428
1429 c->dst_slice_align = 1 << c->chrDstVSubSample;
1430
1431 if (isAnyRGB(dstFormat) && !(flags&SWS_FULL_CHR_H_INT)) {
1432 if (dstW&1) {
1433 av_log(c, AV_LOG_DEBUG, "Forcing full internal H chroma due to odd output size\n");
1434 flags |= SWS_FULL_CHR_H_INT;
1435 c->flags = flags;
1436 }
1437
1438 if ( c->chrSrcHSubSample == 0
1439 && c->chrSrcVSubSample == 0
1440 && c->dither != SWS_DITHER_BAYER //SWS_FULL_CHR_H_INT is currently not supported with SWS_DITHER_BAYER
1441 && !(c->flags & SWS_FAST_BILINEAR)
1442 ) {
1443 av_log(c, AV_LOG_DEBUG, "Forcing full internal H chroma due to input having non subsampled chroma\n");
1444 flags |= SWS_FULL_CHR_H_INT;
1445 c->flags = flags;
1446 }
1447 }
1448
1449 if (c->dither == SWS_DITHER_AUTO) {
1450 if (flags & SWS_ERROR_DIFFUSION)
1451 c->dither = SWS_DITHER_ED;
1452 }
1453
1454 if(dstFormat == AV_PIX_FMT_BGR4_BYTE ||
1455 dstFormat == AV_PIX_FMT_RGB4_BYTE ||
1456 dstFormat == AV_PIX_FMT_BGR8 ||
1457 dstFormat == AV_PIX_FMT_RGB8) {
1458 if (c->dither == SWS_DITHER_AUTO)
1459 c->dither = (flags & SWS_FULL_CHR_H_INT) ? SWS_DITHER_ED : SWS_DITHER_BAYER;
1460 if (!(flags & SWS_FULL_CHR_H_INT)) {
1461 if (c->dither == SWS_DITHER_ED || c->dither == SWS_DITHER_A_DITHER || c->dither == SWS_DITHER_X_DITHER || c->dither == SWS_DITHER_NONE) {
1462 av_log(c, AV_LOG_DEBUG,
1463 "Desired dithering only supported in full chroma interpolation for destination format '%s'\n",
1464 av_get_pix_fmt_name(dstFormat));
1465 flags |= SWS_FULL_CHR_H_INT;
1466 c->flags = flags;
1467 }
1468 }
1469 if (flags & SWS_FULL_CHR_H_INT) {
1470 if (c->dither == SWS_DITHER_BAYER) {
1471 av_log(c, AV_LOG_DEBUG,
1472 "Ordered dither is not supported in full chroma interpolation for destination format '%s'\n",
1473 av_get_pix_fmt_name(dstFormat));
1474 c->dither = SWS_DITHER_ED;
1475 }
1476 }
1477 }
1478 if (isPlanarRGB(dstFormat)) {
1479 if (!(flags & SWS_FULL_CHR_H_INT)) {
1480 av_log(c, AV_LOG_DEBUG,
1481 "%s output is not supported with half chroma resolution, switching to full\n",
1482 av_get_pix_fmt_name(dstFormat));
1483 flags |= SWS_FULL_CHR_H_INT;
1484 c->flags = flags;
1485 }
1486 }
1487
1488 /* reuse chroma for 2 pixels RGB/BGR unless user wants full
1489 * chroma interpolation */
1490 if (flags & SWS_FULL_CHR_H_INT &&
1491 isAnyRGB(dstFormat) &&
1492 !isPlanarRGB(dstFormat) &&
1493 dstFormat != AV_PIX_FMT_RGBA64LE &&
1494 dstFormat != AV_PIX_FMT_RGBA64BE &&
1495 dstFormat != AV_PIX_FMT_BGRA64LE &&
1496 dstFormat != AV_PIX_FMT_BGRA64BE &&
1497 dstFormat != AV_PIX_FMT_RGB48LE &&
1498 dstFormat != AV_PIX_FMT_RGB48BE &&
1499 dstFormat != AV_PIX_FMT_BGR48LE &&
1500 dstFormat != AV_PIX_FMT_BGR48BE &&
1501 dstFormat != AV_PIX_FMT_RGBA &&
1502 dstFormat != AV_PIX_FMT_ARGB &&
1503 dstFormat != AV_PIX_FMT_BGRA &&
1504 dstFormat != AV_PIX_FMT_ABGR &&
1505 dstFormat != AV_PIX_FMT_RGB24 &&
1506 dstFormat != AV_PIX_FMT_BGR24 &&
1507 dstFormat != AV_PIX_FMT_BGR4_BYTE &&
1508 dstFormat != AV_PIX_FMT_RGB4_BYTE &&
1509 dstFormat != AV_PIX_FMT_BGR8 &&
1510 dstFormat != AV_PIX_FMT_RGB8
1511 ) {
1512 av_log(c, AV_LOG_WARNING,
1513 "full chroma interpolation for destination format '%s' not yet implemented\n",
1514 av_get_pix_fmt_name(dstFormat));
1515 flags &= ~SWS_FULL_CHR_H_INT;
1516 c->flags = flags;
1517 }
1518 if (isAnyRGB(dstFormat) && !(flags & SWS_FULL_CHR_H_INT))
1519 c->chrDstHSubSample = 1;
1520
1521 // drop some chroma lines if the user wants it
1522 c->vChrDrop = (flags & SWS_SRC_V_CHR_DROP_MASK) >>
1523 SWS_SRC_V_CHR_DROP_SHIFT;
1524 c->chrSrcVSubSample += c->vChrDrop;
1525
1526 /* drop every other pixel for chroma calculation unless user
1527 * wants full chroma */
1528 if (isAnyRGB(srcFormat) && !(flags & SWS_FULL_CHR_H_INP) &&
1529 srcFormat != AV_PIX_FMT_RGB8 && srcFormat != AV_PIX_FMT_BGR8 &&
1530 srcFormat != AV_PIX_FMT_RGB4 && srcFormat != AV_PIX_FMT_BGR4 &&
1531 srcFormat != AV_PIX_FMT_RGB4_BYTE && srcFormat != AV_PIX_FMT_BGR4_BYTE &&
1532 srcFormat != AV_PIX_FMT_GBRP9BE && srcFormat != AV_PIX_FMT_GBRP9LE &&
1533 srcFormat != AV_PIX_FMT_GBRP10BE && srcFormat != AV_PIX_FMT_GBRP10LE &&
1534 srcFormat != AV_PIX_FMT_GBRAP10BE && srcFormat != AV_PIX_FMT_GBRAP10LE &&
1535 srcFormat != AV_PIX_FMT_GBRP12BE && srcFormat != AV_PIX_FMT_GBRP12LE &&
1536 srcFormat != AV_PIX_FMT_GBRAP12BE && srcFormat != AV_PIX_FMT_GBRAP12LE &&
1537 srcFormat != AV_PIX_FMT_GBRP14BE && srcFormat != AV_PIX_FMT_GBRP14LE &&
1538 srcFormat != AV_PIX_FMT_GBRP16BE && srcFormat != AV_PIX_FMT_GBRP16LE &&
1539 srcFormat != AV_PIX_FMT_GBRAP16BE && srcFormat != AV_PIX_FMT_GBRAP16LE &&
1540 srcFormat != AV_PIX_FMT_GBRPF32BE && srcFormat != AV_PIX_FMT_GBRPF32LE &&
1541 srcFormat != AV_PIX_FMT_GBRAPF32BE && srcFormat != AV_PIX_FMT_GBRAPF32LE &&
1542 ((dstW >> c->chrDstHSubSample) <= (srcW >> 1) ||
1543 (flags & SWS_FAST_BILINEAR)))
1544 c->chrSrcHSubSample = 1;
1545
1546 // Note the AV_CEIL_RSHIFT is so that we always round toward +inf.
1547 c->chrSrcW = AV_CEIL_RSHIFT(srcW, c->chrSrcHSubSample);
1548 c->chrSrcH = AV_CEIL_RSHIFT(srcH, c->chrSrcVSubSample);
1549 c->chrDstW = AV_CEIL_RSHIFT(dstW, c->chrDstHSubSample);
1550 c->chrDstH = AV_CEIL_RSHIFT(dstH, c->chrDstVSubSample);
1551
1552 if (!FF_ALLOCZ_TYPED_ARRAY(c->formatConvBuffer, FFALIGN(srcW * 2 + 78, 16) * 2))
1553 goto nomem;
1554
1555 c->frame_src = av_frame_alloc();
1556 c->frame_dst = av_frame_alloc();
1557 if (!c->frame_src || !c->frame_dst)
1558 goto nomem;
1559
1560 c->srcBpc = desc_src->comp[0].depth;
1561 if (c->srcBpc < 8)
1562 c->srcBpc = 8;
1563 c->dstBpc = desc_dst->comp[0].depth;
1564 if (c->dstBpc < 8)
1565 c->dstBpc = 8;
1566 if (isAnyRGB(srcFormat) || srcFormat == AV_PIX_FMT_PAL8)
1567 c->srcBpc = 16;
1568 if (c->dstBpc == 16)
1569 dst_stride <<= 1;
1570
1571 if (INLINE_MMXEXT(cpu_flags) && c->srcBpc == 8 && c->dstBpc <= 14) {
1572 c->canMMXEXTBeUsed = dstW >= srcW && (dstW & 31) == 0 &&
1573 c->chrDstW >= c->chrSrcW &&
1574 (srcW & 15) == 0;
1575 if (!c->canMMXEXTBeUsed && dstW >= srcW && c->chrDstW >= c->chrSrcW && (srcW & 15) == 0
1576
1577 && (flags & SWS_FAST_BILINEAR)) {
1578 if (flags & SWS_PRINT_INFO)
1579 av_log(c, AV_LOG_INFO,
1580 "output width is not a multiple of 32 -> no MMXEXT scaler\n");
1581 }
1582 if (usesHFilter || isNBPS(c->srcFormat) || is16BPS(c->srcFormat) || isAnyRGB(c->srcFormat))
1583 c->canMMXEXTBeUsed = 0;
1584 } else
1585 c->canMMXEXTBeUsed = 0;
1586
1587 c->chrXInc = (((int64_t)c->chrSrcW << 16) + (c->chrDstW >> 1)) / c->chrDstW;
1588 c->chrYInc = (((int64_t)c->chrSrcH << 16) + (c->chrDstH >> 1)) / c->chrDstH;
1589
1590 /* Match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src
1591 * to pixel n-2 of dst, but only for the FAST_BILINEAR mode otherwise do
1592 * correct scaling.
1593 * n-2 is the last chrominance sample available.
1594 * This is not perfect, but no one should notice the difference, the more
1595 * correct variant would be like the vertical one, but that would require
1596 * some special code for the first and last pixel */
1597 if (flags & SWS_FAST_BILINEAR) {
1598 if (c->canMMXEXTBeUsed) {
1599 c->lumXInc += 20;
1600 c->chrXInc += 20;
1601 }
1602 // the MMXEXT asm scaler cannot be used here, but plain MMX is available
1603 else if (INLINE_MMX(cpu_flags) && c->dstBpc <= 14) {
1604 c->lumXInc = ((int64_t)(srcW - 2) << 16) / (dstW - 2) - 20;
1605 c->chrXInc = ((int64_t)(c->chrSrcW - 2) << 16) / (c->chrDstW - 2) - 20;
1606 }
1607 }
1608
1609 // hardcoded for now
1610 c->gamma_value = 2.2;
1611 tmpFmt = AV_PIX_FMT_RGBA64LE;
1612
1613
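/* Roughly, the gamma-correct path below splits the conversion into up to
 * three cascaded contexts:
 *   [0] srcFormat -> RGBA64LE at the source size,
 *   [1] RGBA64LE  -> RGBA64LE rescaled, with the gamma/inv_gamma tables
 *       applied around the scaling step,
 *   [2] RGBA64LE  -> dstFormat at the destination size (only if needed),
 * so the actual resampling runs on 16-bit RGB data. */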
1614 if (!unscaled && c->gamma_flag && (srcFormat != tmpFmt || dstFormat != tmpFmt)) {
1615 SwsContext *c2;
1616 c->cascaded_context[0] = NULL;
1617
1618 ret = av_image_alloc(c->cascaded_tmp, c->cascaded_tmpStride,
1619 srcW, srcH, tmpFmt, 64);
1620 if (ret < 0)
1621 return ret;
1622
1623 c->cascaded_context[0] = sws_getContext(srcW, srcH, srcFormat,
1624 srcW, srcH, tmpFmt,
1625 flags, NULL, NULL, c->param);
1626 if (!c->cascaded_context[0]) {
1627 return AVERROR(ENOMEM);
1628 }
1629
1630 c->cascaded_context[1] = sws_getContext(srcW, srcH, tmpFmt,
1631 dstW, dstH, tmpFmt,
1632 flags, srcFilter, dstFilter, c->param);
1633
1634 if (!c->cascaded_context[1])
1635 return AVERROR(ENOMEM);
1636
1637 c2 = c->cascaded_context[1];
1638 c2->is_internal_gamma = 1;
1639 c2->gamma = alloc_gamma_tbl( c->gamma_value);
1640 c2->inv_gamma = alloc_gamma_tbl(1.f/c->gamma_value);
1641 if (!c2->gamma || !c2->inv_gamma)
1642 return AVERROR(ENOMEM);
1643
1644 // is_internal_gamma is set after creating the context,
1645 // so to properly create the gamma convert FilterDescriptor
1646 // we have to re-initialize the filters
1647 ff_free_filters(c2);
1648 if ((ret = ff_init_filters(c2)) < 0) {
1649 sws_freeContext(c2);
1650 c->cascaded_context[1] = NULL;
1651 return ret;
1652 }
1653
1654 c->cascaded_context[2] = NULL;
1655 if (dstFormat != tmpFmt) {
1656 ret = av_image_alloc(c->cascaded1_tmp, c->cascaded1_tmpStride,
1657 dstW, dstH, tmpFmt, 64);
1658 if (ret < 0)
1659 return ret;
1660
1661 c->cascaded_context[2] = sws_getContext(dstW, dstH, tmpFmt,
1662 dstW, dstH, dstFormat,
1663 flags, NULL, NULL, c->param);
1664 if (!c->cascaded_context[2])
1665 return AVERROR(ENOMEM);
1666 }
1667 return 0;
1668 }
1669
1670 if (isBayer(srcFormat)) {
1671 if (!unscaled ||
1672 (dstFormat != AV_PIX_FMT_RGB24 && dstFormat != AV_PIX_FMT_YUV420P &&
1673 dstFormat != AV_PIX_FMT_RGB48)) {
1674 enum AVPixelFormat tmpFormat = isBayer16BPS(srcFormat) ? AV_PIX_FMT_RGB48 : AV_PIX_FMT_RGB24;
1675
1676 ret = av_image_alloc(c->cascaded_tmp, c->cascaded_tmpStride,
1677 srcW, srcH, tmpFormat, 64);
1678 if (ret < 0)
1679 return ret;
1680
1681 c->cascaded_context[0] = sws_getContext(srcW, srcH, srcFormat,
1682 srcW, srcH, tmpFormat,
1683 flags, srcFilter, NULL, c->param);
1684 if (!c->cascaded_context[0])
1685 return AVERROR(ENOMEM);
1686
1687 c->cascaded_context[1] = sws_getContext(srcW, srcH, tmpFormat,
1688 dstW, dstH, dstFormat,
1689 flags, NULL, dstFilter, c->param);
1690 if (!c->cascaded_context[1])
1691 return AVERROR(ENOMEM);
1692 return 0;
1693 }
1694 }
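/* In other words: Bayer input only has direct converters for unscaled RGB24,
 * RGB48 and YUV420P output, so every other case is routed through a two-stage
 * cascade that debayers to RGB24/RGB48 first and then scales/converts to the
 * requested destination format. */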
1695
1696 if (unscaled && c->srcBpc == 8 && dstFormat == AV_PIX_FMT_GRAYF32){
1697 for (i = 0; i < 256; ++i){
1698 c->uint2float_lut[i] = (float)i * float_mult;
1699 }
1700 }
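/* For illustration: float_mult is expected to be 1.0f / 255.0f, so the table
 * maps 0 -> 0.0f and 255 -> 1.0f, giving a lookup-based uint8 -> float
 * conversion for the unscaled GRAY8 -> GRAYF32 path. */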
1701
1702 // float will be converted to uint16_t
1703 if ((srcFormat == AV_PIX_FMT_GRAYF32BE || srcFormat == AV_PIX_FMT_GRAYF32LE) &&
1704 (!unscaled || (unscaled && dstFormat != srcFormat && (srcFormat != AV_PIX_FMT_GRAYF32 ||
1705 dstFormat != AV_PIX_FMT_GRAY8)))){
1706 c->srcBpc = 16;
1707 }
1708
1709 if (CONFIG_SWSCALE_ALPHA && isALPHA(srcFormat) && !isALPHA(dstFormat)) {
1710 enum AVPixelFormat tmpFormat = alphaless_fmt(srcFormat);
1711
1712 if (tmpFormat != AV_PIX_FMT_NONE && c->alphablend != SWS_ALPHA_BLEND_NONE) {
1713 if (!unscaled ||
1714 dstFormat != tmpFormat ||
1715 usesHFilter || usesVFilter ||
1716 c->srcRange != c->dstRange
1717 ) {
1718 c->cascaded_mainindex = 1;
1719 ret = av_image_alloc(c->cascaded_tmp, c->cascaded_tmpStride,
1720 srcW, srcH, tmpFormat, 64);
1721 if (ret < 0)
1722 return ret;
1723
1724 c->cascaded_context[0] = sws_alloc_set_opts(srcW, srcH, srcFormat,
1725 srcW, srcH, tmpFormat,
1726 flags, c->param);
1727 if (!c->cascaded_context[0])
1728 return AVERROR(EINVAL);
1729 c->cascaded_context[0]->alphablend = c->alphablend;
1730 ret = sws_init_context(c->cascaded_context[0], NULL , NULL);
1731 if (ret < 0)
1732 return ret;
1733
1734 c->cascaded_context[1] = sws_alloc_set_opts(srcW, srcH, tmpFormat,
1735 dstW, dstH, dstFormat,
1736 flags, c->param);
1737 if (!c->cascaded_context[1])
1738 return AVERROR(EINVAL);
1739
1740 c->cascaded_context[1]->srcRange = c->srcRange;
1741 c->cascaded_context[1]->dstRange = c->dstRange;
1742 ret = sws_init_context(c->cascaded_context[1], srcFilter , dstFilter);
1743 if (ret < 0)
1744 return ret;
1745
1746 return 0;
1747 }
1748 }
1749 }
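/* In other words: when alpha blending is requested but the simple unscaled
 * blend-away converter further below cannot handle the job, context [0]
 * blends the alpha away at the source size (srcFormat -> alphaless tmpFormat)
 * and context [1] performs the actual scale/convert to dstFormat. */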
1750
1751 #if HAVE_MMAP && HAVE_MPROTECT && defined(MAP_ANONYMOUS)
1752 #define USE_MMAP 1
1753 #else
1754 #define USE_MMAP 0
1755 #endif
1756
1757 /* precalculate horizontal scaler filter coefficients */
1758 {
1759 #if HAVE_MMXEXT_INLINE
1760 // can't downscale !!!
1761 if (c->canMMXEXTBeUsed && (flags & SWS_FAST_BILINEAR)) {
1762 c->lumMmxextFilterCodeSize = ff_init_hscaler_mmxext(dstW, c->lumXInc, NULL,
1763 NULL, NULL, 8);
1764 c->chrMmxextFilterCodeSize = ff_init_hscaler_mmxext(c->chrDstW, c->chrXInc,
1765 NULL, NULL, NULL, 4);
1766
1767 #if USE_MMAP
1768 c->lumMmxextFilterCode = mmap(NULL, c->lumMmxextFilterCodeSize,
1769 PROT_READ | PROT_WRITE,
1770 MAP_PRIVATE | MAP_ANONYMOUS,
1771 -1, 0);
1772 c->chrMmxextFilterCode = mmap(NULL, c->chrMmxextFilterCodeSize,
1773 PROT_READ | PROT_WRITE,
1774 MAP_PRIVATE | MAP_ANONYMOUS,
1775 -1, 0);
1776 #elif HAVE_VIRTUALALLOC
1777 c->lumMmxextFilterCode = VirtualAlloc(NULL,
1778 c->lumMmxextFilterCodeSize,
1779 MEM_COMMIT,
1780 PAGE_EXECUTE_READWRITE);
1781 c->chrMmxextFilterCode = VirtualAlloc(NULL,
1782 c->chrMmxextFilterCodeSize,
1783 MEM_COMMIT,
1784 PAGE_EXECUTE_READWRITE);
1785 #else
1786 c->lumMmxextFilterCode = av_malloc(c->lumMmxextFilterCodeSize);
1787 c->chrMmxextFilterCode = av_malloc(c->chrMmxextFilterCodeSize);
1788 #endif
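/* Note on the mmap path: the buffers are first mapped writable but not
 * executable; the generated scaler code is written into them below and the
 * pages are only then switched to PROT_EXEC | PROT_READ via mprotect(), which
 * keeps the mapping acceptable under W^X policies. */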
1789
1790 #ifdef MAP_ANONYMOUS
1791 if (c->lumMmxextFilterCode == MAP_FAILED || c->chrMmxextFilterCode == MAP_FAILED)
1792 #else
1793 if (!c->lumMmxextFilterCode || !c->chrMmxextFilterCode)
1794 #endif
1795 {
1796 av_log(c, AV_LOG_ERROR, "Failed to allocate MMX2FilterCode\n");
1797 return AVERROR(ENOMEM);
1798 }
1799
1800 if (!FF_ALLOCZ_TYPED_ARRAY(c->hLumFilter, dstW / 8 + 8) ||
1801 !FF_ALLOCZ_TYPED_ARRAY(c->hChrFilter, c->chrDstW / 4 + 8) ||
1802 !FF_ALLOCZ_TYPED_ARRAY(c->hLumFilterPos, dstW / 2 / 8 + 8) ||
1803 !FF_ALLOCZ_TYPED_ARRAY(c->hChrFilterPos, c->chrDstW / 2 / 4 + 8))
1804 goto nomem;
1805
1806 ff_init_hscaler_mmxext( dstW, c->lumXInc, c->lumMmxextFilterCode,
1807 c->hLumFilter, (uint32_t*)c->hLumFilterPos, 8);
1808 ff_init_hscaler_mmxext(c->chrDstW, c->chrXInc, c->chrMmxextFilterCode,
1809 c->hChrFilter, (uint32_t*)c->hChrFilterPos, 4);
1810
1811 #if USE_MMAP
1812 if ( mprotect(c->lumMmxextFilterCode, c->lumMmxextFilterCodeSize, PROT_EXEC | PROT_READ) == -1
1813 || mprotect(c->chrMmxextFilterCode, c->chrMmxextFilterCodeSize, PROT_EXEC | PROT_READ) == -1) {
1814 av_log(c, AV_LOG_ERROR, "mprotect failed, cannot use fast bilinear scaler\n");
1815 ret = AVERROR(EINVAL);
1816 goto fail;
1817 }
1818 #endif
1819 } else
1820 #endif /* HAVE_MMXEXT_INLINE */
1821 {
1822 const int filterAlign = X86_MMX(cpu_flags) ? 4 :
1823 PPC_ALTIVEC(cpu_flags) ? 8 :
1824 have_neon(cpu_flags) ? 4 : 1;
1825
1826 if ((ret = initFilter(&c->hLumFilter, &c->hLumFilterPos,
1827 &c->hLumFilterSize, c->lumXInc,
1828 srcW, dstW, filterAlign, 1 << 14,
1829 (flags & SWS_BICUBLIN) ? (flags | SWS_BICUBIC) : flags,
1830 cpu_flags, srcFilter->lumH, dstFilter->lumH,
1831 c->param,
1832 get_local_pos(c, 0, 0, 0),
1833 get_local_pos(c, 0, 0, 0))) < 0)
1834 goto fail;
1835 if (ff_shuffle_filter_coefficients(c, c->hLumFilterPos, c->hLumFilterSize, c->hLumFilter, dstW) < 0)
1836 goto nomem;
1837 if ((ret = initFilter(&c->hChrFilter, &c->hChrFilterPos,
1838 &c->hChrFilterSize, c->chrXInc,
1839 c->chrSrcW, c->chrDstW, filterAlign, 1 << 14,
1840 (flags & SWS_BICUBLIN) ? (flags | SWS_BILINEAR) : flags,
1841 cpu_flags, srcFilter->chrH, dstFilter->chrH,
1842 c->param,
1843 get_local_pos(c, c->chrSrcHSubSample, c->src_h_chr_pos, 0),
1844 get_local_pos(c, c->chrDstHSubSample, c->dst_h_chr_pos, 0))) < 0)
1845 goto fail;
1846 if (ff_shuffle_filter_coefficients(c, c->hChrFilterPos, c->hChrFilterSize, c->hChrFilter, c->chrDstW) < 0)
1847 goto nomem;
1848 }
1849 } // initialize horizontal stuff
1850
1851 /* precalculate vertical scaler filter coefficients */
1852 {
1853 const int filterAlign = X86_MMX(cpu_flags) ? 2 :
1854 PPC_ALTIVEC(cpu_flags) ? 8 :
1855 have_neon(cpu_flags) ? 2 : 1;
1856
1857 if ((ret = initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize,
1858 c->lumYInc, srcH, dstH, filterAlign, (1 << 12),
1859 (flags & SWS_BICUBLIN) ? (flags | SWS_BICUBIC) : flags,
1860 cpu_flags, srcFilter->lumV, dstFilter->lumV,
1861 c->param,
1862 get_local_pos(c, 0, 0, 1),
1863 get_local_pos(c, 0, 0, 1))) < 0)
1864 goto fail;
1865 if ((ret = initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize,
1866 c->chrYInc, c->chrSrcH, c->chrDstH,
1867 filterAlign, (1 << 12),
1868 (flags & SWS_BICUBLIN) ? (flags | SWS_BILINEAR) : flags,
1869 cpu_flags, srcFilter->chrV, dstFilter->chrV,
1870 c->param,
1871 get_local_pos(c, c->chrSrcVSubSample, c->src_v_chr_pos, 1),
1872 get_local_pos(c, c->chrDstVSubSample, c->dst_v_chr_pos, 1))) < 0)
1873 goto fail;
1875
1876 #if HAVE_ALTIVEC
1877 if (!FF_ALLOC_TYPED_ARRAY(c->vYCoeffsBank, c->vLumFilterSize * c->dstH) ||
1878 !FF_ALLOC_TYPED_ARRAY(c->vCCoeffsBank, c->vChrFilterSize * c->chrDstH))
1879 goto nomem;
1880
1881 for (i = 0; i < c->vLumFilterSize * c->dstH; i++) {
1882 int j;
1883 short *p = (short *)&c->vYCoeffsBank[i];
1884 for (j = 0; j < 8; j++)
1885 p[j] = c->vLumFilter[i];
1886 }
1887
1888 for (i = 0; i < c->vChrFilterSize * c->chrDstH; i++) {
1889 int j;
1890 short *p = (short *)&c->vCCoeffsBank[i];
1891 for (j = 0; j < 8; j++)
1892 p[j] = c->vChrFilter[i];
1893 }
1894 #endif
1895 }
1896
1897 for (i = 0; i < 4; i++)
1898 if (!FF_ALLOCZ_TYPED_ARRAY(c->dither_error[i], c->dstW + 2))
1899 goto nomem;
1900
1901 c->needAlpha = (CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat) && isALPHA(c->dstFormat)) ? 1 : 0;
1902
1903 // 64 / c->scalingBpp is the same as 16 / sizeof(scaling_intermediate)
1904 c->uv_off = (dst_stride>>1) + 64 / (c->dstBpc &~ 7);
1905 c->uv_offx2 = dst_stride + 16;
1906
1907 av_assert0(c->chrDstH <= dstH);
1908
1909 if (flags & SWS_PRINT_INFO) {
1910 const char *scaler = NULL, *cpucaps;
1911
1912 for (i = 0; i < FF_ARRAY_ELEMS(scale_algorithms); i++) {
1913 if (flags & scale_algorithms[i].flag) {
1914 scaler = scale_algorithms[i].description;
1915 break;
1916 }
1917 }
1918 if (!scaler)
1919 scaler = "ehh flags invalid?!";
1920 av_log(c, AV_LOG_INFO, "%s scaler, from %s to %s%s ",
1921 scaler,
1922 av_get_pix_fmt_name(srcFormat),
1923 #ifdef DITHER1XBPP
1924 dstFormat == AV_PIX_FMT_BGR555 || dstFormat == AV_PIX_FMT_BGR565 ||
1925 dstFormat == AV_PIX_FMT_RGB444BE || dstFormat == AV_PIX_FMT_RGB444LE ||
1926 dstFormat == AV_PIX_FMT_BGR444BE || dstFormat == AV_PIX_FMT_BGR444LE ?
1927 "dithered " : "",
1928 #else
1929 "",
1930 #endif
1931 av_get_pix_fmt_name(dstFormat));
1932
1933 if (INLINE_MMXEXT(cpu_flags))
1934 cpucaps = "MMXEXT";
1935 else if (INLINE_AMD3DNOW(cpu_flags))
1936 cpucaps = "3DNOW";
1937 else if (INLINE_MMX(cpu_flags))
1938 cpucaps = "MMX";
1939 else if (PPC_ALTIVEC(cpu_flags))
1940 cpucaps = "AltiVec";
1941 else
1942 cpucaps = "C";
1943
1944 av_log(c, AV_LOG_INFO, "using %s\n", cpucaps);
1945
1946 av_log(c, AV_LOG_VERBOSE, "%dx%d -> %dx%d\n", srcW, srcH, dstW, dstH);
1947 av_log(c, AV_LOG_DEBUG,
1948 "lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
1949 c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc);
1950 av_log(c, AV_LOG_DEBUG,
1951 "chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
1952 c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH,
1953 c->chrXInc, c->chrYInc);
1954 }
1955
1956 /* alpha blend special case, note this has been split via cascaded contexts if it is scaled */
1957 if (unscaled && !usesHFilter && !usesVFilter &&
1958 c->alphablend != SWS_ALPHA_BLEND_NONE &&
1959 isALPHA(srcFormat) &&
1960 (c->srcRange == c->dstRange || isAnyRGB(dstFormat)) &&
1961 alphaless_fmt(srcFormat) == dstFormat
1962 ) {
1963 c->convert_unscaled = ff_sws_alphablendaway;
1964
1965 if (flags & SWS_PRINT_INFO)
1966 av_log(c, AV_LOG_INFO,
1967 "using alpha blendaway %s -> %s special converter\n",
1968 av_get_pix_fmt_name(srcFormat), av_get_pix_fmt_name(dstFormat));
1969 return 0;
1970 }
1971
1972 /* unscaled special cases */
1973 if (unscaled && !usesHFilter && !usesVFilter &&
1974 (c->srcRange == c->dstRange || isAnyRGB(dstFormat) ||
1975 isFloat(srcFormat) || isFloat(dstFormat))){
1976 ff_get_unscaled_swscale(c);
1977
1978 if (c->convert_unscaled) {
1979 if (flags & SWS_PRINT_INFO)
1980 av_log(c, AV_LOG_INFO,
1981 "using unscaled %s -> %s special converter\n",
1982 av_get_pix_fmt_name(srcFormat), av_get_pix_fmt_name(dstFormat));
1983 return 0;
1984 }
1985 }
1986
1987 ff_sws_init_scale(c);
1988
1989 return ff_init_filters(c);
1990 nomem:
1991 ret = AVERROR(ENOMEM);
1992 fail: // FIXME replace things by appropriate error codes
1993 if (ret == RETCODE_USE_CASCADE) {
1994 int tmpW = sqrt(srcW * (int64_t)dstW);
1995 int tmpH = sqrt(srcH * (int64_t)dstH);
1996 enum AVPixelFormat tmpFormat = AV_PIX_FMT_YUV420P;
1997
1998 if (isALPHA(srcFormat))
1999 tmpFormat = AV_PIX_FMT_YUVA420P;
2000
2001 if (srcW*(int64_t)srcH <= 4LL*dstW*dstH)
2002 return AVERROR(EINVAL);
2003
2004 ret = av_image_alloc(c->cascaded_tmp, c->cascaded_tmpStride,
2005 tmpW, tmpH, tmpFormat, 64);
2006 if (ret < 0)
2007 return ret;
2008
2009 c->cascaded_context[0] = sws_getContext(srcW, srcH, srcFormat,
2010 tmpW, tmpH, tmpFormat,
2011 flags, srcFilter, NULL, c->param);
2012 if (!c->cascaded_context[0])
2013 return AVERROR(ENOMEM);
2014
2015 c->cascaded_context[1] = sws_getContext(tmpW, tmpH, tmpFormat,
2016 dstW, dstH, dstFormat,
2017 flags, NULL, dstFilter, c->param);
2018 if (!c->cascaded_context[1])
2019 return AVERROR(ENOMEM);
2020 return 0;
2021 }
2022 return ret;
2023 }
2024
2025 SwsContext *sws_alloc_set_opts(int srcW, int srcH, enum AVPixelFormat srcFormat,
2026 int dstW, int dstH, enum AVPixelFormat dstFormat,
2027 int flags, const double *param)
2028 {
2029 SwsContext *c;
2030
2031 if (!(c = sws_alloc_context()))
2032 return NULL;
2033
2034 c->flags = flags;
2035 c->srcW = srcW;
2036 c->srcH = srcH;
2037 c->dstW = dstW;
2038 c->dstH = dstH;
2039 c->srcFormat = srcFormat;
2040 c->dstFormat = dstFormat;
2041
2042 if (param) {
2043 c->param[0] = param[0];
2044 c->param[1] = param[1];
2045 }
2046
2047 return c;
2048 }
2049
2050 SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,
2051 int dstW, int dstH, enum AVPixelFormat dstFormat,
2052 int flags, SwsFilter *srcFilter,
2053 SwsFilter *dstFilter, const double *param)
2054 {
2055 SwsContext *c;
2056
2057 c = sws_alloc_set_opts(srcW, srcH, srcFormat,
2058 dstW, dstH, dstFormat,
2059 flags, param);
2060 if (!c)
2061 return NULL;
2062
2063 if (sws_init_context(c, srcFilter, dstFilter) < 0) {
2064 sws_freeContext(c);
2065 return NULL;
2066 }
2067
2068 return c;
2069 }
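/* Illustrative usage sketch (not part of the library itself); in_w, in_h,
 * out_w, out_h and the data/linesize arrays are assumed to be provided by the
 * caller:
 *
 *     struct SwsContext *sws = sws_getContext(in_w,  in_h,  AV_PIX_FMT_YUV420P,
 *                                             out_w, out_h, AV_PIX_FMT_RGB24,
 *                                             SWS_BILINEAR, NULL, NULL, NULL);
 *     if (sws) {
 *         sws_scale(sws, (const uint8_t *const *)src_data, src_linesize,
 *                   0, in_h, dst_data, dst_linesize);
 *         sws_freeContext(sws);
 *     }
 */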
2070
2071 static int isnan_vec(SwsVector *a)
2072 {
2073 int i;
2074 for (i=0; i<a->length; i++)
2075 if (isnan(a->coeff[i]))
2076 return 1;
2077 return 0;
2078 }
2079
2080 static void makenan_vec(SwsVector *a)
2081 {
2082 int i;
2083 for (i=0; i<a->length; i++)
2084 a->coeff[i] = NAN;
2085 }
2086
2087 SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
2088 float lumaSharpen, float chromaSharpen,
2089 float chromaHShift, float chromaVShift,
2090 int verbose)
2091 {
2092 SwsFilter *filter = av_malloc(sizeof(SwsFilter));
2093 if (!filter)
2094 return NULL;
2095
2096 if (lumaGBlur != 0.0) {
2097 filter->lumH = sws_getGaussianVec(lumaGBlur, 3.0);
2098 filter->lumV = sws_getGaussianVec(lumaGBlur, 3.0);
2099 } else {
2100 filter->lumH = sws_getIdentityVec();
2101 filter->lumV = sws_getIdentityVec();
2102 }
2103
2104 if (chromaGBlur != 0.0) {
2105 filter->chrH = sws_getGaussianVec(chromaGBlur, 3.0);
2106 filter->chrV = sws_getGaussianVec(chromaGBlur, 3.0);
2107 } else {
2108 filter->chrH = sws_getIdentityVec();
2109 filter->chrV = sws_getIdentityVec();
2110 }
2111
2112 if (!filter->lumH || !filter->lumV || !filter->chrH || !filter->chrV)
2113 goto fail;
2114
2115 if (chromaSharpen != 0.0) {
2116 SwsVector *id = sws_getIdentityVec();
2117 if (!id)
2118 goto fail;
2119 sws_scaleVec(filter->chrH, -chromaSharpen);
2120 sws_scaleVec(filter->chrV, -chromaSharpen);
2121 sws_addVec(filter->chrH, id);
2122 sws_addVec(filter->chrV, id);
2123 sws_freeVec(id);
2124 }
2125
2126 if (lumaSharpen != 0.0) {
2127 SwsVector *id = sws_getIdentityVec();
2128 if (!id)
2129 goto fail;
2130 sws_scaleVec(filter->lumH, -lumaSharpen);
2131 sws_scaleVec(filter->lumV, -lumaSharpen);
2132 sws_addVec(filter->lumH, id);
2133 sws_addVec(filter->lumV, id);
2134 sws_freeVec(id);
2135 }
2136
2137 if (chromaHShift != 0.0)
2138 sws_shiftVec(filter->chrH, (int)(chromaHShift + 0.5));
2139
2140 if (chromaVShift != 0.0)
2141 sws_shiftVec(filter->chrV, (int)(chromaVShift + 0.5));
2142
2143 sws_normalizeVec(filter->chrH, 1.0);
2144 sws_normalizeVec(filter->chrV, 1.0);
2145 sws_normalizeVec(filter->lumH, 1.0);
2146 sws_normalizeVec(filter->lumV, 1.0);
2147
2148 if (isnan_vec(filter->chrH) ||
2149 isnan_vec(filter->chrV) ||
2150 isnan_vec(filter->lumH) ||
2151 isnan_vec(filter->lumV))
2152 goto fail;
2153
2154 if (verbose)
2155 sws_printVec2(filter->chrH, NULL, AV_LOG_DEBUG);
2156 if (verbose)
2157 sws_printVec2(filter->lumH, NULL, AV_LOG_DEBUG);
2158
2159 return filter;
2160
2161 fail:
2162 sws_freeVec(filter->lumH);
2163 sws_freeVec(filter->lumV);
2164 sws_freeVec(filter->chrH);
2165 sws_freeVec(filter->chrV);
2166 av_freep(&filter);
2167 return NULL;
2168 }
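/* Illustrative usage sketch; w, h, src_fmt and dst_fmt are assumed to be
 * caller-provided. A caller that wants a slight chroma blur could do:
 *
 *     SwsFilter *f = sws_getDefaultFilter(0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 0.0f, 0);
 *     struct SwsContext *sws = f ? sws_getContext(w, h, src_fmt, w, h, dst_fmt,
 *                                                 SWS_BICUBIC, f, NULL, NULL)
 *                                : NULL;
 *     sws_freeFilter(f);
 *
 * The filter only needs to outlive sws_getContext(), which copies what it
 * needs during initialization. */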
2169
2170 SwsVector *sws_allocVec(int length)
2171 {
2172 SwsVector *vec;
2173
2174 if (length <= 0 || length > INT_MAX / sizeof(double))
2175 return NULL;
2176
2177 vec = av_malloc(sizeof(SwsVector));
2178 if (!vec)
2179 return NULL;
2180 vec->length = length;
2181 vec->coeff = av_malloc(sizeof(double) * length);
2182 if (!vec->coeff)
2183 av_freep(&vec);
2184 return vec;
2185 }
2186
2187 SwsVector *sws_getGaussianVec(double variance, double quality)
2188 {
2189 const int length = (int)(variance * quality + 0.5) | 1;
2190 int i;
2191 double middle = (length - 1) * 0.5;
2192 SwsVector *vec;
2193
2194 if (variance < 0 || quality < 0)
2195 return NULL;
2196
2197 vec = sws_allocVec(length);
2198
2199 if (!vec)
2200 return NULL;
2201
2202 for (i = 0; i < length; i++) {
2203 double dist = i - middle;
2204 vec->coeff[i] = exp(-dist * dist / (2 * variance * variance)) /
2205 sqrt(2 * variance * M_PI);
2206 }
2207
2208 sws_normalizeVec(vec, 1.0);
2209
2210 return vec;
2211 }
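/* Worked example: variance = 1.0, quality = 3.0 gives
 * length = (int)(3.0 + 0.5) | 1 = 3 and middle = 1.0, so the raw taps are
 * exp(-0.5)/sqrt(2*M_PI), 1/sqrt(2*M_PI), exp(-0.5)/sqrt(2*M_PI); after
 * sws_normalizeVec() they become roughly { 0.274, 0.452, 0.274 }. */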
2212
2213 /**
2214 * Allocate and return a vector with length coefficients, all
2215 * with the same value c.
2216 */
2217 static
2218 SwsVector *sws_getConstVec(double c, int length)
2219 {
2220 int i;
2221 SwsVector *vec = sws_allocVec(length);
2222
2223 if (!vec)
2224 return NULL;
2225
2226 for (i = 0; i < length; i++)
2227 vec->coeff[i] = c;
2228
2229 return vec;
2230 }
2231
2232 /**
2233 * Allocate and return a vector with just one coefficient, with
2234 * value 1.0.
2235 */
2236 static
2237 SwsVector *sws_getIdentityVec(void)
2238 {
2239 return sws_getConstVec(1.0, 1);
2240 }
2241
2242 static double sws_dcVec(SwsVector *a)
2243 {
2244 int i;
2245 double sum = 0;
2246
2247 for (i = 0; i < a->length; i++)
2248 sum += a->coeff[i];
2249
2250 return sum;
2251 }
2252
2253 void sws_scaleVec(SwsVector *a, double scalar)
2254 {
2255 int i;
2256
2257 for (i = 0; i < a->length; i++)
2258 a->coeff[i] *= scalar;
2259 }
2260
2261 void sws_normalizeVec(SwsVector *a, double height)
2262 {
2263 sws_scaleVec(a, height / sws_dcVec(a));
2264 }
2265
2266 static SwsVector *sws_sumVec(SwsVector *a, SwsVector *b)
2267 {
2268 int length = FFMAX(a->length, b->length);
2269 int i;
2270 SwsVector *vec = sws_getConstVec(0.0, length);
2271
2272 if (!vec)
2273 return NULL;
2274
2275 for (i = 0; i < a->length; i++)
2276 vec->coeff[i + (length - 1) / 2 - (a->length - 1) / 2] += a->coeff[i];
2277 for (i = 0; i < b->length; i++)
2278 vec->coeff[i + (length - 1) / 2 - (b->length - 1) / 2] += b->coeff[i];
2279
2280 return vec;
2281 }
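/* Worked example: summing a 3-tap vector {a, b, c} with a 5-tap vector
 * {d, e, f, g, h} yields the 5-tap result {d, a + e, b + f, c + g, h}: both
 * inputs are centred on the longer length before being accumulated. */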
2282
2283 /* shift the vector to the left, or to the right if "shift" is negative */
2284 static SwsVector *sws_getShiftedVec(SwsVector *a, int shift)
2285 {
2286 int length = a->length + FFABS(shift) * 2;
2287 int i;
2288 SwsVector *vec = sws_getConstVec(0.0, length);
2289
2290 if (!vec)
2291 return NULL;
2292
2293 for (i = 0; i < a->length; i++) {
2294 vec->coeff[i + (length - 1) / 2 -
2295 (a->length - 1) / 2 - shift] = a->coeff[i];
2296 }
2297
2298 return vec;
2299 }
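/* Worked example: for shift = +1 a 3-tap vector {a, b, c} becomes the 5-tap
 * vector {a, b, c, 0, 0}; for shift = -1 it becomes {0, 0, a, b, c}, i.e. the
 * taps move left for positive shifts and right for negative ones. */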
2300
2301 static
2302 void sws_shiftVec(SwsVector *a, int shift)
2303 {
2304 SwsVector *shifted = sws_getShiftedVec(a, shift);
2305 if (!shifted) {
2306 makenan_vec(a);
2307 return;
2308 }
2309 av_free(a->coeff);
2310 a->coeff = shifted->coeff;
2311 a->length = shifted->length;
2312 av_free(shifted);
2313 }
2314
2315 static
2316 void sws_addVec(SwsVector *a, SwsVector *b)
2317 {
2318 SwsVector *sum = sws_sumVec(a, b);
2319 if (!sum) {
2320 makenan_vec(a);
2321 return;
2322 }
2323 av_free(a->coeff);
2324 a->coeff = sum->coeff;
2325 a->length = sum->length;
2326 av_free(sum);
2327 }
2328
2329 /**
2330 * Print with av_log() a textual representation of the vector a
2331 * if log_level <= av_log_level.
2332 */
2333 static
2334 void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level)
2335 {
2336 int i;
2337 double max = 0;
2338 double min = 0;
2339 double range;
2340
2341 for (i = 0; i < a->length; i++)
2342 if (a->coeff[i] > max)
2343 max = a->coeff[i];
2344
2345 for (i = 0; i < a->length; i++)
2346 if (a->coeff[i] < min)
2347 min = a->coeff[i];
2348
2349 range = max - min;
2350
2351 for (i = 0; i < a->length; i++) {
2352 int x = (int)((a->coeff[i] - min) * 60.0 / range + 0.5);
2353 av_log(log_ctx, log_level, "%1.3f ", a->coeff[i]);
2354 for (; x > 0; x--)
2355 av_log(log_ctx, log_level, " ");
2356 av_log(log_ctx, log_level, "|\n");
2357 }
2358 }
2359
2360 void sws_freeVec(SwsVector *a)
2361 {
2362 if (!a)
2363 return;
2364 av_freep(&a->coeff);
2365 a->length = 0;
2366 av_free(a);
2367 }
2368
2369 void sws_freeFilter(SwsFilter *filter)
2370 {
2371 if (!filter)
2372 return;
2373
2374 sws_freeVec(filter->lumH);
2375 sws_freeVec(filter->lumV);
2376 sws_freeVec(filter->chrH);
2377 sws_freeVec(filter->chrV);
2378 av_free(filter);
2379 }
2380
2381 void sws_freeContext(SwsContext *c)
2382 {
2383 int i;
2384 if (!c)
2385 return;
2386
2387 for (i = 0; i < c->nb_slice_ctx; i++)
2388 sws_freeContext(c->slice_ctx[i]);
2389 av_freep(&c->slice_ctx);
2390 av_freep(&c->slice_err);
2391
2392 avpriv_slicethread_free(&c->slicethread);
2393
2394 for (i = 0; i < 4; i++)
2395 av_freep(&c->dither_error[i]);
2396
2397 av_frame_free(&c->frame_src);
2398 av_frame_free(&c->frame_dst);
2399
2400 av_freep(&c->src_ranges.ranges);
2401
2402 av_freep(&c->vLumFilter);
2403 av_freep(&c->vChrFilter);
2404 av_freep(&c->hLumFilter);
2405 av_freep(&c->hChrFilter);
2406 #if HAVE_ALTIVEC
2407 av_freep(&c->vYCoeffsBank);
2408 av_freep(&c->vCCoeffsBank);
2409 #endif
2410
2411 av_freep(&c->vLumFilterPos);
2412 av_freep(&c->vChrFilterPos);
2413 av_freep(&c->hLumFilterPos);
2414 av_freep(&c->hChrFilterPos);
2415
2416 #if HAVE_MMX_INLINE
2417 #if USE_MMAP
2418 if (c->lumMmxextFilterCode)
2419 munmap(c->lumMmxextFilterCode, c->lumMmxextFilterCodeSize);
2420 if (c->chrMmxextFilterCode)
2421 munmap(c->chrMmxextFilterCode, c->chrMmxextFilterCodeSize);
2422 #elif HAVE_VIRTUALALLOC
2423 if (c->lumMmxextFilterCode)
2424 VirtualFree(c->lumMmxextFilterCode, 0, MEM_RELEASE);
2425 if (c->chrMmxextFilterCode)
2426 VirtualFree(c->chrMmxextFilterCode, 0, MEM_RELEASE);
2427 #else
2428 av_free(c->lumMmxextFilterCode);
2429 av_free(c->chrMmxextFilterCode);
2430 #endif
2431 c->lumMmxextFilterCode = NULL;
2432 c->chrMmxextFilterCode = NULL;
2433 #endif /* HAVE_MMX_INLINE */
2434
2435 av_freep(&c->yuvTable);
2436 av_freep(&c->formatConvBuffer);
2437
2438 sws_freeContext(c->cascaded_context[0]);
2439 sws_freeContext(c->cascaded_context[1]);
2440 sws_freeContext(c->cascaded_context[2]);
2441 memset(c->cascaded_context, 0, sizeof(c->cascaded_context));
2442 av_freep(&c->cascaded_tmp[0]);
2443 av_freep(&c->cascaded1_tmp[0]);
2444
2445 av_freep(&c->gamma);
2446 av_freep(&c->inv_gamma);
2447
2448 av_freep(&c->rgb0_scratch);
2449 av_freep(&c->xyz_scratch);
2450
2451 ff_free_filters(c);
2452
2453 av_free(c);
2454 }
2455
2456 struct SwsContext *sws_getCachedContext(struct SwsContext *context, int srcW,
2457 int srcH, enum AVPixelFormat srcFormat,
2458 int dstW, int dstH,
2459 enum AVPixelFormat dstFormat, int flags,
2460 SwsFilter *srcFilter,
2461 SwsFilter *dstFilter,
2462 const double *param)
2463 {
2464 static const double default_param[2] = { SWS_PARAM_DEFAULT,
2465 SWS_PARAM_DEFAULT };
2466 int64_t src_h_chr_pos = -513, dst_h_chr_pos = -513,
2467 src_v_chr_pos = -513, dst_v_chr_pos = -513;
2468
2469 if (!param)
2470 param = default_param;
2471
2472 if (context &&
2473 (context->srcW != srcW ||
2474 context->srcH != srcH ||
2475 context->srcFormat != srcFormat ||
2476 context->dstW != dstW ||
2477 context->dstH != dstH ||
2478 context->dstFormat != dstFormat ||
2479 context->flags != flags ||
2480 context->param[0] != param[0] ||
2481 context->param[1] != param[1])) {
2482
2483 av_opt_get_int(context, "src_h_chr_pos", 0, &src_h_chr_pos);
2484 av_opt_get_int(context, "src_v_chr_pos", 0, &src_v_chr_pos);
2485 av_opt_get_int(context, "dst_h_chr_pos", 0, &dst_h_chr_pos);
2486 av_opt_get_int(context, "dst_v_chr_pos", 0, &dst_v_chr_pos);
2487 sws_freeContext(context);
2488 context = NULL;
2489 }
2490
2491 if (!context) {
2492 if (!(context = sws_alloc_context()))
2493 return NULL;
2494 context->srcW = srcW;
2495 context->srcH = srcH;
2496 context->srcFormat = srcFormat;
2497 context->dstW = dstW;
2498 context->dstH = dstH;
2499 context->dstFormat = dstFormat;
2500 context->flags = flags;
2501 context->param[0] = param[0];
2502 context->param[1] = param[1];
2503
2504 av_opt_set_int(context, "src_h_chr_pos", src_h_chr_pos, 0);
2505 av_opt_set_int(context, "src_v_chr_pos", src_v_chr_pos, 0);
2506 av_opt_set_int(context, "dst_h_chr_pos", dst_h_chr_pos, 0);
2507 av_opt_set_int(context, "dst_v_chr_pos", dst_v_chr_pos, 0);
2508
2509 if (sws_init_context(context, srcFilter, dstFilter) < 0) {
2510 sws_freeContext(context);
2511 return NULL;
2512 }
2513 }
2514 return context;
2515 }
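/* Illustrative usage sketch; src_w/src_h/src_fmt and dst_w/dst_h/dst_fmt are
 * assumed to be caller-provided and may change between frames. The caller
 * keeps a single pointer, initialised to NULL, and lets this function decide
 * whether the previous context can be reused:
 *
 *     sws = sws_getCachedContext(sws, src_w, src_h, src_fmt,
 *                                dst_w, dst_h, dst_fmt,
 *                                SWS_BILINEAR, NULL, NULL, NULL);
 *     if (!sws)
 *         return AVERROR(ENOMEM);
 */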
2516
2517 int ff_range_add(RangeList *rl, unsigned int start, unsigned int len)
2518 {
2519 Range *tmp;
2520 unsigned int idx;
2521
2522 /* find the first existing range after the new one */
2523 for (idx = 0; idx < rl->nb_ranges; idx++)
2524 if (rl->ranges[idx].start > start)
2525 break;
2526
2527 /* check for overlap */
2528 if (idx > 0) {
2529 Range *prev = &rl->ranges[idx - 1];
2530 if (prev->start + prev->len > start)
2531 return AVERROR(EINVAL);
2532 }
2533 if (idx < rl->nb_ranges) {
2534 Range *next = &rl->ranges[idx];
2535 if (start + len > next->start)
2536 return AVERROR(EINVAL);
2537 }
2538
2539 tmp = av_fast_realloc(rl->ranges, &rl->ranges_allocated,
2540 (rl->nb_ranges + 1) * sizeof(*rl->ranges));
2541 if (!tmp)
2542 return AVERROR(ENOMEM);
2543 rl->ranges = tmp;
2544
2545 memmove(rl->ranges + idx + 1, rl->ranges + idx,
2546 sizeof(*rl->ranges) * (rl->nb_ranges - idx));
2547 rl->ranges[idx].start = start;
2548 rl->ranges[idx].len = len;
2549 rl->nb_ranges++;
2550
2551 /* merge ranges */
2552 if (idx > 0) {
2553 Range *prev = &rl->ranges[idx - 1];
2554 Range *cur = &rl->ranges[idx];
2555 if (prev->start + prev->len == cur->start) {
2556 prev->len += cur->len;
2557 memmove(rl->ranges + idx - 1, rl->ranges + idx,
2558 sizeof(*rl->ranges) * (rl->nb_ranges - idx));
2559 rl->nb_ranges--;
2560 idx--;
2561 }
2562 }
2563 if (idx < rl->nb_ranges - 1) {
2564 Range *cur = &rl->ranges[idx];
2565 Range *next = &rl->ranges[idx + 1];
2566 if (cur->start + cur->len == next->start) {
2567 cur->len += next->len;
2568 memmove(rl->ranges + idx, rl->ranges + idx + 1,
2569 sizeof(*rl->ranges) * (rl->nb_ranges - idx - 1));
2570 rl->nb_ranges--;
2571 }
2572 }
2573
2574 return 0;
2575 }
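/* Worked example: starting from an empty list, ff_range_add(rl, 0, 16)
 * followed by ff_range_add(rl, 16, 16) leaves a single merged range
 * { start = 0, len = 32 }, while a subsequent ff_range_add(rl, 8, 4) overlaps
 * the existing range and returns AVERROR(EINVAL). */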
2576