1 /*
2 * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #ifndef SWSCALE_SWSCALE_INTERNAL_H
22 #define SWSCALE_SWSCALE_INTERNAL_H
23
24 #include <stdatomic.h>
25
26 #include "config.h"
27
28 #include "libavutil/avassert.h"
29 #include "libavutil/common.h"
30 #include "libavutil/frame.h"
31 #include "libavutil/intreadwrite.h"
32 #include "libavutil/log.h"
33 #include "libavutil/mem_internal.h"
34 #include "libavutil/pixfmt.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/slicethread.h"
37 #include "libavutil/ppc/util_altivec.h"
38
39 #define STR(s) AV_TOSTRING(s) // AV_STRINGIFY is too long
40
41 #define YUVRGB_TABLE_HEADROOM 512
42 #define YUVRGB_TABLE_LUMA_HEADROOM 512
43
44 #define MAX_FILTER_SIZE SWS_MAX_FILTER_SIZE
45
46 #define DITHER1XBPP
47
48 #if HAVE_BIGENDIAN
49 #define ALT32_CORR (-1)
50 #else
51 #define ALT32_CORR 1
52 #endif
53
54 #if ARCH_X86_64
55 # define APCK_PTR2 8
56 # define APCK_COEF 16
57 # define APCK_SIZE 24
58 #else
59 # define APCK_PTR2 4
60 # define APCK_COEF 8
61 # define APCK_SIZE 16
62 #endif
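/*
 * Illustrative only: the APCK_* values describe the byte layout of one entry
 * of the "packed" vertical filter used by some x86 asm. A hypothetical struct
 * with the same layout (the asm indexes raw bytes instead) would be:
 *
 * @code
 * struct {
 *     const int16_t *src1;   // offset 0
 *     const int16_t *src2;   // offset APCK_PTR2
 *     int16_t coeff[4];      // offset APCK_COEF, coefficient pair for pmaddwd
 * };                         // sizeof() == APCK_SIZE
 * @endcode
 */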
63
64 #define RETCODE_USE_CASCADE -12345
65
66 struct SwsContext;
67
68 typedef enum SwsDither {
69 SWS_DITHER_NONE = 0,
70 SWS_DITHER_AUTO,
71 SWS_DITHER_BAYER,
72 SWS_DITHER_ED,
73 SWS_DITHER_A_DITHER,
74 SWS_DITHER_X_DITHER,
75 NB_SWS_DITHER,
76 } SwsDither;
77
78 typedef enum SwsAlphaBlend {
79 SWS_ALPHA_BLEND_NONE = 0,
80 SWS_ALPHA_BLEND_UNIFORM,
81 SWS_ALPHA_BLEND_CHECKERBOARD,
82 SWS_ALPHA_BLEND_NB,
83 } SwsAlphaBlend;
84
85 typedef struct Range {
86 unsigned int start;
87 unsigned int len;
88 } Range;
89
90 typedef struct RangeList {
91 Range *ranges;
92 unsigned int nb_ranges;
93 int ranges_allocated;
94 } RangeList;
95
96 int ff_range_add(RangeList *r, unsigned int start, unsigned int len);
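/*
 * Minimal usage sketch (illustrative): record that rows
 * [srcSliceY, srcSliceY + srcSliceH) of the source have been received.
 *
 * @code
 * RangeList r = { 0 };
 * int ret = ff_range_add(&r, srcSliceY, srcSliceH);
 * if (ret < 0)
 *     return ret; // assumed to be a negative AVERROR code on failure
 * @endcode
 */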
97
98 typedef int (*SwsFunc)(struct SwsContext *context, const uint8_t *src[],
99 int srcStride[], int srcSliceY, int srcSliceH,
100 uint8_t *dst[], int dstStride[]);
101
102 /**
103 * Write one line of horizontally scaled data to planar output
104 * without any additional vertical scaling (or point-scaling).
105 *
106 * @param src scaled source data, 15 bits for 8-10-bit output,
107 * 19 bits for 16-bit output (in int32_t)
108 * @param dest pointer to the output plane. For >8-bit
109 * output, this is in uint16_t
110 * @param dstW width of destination in pixels
111  * @param dither  ordered dither array of type uint8_t and size 8
112 * @param offset Dither offset
113 */
114 typedef void (*yuv2planar1_fn)(const int16_t *src, uint8_t *dest, int dstW,
115 const uint8_t *dither, int offset);
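/*
 * A plain C sketch of the 8-bit case (illustrative; the actual implementations
 * are selected in ff_sws_init_output_funcs() and are often SIMD): the 15-bit
 * intermediate is dithered and shifted down to 8 bits.
 *
 * @code
 * static void yuv2plane1_8_sketch(const int16_t *src, uint8_t *dest, int dstW,
 *                                 const uint8_t *dither, int offset)
 * {
 *     for (int i = 0; i < dstW; i++)
 *         dest[i] = av_clip_uint8((src[i] + dither[(i + offset) & 7]) >> 7);
 * }
 * @endcode
 */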
116
117 /**
118 * Write one line of horizontally scaled data to planar output
119 * with multi-point vertical scaling between input pixels.
120 *
121 * @param filter vertical luma/alpha scaling coefficients, 12 bits [0,4096]
122 * @param src scaled luma (Y) or alpha (A) source data, 15 bits for
123 * 8-10-bit output, 19 bits for 16-bit output (in int32_t)
124 * @param filterSize number of vertical input lines to scale
125 * @param dest pointer to output plane. For >8-bit
126 * output, this is in uint16_t
127  * @param dstW    width of destination in pixels
128 * @param offset Dither offset
129 */
130 typedef void (*yuv2planarX_fn)(const int16_t *filter, int filterSize,
131 const int16_t **src, uint8_t *dest, int dstW,
132 const uint8_t *dither, int offset);
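/*
 * A plain C sketch of the 8-bit case (illustrative): the dither value is
 * pre-scaled to the 12-bit filter domain, all taps are accumulated, and the
 * sum is shifted down by 12 + 7 = 19 bits.
 *
 * @code
 * static void yuv2planeX_8_sketch(const int16_t *filter, int filterSize,
 *                                 const int16_t **src, uint8_t *dest, int dstW,
 *                                 const uint8_t *dither, int offset)
 * {
 *     for (int i = 0; i < dstW; i++) {
 *         int val = dither[(i + offset) & 7] << 12;
 *         for (int j = 0; j < filterSize; j++)
 *             val += src[j][i] * filter[j];
 *         dest[i] = av_clip_uint8(val >> 19);
 *     }
 * }
 * @endcode
 */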
133
134 /**
135 * Write one line of horizontally scaled chroma to interleaved output
136 * with multi-point vertical scaling between input pixels.
137 *
138 * @param dstFormat destination pixel format
139 * @param chrDither ordered dither array of type uint8_t and size 8
140 * @param chrFilter vertical chroma scaling coefficients, 12 bits [0,4096]
141 * @param chrUSrc scaled chroma (U) source data, 15 bits for 8-10-bit
142 * output, 19 bits for 16-bit output (in int32_t)
143 * @param chrVSrc scaled chroma (V) source data, 15 bits for 8-10-bit
144 * output, 19 bits for 16-bit output (in int32_t)
145 * @param chrFilterSize number of vertical chroma input lines to scale
146 * @param dest pointer to the output plane. For >8-bit
147 * output, this is in uint16_t
148 * @param dstW width of chroma planes
149 */
150 typedef void (*yuv2interleavedX_fn)(enum AVPixelFormat dstFormat,
151 const uint8_t *chrDither,
152 const int16_t *chrFilter,
153 int chrFilterSize,
154 const int16_t **chrUSrc,
155 const int16_t **chrVSrc,
156 uint8_t *dest, int dstW);
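/*
 * For semi-planar outputs such as NV12, the U and V results are interleaved
 * into a single plane. An illustrative 8-bit sketch of the inner loop (NV21
 * would swap the two stores):
 *
 * @code
 * for (int i = 0; i < dstW; i++) {
 *     int u = chrDither[i & 7] << 12;
 *     int v = chrDither[(i + 3) & 7] << 12;
 *     for (int j = 0; j < chrFilterSize; j++) {
 *         u += chrUSrc[j][i] * chrFilter[j];
 *         v += chrVSrc[j][i] * chrFilter[j];
 *     }
 *     dest[2 * i]     = av_clip_uint8(u >> 19);
 *     dest[2 * i + 1] = av_clip_uint8(v >> 19);
 * }
 * @endcode
 */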
157
158 /**
159 * Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB
160 * output without any additional vertical scaling (or point-scaling). Note
161 * that this function may do chroma scaling, see the "uvalpha" argument.
162 *
163 * @param c SWS scaling context
164 * @param lumSrc scaled luma (Y) source data, 15 bits for 8-10-bit output,
165 * 19 bits for 16-bit output (in int32_t)
166 * @param chrUSrc scaled chroma (U) source data, 15 bits for 8-10-bit output,
167 * 19 bits for 16-bit output (in int32_t)
168 * @param chrVSrc scaled chroma (V) source data, 15 bits for 8-10-bit output,
169 * 19 bits for 16-bit output (in int32_t)
170 * @param alpSrc scaled alpha (A) source data, 15 bits for 8-10-bit output,
171 * 19 bits for 16-bit output (in int32_t)
172 * @param dest pointer to the output plane. For 16-bit output, this is
173 * uint16_t
174 * @param dstW width of lumSrc and alpSrc in pixels, number of pixels
175 * to write into dest[]
176  * @param uvalpha  chroma scaling coefficient for the second line of chroma
177  *                 pixels, either 2048 or 0. If 0, one chroma input is used
178  *                 for 2 output pixels (or, if the SWS_FULL_CHR_H_INT flag
179  *                 is set, for 1 output pixel). If 2048, two chroma
180  *                 input pixels should be averaged for 2 output pixels (this
181  *                 only happens if SWS_FULL_CHR_H_INT is not set)
182 * @param y vertical line number for this output. This does not need
183 * to be used to calculate the offset in the destination,
184 * but can be used to generate comfort noise using dithering
185 * for some output formats.
186 */
187 typedef void (*yuv2packed1_fn)(struct SwsContext *c, const int16_t *lumSrc,
188 const int16_t *chrUSrc[2],
189 const int16_t *chrVSrc[2],
190 const int16_t *alpSrc, uint8_t *dest,
191 int dstW, int uvalpha, int y);
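/*
 * Illustrative handling of the uvalpha parameter inside such a function
 * (15-bit intermediate samples reduced to 8 bits):
 *
 * @code
 * int U, V;
 * if (uvalpha == 0) {                 // use the nearest chroma line only
 *     U = (chrUSrc[0][i] + 64) >> 7;
 *     V = (chrVSrc[0][i] + 64) >> 7;
 * } else {                            // uvalpha == 2048: average two lines
 *     U = (chrUSrc[0][i] + chrUSrc[1][i] + 128) >> 8;
 *     V = (chrVSrc[0][i] + chrVSrc[1][i] + 128) >> 8;
 * }
 * @endcode
 */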
192 /**
193 * Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB
194 * output by doing bilinear scaling between two input lines.
195 *
196 * @param c SWS scaling context
197 * @param lumSrc scaled luma (Y) source data, 15 bits for 8-10-bit output,
198 * 19 bits for 16-bit output (in int32_t)
199 * @param chrUSrc scaled chroma (U) source data, 15 bits for 8-10-bit output,
200 * 19 bits for 16-bit output (in int32_t)
201 * @param chrVSrc scaled chroma (V) source data, 15 bits for 8-10-bit output,
202 * 19 bits for 16-bit output (in int32_t)
203 * @param alpSrc scaled alpha (A) source data, 15 bits for 8-10-bit output,
204 * 19 bits for 16-bit output (in int32_t)
205 * @param dest pointer to the output plane. For 16-bit output, this is
206 * uint16_t
207 * @param dstW width of lumSrc and alpSrc in pixels, number of pixels
208 * to write into dest[]
209 * @param yalpha luma/alpha scaling coefficients for the second input line.
210 * The first line's coefficients can be calculated by using
211 * 4096 - yalpha
212 * @param uvalpha chroma scaling coefficient for the second input line. The
213 * first line's coefficients can be calculated by using
214 * 4096 - uvalpha
215 * @param y vertical line number for this output. This does not need
216 * to be used to calculate the offset in the destination,
217 * but can be used to generate comfort noise using dithering
218 * for some output formats.
219 */
220 typedef void (*yuv2packed2_fn)(struct SwsContext *c, const int16_t *lumSrc[2],
221 const int16_t *chrUSrc[2],
222 const int16_t *chrVSrc[2],
223 const int16_t *alpSrc[2],
224 uint8_t *dest,
225 int dstW, int yalpha, int uvalpha, int y);
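/*
 * Illustrative core of the bilinear blend between the two input lines
 * (12-bit coefficients applied to 15-bit samples, hence >> 19 for 8-bit
 * output):
 *
 * @code
 * int Y = (lumSrc[0][i]  * (4096 - yalpha)  + lumSrc[1][i]  * yalpha)  >> 19;
 * int U = (chrUSrc[0][i] * (4096 - uvalpha) + chrUSrc[1][i] * uvalpha) >> 19;
 * int V = (chrVSrc[0][i] * (4096 - uvalpha) + chrVSrc[1][i] * uvalpha) >> 19;
 * @endcode
 */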
226 /**
227 * Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB
228 * output by doing multi-point vertical scaling between input pixels.
229 *
230 * @param c SWS scaling context
231 * @param lumFilter vertical luma/alpha scaling coefficients, 12 bits [0,4096]
232 * @param lumSrc scaled luma (Y) source data, 15 bits for 8-10-bit output,
233 * 19 bits for 16-bit output (in int32_t)
234 * @param lumFilterSize number of vertical luma/alpha input lines to scale
235 * @param chrFilter vertical chroma scaling coefficients, 12 bits [0,4096]
236 * @param chrUSrc scaled chroma (U) source data, 15 bits for 8-10-bit output,
237 * 19 bits for 16-bit output (in int32_t)
238 * @param chrVSrc scaled chroma (V) source data, 15 bits for 8-10-bit output,
239 * 19 bits for 16-bit output (in int32_t)
240 * @param chrFilterSize number of vertical chroma input lines to scale
241 * @param alpSrc scaled alpha (A) source data, 15 bits for 8-10-bit output,
242 * 19 bits for 16-bit output (in int32_t)
243 * @param dest pointer to the output plane. For 16-bit output, this is
244 * uint16_t
245 * @param dstW width of lumSrc and alpSrc in pixels, number of pixels
246 * to write into dest[]
247 * @param y vertical line number for this output. This does not need
248 * to be used to calculate the offset in the destination,
249 * but can be used to generate comfort noise using dithering
250  *                       for some output formats.
251 */
252 typedef void (*yuv2packedX_fn)(struct SwsContext *c, const int16_t *lumFilter,
253 const int16_t **lumSrc, int lumFilterSize,
254 const int16_t *chrFilter,
255 const int16_t **chrUSrc,
256 const int16_t **chrVSrc, int chrFilterSize,
257 const int16_t **alpSrc, uint8_t *dest,
258 int dstW, int y);
259
260 /**
261 * Write one line of horizontally scaled Y/U/V/A to YUV/RGB
262 * output by doing multi-point vertical scaling between input pixels.
263 *
264 * @param c SWS scaling context
265 * @param lumFilter vertical luma/alpha scaling coefficients, 12 bits [0,4096]
266 * @param lumSrc scaled luma (Y) source data, 15 bits for 8-10-bit output,
267 * 19 bits for 16-bit output (in int32_t)
268 * @param lumFilterSize number of vertical luma/alpha input lines to scale
269 * @param chrFilter vertical chroma scaling coefficients, 12 bits [0,4096]
270 * @param chrUSrc scaled chroma (U) source data, 15 bits for 8-10-bit output,
271 * 19 bits for 16-bit output (in int32_t)
272 * @param chrVSrc scaled chroma (V) source data, 15 bits for 8-10-bit output,
273 * 19 bits for 16-bit output (in int32_t)
274 * @param chrFilterSize number of vertical chroma input lines to scale
275 * @param alpSrc scaled alpha (A) source data, 15 bits for 8-10-bit output,
276 * 19 bits for 16-bit output (in int32_t)
277 * @param dest pointer to the output planes. For 16-bit output, this is
278 * uint16_t
279 * @param dstW width of lumSrc and alpSrc in pixels, number of pixels
280 * to write into dest[]
281 * @param y vertical line number for this output. This does not need
282 * to be used to calculate the offset in the destination,
283 * but can be used to generate comfort noise using dithering
284  *                       for some output formats.
285 */
286 typedef void (*yuv2anyX_fn)(struct SwsContext *c, const int16_t *lumFilter,
287 const int16_t **lumSrc, int lumFilterSize,
288 const int16_t *chrFilter,
289 const int16_t **chrUSrc,
290 const int16_t **chrVSrc, int chrFilterSize,
291 const int16_t **alpSrc, uint8_t **dest,
292 int dstW, int y);
293
294 struct SwsSlice;
295 struct SwsFilterDescriptor;
296
297 /* This struct should be aligned on at least a 32-byte boundary. */
298 typedef struct SwsContext {
299 /**
300 * info on struct for av_log
301 */
302 const AVClass *av_class;
303
304 struct SwsContext *parent;
305
306 AVSliceThread *slicethread;
307 struct SwsContext **slice_ctx;
308 int *slice_err;
309 int nb_slice_ctx;
310
311 // values passed to current sws_receive_slice() call
312 int dst_slice_start;
313 int dst_slice_height;
314
315 /**
316 * Note that src, dst, srcStride, dstStride will be copied in the
317 * sws_scale() wrapper so they can be freely modified here.
318 */
319 SwsFunc convert_unscaled;
320 int srcW; ///< Width of source luma/alpha planes.
321 int srcH; ///< Height of source luma/alpha planes.
322 int dstH; ///< Height of destination luma/alpha planes.
323 int chrSrcW; ///< Width of source chroma planes.
324 int chrSrcH; ///< Height of source chroma planes.
325 int chrDstW; ///< Width of destination chroma planes.
326 int chrDstH; ///< Height of destination chroma planes.
327 int lumXInc, chrXInc;
328 int lumYInc, chrYInc;
329 enum AVPixelFormat dstFormat; ///< Destination pixel format.
330 enum AVPixelFormat srcFormat; ///< Source pixel format.
331 int dstFormatBpp; ///< Number of bits per pixel of the destination pixel format.
332 int srcFormatBpp; ///< Number of bits per pixel of the source pixel format.
333 int dstBpc, srcBpc;
334 int chrSrcHSubSample; ///< Binary logarithm of horizontal subsampling factor between luma/alpha and chroma planes in source image.
335 int chrSrcVSubSample; ///< Binary logarithm of vertical subsampling factor between luma/alpha and chroma planes in source image.
336 int chrDstHSubSample; ///< Binary logarithm of horizontal subsampling factor between luma/alpha and chroma planes in destination image.
337 int chrDstVSubSample; ///< Binary logarithm of vertical subsampling factor between luma/alpha and chroma planes in destination image.
338 int vChrDrop; ///< Binary logarithm of extra vertical subsampling factor in source image chroma planes specified by user.
339 int sliceDir; ///< Direction that slices are fed to the scaler (1 = top-to-bottom, -1 = bottom-to-top).
340 int nb_threads; ///< Number of threads used for scaling
341 double param[2]; ///< Input parameters for scaling algorithms that need them.
342
343 AVFrame *frame_src;
344 AVFrame *frame_dst;
345
346 RangeList src_ranges;
347
348     /* The cascaded_* fields allow splitting a scaler task into multiple
349      * sequential steps; this is used, for example, to limit the maximum
350 * downscaling factor that needs to be supported in one scaler.
351 */
352 struct SwsContext *cascaded_context[3];
353 int cascaded_tmpStride[4];
354 uint8_t *cascaded_tmp[4];
355 int cascaded1_tmpStride[4];
356 uint8_t *cascaded1_tmp[4];
357 int cascaded_mainindex;
358
359 double gamma_value;
360 int gamma_flag;
361 int is_internal_gamma;
362 uint16_t *gamma;
363 uint16_t *inv_gamma;
364
365 int numDesc;
366 int descIndex[2];
367 int numSlice;
368 struct SwsSlice *slice;
369 struct SwsFilterDescriptor *desc;
370
371 uint32_t pal_yuv[256];
372 uint32_t pal_rgb[256];
373
374 float uint2float_lut[256];
375
376 /**
377 * @name Scaled horizontal lines ring buffer.
378 * The horizontal scaler keeps just enough scaled lines in a ring buffer
379 * so they may be passed to the vertical scaler. The pointers to the
380 * allocated buffers for each line are duplicated in sequence in the ring
381 * buffer to simplify indexing and avoid wrapping around between lines
382 * inside the vertical scaler code. The wrapping is done before the
383 * vertical scaler is called.
384 */
385 //@{
386 int lastInLumBuf; ///< Last scaled horizontal luma/alpha line from source in the ring buffer.
387 int lastInChrBuf; ///< Last scaled horizontal chroma line from source in the ring buffer.
388 //@}
389
390 uint8_t *formatConvBuffer;
391 int needAlpha;
392
393 /**
394 * @name Horizontal and vertical filters.
395 * To better understand the following fields, here is a pseudo-code of
396 * their usage in filtering a horizontal line:
397 * @code
398 * for (i = 0; i < width; i++) {
399 * dst[i] = 0;
400 * for (j = 0; j < filterSize; j++)
401 * dst[i] += src[ filterPos[i] + j ] * filter[ filterSize * i + j ];
402 * dst[i] >>= FRAC_BITS; // The actual implementation is fixed-point.
403 * }
404 * @endcode
405 */
406 //@{
407 int16_t *hLumFilter; ///< Array of horizontal filter coefficients for luma/alpha planes.
408 int16_t *hChrFilter; ///< Array of horizontal filter coefficients for chroma planes.
409 int16_t *vLumFilter; ///< Array of vertical filter coefficients for luma/alpha planes.
410 int16_t *vChrFilter; ///< Array of vertical filter coefficients for chroma planes.
411 int32_t *hLumFilterPos; ///< Array of horizontal filter starting positions for each dst[i] for luma/alpha planes.
412 int32_t *hChrFilterPos; ///< Array of horizontal filter starting positions for each dst[i] for chroma planes.
413 int32_t *vLumFilterPos; ///< Array of vertical filter starting positions for each dst[i] for luma/alpha planes.
414 int32_t *vChrFilterPos; ///< Array of vertical filter starting positions for each dst[i] for chroma planes.
415 int hLumFilterSize; ///< Horizontal filter size for luma/alpha pixels.
416 int hChrFilterSize; ///< Horizontal filter size for chroma pixels.
417 int vLumFilterSize; ///< Vertical filter size for luma/alpha pixels.
418 int vChrFilterSize; ///< Vertical filter size for chroma pixels.
419 //@}
420
421 int lumMmxextFilterCodeSize; ///< Runtime-generated MMXEXT horizontal fast bilinear scaler code size for luma/alpha planes.
422 int chrMmxextFilterCodeSize; ///< Runtime-generated MMXEXT horizontal fast bilinear scaler code size for chroma planes.
423 uint8_t *lumMmxextFilterCode; ///< Runtime-generated MMXEXT horizontal fast bilinear scaler code for luma/alpha planes.
424 uint8_t *chrMmxextFilterCode; ///< Runtime-generated MMXEXT horizontal fast bilinear scaler code for chroma planes.
425
426 int canMMXEXTBeUsed;
427 int warned_unuseable_bilinear;
428
429 int dstY; ///< Last destination vertical line output from last slice.
430 int flags; ///< Flags passed by the user to select scaler algorithm, optimizations, subsampling, etc...
431     void *yuvTable;         // pointer to the yuv->rgb table start so it can be freed
432 // alignment ensures the offset can be added in a single
433 // instruction on e.g. ARM
434 DECLARE_ALIGNED(16, int, table_gV)[256 + 2*YUVRGB_TABLE_HEADROOM];
435 uint8_t *table_rV[256 + 2*YUVRGB_TABLE_HEADROOM];
436 uint8_t *table_gU[256 + 2*YUVRGB_TABLE_HEADROOM];
437 uint8_t *table_bU[256 + 2*YUVRGB_TABLE_HEADROOM];
438     DECLARE_ALIGNED(16, int32_t, input_rgb2yuv_table)[16+40*4]; // This table can contain both C and SIMD formatted values, the C values are always at the XY_IDX points
439 #define RY_IDX 0
440 #define GY_IDX 1
441 #define BY_IDX 2
442 #define RU_IDX 3
443 #define GU_IDX 4
444 #define BU_IDX 5
445 #define RV_IDX 6
446 #define GV_IDX 7
447 #define BV_IDX 8
448 #define RGB2YUV_SHIFT 15
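/*
 * Illustrative use of the table and shift above, given a pointer rgb2yuv to
 * input_rgb2yuv_table (the rounding/bias handling of the real converters is
 * omitted):
 *
 * @code
 * int Y = (rgb2yuv[RY_IDX] * r + rgb2yuv[GY_IDX] * g + rgb2yuv[BY_IDX] * b +
 *          (1 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
 * @endcode
 */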
449
450 int *dither_error[4];
451
452 //Colorspace stuff
453 int contrast, brightness, saturation; // for sws_getColorspaceDetails
454 int srcColorspaceTable[4];
455 int dstColorspaceTable[4];
456 int srcRange; ///< 0 = MPG YUV range, 1 = JPG YUV range (source image).
457 int dstRange; ///< 0 = MPG YUV range, 1 = JPG YUV range (destination image).
458 int src0Alpha;
459 int dst0Alpha;
460 int srcXYZ;
461 int dstXYZ;
462 int src_h_chr_pos;
463 int dst_h_chr_pos;
464 int src_v_chr_pos;
465 int dst_v_chr_pos;
466 int yuv2rgb_y_offset;
467 int yuv2rgb_y_coeff;
468 int yuv2rgb_v2r_coeff;
469 int yuv2rgb_v2g_coeff;
470 int yuv2rgb_u2g_coeff;
471 int yuv2rgb_u2b_coeff;
472
473 #define RED_DITHER "0*8"
474 #define GREEN_DITHER "1*8"
475 #define BLUE_DITHER "2*8"
476 #define Y_COEFF "3*8"
477 #define VR_COEFF "4*8"
478 #define UB_COEFF "5*8"
479 #define VG_COEFF "6*8"
480 #define UG_COEFF "7*8"
481 #define Y_OFFSET "8*8"
482 #define U_OFFSET "9*8"
483 #define V_OFFSET "10*8"
484 #define LUM_MMX_FILTER_OFFSET "11*8"
485 #define CHR_MMX_FILTER_OFFSET "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)
486 #define DSTW_OFFSET "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*2"
487 #define ESP_OFFSET "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*2+8"
488 #define VROUNDER_OFFSET "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*2+16"
489 #define U_TEMP "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*2+24"
490 #define V_TEMP "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*2+32"
491 #define Y_TEMP "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*2+40"
492 #define ALP_MMX_FILTER_OFFSET "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*2+48"
493 #define UV_OFF_PX "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*3+48"
494 #define UV_OFF_BYTE "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*3+56"
495 #define DITHER16 "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*3+64"
496 #define DITHER32 "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*3+80"
497 #define DITHER32_INT (11*8+4*4*MAX_FILTER_SIZE*3+80) // value equal to above, used for checking that the struct hasn't been changed by mistake
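/*
 * Illustrative consistency check, assuming the string offsets above are
 * measured from redDither (the first field of the asm-visible block below):
 *
 * @code
 * av_assert0(offsetof(SwsContext, dither32) -
 *            offsetof(SwsContext, redDither) == DITHER32_INT);
 * @endcode
 */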
498
499 DECLARE_ALIGNED(8, uint64_t, redDither);
500 DECLARE_ALIGNED(8, uint64_t, greenDither);
501 DECLARE_ALIGNED(8, uint64_t, blueDither);
502
503 DECLARE_ALIGNED(8, uint64_t, yCoeff);
504 DECLARE_ALIGNED(8, uint64_t, vrCoeff);
505 DECLARE_ALIGNED(8, uint64_t, ubCoeff);
506 DECLARE_ALIGNED(8, uint64_t, vgCoeff);
507 DECLARE_ALIGNED(8, uint64_t, ugCoeff);
508 DECLARE_ALIGNED(8, uint64_t, yOffset);
509 DECLARE_ALIGNED(8, uint64_t, uOffset);
510 DECLARE_ALIGNED(8, uint64_t, vOffset);
511 int32_t lumMmxFilter[4 * MAX_FILTER_SIZE];
512 int32_t chrMmxFilter[4 * MAX_FILTER_SIZE];
513 int dstW; ///< Width of destination luma/alpha planes.
514 DECLARE_ALIGNED(8, uint64_t, esp);
515 DECLARE_ALIGNED(8, uint64_t, vRounder);
516 DECLARE_ALIGNED(8, uint64_t, u_temp);
517 DECLARE_ALIGNED(8, uint64_t, v_temp);
518 DECLARE_ALIGNED(8, uint64_t, y_temp);
519 int32_t alpMmxFilter[4 * MAX_FILTER_SIZE];
520 // alignment of these values is not necessary, but merely here
521     // to maintain the same offset across x86-32 and x86-64. Once we
522 // use proper offset macros in the asm, they can be removed.
523 DECLARE_ALIGNED(8, ptrdiff_t, uv_off); ///< offset (in pixels) between u and v planes
524 DECLARE_ALIGNED(8, ptrdiff_t, uv_offx2); ///< offset (in bytes) between u and v planes
525 DECLARE_ALIGNED(8, uint16_t, dither16)[8];
526 DECLARE_ALIGNED(8, uint32_t, dither32)[8];
527
528 const uint8_t *chrDither8, *lumDither8;
529
530 #if HAVE_ALTIVEC
531 vector signed short CY;
532 vector signed short CRV;
533 vector signed short CBU;
534 vector signed short CGU;
535 vector signed short CGV;
536 vector signed short OY;
537 vector unsigned short CSHIFT;
538 vector signed short *vYCoeffsBank, *vCCoeffsBank;
539 #endif
540
541 int use_mmx_vfilter;
542
543 /* pre defined color-spaces gamma */
544 #define XYZ_GAMMA (2.6f)
545 #define RGB_GAMMA (2.2f)
546 int16_t *xyzgamma;
547 int16_t *rgbgamma;
548 int16_t *xyzgammainv;
549 int16_t *rgbgammainv;
550 int16_t xyz2rgb_matrix[3][4];
551 int16_t rgb2xyz_matrix[3][4];
552
553 /* function pointers for swscale() */
554 yuv2planar1_fn yuv2plane1;
555 yuv2planarX_fn yuv2planeX;
556 yuv2interleavedX_fn yuv2nv12cX;
557 yuv2packed1_fn yuv2packed1;
558 yuv2packed2_fn yuv2packed2;
559 yuv2packedX_fn yuv2packedX;
560 yuv2anyX_fn yuv2anyX;
561
562 /// Unscaled conversion of luma plane to YV12 for horizontal scaler.
563 void (*lumToYV12)(uint8_t *dst, const uint8_t *src, const uint8_t *src2, const uint8_t *src3,
564 int width, uint32_t *pal);
565 /// Unscaled conversion of alpha plane to YV12 for horizontal scaler.
566 void (*alpToYV12)(uint8_t *dst, const uint8_t *src, const uint8_t *src2, const uint8_t *src3,
567 int width, uint32_t *pal);
568 /// Unscaled conversion of chroma planes to YV12 for horizontal scaler.
569 void (*chrToYV12)(uint8_t *dstU, uint8_t *dstV,
570 const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
571 int width, uint32_t *pal);
572
573 /**
574 * Functions to read planar input, such as planar RGB, and convert
575 * internally to Y/UV/A.
576 */
577 /** @{ */
578 void (*readLumPlanar)(uint8_t *dst, const uint8_t *src[4], int width, int32_t *rgb2yuv);
579 void (*readChrPlanar)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src[4],
580 int width, int32_t *rgb2yuv);
581 void (*readAlpPlanar)(uint8_t *dst, const uint8_t *src[4], int width, int32_t *rgb2yuv);
582 /** @} */
583
584 /**
585 * Scale one horizontal line of input data using a bilinear filter
586      * to produce one line of output data. Compared to SwsContext->hyScale()
587      * and hcScale(), please take note of the following caveats when using these:
588 * - Scaling is done using only 7 bits instead of 14-bit coefficients.
589 * - You can use no more than 5 input pixels to produce 4 output
590 * pixels. Therefore, this filter should not be used for downscaling
591 * by more than ~20% in width (because that equals more than 5/4th
592 * downscaling and thus more than 5 pixels input per 4 pixels output).
593 * - In general, bilinear filters create artifacts during downscaling
594 * (even when <20%), because one output pixel will span more than one
595 * input pixel, and thus some pixels will need edges of both neighbor
596 * pixels to interpolate the output pixel. Since you can use at most
597 * two input pixels per output pixel in bilinear scaling, this is
598 * impossible and thus downscaling by any size will create artifacts.
599      * To enable this type of scaling, set SWS_FAST_BILINEAR
600 * in SwsContext->flags.
601 */
602 /** @{ */
603 void (*hyscale_fast)(struct SwsContext *c,
604 int16_t *dst, int dstWidth,
605 const uint8_t *src, int srcW, int xInc);
606 void (*hcscale_fast)(struct SwsContext *c,
607 int16_t *dst1, int16_t *dst2, int dstWidth,
608 const uint8_t *src1, const uint8_t *src2,
609 int srcW, int xInc);
610 /** @} */
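/*
 * A plain C sketch of the luma variant (illustrative; ff_hyscale_fast_c()
 * declared below is the C fallback): 16.16 fixed-point stepping with a 7-bit
 * blend factor, producing 15-bit output samples.
 *
 * @code
 * unsigned xpos = 0;
 * for (int i = 0; i < dstWidth; i++) {
 *     unsigned xx     = xpos >> 16;            // integer source position
 *     unsigned xalpha = (xpos & 0xFFFF) >> 9;  // 7-bit fractional weight
 *     dst[i] = (src[xx] << 7) + (src[xx + 1] - src[xx]) * xalpha;
 *     xpos  += xInc;
 * }
 * @endcode
 */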
611
612 /**
613 * Scale one horizontal line of input data using a filter over the input
614 * lines, to produce one (differently sized) line of output data.
615 *
616 * @param dst pointer to destination buffer for horizontally scaled
617 * data. If the number of bits per component of one
618 * destination pixel (SwsContext->dstBpc) is <= 10, data
619 * will be 15 bpc in 16 bits (int16_t) width. Else (i.e.
620 * SwsContext->dstBpc == 16), data will be 19bpc in
621 * 32 bits (int32_t) width.
622 * @param dstW width of destination image
623 * @param src pointer to source data to be scaled. If the number of
624 * bits per component of a source pixel (SwsContext->srcBpc)
625 * is 8, this is 8bpc in 8 bits (uint8_t) width. Else
626 * (i.e. SwsContext->dstBpc > 8), this is native depth
627 * in 16 bits (uint16_t) width. In other words, for 9-bit
628 * YUV input, this is 9bpc, for 10-bit YUV input, this is
629 * 10bpc, and for 16-bit RGB or YUV, this is 16bpc.
630 * @param filter filter coefficients to be used per output pixel for
631 * scaling. This contains 14bpp filtering coefficients.
632 * Guaranteed to contain dstW * filterSize entries.
633 * @param filterPos position of the first input pixel to be used for
634 * each output pixel during scaling. Guaranteed to
635 * contain dstW entries.
636 * @param filterSize the number of input coefficients to be used (and
637 * thus the number of input pixels to be used) for
638 * creating a single output pixel. Is aligned to 4
639 * (and input coefficients thus padded with zeroes)
640 * to simplify creating SIMD code.
641 */
642 /** @{ */
643 void (*hyScale)(struct SwsContext *c, int16_t *dst, int dstW,
644 const uint8_t *src, const int16_t *filter,
645 const int32_t *filterPos, int filterSize);
646 void (*hcScale)(struct SwsContext *c, int16_t *dst, int dstW,
647 const uint8_t *src, const int16_t *filter,
648 const int32_t *filterPos, int filterSize);
649 /** @} */
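/*
 * A plain C sketch for 8-bit input and 15-bit output (illustrative; it mirrors
 * the pseudo-code in the "Horizontal and vertical filters" section above):
 *
 * @code
 * static void hScale8To15_sketch(struct SwsContext *c, int16_t *dst, int dstW,
 *                                const uint8_t *src, const int16_t *filter,
 *                                const int32_t *filterPos, int filterSize)
 * {
 *     for (int i = 0; i < dstW; i++) {
 *         int val = 0;
 *         for (int j = 0; j < filterSize; j++)
 *             val += src[filterPos[i] + j] * filter[filterSize * i + j];
 *         dst[i] = FFMIN(val >> 7, (1 << 15) - 1); // 14-bit coeffs on 8-bit input
 *     }
 * }
 * @endcode
 */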
650
651 /// Color range conversion function for luma plane if needed.
652 void (*lumConvertRange)(int16_t *dst, int width);
653 /// Color range conversion function for chroma planes if needed.
654 void (*chrConvertRange)(int16_t *dst1, int16_t *dst2, int width);
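/*
 * Conceptually (illustrative, in the 15-bit intermediate domain) the luma
 * limited-to-full-range conversion is
 *
 * @code
 * dst[i] = (dst[i] - (16 << 7)) * 255 / 219;   // MPEG -> JPEG (full) range
 * @endcode
 *
 * while the real functions use pre-computed fixed-point multipliers.
 */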
655
656 int needs_hcscale; ///< Set if there are chroma planes to be converted.
657
658 SwsDither dither;
659
660 SwsAlphaBlend alphablend;
661
662 // scratch buffer for converting packed rgb0 sources
663 // filled with a copy of the input frame + fully opaque alpha,
664 // then passed as input to further conversion
665 uint8_t *rgb0_scratch;
666 unsigned int rgb0_scratch_allocated;
667
668 // scratch buffer for converting XYZ sources
669 // filled with the input converted to rgb48
670 // then passed as input to further conversion
671 uint8_t *xyz_scratch;
672 unsigned int xyz_scratch_allocated;
673
674 unsigned int dst_slice_align;
675 atomic_int stride_unaligned_warned;
676 atomic_int data_unaligned_warned;
677 } SwsContext;
678 //FIXME check init (where 0)
679
680 SwsFunc ff_yuv2rgb_get_func_ptr(SwsContext *c);
681 int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
682 int fullRange, int brightness,
683 int contrast, int saturation);
684 void ff_yuv2rgb_init_tables_ppc(SwsContext *c, const int inv_table[4],
685 int brightness, int contrast, int saturation);
686
687 void ff_updateMMXDitherTables(SwsContext *c, int dstY);
688
689 av_cold void ff_sws_init_range_convert(SwsContext *c);
690
691 SwsFunc ff_yuv2rgb_init_x86(SwsContext *c);
692 SwsFunc ff_yuv2rgb_init_ppc(SwsContext *c);
693
694 static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
695 {
696 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
697 av_assert0(desc);
698 return desc->comp[0].depth == 16;
699 }
700
701 static av_always_inline int is32BPS(enum AVPixelFormat pix_fmt)
702 {
703 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
704 av_assert0(desc);
705 return desc->comp[0].depth == 32;
706 }
707
708 static av_always_inline int isNBPS(enum AVPixelFormat pix_fmt)
709 {
710 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
711 av_assert0(desc);
712 return desc->comp[0].depth >= 9 && desc->comp[0].depth <= 14;
713 }
714
715 static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
716 {
717 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
718 av_assert0(desc);
719 return desc->flags & AV_PIX_FMT_FLAG_BE;
720 }
721
722 static av_always_inline int isYUV(enum AVPixelFormat pix_fmt)
723 {
724 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
725 av_assert0(desc);
726 return !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2;
727 }
728
729 static av_always_inline int isPlanarYUV(enum AVPixelFormat pix_fmt)
730 {
731 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
732 av_assert0(desc);
733 return ((desc->flags & AV_PIX_FMT_FLAG_PLANAR) && isYUV(pix_fmt));
734 }
735
736 /*
737  * Identify semi-planar YUV formats. Specifically, those are YUV formats
738 * where the second and third components (U & V) are on the same plane.
739 */
740 static av_always_inline int isSemiPlanarYUV(enum AVPixelFormat pix_fmt)
741 {
742 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
743 av_assert0(desc);
744 return (isPlanarYUV(pix_fmt) && desc->comp[1].plane == desc->comp[2].plane);
745 }
746
747 static av_always_inline int isRGB(enum AVPixelFormat pix_fmt)
748 {
749 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
750 av_assert0(desc);
751 return (desc->flags & AV_PIX_FMT_FLAG_RGB);
752 }
753
754 static av_always_inline int isGray(enum AVPixelFormat pix_fmt)
755 {
756 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
757 av_assert0(desc);
758 return !(desc->flags & AV_PIX_FMT_FLAG_PAL) &&
759 !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL) &&
760 desc->nb_components <= 2 &&
761 pix_fmt != AV_PIX_FMT_MONOBLACK &&
762 pix_fmt != AV_PIX_FMT_MONOWHITE;
763 }
764
765 static av_always_inline int isRGBinInt(enum AVPixelFormat pix_fmt)
766 {
767 return pix_fmt == AV_PIX_FMT_RGB48BE ||
768 pix_fmt == AV_PIX_FMT_RGB48LE ||
769 pix_fmt == AV_PIX_FMT_RGB32 ||
770 pix_fmt == AV_PIX_FMT_RGB32_1 ||
771 pix_fmt == AV_PIX_FMT_RGB24 ||
772 pix_fmt == AV_PIX_FMT_RGB565BE ||
773 pix_fmt == AV_PIX_FMT_RGB565LE ||
774 pix_fmt == AV_PIX_FMT_RGB555BE ||
775 pix_fmt == AV_PIX_FMT_RGB555LE ||
776 pix_fmt == AV_PIX_FMT_RGB444BE ||
777 pix_fmt == AV_PIX_FMT_RGB444LE ||
778 pix_fmt == AV_PIX_FMT_RGB8 ||
779 pix_fmt == AV_PIX_FMT_RGB4 ||
780 pix_fmt == AV_PIX_FMT_RGB4_BYTE ||
781 pix_fmt == AV_PIX_FMT_RGBA64BE ||
782 pix_fmt == AV_PIX_FMT_RGBA64LE ||
783 pix_fmt == AV_PIX_FMT_MONOBLACK ||
784 pix_fmt == AV_PIX_FMT_MONOWHITE;
785 }
786
787 static av_always_inline int isBGRinInt(enum AVPixelFormat pix_fmt)
788 {
789 return pix_fmt == AV_PIX_FMT_BGR48BE ||
790 pix_fmt == AV_PIX_FMT_BGR48LE ||
791 pix_fmt == AV_PIX_FMT_BGR32 ||
792 pix_fmt == AV_PIX_FMT_BGR32_1 ||
793 pix_fmt == AV_PIX_FMT_BGR24 ||
794 pix_fmt == AV_PIX_FMT_BGR565BE ||
795 pix_fmt == AV_PIX_FMT_BGR565LE ||
796 pix_fmt == AV_PIX_FMT_BGR555BE ||
797 pix_fmt == AV_PIX_FMT_BGR555LE ||
798 pix_fmt == AV_PIX_FMT_BGR444BE ||
799 pix_fmt == AV_PIX_FMT_BGR444LE ||
800 pix_fmt == AV_PIX_FMT_BGR8 ||
801 pix_fmt == AV_PIX_FMT_BGR4 ||
802 pix_fmt == AV_PIX_FMT_BGR4_BYTE ||
803 pix_fmt == AV_PIX_FMT_BGRA64BE ||
804 pix_fmt == AV_PIX_FMT_BGRA64LE ||
805 pix_fmt == AV_PIX_FMT_MONOBLACK ||
806 pix_fmt == AV_PIX_FMT_MONOWHITE;
807 }
808
809 static av_always_inline int isBayer(enum AVPixelFormat pix_fmt)
810 {
811 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
812 av_assert0(desc);
813 return !!(desc->flags & AV_PIX_FMT_FLAG_BAYER);
814 }
815
816 static av_always_inline int isBayer16BPS(enum AVPixelFormat pix_fmt)
817 {
818 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
819 av_assert0(desc);
820 return desc->comp[1].depth == 8;
821 }
822
823 static av_always_inline int isAnyRGB(enum AVPixelFormat pix_fmt)
824 {
825 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
826 av_assert0(desc);
827 return (desc->flags & AV_PIX_FMT_FLAG_RGB) ||
828 pix_fmt == AV_PIX_FMT_MONOBLACK || pix_fmt == AV_PIX_FMT_MONOWHITE;
829 }
830
831 static av_always_inline int isFloat(enum AVPixelFormat pix_fmt)
832 {
833 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
834 av_assert0(desc);
835 return desc->flags & AV_PIX_FMT_FLAG_FLOAT;
836 }
837
838 static av_always_inline int isALPHA(enum AVPixelFormat pix_fmt)
839 {
840 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
841 av_assert0(desc);
842 if (pix_fmt == AV_PIX_FMT_PAL8)
843 return 1;
844 return desc->flags & AV_PIX_FMT_FLAG_ALPHA;
845 }
846
847 static av_always_inline int isPacked(enum AVPixelFormat pix_fmt)
848 {
849 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
850 av_assert0(desc);
851 return (desc->nb_components >= 2 && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR)) ||
852 pix_fmt == AV_PIX_FMT_PAL8 ||
853 pix_fmt == AV_PIX_FMT_MONOBLACK || pix_fmt == AV_PIX_FMT_MONOWHITE;
854 }
855
856 static av_always_inline int isPlanar(enum AVPixelFormat pix_fmt)
857 {
858 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
859 av_assert0(desc);
860 return (desc->nb_components >= 2 && (desc->flags & AV_PIX_FMT_FLAG_PLANAR));
861 }
862
863 static av_always_inline int isPackedRGB(enum AVPixelFormat pix_fmt)
864 {
865 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
866 av_assert0(desc);
867 return ((desc->flags & (AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB)) == AV_PIX_FMT_FLAG_RGB);
868 }
869
870 static av_always_inline int isPlanarRGB(enum AVPixelFormat pix_fmt)
871 {
872 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
873 av_assert0(desc);
874 return ((desc->flags & (AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB)) ==
875 (AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB));
876 }
877
878 static av_always_inline int usePal(enum AVPixelFormat pix_fmt)
879 {
880 switch (pix_fmt) {
881 case AV_PIX_FMT_PAL8:
882 case AV_PIX_FMT_BGR4_BYTE:
883 case AV_PIX_FMT_BGR8:
884 case AV_PIX_FMT_GRAY8:
885 case AV_PIX_FMT_RGB4_BYTE:
886 case AV_PIX_FMT_RGB8:
887 return 1;
888 default:
889 return 0;
890 }
891 }
892
893 /*
894  * Identify formats where the data is stored in the high bits, and the low bits are unused.
895 */
896 static av_always_inline int isDataInHighBits(enum AVPixelFormat pix_fmt)
897 {
898 int i;
899 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
900 av_assert0(desc);
901 if (desc->flags & (AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_HWACCEL))
902 return 0;
903 for (i = 0; i < desc->nb_components; i++) {
904 if (!desc->comp[i].shift)
905 return 0;
906 if ((desc->comp[i].shift + desc->comp[i].depth) & 0x7)
907 return 0;
908 }
909 return 1;
910 }
911
912 /*
913  * Identify formats where the chroma planes are swapped (CrCb order).
914 */
915 static av_always_inline int isSwappedChroma(enum AVPixelFormat pix_fmt)
916 {
917 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
918 av_assert0(desc);
919 if (!isYUV(pix_fmt))
920 return 0;
921 if ((desc->flags & AV_PIX_FMT_FLAG_ALPHA) && desc->nb_components < 4)
922 return 0;
923 if (desc->nb_components < 3)
924 return 0;
925 if (!isPlanarYUV(pix_fmt) || isSemiPlanarYUV(pix_fmt))
926 return desc->comp[1].offset > desc->comp[2].offset;
927 else
928 return desc->comp[1].plane > desc->comp[2].plane;
929 }
930
931 extern const uint64_t ff_dither4[2];
932 extern const uint64_t ff_dither8[2];
933
934 extern const uint8_t ff_dither_2x2_4[3][8];
935 extern const uint8_t ff_dither_2x2_8[3][8];
936 extern const uint8_t ff_dither_4x4_16[5][8];
937 extern const uint8_t ff_dither_8x8_32[9][8];
938 extern const uint8_t ff_dither_8x8_73[9][8];
939 extern const uint8_t ff_dither_8x8_128[9][8];
940 extern const uint8_t ff_dither_8x8_220[9][8];
941
942 extern const int32_t ff_yuv2rgb_coeffs[11][4];
943
944 extern const AVClass ff_sws_context_class;
945
946 /**
947 * Set c->convert_unscaled to an unscaled converter if one exists for the
948 * specific source and destination formats, bit depths, flags, etc.
949 */
950 void ff_get_unscaled_swscale(SwsContext *c);
951 void ff_get_unscaled_swscale_ppc(SwsContext *c);
952 void ff_get_unscaled_swscale_arm(SwsContext *c);
953 void ff_get_unscaled_swscale_aarch64(SwsContext *c);
954
955 void ff_sws_init_scale(SwsContext *c);
956
957 void ff_sws_init_input_funcs(SwsContext *c);
958 void ff_sws_init_output_funcs(SwsContext *c,
959 yuv2planar1_fn *yuv2plane1,
960 yuv2planarX_fn *yuv2planeX,
961 yuv2interleavedX_fn *yuv2nv12cX,
962 yuv2packed1_fn *yuv2packed1,
963 yuv2packed2_fn *yuv2packed2,
964 yuv2packedX_fn *yuv2packedX,
965 yuv2anyX_fn *yuv2anyX);
966 void ff_sws_init_swscale_ppc(SwsContext *c);
967 void ff_sws_init_swscale_vsx(SwsContext *c);
968 void ff_sws_init_swscale_x86(SwsContext *c);
969 void ff_sws_init_swscale_aarch64(SwsContext *c);
970 void ff_sws_init_swscale_arm(SwsContext *c);
971
972 void ff_hyscale_fast_c(SwsContext *c, int16_t *dst, int dstWidth,
973 const uint8_t *src, int srcW, int xInc);
974 void ff_hcscale_fast_c(SwsContext *c, int16_t *dst1, int16_t *dst2,
975 int dstWidth, const uint8_t *src1,
976 const uint8_t *src2, int srcW, int xInc);
977 int ff_init_hscaler_mmxext(int dstW, int xInc, uint8_t *filterCode,
978 int16_t *filter, int32_t *filterPos,
979 int numSplits);
980 void ff_hyscale_fast_mmxext(SwsContext *c, int16_t *dst,
981 int dstWidth, const uint8_t *src,
982 int srcW, int xInc);
983 void ff_hcscale_fast_mmxext(SwsContext *c, int16_t *dst1, int16_t *dst2,
984 int dstWidth, const uint8_t *src1,
985 const uint8_t *src2, int srcW, int xInc);
986
987 /**
988 * Allocate and return an SwsContext.
989 * This is like sws_getContext() but does not perform the init step, allowing
990 * the user to set additional AVOptions.
991 *
992 * @see sws_getContext()
993 */
994 struct SwsContext *sws_alloc_set_opts(int srcW, int srcH, enum AVPixelFormat srcFormat,
995 int dstW, int dstH, enum AVPixelFormat dstFormat,
996 int flags, const double *param);
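/*
 * Illustrative use (error handling shortened; the option name below is just an
 * example of setting an AVOption before initialization):
 *
 * @code
 * struct SwsContext *ctx = sws_alloc_set_opts(srcW, srcH, AV_PIX_FMT_YUV420P,
 *                                             dstW, dstH, AV_PIX_FMT_RGB24,
 *                                             SWS_BILINEAR, NULL);
 * if (!ctx)
 *     return AVERROR(ENOMEM);
 * av_opt_set_int(ctx, "threads", 2, 0);  // assumed/example option
 * if (sws_init_context(ctx, NULL, NULL) < 0)
 *     goto fail;
 * @endcode
 */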
997
998 int ff_sws_alphablendaway(SwsContext *c, const uint8_t *src[],
999 int srcStride[], int srcSliceY, int srcSliceH,
1000 uint8_t *dst[], int dstStride[]);
1001
1002 static inline void fillPlane16(uint8_t *plane, int stride, int width, int height, int y,
1003 int alpha, int bits, const int big_endian)
1004 {
1005 int i, j;
1006 uint8_t *ptr = plane + stride * y;
1007 int v = alpha ? 0xFFFF>>(16-bits) : (1<<(bits-1));
1008 for (i = 0; i < height; i++) {
1009 #define FILL(wfunc) \
1010 for (j = 0; j < width; j++) {\
1011 wfunc(ptr+2*j, v);\
1012 }
1013 if (big_endian) {
1014 FILL(AV_WB16);
1015 } else {
1016 FILL(AV_WL16);
1017 }
1018 ptr += stride;
1019 }
1020 #undef FILL
1021 }
1022
1023 static inline void fillPlane32(uint8_t *plane, int stride, int width, int height, int y,
1024 int alpha, int bits, const int big_endian, int is_float)
1025 {
1026 int i, j;
1027 uint8_t *ptr = plane + stride * y;
1028 uint32_t v;
1029 uint32_t onef32 = 0x3f800000;
1030 if (is_float)
1031 v = alpha ? onef32 : 0;
1032 else
1033 v = alpha ? 0xFFFFFFFF>>(32-bits) : (1<<(bits-1));
1034
1035 for (i = 0; i < height; i++) {
1036 #define FILL(wfunc) \
1037 for (j = 0; j < width; j++) {\
1038 wfunc(ptr+4*j, v);\
1039 }
1040 if (big_endian) {
1041 FILL(AV_WB32);
1042 } else {
1043 FILL(AV_WL32);
1044 }
1045 ptr += stride;
1046 }
1047 #undef FILL
1048 }
1049
1050
1051 #define MAX_SLICE_PLANES 4
1052
1053 /// Slice plane
1054 typedef struct SwsPlane
1055 {
1056     int available_lines;    ///< max number of lines that can be held by this plane
1057 int sliceY; ///< index of first line
1058 int sliceH; ///< number of lines
1059 uint8_t **line; ///< line buffer
1060 uint8_t **tmp; ///< Tmp line buffer used by mmx code
1061 } SwsPlane;
1062
1063 /**
1064 * Struct which defines a slice of an image to be scaled or an output for
1065 * a scaled slice.
1066  * A slice can also be used as an intermediate ring buffer for scaling steps.
1067 */
1068 typedef struct SwsSlice
1069 {
1070 int width; ///< Slice line width
1071 int h_chr_sub_sample; ///< horizontal chroma subsampling factor
1072 int v_chr_sub_sample; ///< vertical chroma subsampling factor
1073 int is_ring; ///< flag to identify if this slice is a ring buffer
1074 int should_free_lines; ///< flag to identify if there are dynamic allocated lines
1075 enum AVPixelFormat fmt; ///< planes pixel format
1076 SwsPlane plane[MAX_SLICE_PLANES]; ///< color planes
1077 } SwsSlice;
1078
1079 /**
1080 * Struct which holds all necessary data for processing a slice.
1081 * A processing step can be a color conversion or horizontal/vertical scaling.
1082 */
1083 typedef struct SwsFilterDescriptor
1084 {
1085 SwsSlice *src; ///< Source slice
1086 SwsSlice *dst; ///< Output slice
1087
1088 int alpha; ///< Flag for processing alpha channel
1089 void *instance; ///< Filter instance data
1090
1091     /// Function for processing sliceH lines of the input slice, starting from line sliceY
1092 int (*process)(SwsContext *c, struct SwsFilterDescriptor *desc, int sliceY, int sliceH);
1093 } SwsFilterDescriptor;
1094
1095 // wrap input lines in the form (src + width*i + j) into slice format (line[i][j])
1096 // relative=true means the first line is src[x][0], otherwise the first line is src[x][lum/chr Y]
1097 int ff_init_slice_from_src(SwsSlice * s, uint8_t *src[4], int stride[4], int srcW, int lumY, int lumH, int chrY, int chrH, int relative);
1098
1099 // Initialize scaler filter descriptor chain
1100 int ff_init_filters(SwsContext *c);
1101
1102 // Free all filter data
1103 int ff_free_filters(SwsContext *c);
1104
1105 /*
1106  Function for applying ring buffer logic to slice s.
1107  It checks whether the slice can hold @lum more luma lines; if yes it
1108  does nothing, otherwise it removes the @lum least used lines.
1109  The same procedure is applied to @chr chroma lines.
1110 */
1111 int ff_rotate_slice(SwsSlice *s, int lum, int chr);
1112
1113 /// initializes gamma conversion descriptor
1114 int ff_init_gamma_convert(SwsFilterDescriptor *desc, SwsSlice * src, uint16_t *table);
1115
1116 /// initializes lum pixel format conversion descriptor
1117 int ff_init_desc_fmt_convert(SwsFilterDescriptor *desc, SwsSlice * src, SwsSlice *dst, uint32_t *pal);
1118
1119 /// initializes lum horizontal scaling descriptor
1120 int ff_init_desc_hscale(SwsFilterDescriptor *desc, SwsSlice *src, SwsSlice *dst, uint16_t *filter, int * filter_pos, int filter_size, int xInc);
1121
1122 /// initializes chr pixel format conversion descriptor
1123 int ff_init_desc_cfmt_convert(SwsFilterDescriptor *desc, SwsSlice * src, SwsSlice *dst, uint32_t *pal);
1124
1125 /// initializes chr horizontal scaling descriptor
1126 int ff_init_desc_chscale(SwsFilterDescriptor *desc, SwsSlice *src, SwsSlice *dst, uint16_t *filter, int * filter_pos, int filter_size, int xInc);
1127
1128 int ff_init_desc_no_chr(SwsFilterDescriptor *desc, SwsSlice * src, SwsSlice *dst);
1129
1130 /// initializes vertical scaling descriptors
1131 int ff_init_vscale(SwsContext *c, SwsFilterDescriptor *desc, SwsSlice *src, SwsSlice *dst);
1132
1133 /// setup vertical scaler functions
1134 void ff_init_vscale_pfn(SwsContext *c, yuv2planar1_fn yuv2plane1, yuv2planarX_fn yuv2planeX,
1135 yuv2interleavedX_fn yuv2nv12cX, yuv2packed1_fn yuv2packed1, yuv2packed2_fn yuv2packed2,
1136 yuv2packedX_fn yuv2packedX, yuv2anyX_fn yuv2anyX, int use_mmx);
1137
1138 void ff_sws_slice_worker(void *priv, int jobnr, int threadnr,
1139 int nb_jobs, int nb_threads);
1140
1141 //number of extra lines to process
1142 #define MAX_LINES_AHEAD 4
1143
1144 //shuffle filter and filterPos for hyScale and hcScale filters in avx2
1145 int ff_shuffle_filter_coefficients(SwsContext *c, int* filterPos, int filterSize, int16_t *filter, int dstW);
1146 #endif /* SWSCALE_SWSCALE_INTERNAL_H */
1147