/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/scale.h"

#include <assert.h>
#include <stdlib.h>  // For malloc/free (used by YUVToARGBScaleClip).
#include <string.h>

#include "libyuv/cpu_id.h"
#include "libyuv/planar_functions.h"  // For CopyARGB
#include "libyuv/row.h"
#include "libyuv/scale_row.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

static __inline int Abs(int v) {
  return v >= 0 ? v : -v;
}

// ScaleARGB ARGB, 1/2
// This is an optimized version for scaling down an ARGB image to 1/2 of
// its original size.
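// Example for orientation (values are illustrative, not an API contract):
// a 640x480 ARGB source scaled to 320x240 reaches this path with
// dx == dy == 0x20000 (2.0 in 16.16 fixed point), so row_stride below
// advances two source rows per destination row and each row function reads
// two source pixels per destination pixel.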
static void ScaleARGBDown2(int src_width,
                           int src_height,
                           int dst_width,
                           int dst_height,
                           int src_stride,
                           int dst_stride,
                           const uint8_t* src_argb,
                           uint8_t* dst_argb,
                           int x,
                           int dx,
                           int y,
                           int dy,
                           enum FilterMode filtering) {
  int j;
  int row_stride = src_stride * (dy >> 16);
  void (*ScaleARGBRowDown2)(const uint8_t* src_argb, ptrdiff_t src_stride,
                            uint8_t* dst_argb, int dst_width) =
      filtering == kFilterNone
          ? ScaleARGBRowDown2_C
          : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_C
                                        : ScaleARGBRowDown2Box_C);
  (void)src_width;
  (void)src_height;
  (void)dx;
  assert(dx == 65536 * 2);      // Test scale factor of 2.
  assert((dy & 0x1ffff) == 0);  // Test vertical scale is multiple of 2.
  // Advance to odd row, even column.
  if (filtering == kFilterBilinear) {
    src_argb += (y >> 16) * (int64_t)src_stride + (x >> 16) * 4;
  } else {
    src_argb += (y >> 16) * (int64_t)src_stride + ((x >> 16) - 1) * 4;
  }

#if defined(HAS_SCALEARGBROWDOWN2_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ScaleARGBRowDown2 =
        filtering == kFilterNone
            ? ScaleARGBRowDown2_Any_SSE2
            : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_SSE2
                                          : ScaleARGBRowDown2Box_Any_SSE2);
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDown2 =
          filtering == kFilterNone
              ? ScaleARGBRowDown2_SSE2
              : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_SSE2
                                            : ScaleARGBRowDown2Box_SSE2);
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWN2_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBRowDown2 =
        filtering == kFilterNone
            ? ScaleARGBRowDown2_Any_NEON
            : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_NEON
                                          : ScaleARGBRowDown2Box_Any_NEON);
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBRowDown2 =
          filtering == kFilterNone
              ? ScaleARGBRowDown2_NEON
              : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_NEON
                                            : ScaleARGBRowDown2Box_NEON);
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWN2_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBRowDown2 =
        filtering == kFilterNone
            ? ScaleARGBRowDown2_Any_MSA
            : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_MSA
                                          : ScaleARGBRowDown2Box_Any_MSA);
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDown2 =
          filtering == kFilterNone
              ? ScaleARGBRowDown2_MSA
              : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_MSA
                                            : ScaleARGBRowDown2Box_MSA);
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWN2_LSX)
  if (TestCpuFlag(kCpuHasLSX)) {
    ScaleARGBRowDown2 =
        filtering == kFilterNone
            ? ScaleARGBRowDown2_Any_LSX
            : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_LSX
                                          : ScaleARGBRowDown2Box_Any_LSX);
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDown2 =
          filtering == kFilterNone
              ? ScaleARGBRowDown2_LSX
              : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_LSX
                                            : ScaleARGBRowDown2Box_LSX);
    }
  }
#endif

  if (filtering == kFilterLinear) {
    src_stride = 0;
  }
  for (j = 0; j < dst_height; ++j) {
    ScaleARGBRowDown2(src_argb, src_stride, dst_argb, dst_width);
    src_argb += row_stride;
    dst_argb += dst_stride;
  }
}

// ScaleARGB ARGB, 1/4
// This is an optimized version for scaling down an ARGB image to 1/4 of
// its original size.
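// Sketch of the two-pass approach used below (illustrative): for a 1280x720
// source scaled to 320x180 (dx == dy == 0x40000), each destination row is
// built from four source rows -- two ScaleARGBRowDown2 calls reduce them to
// two temporary rows of dst_width * 2 pixels, and a third call box-filters
// those into the final row.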
static void ScaleARGBDown4Box(int src_width,
                              int src_height,
                              int dst_width,
                              int dst_height,
                              int src_stride,
                              int dst_stride,
                              const uint8_t* src_argb,
                              uint8_t* dst_argb,
                              int x,
                              int dx,
                              int y,
                              int dy) {
  int j;
  // Allocate 2 rows of ARGB.
  const int kRowSize = (dst_width * 2 * 4 + 31) & ~31;
  align_buffer_64(row, kRowSize * 2);
  int row_stride = src_stride * (dy >> 16);
  void (*ScaleARGBRowDown2)(const uint8_t* src_argb, ptrdiff_t src_stride,
                            uint8_t* dst_argb, int dst_width) =
      ScaleARGBRowDown2Box_C;
  // Advance to odd row, even column.
  src_argb += (y >> 16) * (int64_t)src_stride + (x >> 16) * 4;
  (void)src_width;
  (void)src_height;
  (void)dx;
  assert(dx == 65536 * 4);      // Test scale factor of 4.
  assert((dy & 0x3ffff) == 0);  // Test vertical scale is multiple of 4.
#if defined(HAS_SCALEARGBROWDOWN2_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ScaleARGBRowDown2 = ScaleARGBRowDown2Box_Any_SSE2;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDown2 = ScaleARGBRowDown2Box_SSE2;
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWN2_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBRowDown2 = ScaleARGBRowDown2Box_Any_NEON;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBRowDown2 = ScaleARGBRowDown2Box_NEON;
    }
  }
#endif

  for (j = 0; j < dst_height; ++j) {
    ScaleARGBRowDown2(src_argb, src_stride, row, dst_width * 2);
    ScaleARGBRowDown2(src_argb + src_stride * 2, src_stride, row + kRowSize,
                      dst_width * 2);
    ScaleARGBRowDown2(row, kRowSize, dst_argb, dst_width);
    src_argb += row_stride;
    dst_argb += dst_stride;
  }
  free_aligned_buffer_64(row);
}

// ScaleARGB ARGB Even
// This is an optimized version for scaling down an ARGB image by an even
// integer factor (2x, 4x, 6x, ...) of its original size.
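// Example (illustrative): a 6x reduction arrives here with dx == dy ==
// 0x60000, so col_step below is 6 and row_stride advances six source rows
// per destination row; with filtering enabled the *Box row functions average
// a 2x2 block at each sampled position.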
static void ScaleARGBDownEven(int src_width,
                              int src_height,
                              int dst_width,
                              int dst_height,
                              int src_stride,
                              int dst_stride,
                              const uint8_t* src_argb,
                              uint8_t* dst_argb,
                              int x,
                              int dx,
                              int y,
                              int dy,
                              enum FilterMode filtering) {
  int j;
  int col_step = dx >> 16;
  int row_stride = (dy >> 16) * (int64_t)src_stride;
  void (*ScaleARGBRowDownEven)(const uint8_t* src_argb, ptrdiff_t src_stride,
                               int src_step, uint8_t* dst_argb, int dst_width) =
      filtering ? ScaleARGBRowDownEvenBox_C : ScaleARGBRowDownEven_C;
  (void)src_width;
  (void)src_height;
  assert(IS_ALIGNED(src_width, 2));
  assert(IS_ALIGNED(src_height, 2));
  src_argb += (y >> 16) * (int64_t)src_stride + (x >> 16) * 4;
#if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_SSE2
                                     : ScaleARGBRowDownEven_Any_SSE2;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDownEven =
          filtering ? ScaleARGBRowDownEvenBox_SSE2 : ScaleARGBRowDownEven_SSE2;
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_NEON
                                     : ScaleARGBRowDownEven_Any_NEON;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDownEven =
          filtering ? ScaleARGBRowDownEvenBox_NEON : ScaleARGBRowDownEven_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_MSA
                                     : ScaleARGBRowDownEven_Any_MSA;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDownEven =
          filtering ? ScaleARGBRowDownEvenBox_MSA : ScaleARGBRowDownEven_MSA;
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_LSX)
  if (TestCpuFlag(kCpuHasLSX)) {
    ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_LSX
                                     : ScaleARGBRowDownEven_Any_LSX;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDownEven =
          filtering ? ScaleARGBRowDownEvenBox_LSX : ScaleARGBRowDownEven_LSX;
    }
  }
#endif

  if (filtering == kFilterLinear) {
    src_stride = 0;
  }
  for (j = 0; j < dst_height; ++j) {
    ScaleARGBRowDownEven(src_argb, src_stride, col_step, dst_argb, dst_width);
    src_argb += row_stride;
    dst_argb += dst_stride;
  }
}

// Scale ARGB down with bilinear interpolation.
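// Example of the horizontal clip computed below (values are illustrative):
// with x == 0x18000 (1.5), dx == 0x30000 (3.0) and dst_width == 100, the last
// sample falls at source column 298.5, so xl/xr bound the columns actually
// read, InterpolateRow blends only that clipped span, and x is rebased to the
// clipped origin.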
static void ScaleARGBBilinearDown(int src_width,
                                  int src_height,
                                  int dst_width,
                                  int dst_height,
                                  int src_stride,
                                  int dst_stride,
                                  const uint8_t* src_argb,
                                  uint8_t* dst_argb,
                                  int x,
                                  int dx,
                                  int y,
                                  int dy,
                                  enum FilterMode filtering) {
  int j;
  void (*InterpolateRow)(uint8_t * dst_argb, const uint8_t* src_argb,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) = InterpolateRow_C;
  void (*ScaleARGBFilterCols)(uint8_t * dst_argb, const uint8_t* src_argb,
                              int dst_width, int x, int dx) =
      (src_width >= 32768) ? ScaleARGBFilterCols64_C : ScaleARGBFilterCols_C;
  int64_t xlast = x + (int64_t)(dst_width - 1) * dx;
  int64_t xl = (dx >= 0) ? x : xlast;
  int64_t xr = (dx >= 0) ? xlast : x;
  int clip_src_width;
  xl = (xl >> 16) & ~3;    // Left edge aligned.
  xr = (xr >> 16) + 1;     // Right most pixel used. Bilinear uses 2 pixels.
  xr = (xr + 1 + 3) & ~3;  // 1 beyond 4 pixel aligned right most pixel.
  if (xr > src_width) {
    xr = src_width;
  }
  clip_src_width = (int)(xr - xl) * 4;  // Width aligned to 4.
  src_argb += xl * 4;
  x -= (int)(xl << 16);
#if defined(HAS_INTERPOLATEROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    InterpolateRow = InterpolateRow_Any_SSSE3;
    if (IS_ALIGNED(clip_src_width, 16)) {
      InterpolateRow = InterpolateRow_SSSE3;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    InterpolateRow = InterpolateRow_Any_AVX2;
    if (IS_ALIGNED(clip_src_width, 32)) {
      InterpolateRow = InterpolateRow_AVX2;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    InterpolateRow = InterpolateRow_Any_NEON;
    if (IS_ALIGNED(clip_src_width, 16)) {
      InterpolateRow = InterpolateRow_NEON;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    InterpolateRow = InterpolateRow_Any_MSA;
    if (IS_ALIGNED(clip_src_width, 32)) {
      InterpolateRow = InterpolateRow_MSA;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_LSX)
  if (TestCpuFlag(kCpuHasLSX)) {
    InterpolateRow = InterpolateRow_Any_LSX;
    if (IS_ALIGNED(clip_src_width, 32)) {
      InterpolateRow = InterpolateRow_LSX;
    }
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3;
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_NEON;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_MSA;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_MSA;
    }
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_LSX)
  if (TestCpuFlag(kCpuHasLSX)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_LSX;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_LSX;
    }
  }
#endif
  // TODO(fbarchard): Consider not allocating row buffer for kFilterLinear.
  // Allocate a row of ARGB.
  {
    align_buffer_64(row, clip_src_width * 4);

    const int max_y = (src_height - 1) << 16;
    if (y > max_y) {
      y = max_y;
    }
    for (j = 0; j < dst_height; ++j) {
      int yi = y >> 16;
      const uint8_t* src = src_argb + yi * (int64_t)src_stride;
      if (filtering == kFilterLinear) {
        ScaleARGBFilterCols(dst_argb, src, dst_width, x, dx);
      } else {
        int yf = (y >> 8) & 255;
        InterpolateRow(row, src, src_stride, clip_src_width, yf);
        ScaleARGBFilterCols(dst_argb, row, dst_width, x, dx);
      }
      dst_argb += dst_stride;
      y += dy;
      if (y > max_y) {
        y = max_y;
      }
    }
    free_aligned_buffer_64(row);
  }
}

// Scale ARGB up with bilinear interpolation.
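// Note on the loop below (descriptive, not an API contract): two column-scaled
// source rows are kept in a ping-pong buffer; when y crosses into a new source
// row only one new row is column-scaled, and rowstride is negated so the
// freshest pair is always available for the vertical interpolation.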
static void ScaleARGBBilinearUp(int src_width,
                                int src_height,
                                int dst_width,
                                int dst_height,
                                int src_stride,
                                int dst_stride,
                                const uint8_t* src_argb,
                                uint8_t* dst_argb,
                                int x,
                                int dx,
                                int y,
                                int dy,
                                enum FilterMode filtering) {
  int j;
  void (*InterpolateRow)(uint8_t * dst_argb, const uint8_t* src_argb,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) = InterpolateRow_C;
  void (*ScaleARGBFilterCols)(uint8_t * dst_argb, const uint8_t* src_argb,
                              int dst_width, int x, int dx) =
      filtering ? ScaleARGBFilterCols_C : ScaleARGBCols_C;
  const int max_y = (src_height - 1) << 16;
#if defined(HAS_INTERPOLATEROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    InterpolateRow = InterpolateRow_Any_SSSE3;
    if (IS_ALIGNED(dst_width, 4)) {
      InterpolateRow = InterpolateRow_SSSE3;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    InterpolateRow = InterpolateRow_Any_AVX2;
    if (IS_ALIGNED(dst_width, 8)) {
      InterpolateRow = InterpolateRow_AVX2;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    InterpolateRow = InterpolateRow_Any_NEON;
    if (IS_ALIGNED(dst_width, 4)) {
      InterpolateRow = InterpolateRow_NEON;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    InterpolateRow = InterpolateRow_Any_MSA;
    if (IS_ALIGNED(dst_width, 8)) {
      InterpolateRow = InterpolateRow_MSA;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_LSX)
  if (TestCpuFlag(kCpuHasLSX)) {
    InterpolateRow = InterpolateRow_Any_LSX;
    if (IS_ALIGNED(dst_width, 8)) {
      InterpolateRow = InterpolateRow_LSX;
    }
  }
#endif
  if (src_width >= 32768) {
    ScaleARGBFilterCols =
        filtering ? ScaleARGBFilterCols64_C : ScaleARGBCols64_C;
  }
#if defined(HAS_SCALEARGBFILTERCOLS_SSSE3)
  if (filtering && TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3;
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_NEON)
  if (filtering && TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_NEON;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_MSA)
  if (filtering && TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_MSA;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_MSA;
    }
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_LSX)
  if (filtering && TestCpuFlag(kCpuHasLSX)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_LSX;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_LSX;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_SSE2)
  if (!filtering && TestCpuFlag(kCpuHasSSE2) && src_width < 32768) {
    ScaleARGBFilterCols = ScaleARGBCols_SSE2;
  }
#endif
#if defined(HAS_SCALEARGBCOLS_NEON)
  if (!filtering && TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBFilterCols = ScaleARGBCols_Any_NEON;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBCols_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_MSA)
  if (!filtering && TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBFilterCols = ScaleARGBCols_Any_MSA;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBFilterCols = ScaleARGBCols_MSA;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_LSX)
  if (!filtering && TestCpuFlag(kCpuHasLSX)) {
    ScaleARGBFilterCols = ScaleARGBCols_Any_LSX;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBFilterCols = ScaleARGBCols_LSX;
    }
  }
#endif
  if (!filtering && src_width * 2 == dst_width && x < 0x8000) {
    ScaleARGBFilterCols = ScaleARGBColsUp2_C;
#if defined(HAS_SCALEARGBCOLSUP2_SSE2)
    if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBColsUp2_SSE2;
    }
#endif
  }

  if (y > max_y) {
    y = max_y;
  }

  {
    int yi = y >> 16;
    const uint8_t* src = src_argb + yi * (int64_t)src_stride;

    // Allocate 2 rows of ARGB.
    const int kRowSize = (dst_width * 4 + 31) & ~31;
    align_buffer_64(row, kRowSize * 2);

    uint8_t* rowptr = row;
    int rowstride = kRowSize;
    int lasty = yi;

    ScaleARGBFilterCols(rowptr, src, dst_width, x, dx);
    if (src_height > 1) {
      src += src_stride;
    }
    ScaleARGBFilterCols(rowptr + rowstride, src, dst_width, x, dx);
    if (src_height > 2) {
      src += src_stride;
    }

    for (j = 0; j < dst_height; ++j) {
      yi = y >> 16;
      if (yi != lasty) {
        if (y > max_y) {
          y = max_y;
          yi = y >> 16;
          src = src_argb + yi * (int64_t)src_stride;
        }
        if (yi != lasty) {
          ScaleARGBFilterCols(rowptr, src, dst_width, x, dx);
          rowptr += rowstride;
          rowstride = -rowstride;
          lasty = yi;
          if ((y + 65536) < max_y) {
            src += src_stride;
          }
        }
      }
      if (filtering == kFilterLinear) {
        InterpolateRow(dst_argb, rowptr, 0, dst_width * 4, 0);
      } else {
        int yf = (y >> 8) & 255;
        InterpolateRow(dst_argb, rowptr, rowstride, dst_width * 4, yf);
      }
      dst_argb += dst_stride;
      y += dy;
    }
    free_aligned_buffer_64(row);
  }
}

#ifdef YUVSCALEUP
// Scale YUV to ARGB up with bilinear interpolation.
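// This path mirrors ScaleARGBBilinearUp, except that each newly needed source
// row is first converted to ARGB (via I422ToARGBRow into argb_row) before
// being column scaled.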
static void ScaleYUVToARGBBilinearUp(int src_width,
                                     int src_height,
                                     int dst_width,
                                     int dst_height,
                                     int src_stride_y,
                                     int src_stride_u,
                                     int src_stride_v,
                                     int dst_stride_argb,
                                     const uint8_t* src_y,
                                     const uint8_t* src_u,
                                     const uint8_t* src_v,
                                     uint8_t* dst_argb,
                                     int x,
                                     int dx,
                                     int y,
                                     int dy,
                                     enum FilterMode filtering) {
  int j;
  void (*I422ToARGBRow)(const uint8_t* y_buf, const uint8_t* u_buf,
                        const uint8_t* v_buf, uint8_t* rgb_buf, int width) =
      I422ToARGBRow_C;
#if defined(HAS_I422TOARGBROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    I422ToARGBRow = I422ToARGBRow_Any_SSSE3;
    if (IS_ALIGNED(src_width, 8)) {
      I422ToARGBRow = I422ToARGBRow_SSSE3;
    }
  }
#endif
#if defined(HAS_I422TOARGBROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    I422ToARGBRow = I422ToARGBRow_Any_AVX2;
    if (IS_ALIGNED(src_width, 16)) {
      I422ToARGBRow = I422ToARGBRow_AVX2;
    }
  }
#endif
#if defined(HAS_I422TOARGBROW_AVX512BW)
  if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) ==
      (kCpuHasAVX512BW | kCpuHasAVX512VL)) {
    I422ToARGBRow = I422ToARGBRow_Any_AVX512BW;
    if (IS_ALIGNED(src_width, 32)) {
      I422ToARGBRow = I422ToARGBRow_AVX512BW;
    }
  }
#endif
#if defined(HAS_I422TOARGBROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    I422ToARGBRow = I422ToARGBRow_Any_NEON;
    if (IS_ALIGNED(src_width, 8)) {
      I422ToARGBRow = I422ToARGBRow_NEON;
    }
  }
#endif
#if defined(HAS_I422TOARGBROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    I422ToARGBRow = I422ToARGBRow_Any_MSA;
    if (IS_ALIGNED(src_width, 8)) {
      I422ToARGBRow = I422ToARGBRow_MSA;
    }
  }
#endif
#if defined(HAS_I422TOARGBROW_LASX)
  if (TestCpuFlag(kCpuHasLASX)) {
    I422ToARGBRow = I422ToARGBRow_Any_LASX;
    if (IS_ALIGNED(src_width, 32)) {
      I422ToARGBRow = I422ToARGBRow_LASX;
    }
  }
#endif

  void (*InterpolateRow)(uint8_t * dst_argb, const uint8_t* src_argb,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) = InterpolateRow_C;
#if defined(HAS_INTERPOLATEROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    InterpolateRow = InterpolateRow_Any_SSSE3;
    if (IS_ALIGNED(dst_width, 4)) {
      InterpolateRow = InterpolateRow_SSSE3;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    InterpolateRow = InterpolateRow_Any_AVX2;
    if (IS_ALIGNED(dst_width, 8)) {
      InterpolateRow = InterpolateRow_AVX2;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    InterpolateRow = InterpolateRow_Any_NEON;
    if (IS_ALIGNED(dst_width, 4)) {
      InterpolateRow = InterpolateRow_NEON;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    InterpolateRow = InterpolateRow_Any_MSA;
    if (IS_ALIGNED(dst_width, 8)) {
      InterpolateRow = InterpolateRow_MSA;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_LSX)
  if (TestCpuFlag(kCpuHasLSX)) {
    InterpolateRow = InterpolateRow_Any_LSX;
    if (IS_ALIGNED(dst_width, 8)) {
      InterpolateRow = InterpolateRow_LSX;
    }
  }
#endif

  void (*ScaleARGBFilterCols)(uint8_t * dst_argb, const uint8_t* src_argb,
                              int dst_width, int x, int dx) =
      filtering ? ScaleARGBFilterCols_C : ScaleARGBCols_C;
  if (src_width >= 32768) {
    ScaleARGBFilterCols =
        filtering ? ScaleARGBFilterCols64_C : ScaleARGBCols64_C;
  }
#if defined(HAS_SCALEARGBFILTERCOLS_SSSE3)
  if (filtering && TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3;
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_NEON)
  if (filtering && TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_NEON;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_MSA)
  if (filtering && TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_MSA;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_MSA;
    }
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_LSX)
  if (filtering && TestCpuFlag(kCpuHasLSX)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_LSX;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_LSX;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_SSE2)
  if (!filtering && TestCpuFlag(kCpuHasSSE2) && src_width < 32768) {
    ScaleARGBFilterCols = ScaleARGBCols_SSE2;
  }
#endif
#if defined(HAS_SCALEARGBCOLS_NEON)
  if (!filtering && TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBFilterCols = ScaleARGBCols_Any_NEON;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBCols_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_MSA)
  if (!filtering && TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBFilterCols = ScaleARGBCols_Any_MSA;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBFilterCols = ScaleARGBCols_MSA;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_LSX)
  if (!filtering && TestCpuFlag(kCpuHasLSX)) {
    ScaleARGBFilterCols = ScaleARGBCols_Any_LSX;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBFilterCols = ScaleARGBCols_LSX;
    }
  }
#endif
  if (!filtering && src_width * 2 == dst_width && x < 0x8000) {
    ScaleARGBFilterCols = ScaleARGBColsUp2_C;
#if defined(HAS_SCALEARGBCOLSUP2_SSE2)
    if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBColsUp2_SSE2;
    }
#endif
  }

  const int max_y = (src_height - 1) << 16;
  if (y > max_y) {
    y = max_y;
  }
  const int kYShift = 1;  // Shift Y by 1 to convert Y plane to UV coordinate.
  int yi = y >> 16;
  int uv_yi = yi >> kYShift;
  const uint8_t* src_row_y = src_y + yi * (int64_t)src_stride_y;
  const uint8_t* src_row_u = src_u + uv_yi * (int64_t)src_stride_u;
  const uint8_t* src_row_v = src_v + uv_yi * (int64_t)src_stride_v;

  // Allocate 2 rows of ARGB.
  const int kRowSize = (dst_width * 4 + 31) & ~31;
  align_buffer_64(row, kRowSize * 2);

  // Allocate 1 row of ARGB for source conversion.
  align_buffer_64(argb_row, src_width * 4);

  uint8_t* rowptr = row;
  int rowstride = kRowSize;
  int lasty = yi;

  // TODO(fbarchard): Convert first 2 rows of YUV to ARGB.
  ScaleARGBFilterCols(rowptr, src_row_y, dst_width, x, dx);
  if (src_height > 1) {
    src_row_y += src_stride_y;
    if (yi & 1) {
      src_row_u += src_stride_u;
      src_row_v += src_stride_v;
    }
  }
  ScaleARGBFilterCols(rowptr + rowstride, src_row_y, dst_width, x, dx);
  if (src_height > 2) {
    src_row_y += src_stride_y;
    if (!(yi & 1)) {
      src_row_u += src_stride_u;
      src_row_v += src_stride_v;
    }
  }

  for (j = 0; j < dst_height; ++j) {
    yi = y >> 16;
    if (yi != lasty) {
      if (y > max_y) {
        y = max_y;
        yi = y >> 16;
        uv_yi = yi >> kYShift;
        src_row_y = src_y + yi * (int64_t)src_stride_y;
        src_row_u = src_u + uv_yi * (int64_t)src_stride_u;
        src_row_v = src_v + uv_yi * (int64_t)src_stride_v;
      }
      if (yi != lasty) {
        // TODO(fbarchard): Convert the clipped region of row.
        I422ToARGBRow(src_row_y, src_row_u, src_row_v, argb_row, src_width);
        ScaleARGBFilterCols(rowptr, argb_row, dst_width, x, dx);
        rowptr += rowstride;
        rowstride = -rowstride;
        lasty = yi;
        src_row_y += src_stride_y;
        if (yi & 1) {
          src_row_u += src_stride_u;
          src_row_v += src_stride_v;
        }
      }
    }
    if (filtering == kFilterLinear) {
      InterpolateRow(dst_argb, rowptr, 0, dst_width * 4, 0);
    } else {
      int yf = (y >> 8) & 255;
      InterpolateRow(dst_argb, rowptr, rowstride, dst_width * 4, yf);
    }
    dst_argb += dst_stride_argb;
    y += dy;
  }
  free_aligned_buffer_64(row);
  free_aligned_buffer_64(argb_row);
}
#endif

// Scale ARGB to/from any dimensions, without interpolation.
// Fixed point math is used for performance: The upper 16 bits
// of x and dx are the integer part of the source position and
// the lower 16 bits are the fractional part.
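//
// Worked example of the 16.16 layout (values chosen for illustration):
//   x  = 0x00008000  -> start at source column 0.5
//   dx = 0x00020000  -> step 2.0 source columns per destination pixel,
// so destination pixel j samples source column (x + j * dx) >> 16.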

static void ScaleARGBSimple(int src_width,
                            int src_height,
                            int dst_width,
                            int dst_height,
                            int src_stride,
                            int dst_stride,
                            const uint8_t* src_argb,
                            uint8_t* dst_argb,
                            int x,
                            int dx,
                            int y,
                            int dy) {
  int j;
  void (*ScaleARGBCols)(uint8_t * dst_argb, const uint8_t* src_argb,
                        int dst_width, int x, int dx) =
      (src_width >= 32768) ? ScaleARGBCols64_C : ScaleARGBCols_C;
  (void)src_height;
#if defined(HAS_SCALEARGBCOLS_SSE2)
  if (TestCpuFlag(kCpuHasSSE2) && src_width < 32768) {
    ScaleARGBCols = ScaleARGBCols_SSE2;
  }
#endif
#if defined(HAS_SCALEARGBCOLS_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBCols = ScaleARGBCols_Any_NEON;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBCols = ScaleARGBCols_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBCols = ScaleARGBCols_Any_MSA;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBCols = ScaleARGBCols_MSA;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_LSX)
  if (TestCpuFlag(kCpuHasLSX)) {
    ScaleARGBCols = ScaleARGBCols_Any_LSX;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBCols = ScaleARGBCols_LSX;
    }
  }
#endif
  if (src_width * 2 == dst_width && x < 0x8000) {
    ScaleARGBCols = ScaleARGBColsUp2_C;
#if defined(HAS_SCALEARGBCOLSUP2_SSE2)
    if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
      ScaleARGBCols = ScaleARGBColsUp2_SSE2;
    }
#endif
  }

  for (j = 0; j < dst_height; ++j) {
    ScaleARGBCols(dst_argb, src_argb + (y >> 16) * (int64_t)src_stride,
                  dst_width, x, dx);
    dst_argb += dst_stride;
    y += dy;
  }
}

// Scale an ARGB image.
// This function in turn calls a scaling function
// suitable for handling the desired resolutions.
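// Dispatch summary (mirrors the branches below): integer 1/2 uses
// ScaleARGBDown2; integer 1/4 with box filtering uses ScaleARGBDown4Box;
// other even integer steps use ScaleARGBDownEven; a 1:1 crop is a plain
// ARGBCopy; a horizontally unscaled image uses ScalePlaneVertical; filtered
// scaling uses ScaleARGBBilinearUp or ScaleARGBBilinearDown; everything else
// falls through to ScaleARGBSimple.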
static void ScaleARGB(const uint8_t* src,
                      int src_stride,
                      int src_width,
                      int src_height,
                      uint8_t* dst,
                      int dst_stride,
                      int dst_width,
                      int dst_height,
                      int clip_x,
                      int clip_y,
                      int clip_width,
                      int clip_height,
                      enum FilterMode filtering) {
  // Initial source x/y coordinate and step values as 16.16 fixed point.
  int x = 0;
  int y = 0;
  int dx = 0;
  int dy = 0;
  // ARGB does not support box filter yet, but allow the user to pass it.
  // Simplify filtering when possible.
  filtering = ScaleFilterReduce(src_width, src_height, dst_width, dst_height,
                                filtering);

  // Negative src_height means invert the image.
  if (src_height < 0) {
    src_height = -src_height;
    src = src + (src_height - 1) * (int64_t)src_stride;
    src_stride = -src_stride;
  }
  ScaleSlope(src_width, src_height, dst_width, dst_height, filtering, &x, &y,
             &dx, &dy);
  src_width = Abs(src_width);
  if (clip_x) {
    int64_t clipf = (int64_t)(clip_x)*dx;
    x += (clipf & 0xffff);
    src += (clipf >> 16) * 4;
    dst += clip_x * 4;
  }
  if (clip_y) {
    int64_t clipf = (int64_t)(clip_y)*dy;
    y += (clipf & 0xffff);
    src += (clipf >> 16) * (int64_t)src_stride;
    dst += clip_y * dst_stride;
  }

  // Special case for integer step values.
  if (((dx | dy) & 0xffff) == 0) {
    if (!dx || !dy) {  // 1 pixel wide and/or tall.
      filtering = kFilterNone;
    } else {
      // Optimized even scale down, i.e. 2, 4, 6, 8, 10x.
      if (!(dx & 0x10000) && !(dy & 0x10000)) {
        if (dx == 0x20000) {
          // Optimized 1/2 downsample.
          ScaleARGBDown2(src_width, src_height, clip_width, clip_height,
                         src_stride, dst_stride, src, dst, x, dx, y, dy,
                         filtering);
          return;
        }
        if (dx == 0x40000 && filtering == kFilterBox) {
          // Optimized 1/4 box downsample.
          ScaleARGBDown4Box(src_width, src_height, clip_width, clip_height,
                            src_stride, dst_stride, src, dst, x, dx, y, dy);
          return;
        }
        ScaleARGBDownEven(src_width, src_height, clip_width, clip_height,
                          src_stride, dst_stride, src, dst, x, dx, y, dy,
                          filtering);
        return;
      }
      // Optimized odd scale down, i.e. 3, 5, 7, 9x.
      if ((dx & 0x10000) && (dy & 0x10000)) {
        filtering = kFilterNone;
        if (dx == 0x10000 && dy == 0x10000) {
          // Straight copy.
          ARGBCopy(src + (y >> 16) * (int64_t)src_stride + (x >> 16) * 4,
                   src_stride, dst, dst_stride, clip_width, clip_height);
          return;
        }
      }
    }
  }
  if (dx == 0x10000 && (x & 0xffff) == 0) {
    // Arbitrary scale vertically, but unscaled horizontally.
    ScalePlaneVertical(src_height, clip_width, clip_height, src_stride,
                       dst_stride, src, dst, x, y, dy, /*bpp=*/4, filtering);
    return;
  }
  if (filtering && dy < 65536) {
    ScaleARGBBilinearUp(src_width, src_height, clip_width, clip_height,
                        src_stride, dst_stride, src, dst, x, dx, y, dy,
                        filtering);
    return;
  }
  if (filtering) {
    ScaleARGBBilinearDown(src_width, src_height, clip_width, clip_height,
                          src_stride, dst_stride, src, dst, x, dx, y, dy,
                          filtering);
    return;
  }
  ScaleARGBSimple(src_width, src_height, clip_width, clip_height, src_stride,
                  dst_stride, src, dst, x, dx, y, dy);
}

LIBYUV_API
int ARGBScaleClip(const uint8_t* src_argb,
                  int src_stride_argb,
                  int src_width,
                  int src_height,
                  uint8_t* dst_argb,
                  int dst_stride_argb,
                  int dst_width,
                  int dst_height,
                  int clip_x,
                  int clip_y,
                  int clip_width,
                  int clip_height,
                  enum FilterMode filtering) {
  if (!src_argb || src_width == 0 || src_height == 0 || !dst_argb ||
      dst_width <= 0 || dst_height <= 0 || clip_x < 0 || clip_y < 0 ||
      clip_width > 32768 || clip_height > 32768 ||
      (clip_x + clip_width) > dst_width ||
      (clip_y + clip_height) > dst_height) {
    return -1;
  }
  ScaleARGB(src_argb, src_stride_argb, src_width, src_height, dst_argb,
            dst_stride_argb, dst_width, dst_height, clip_x, clip_y, clip_width,
            clip_height, filtering);
  return 0;
}

// Scale an ARGB image.
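//
// Usage sketch (illustrative only; the buffers and sizes are assumptions):
//
//   // Halve a 640x480 ARGB image with bilinear filtering.
//   // uint8_t src[640 * 480 * 4], dst[320 * 240 * 4];
//   int ret = ARGBScale(src, 640 * 4, 640, 480,
//                       dst, 320 * 4, 320, 240, kFilterBilinear);
//   // ret is 0 on success, -1 on invalid parameters.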
LIBYUV_API
int ARGBScale(const uint8_t* src_argb,
              int src_stride_argb,
              int src_width,
              int src_height,
              uint8_t* dst_argb,
              int dst_stride_argb,
              int dst_width,
              int dst_height,
              enum FilterMode filtering) {
  if (!src_argb || src_width == 0 || src_height == 0 || src_width > 32768 ||
      src_height > 32768 || !dst_argb || dst_width <= 0 || dst_height <= 0) {
    return -1;
  }
  ScaleARGB(src_argb, src_stride_argb, src_width, src_height, dst_argb,
            dst_stride_argb, dst_width, dst_height, 0, 0, dst_width, dst_height,
            filtering);
  return 0;
}

// Scale with YUV conversion to ARGB and clipping.
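//
// Usage sketch (illustrative only; plane pointers, strides and the FOURCC
// handling are assumptions -- src_fourcc/dst_fourcc are currently unchecked,
// per the TODO below):
//
//   // Convert 320x240 I420 to ARGB, scaling into the right half of a
//   // 640x480 ARGB destination.
//   int ret = YUVToARGBScaleClip(y, 320, u, 160, v, 160, FOURCC_I420,
//                                320, 240, argb, 640 * 4, FOURCC_ARGB,
//                                640, 480, 320, 0, 320, 480, kFilterBilinear);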
LIBYUV_API
int YUVToARGBScaleClip(const uint8_t* src_y,
                       int src_stride_y,
                       const uint8_t* src_u,
                       int src_stride_u,
                       const uint8_t* src_v,
                       int src_stride_v,
                       uint32_t src_fourcc,
                       int src_width,
                       int src_height,
                       uint8_t* dst_argb,
                       int dst_stride_argb,
                       uint32_t dst_fourcc,
                       int dst_width,
                       int dst_height,
                       int clip_x,
                       int clip_y,
                       int clip_width,
                       int clip_height,
                       enum FilterMode filtering) {
  uint8_t* argb_buffer = (uint8_t*)malloc(src_width * src_height * 4);
  int r;
  (void)src_fourcc;  // TODO(fbarchard): implement and/or assert.
  (void)dst_fourcc;
  if (!argb_buffer) {
    return 1;  // Out of memory runtime error.
  }
  I420ToARGB(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
             argb_buffer, src_width * 4, src_width, src_height);

  r = ARGBScaleClip(argb_buffer, src_width * 4, src_width, src_height, dst_argb,
                    dst_stride_argb, dst_width, dst_height, clip_x, clip_y,
                    clip_width, clip_height, filtering);
  free(argb_buffer);
  return r;
}

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif