1 /*
2 * Copyright 2012 The LibYuv Project Authors. All rights reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "libyuv/convert_from_argb.h"
12
13 #include "libyuv/basic_types.h"
14 #include "libyuv/cpu_id.h"
15 #include "libyuv/planar_functions.h"
16 #include "libyuv/row.h"
17
18 #ifdef __cplusplus
19 namespace libyuv {
20 extern "C" {
21 #endif
22
23 // ARGB little endian (bgra in memory) to I444
24 LIBYUV_API
25 int ARGBToI444(const uint8_t* src_argb,
26 int src_stride_argb,
27 uint8_t* dst_y,
28 int dst_stride_y,
29 uint8_t* dst_u,
30 int dst_stride_u,
31 uint8_t* dst_v,
32 int dst_stride_v,
33 int width,
34 int height) {
35 int y;
36 void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
37 ARGBToYRow_C;
38 void (*ARGBToUV444Row)(const uint8_t* src_argb, uint8_t* dst_u,
39 uint8_t* dst_v, int width) = ARGBToUV444Row_C;
40 if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
41 return -1;
42 }
43 if (height < 0) {
44 height = -height;
45 src_argb = src_argb + (height - 1) * src_stride_argb;
46 src_stride_argb = -src_stride_argb;
47 }
48 // Coalesce rows.
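  // When the source and every destination plane are stored contiguously
  // (each stride equals its row size in bytes), the whole image can be
  // treated as one long row, so the per-row loop below runs only once.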
49 if (src_stride_argb == width * 4 && dst_stride_y == width &&
50 dst_stride_u == width && dst_stride_v == width) {
51 width *= height;
52 height = 1;
53 src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
54 }
55 #if defined(HAS_ARGBTOUV444ROW_SSSE3)
56 if (TestCpuFlag(kCpuHasSSSE3)) {
57 ARGBToUV444Row = ARGBToUV444Row_Any_SSSE3;
58 if (IS_ALIGNED(width, 16)) {
59 ARGBToUV444Row = ARGBToUV444Row_SSSE3;
60 }
61 }
62 #endif
63 #if defined(HAS_ARGBTOUV444ROW_NEON)
64 if (TestCpuFlag(kCpuHasNEON)) {
65 ARGBToUV444Row = ARGBToUV444Row_Any_NEON;
66 if (IS_ALIGNED(width, 8)) {
67 ARGBToUV444Row = ARGBToUV444Row_NEON;
68 }
69 }
70 #endif
71 #if defined(HAS_ARGBTOUV444ROW_MSA)
72 if (TestCpuFlag(kCpuHasMSA)) {
73 ARGBToUV444Row = ARGBToUV444Row_Any_MSA;
74 if (IS_ALIGNED(width, 16)) {
75 ARGBToUV444Row = ARGBToUV444Row_MSA;
76 }
77 }
78 #endif
79 #if defined(HAS_ARGBTOYROW_SSSE3)
80 if (TestCpuFlag(kCpuHasSSSE3)) {
81 ARGBToYRow = ARGBToYRow_Any_SSSE3;
82 if (IS_ALIGNED(width, 16)) {
83 ARGBToYRow = ARGBToYRow_SSSE3;
84 }
85 }
86 #endif
87 #if defined(HAS_ARGBTOYROW_AVX2)
88 if (TestCpuFlag(kCpuHasAVX2)) {
89 ARGBToYRow = ARGBToYRow_Any_AVX2;
90 if (IS_ALIGNED(width, 32)) {
91 ARGBToYRow = ARGBToYRow_AVX2;
92 }
93 }
94 #endif
95 #if defined(HAS_ARGBTOYROW_NEON)
96 if (TestCpuFlag(kCpuHasNEON)) {
97 ARGBToYRow = ARGBToYRow_Any_NEON;
98 if (IS_ALIGNED(width, 8)) {
99 ARGBToYRow = ARGBToYRow_NEON;
100 }
101 }
102 #endif
103 #if defined(HAS_ARGBTOYROW_MSA)
104 if (TestCpuFlag(kCpuHasMSA)) {
105 ARGBToYRow = ARGBToYRow_Any_MSA;
106 if (IS_ALIGNED(width, 16)) {
107 ARGBToYRow = ARGBToYRow_MSA;
108 }
109 }
110 #endif
111
112 for (y = 0; y < height; ++y) {
113 ARGBToUV444Row(src_argb, dst_u, dst_v, width);
114 ARGBToYRow(src_argb, dst_y, width);
115 src_argb += src_stride_argb;
116 dst_y += dst_stride_y;
117 dst_u += dst_stride_u;
118 dst_v += dst_stride_v;
119 }
120 return 0;
121 }
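
// Example (illustrative sketch, not part of the library): converting a small
// ARGB buffer to I444. The names w, h, argb, y, u and v are assumptions made
// for this example only; I444 keeps U and V at full resolution, so all three
// planes use the same stride as Y.
//
//   const int w = 64, h = 48;
//   std::vector<uint8_t> argb(w * h * 4), y(w * h), u(w * h), v(w * h);
//   int ret = ARGBToI444(argb.data(), w * 4, y.data(), w, u.data(), w,
//                        v.data(), w, w, h);  // Returns 0 on success.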
122
123 // ARGB little endian (bgra in memory) to I422
124 LIBYUV_API
125 int ARGBToI422(const uint8_t* src_argb,
126 int src_stride_argb,
127 uint8_t* dst_y,
128 int dst_stride_y,
129 uint8_t* dst_u,
130 int dst_stride_u,
131 uint8_t* dst_v,
132 int dst_stride_v,
133 int width,
134 int height) {
135 int y;
136 void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
137 uint8_t* dst_u, uint8_t* dst_v, int width) =
138 ARGBToUVRow_C;
139 void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
140 ARGBToYRow_C;
141 if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
142 return -1;
143 }
144 // Negative height means invert the image.
145 if (height < 0) {
146 height = -height;
147 src_argb = src_argb + (height - 1) * src_stride_argb;
148 src_stride_argb = -src_stride_argb;
149 }
150 // Coalesce rows.
151 if (src_stride_argb == width * 4 && dst_stride_y == width &&
152 dst_stride_u * 2 == width && dst_stride_v * 2 == width) {
153 width *= height;
154 height = 1;
155 src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
156 }
157 #if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
158 if (TestCpuFlag(kCpuHasSSSE3)) {
159 ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
160 ARGBToYRow = ARGBToYRow_Any_SSSE3;
161 if (IS_ALIGNED(width, 16)) {
162 ARGBToUVRow = ARGBToUVRow_SSSE3;
163 ARGBToYRow = ARGBToYRow_SSSE3;
164 }
165 }
166 #endif
167 #if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
168 if (TestCpuFlag(kCpuHasAVX2)) {
169 ARGBToUVRow = ARGBToUVRow_Any_AVX2;
170 ARGBToYRow = ARGBToYRow_Any_AVX2;
171 if (IS_ALIGNED(width, 32)) {
172 ARGBToUVRow = ARGBToUVRow_AVX2;
173 ARGBToYRow = ARGBToYRow_AVX2;
174 }
175 }
176 #endif
177 #if defined(HAS_ARGBTOYROW_NEON)
178 if (TestCpuFlag(kCpuHasNEON)) {
179 ARGBToYRow = ARGBToYRow_Any_NEON;
180 if (IS_ALIGNED(width, 8)) {
181 ARGBToYRow = ARGBToYRow_NEON;
182 }
183 }
184 #endif
185 #if defined(HAS_ARGBTOUVROW_NEON)
186 if (TestCpuFlag(kCpuHasNEON)) {
187 ARGBToUVRow = ARGBToUVRow_Any_NEON;
188 if (IS_ALIGNED(width, 16)) {
189 ARGBToUVRow = ARGBToUVRow_NEON;
190 }
191 }
192 #endif
193
194 #if defined(HAS_ARGBTOYROW_MSA)
195 if (TestCpuFlag(kCpuHasMSA)) {
196 ARGBToYRow = ARGBToYRow_Any_MSA;
197 if (IS_ALIGNED(width, 16)) {
198 ARGBToYRow = ARGBToYRow_MSA;
199 }
200 }
201 #endif
202 #if defined(HAS_ARGBTOUVROW_MSA)
203 if (TestCpuFlag(kCpuHasMSA)) {
204 ARGBToUVRow = ARGBToUVRow_Any_MSA;
205 if (IS_ALIGNED(width, 32)) {
206 ARGBToUVRow = ARGBToUVRow_MSA;
207 }
208 }
209 #endif
210
211 for (y = 0; y < height; ++y) {
212 ARGBToUVRow(src_argb, 0, dst_u, dst_v, width);
213 ARGBToYRow(src_argb, dst_y, width);
214 src_argb += src_stride_argb;
215 dst_y += dst_stride_y;
216 dst_u += dst_stride_u;
217 dst_v += dst_stride_v;
218 }
219 return 0;
220 }
221
222 LIBYUV_API
223 int ARGBToNV12(const uint8_t* src_argb,
224 int src_stride_argb,
225 uint8_t* dst_y,
226 int dst_stride_y,
227 uint8_t* dst_uv,
228 int dst_stride_uv,
229 int width,
230 int height) {
231 int y;
232 int halfwidth = (width + 1) >> 1;
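  // Chroma is subsampled 2x horizontally; round up so an odd width still
  // produces a final U/V pair.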
233 void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
234 uint8_t* dst_u, uint8_t* dst_v, int width) =
235 ARGBToUVRow_C;
236 void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
237 ARGBToYRow_C;
238 void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
239 uint8_t* dst_uv, int width) = MergeUVRow_C;
240 if (!src_argb || !dst_y || !dst_uv || width <= 0 || height == 0) {
241 return -1;
242 }
243 // Negative height means invert the image.
244 if (height < 0) {
245 height = -height;
246 src_argb = src_argb + (height - 1) * src_stride_argb;
247 src_stride_argb = -src_stride_argb;
248 }
249 #if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
250 if (TestCpuFlag(kCpuHasSSSE3)) {
251 ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
252 ARGBToYRow = ARGBToYRow_Any_SSSE3;
253 if (IS_ALIGNED(width, 16)) {
254 ARGBToUVRow = ARGBToUVRow_SSSE3;
255 ARGBToYRow = ARGBToYRow_SSSE3;
256 }
257 }
258 #endif
259 #if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
260 if (TestCpuFlag(kCpuHasAVX2)) {
261 ARGBToUVRow = ARGBToUVRow_Any_AVX2;
262 ARGBToYRow = ARGBToYRow_Any_AVX2;
263 if (IS_ALIGNED(width, 32)) {
264 ARGBToUVRow = ARGBToUVRow_AVX2;
265 ARGBToYRow = ARGBToYRow_AVX2;
266 }
267 }
268 #endif
269 #if defined(HAS_ARGBTOYROW_NEON)
270 if (TestCpuFlag(kCpuHasNEON)) {
271 ARGBToYRow = ARGBToYRow_Any_NEON;
272 if (IS_ALIGNED(width, 8)) {
273 ARGBToYRow = ARGBToYRow_NEON;
274 }
275 }
276 #endif
277 #if defined(HAS_ARGBTOUVROW_NEON)
278 if (TestCpuFlag(kCpuHasNEON)) {
279 ARGBToUVRow = ARGBToUVRow_Any_NEON;
280 if (IS_ALIGNED(width, 16)) {
281 ARGBToUVRow = ARGBToUVRow_NEON;
282 }
283 }
284 #endif
285 #if defined(HAS_ARGBTOYROW_MSA)
286 if (TestCpuFlag(kCpuHasMSA)) {
287 ARGBToYRow = ARGBToYRow_Any_MSA;
288 if (IS_ALIGNED(width, 16)) {
289 ARGBToYRow = ARGBToYRow_MSA;
290 }
291 }
292 #endif
293 #if defined(HAS_ARGBTOUVROW_MSA)
294 if (TestCpuFlag(kCpuHasMSA)) {
295 ARGBToUVRow = ARGBToUVRow_Any_MSA;
296 if (IS_ALIGNED(width, 32)) {
297 ARGBToUVRow = ARGBToUVRow_MSA;
298 }
299 }
300 #endif
301 #if defined(HAS_MERGEUVROW_SSE2)
302 if (TestCpuFlag(kCpuHasSSE2)) {
303 MergeUVRow_ = MergeUVRow_Any_SSE2;
304 if (IS_ALIGNED(halfwidth, 16)) {
305 MergeUVRow_ = MergeUVRow_SSE2;
306 }
307 }
308 #endif
309 #if defined(HAS_MERGEUVROW_AVX2)
310 if (TestCpuFlag(kCpuHasAVX2)) {
311 MergeUVRow_ = MergeUVRow_Any_AVX2;
312 if (IS_ALIGNED(halfwidth, 32)) {
313 MergeUVRow_ = MergeUVRow_AVX2;
314 }
315 }
316 #endif
317 #if defined(HAS_MERGEUVROW_NEON)
318 if (TestCpuFlag(kCpuHasNEON)) {
319 MergeUVRow_ = MergeUVRow_Any_NEON;
320 if (IS_ALIGNED(halfwidth, 16)) {
321 MergeUVRow_ = MergeUVRow_NEON;
322 }
323 }
324 #endif
325 #if defined(HAS_MERGEUVROW_MSA)
326 if (TestCpuFlag(kCpuHasMSA)) {
327 MergeUVRow_ = MergeUVRow_Any_MSA;
328 if (IS_ALIGNED(halfwidth, 16)) {
329 MergeUVRow_ = MergeUVRow_MSA;
330 }
331 }
332 #endif
333 {
334     // Allocate temporary rows of U and V.
335 align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
336 uint8_t* row_v = row_u + ((halfwidth + 31) & ~31);
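    // row_u and row_v each hold one half-width row of chroma; the sizes are
    // rounded up to a multiple of 32 bytes so row_v stays aligned and the
    // SIMD row functions have slack at the end of the row.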
337
338 for (y = 0; y < height - 1; y += 2) {
339 ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
340 MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
341 ARGBToYRow(src_argb, dst_y, width);
342 ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
343 src_argb += src_stride_argb * 2;
344 dst_y += dst_stride_y * 2;
345 dst_uv += dst_stride_uv;
346 }
347 if (height & 1) {
348 ARGBToUVRow(src_argb, 0, row_u, row_v, width);
349 MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
350 ARGBToYRow(src_argb, dst_y, width);
351 }
352 free_aligned_buffer_64(row_u);
353 }
354 return 0;
355 }
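
// Example (illustrative sketch, not part of the library): converting ARGB to
// NV12. The names w, h, argb, y and uv are assumptions for this example; the
// interleaved UV plane is half height, and for an even width its stride
// equals the width.
//
//   const int w = 64, h = 48;
//   std::vector<uint8_t> argb(w * h * 4), y(w * h), uv(w * ((h + 1) / 2));
//   ARGBToNV12(argb.data(), w * 4, y.data(), w, uv.data(), w, w, h);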
356
357 // Same as NV12 but U and V swapped.
358 LIBYUV_API
359 int ARGBToNV21(const uint8_t* src_argb,
360 int src_stride_argb,
361 uint8_t* dst_y,
362 int dst_stride_y,
363 uint8_t* dst_vu,
364 int dst_stride_vu,
365 int width,
366 int height) {
367 int y;
368 int halfwidth = (width + 1) >> 1;
369 void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
370 uint8_t* dst_u, uint8_t* dst_v, int width) =
371 ARGBToUVRow_C;
372 void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
373 ARGBToYRow_C;
374 void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
375 uint8_t* dst_vu, int width) = MergeUVRow_C;
376 if (!src_argb || !dst_y || !dst_vu || width <= 0 || height == 0) {
377 return -1;
378 }
379 // Negative height means invert the image.
380 if (height < 0) {
381 height = -height;
382 src_argb = src_argb + (height - 1) * src_stride_argb;
383 src_stride_argb = -src_stride_argb;
384 }
385 #if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
386 if (TestCpuFlag(kCpuHasSSSE3)) {
387 ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
388 ARGBToYRow = ARGBToYRow_Any_SSSE3;
389 if (IS_ALIGNED(width, 16)) {
390 ARGBToUVRow = ARGBToUVRow_SSSE3;
391 ARGBToYRow = ARGBToYRow_SSSE3;
392 }
393 }
394 #endif
395 #if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
396 if (TestCpuFlag(kCpuHasAVX2)) {
397 ARGBToUVRow = ARGBToUVRow_Any_AVX2;
398 ARGBToYRow = ARGBToYRow_Any_AVX2;
399 if (IS_ALIGNED(width, 32)) {
400 ARGBToUVRow = ARGBToUVRow_AVX2;
401 ARGBToYRow = ARGBToYRow_AVX2;
402 }
403 }
404 #endif
405 #if defined(HAS_ARGBTOYROW_NEON)
406 if (TestCpuFlag(kCpuHasNEON)) {
407 ARGBToYRow = ARGBToYRow_Any_NEON;
408 if (IS_ALIGNED(width, 8)) {
409 ARGBToYRow = ARGBToYRow_NEON;
410 }
411 }
412 #endif
413 #if defined(HAS_ARGBTOUVROW_NEON)
414 if (TestCpuFlag(kCpuHasNEON)) {
415 ARGBToUVRow = ARGBToUVRow_Any_NEON;
416 if (IS_ALIGNED(width, 16)) {
417 ARGBToUVRow = ARGBToUVRow_NEON;
418 }
419 }
420 #endif
421 #if defined(HAS_ARGBTOYROW_MSA)
422 if (TestCpuFlag(kCpuHasMSA)) {
423 ARGBToYRow = ARGBToYRow_Any_MSA;
424 if (IS_ALIGNED(width, 16)) {
425 ARGBToYRow = ARGBToYRow_MSA;
426 }
427 }
428 #endif
429 #if defined(HAS_ARGBTOUVROW_MSA)
430 if (TestCpuFlag(kCpuHasMSA)) {
431 ARGBToUVRow = ARGBToUVRow_Any_MSA;
432 if (IS_ALIGNED(width, 32)) {
433 ARGBToUVRow = ARGBToUVRow_MSA;
434 }
435 }
436 #endif
437 #if defined(HAS_MERGEUVROW_SSE2)
438 if (TestCpuFlag(kCpuHasSSE2)) {
439 MergeUVRow_ = MergeUVRow_Any_SSE2;
440 if (IS_ALIGNED(halfwidth, 16)) {
441 MergeUVRow_ = MergeUVRow_SSE2;
442 }
443 }
444 #endif
445 #if defined(HAS_MERGEUVROW_AVX2)
446 if (TestCpuFlag(kCpuHasAVX2)) {
447 MergeUVRow_ = MergeUVRow_Any_AVX2;
448 if (IS_ALIGNED(halfwidth, 32)) {
449 MergeUVRow_ = MergeUVRow_AVX2;
450 }
451 }
452 #endif
453 #if defined(HAS_MERGEUVROW_NEON)
454 if (TestCpuFlag(kCpuHasNEON)) {
455 MergeUVRow_ = MergeUVRow_Any_NEON;
456 if (IS_ALIGNED(halfwidth, 16)) {
457 MergeUVRow_ = MergeUVRow_NEON;
458 }
459 }
460 #endif
461 #if defined(HAS_MERGEUVROW_MSA)
462 if (TestCpuFlag(kCpuHasMSA)) {
463 MergeUVRow_ = MergeUVRow_Any_MSA;
464 if (IS_ALIGNED(halfwidth, 16)) {
465 MergeUVRow_ = MergeUVRow_MSA;
466 }
467 }
468 #endif
469 {
470     // Allocate temporary rows of U and V.
471 align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
472 uint8_t* row_v = row_u + ((halfwidth + 31) & ~31);
473
474 for (y = 0; y < height - 1; y += 2) {
475 ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
476 MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
477 ARGBToYRow(src_argb, dst_y, width);
478 ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
479 src_argb += src_stride_argb * 2;
480 dst_y += dst_stride_y * 2;
481 dst_vu += dst_stride_vu;
482 }
483 if (height & 1) {
484 ARGBToUVRow(src_argb, 0, row_u, row_v, width);
485 MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
486 ARGBToYRow(src_argb, dst_y, width);
487 }
488 free_aligned_buffer_64(row_u);
489 }
490 return 0;
491 }
492
493 // Convert ARGB to YUY2.
494 LIBYUV_API
495 int ARGBToYUY2(const uint8_t* src_argb,
496 int src_stride_argb,
497 uint8_t* dst_yuy2,
498 int dst_stride_yuy2,
499 int width,
500 int height) {
501 int y;
502 void (*ARGBToUVRow)(const uint8_t* src_argb, int src_stride_argb,
503 uint8_t* dst_u, uint8_t* dst_v, int width) =
504 ARGBToUVRow_C;
505 void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
506 ARGBToYRow_C;
507 void (*I422ToYUY2Row)(const uint8_t* src_y, const uint8_t* src_u,
508 const uint8_t* src_v, uint8_t* dst_yuy2, int width) =
509 I422ToYUY2Row_C;
510
511 if (!src_argb || !dst_yuy2 || width <= 0 || height == 0) {
512 return -1;
513 }
514 // Negative height means invert the image.
515 if (height < 0) {
516 height = -height;
517 dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2;
518 dst_stride_yuy2 = -dst_stride_yuy2;
519 }
520 // Coalesce rows.
521 if (src_stride_argb == width * 4 && dst_stride_yuy2 == width * 2) {
522 width *= height;
523 height = 1;
524 src_stride_argb = dst_stride_yuy2 = 0;
525 }
526 #if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
527 if (TestCpuFlag(kCpuHasSSSE3)) {
528 ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
529 ARGBToYRow = ARGBToYRow_Any_SSSE3;
530 if (IS_ALIGNED(width, 16)) {
531 ARGBToUVRow = ARGBToUVRow_SSSE3;
532 ARGBToYRow = ARGBToYRow_SSSE3;
533 }
534 }
535 #endif
536 #if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
537 if (TestCpuFlag(kCpuHasAVX2)) {
538 ARGBToUVRow = ARGBToUVRow_Any_AVX2;
539 ARGBToYRow = ARGBToYRow_Any_AVX2;
540 if (IS_ALIGNED(width, 32)) {
541 ARGBToUVRow = ARGBToUVRow_AVX2;
542 ARGBToYRow = ARGBToYRow_AVX2;
543 }
544 }
545 #endif
546 #if defined(HAS_ARGBTOYROW_NEON)
547 if (TestCpuFlag(kCpuHasNEON)) {
548 ARGBToYRow = ARGBToYRow_Any_NEON;
549 if (IS_ALIGNED(width, 8)) {
550 ARGBToYRow = ARGBToYRow_NEON;
551 }
552 }
553 #endif
554 #if defined(HAS_ARGBTOUVROW_NEON)
555 if (TestCpuFlag(kCpuHasNEON)) {
556 ARGBToUVRow = ARGBToUVRow_Any_NEON;
557 if (IS_ALIGNED(width, 16)) {
558 ARGBToUVRow = ARGBToUVRow_NEON;
559 }
560 }
561 #endif
562 #if defined(HAS_ARGBTOYROW_MSA)
563 if (TestCpuFlag(kCpuHasMSA)) {
564 ARGBToYRow = ARGBToYRow_Any_MSA;
565 if (IS_ALIGNED(width, 16)) {
566 ARGBToYRow = ARGBToYRow_MSA;
567 }
568 }
569 #endif
570 #if defined(HAS_ARGBTOUVROW_MSA)
571 if (TestCpuFlag(kCpuHasMSA)) {
572 ARGBToUVRow = ARGBToUVRow_Any_MSA;
573 if (IS_ALIGNED(width, 32)) {
574 ARGBToUVRow = ARGBToUVRow_MSA;
575 }
576 }
577 #endif
578 #if defined(HAS_I422TOYUY2ROW_SSE2)
579 if (TestCpuFlag(kCpuHasSSE2)) {
580 I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
581 if (IS_ALIGNED(width, 16)) {
582 I422ToYUY2Row = I422ToYUY2Row_SSE2;
583 }
584 }
585 #endif
586 #if defined(HAS_I422TOYUY2ROW_AVX2)
587 if (TestCpuFlag(kCpuHasAVX2)) {
588 I422ToYUY2Row = I422ToYUY2Row_Any_AVX2;
589 if (IS_ALIGNED(width, 32)) {
590 I422ToYUY2Row = I422ToYUY2Row_AVX2;
591 }
592 }
593 #endif
594 #if defined(HAS_I422TOYUY2ROW_NEON)
595 if (TestCpuFlag(kCpuHasNEON)) {
596 I422ToYUY2Row = I422ToYUY2Row_Any_NEON;
597 if (IS_ALIGNED(width, 16)) {
598 I422ToYUY2Row = I422ToYUY2Row_NEON;
599 }
600 }
601 #endif
602 #if defined(HAS_I422TOYUY2ROW_MSA)
603 if (TestCpuFlag(kCpuHasMSA)) {
604 I422ToYUY2Row = I422ToYUY2Row_Any_MSA;
605 if (IS_ALIGNED(width, 32)) {
606 I422ToYUY2Row = I422ToYUY2Row_MSA;
607 }
608 }
609 #endif
610
611 {
612     // Allocate temporary rows of Y, U and V.
613 align_buffer_64(row_y, ((width + 63) & ~63) * 2);
614 uint8_t* row_u = row_y + ((width + 63) & ~63);
615 uint8_t* row_v = row_u + ((width + 63) & ~63) / 2;
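    // row_y holds one full-width luma row; row_u and row_v hold half-width
    // chroma rows. I422ToYUY2Row interleaves them as Y0 U0 Y1 V0.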
616
617 for (y = 0; y < height; ++y) {
618 ARGBToUVRow(src_argb, 0, row_u, row_v, width);
619 ARGBToYRow(src_argb, row_y, width);
620 I422ToYUY2Row(row_y, row_u, row_v, dst_yuy2, width);
621 src_argb += src_stride_argb;
622 dst_yuy2 += dst_stride_yuy2;
623 }
624
625 free_aligned_buffer_64(row_y);
626 }
627 return 0;
628 }
629
630 // Convert ARGB to UYVY.
631 LIBYUV_API
632 int ARGBToUYVY(const uint8_t* src_argb,
633 int src_stride_argb,
634 uint8_t* dst_uyvy,
635 int dst_stride_uyvy,
636 int width,
637 int height) {
638 int y;
639 void (*ARGBToUVRow)(const uint8_t* src_argb, int src_stride_argb,
640 uint8_t* dst_u, uint8_t* dst_v, int width) =
641 ARGBToUVRow_C;
642 void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
643 ARGBToYRow_C;
644 void (*I422ToUYVYRow)(const uint8_t* src_y, const uint8_t* src_u,
645 const uint8_t* src_v, uint8_t* dst_uyvy, int width) =
646 I422ToUYVYRow_C;
647
648 if (!src_argb || !dst_uyvy || width <= 0 || height == 0) {
649 return -1;
650 }
651 // Negative height means invert the image.
652 if (height < 0) {
653 height = -height;
654 dst_uyvy = dst_uyvy + (height - 1) * dst_stride_uyvy;
655 dst_stride_uyvy = -dst_stride_uyvy;
656 }
657 // Coalesce rows.
658 if (src_stride_argb == width * 4 && dst_stride_uyvy == width * 2) {
659 width *= height;
660 height = 1;
661 src_stride_argb = dst_stride_uyvy = 0;
662 }
663 #if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
664 if (TestCpuFlag(kCpuHasSSSE3)) {
665 ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
666 ARGBToYRow = ARGBToYRow_Any_SSSE3;
667 if (IS_ALIGNED(width, 16)) {
668 ARGBToUVRow = ARGBToUVRow_SSSE3;
669 ARGBToYRow = ARGBToYRow_SSSE3;
670 }
671 }
672 #endif
673 #if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
674 if (TestCpuFlag(kCpuHasAVX2)) {
675 ARGBToUVRow = ARGBToUVRow_Any_AVX2;
676 ARGBToYRow = ARGBToYRow_Any_AVX2;
677 if (IS_ALIGNED(width, 32)) {
678 ARGBToUVRow = ARGBToUVRow_AVX2;
679 ARGBToYRow = ARGBToYRow_AVX2;
680 }
681 }
682 #endif
683 #if defined(HAS_ARGBTOYROW_NEON)
684 if (TestCpuFlag(kCpuHasNEON)) {
685 ARGBToYRow = ARGBToYRow_Any_NEON;
686 if (IS_ALIGNED(width, 8)) {
687 ARGBToYRow = ARGBToYRow_NEON;
688 }
689 }
690 #endif
691 #if defined(HAS_ARGBTOUVROW_NEON)
692 if (TestCpuFlag(kCpuHasNEON)) {
693 ARGBToUVRow = ARGBToUVRow_Any_NEON;
694 if (IS_ALIGNED(width, 16)) {
695 ARGBToUVRow = ARGBToUVRow_NEON;
696 }
697 }
698 #endif
699 #if defined(HAS_ARGBTOYROW_MSA)
700 if (TestCpuFlag(kCpuHasMSA)) {
701 ARGBToYRow = ARGBToYRow_Any_MSA;
702 if (IS_ALIGNED(width, 16)) {
703 ARGBToYRow = ARGBToYRow_MSA;
704 }
705 }
706 #endif
707 #if defined(HAS_ARGBTOUVROW_MSA)
708 if (TestCpuFlag(kCpuHasMSA)) {
709 ARGBToUVRow = ARGBToUVRow_Any_MSA;
710 if (IS_ALIGNED(width, 32)) {
711 ARGBToUVRow = ARGBToUVRow_MSA;
712 }
713 }
714 #endif
715 #if defined(HAS_I422TOUYVYROW_SSE2)
716 if (TestCpuFlag(kCpuHasSSE2)) {
717 I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
718 if (IS_ALIGNED(width, 16)) {
719 I422ToUYVYRow = I422ToUYVYRow_SSE2;
720 }
721 }
722 #endif
723 #if defined(HAS_I422TOUYVYROW_AVX2)
724 if (TestCpuFlag(kCpuHasAVX2)) {
725 I422ToUYVYRow = I422ToUYVYRow_Any_AVX2;
726 if (IS_ALIGNED(width, 32)) {
727 I422ToUYVYRow = I422ToUYVYRow_AVX2;
728 }
729 }
730 #endif
731 #if defined(HAS_I422TOUYVYROW_NEON)
732 if (TestCpuFlag(kCpuHasNEON)) {
733 I422ToUYVYRow = I422ToUYVYRow_Any_NEON;
734 if (IS_ALIGNED(width, 16)) {
735 I422ToUYVYRow = I422ToUYVYRow_NEON;
736 }
737 }
738 #endif
739 #if defined(HAS_I422TOUYVYROW_MSA)
740 if (TestCpuFlag(kCpuHasMSA)) {
741 I422ToUYVYRow = I422ToUYVYRow_Any_MSA;
742 if (IS_ALIGNED(width, 32)) {
743 I422ToUYVYRow = I422ToUYVYRow_MSA;
744 }
745 }
746 #endif
747
748 {
749     // Allocate temporary rows of Y, U and V.
750 align_buffer_64(row_y, ((width + 63) & ~63) * 2);
751 uint8_t* row_u = row_y + ((width + 63) & ~63);
752 uint8_t* row_v = row_u + ((width + 63) & ~63) / 2;
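    // Same scratch layout as ARGBToYUY2; I422ToUYVYRow interleaves the
    // samples as U0 Y0 V0 Y1.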
753
754 for (y = 0; y < height; ++y) {
755 ARGBToUVRow(src_argb, 0, row_u, row_v, width);
756 ARGBToYRow(src_argb, row_y, width);
757 I422ToUYVYRow(row_y, row_u, row_v, dst_uyvy, width);
758 src_argb += src_stride_argb;
759 dst_uyvy += dst_stride_uyvy;
760 }
761
762 free_aligned_buffer_64(row_y);
763 }
764 return 0;
765 }
766
767 // Convert ARGB to I400.
768 LIBYUV_API
769 int ARGBToI400(const uint8_t* src_argb,
770 int src_stride_argb,
771 uint8_t* dst_y,
772 int dst_stride_y,
773 int width,
774 int height) {
775 int y;
776 void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
777 ARGBToYRow_C;
778 if (!src_argb || !dst_y || width <= 0 || height == 0) {
779 return -1;
780 }
781 if (height < 0) {
782 height = -height;
783 src_argb = src_argb + (height - 1) * src_stride_argb;
784 src_stride_argb = -src_stride_argb;
785 }
786 // Coalesce rows.
787 if (src_stride_argb == width * 4 && dst_stride_y == width) {
788 width *= height;
789 height = 1;
790 src_stride_argb = dst_stride_y = 0;
791 }
792 #if defined(HAS_ARGBTOYROW_SSSE3)
793 if (TestCpuFlag(kCpuHasSSSE3)) {
794 ARGBToYRow = ARGBToYRow_Any_SSSE3;
795 if (IS_ALIGNED(width, 16)) {
796 ARGBToYRow = ARGBToYRow_SSSE3;
797 }
798 }
799 #endif
800 #if defined(HAS_ARGBTOYROW_AVX2)
801 if (TestCpuFlag(kCpuHasAVX2)) {
802 ARGBToYRow = ARGBToYRow_Any_AVX2;
803 if (IS_ALIGNED(width, 32)) {
804 ARGBToYRow = ARGBToYRow_AVX2;
805 }
806 }
807 #endif
808 #if defined(HAS_ARGBTOYROW_NEON)
809 if (TestCpuFlag(kCpuHasNEON)) {
810 ARGBToYRow = ARGBToYRow_Any_NEON;
811 if (IS_ALIGNED(width, 8)) {
812 ARGBToYRow = ARGBToYRow_NEON;
813 }
814 }
815 #endif
816 #if defined(HAS_ARGBTOYROW_MSA)
817 if (TestCpuFlag(kCpuHasMSA)) {
818 ARGBToYRow = ARGBToYRow_Any_MSA;
819 if (IS_ALIGNED(width, 16)) {
820 ARGBToYRow = ARGBToYRow_MSA;
821 }
822 }
823 #endif
824
825 for (y = 0; y < height; ++y) {
826 ARGBToYRow(src_argb, dst_y, width);
827 src_argb += src_stride_argb;
828 dst_y += dst_stride_y;
829 }
830 return 0;
831 }
832
833 // Shuffle table for converting ARGB to RGBA.
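// Each output byte i takes input byte kShuffleMaskARGBToRGBA[i], moving the
// alpha byte to the front of every 4-byte pixel and shifting B, G, R up by
// one byte.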
834 static const uvec8 kShuffleMaskARGBToRGBA = {
835 3u, 0u, 1u, 2u, 7u, 4u, 5u, 6u, 11u, 8u, 9u, 10u, 15u, 12u, 13u, 14u};
836
837 // Convert ARGB to RGBA.
838 LIBYUV_API
839 int ARGBToRGBA(const uint8_t* src_argb,
840 int src_stride_argb,
841 uint8_t* dst_rgba,
842 int dst_stride_rgba,
843 int width,
844 int height) {
845 return ARGBShuffle(src_argb, src_stride_argb, dst_rgba, dst_stride_rgba,
846 (const uint8_t*)(&kShuffleMaskARGBToRGBA), width, height);
847 }
848
849 // Convert ARGB To RGB24.
850 LIBYUV_API
851 int ARGBToRGB24(const uint8_t* src_argb,
852 int src_stride_argb,
853 uint8_t* dst_rgb24,
854 int dst_stride_rgb24,
855 int width,
856 int height) {
857 int y;
858 void (*ARGBToRGB24Row)(const uint8_t* src_argb, uint8_t* dst_rgb, int width) =
859 ARGBToRGB24Row_C;
860 if (!src_argb || !dst_rgb24 || width <= 0 || height == 0) {
861 return -1;
862 }
863 if (height < 0) {
864 height = -height;
865 src_argb = src_argb + (height - 1) * src_stride_argb;
866 src_stride_argb = -src_stride_argb;
867 }
868 // Coalesce rows.
869 if (src_stride_argb == width * 4 && dst_stride_rgb24 == width * 3) {
870 width *= height;
871 height = 1;
872 src_stride_argb = dst_stride_rgb24 = 0;
873 }
874 #if defined(HAS_ARGBTORGB24ROW_SSSE3)
875 if (TestCpuFlag(kCpuHasSSSE3)) {
876 ARGBToRGB24Row = ARGBToRGB24Row_Any_SSSE3;
877 if (IS_ALIGNED(width, 16)) {
878 ARGBToRGB24Row = ARGBToRGB24Row_SSSE3;
879 }
880 }
881 #endif
882 #if defined(HAS_ARGBTORGB24ROW_AVX2)
883 if (TestCpuFlag(kCpuHasAVX2)) {
884 ARGBToRGB24Row = ARGBToRGB24Row_Any_AVX2;
885 if (IS_ALIGNED(width, 32)) {
886 ARGBToRGB24Row = ARGBToRGB24Row_AVX2;
887 }
888 }
889 #endif
890 #if defined(HAS_ARGBTORGB24ROW_AVX512VBMI)
891 if (TestCpuFlag(kCpuHasAVX512VBMI)) {
892 ARGBToRGB24Row = ARGBToRGB24Row_Any_AVX512VBMI;
893 if (IS_ALIGNED(width, 32)) {
894 ARGBToRGB24Row = ARGBToRGB24Row_AVX512VBMI;
895 }
896 }
897 #endif
898 #if defined(HAS_ARGBTORGB24ROW_NEON)
899 if (TestCpuFlag(kCpuHasNEON)) {
900 ARGBToRGB24Row = ARGBToRGB24Row_Any_NEON;
901 if (IS_ALIGNED(width, 8)) {
902 ARGBToRGB24Row = ARGBToRGB24Row_NEON;
903 }
904 }
905 #endif
906 #if defined(HAS_ARGBTORGB24ROW_MSA)
907 if (TestCpuFlag(kCpuHasMSA)) {
908 ARGBToRGB24Row = ARGBToRGB24Row_Any_MSA;
909 if (IS_ALIGNED(width, 16)) {
910 ARGBToRGB24Row = ARGBToRGB24Row_MSA;
911 }
912 }
913 #endif
914
915 for (y = 0; y < height; ++y) {
916 ARGBToRGB24Row(src_argb, dst_rgb24, width);
917 src_argb += src_stride_argb;
918 dst_rgb24 += dst_stride_rgb24;
919 }
920 return 0;
921 }
922
923 // Convert ARGB To RAW.
924 LIBYUV_API
925 int ARGBToRAW(const uint8_t* src_argb,
926 int src_stride_argb,
927 uint8_t* dst_raw,
928 int dst_stride_raw,
929 int width,
930 int height) {
931 int y;
932 void (*ARGBToRAWRow)(const uint8_t* src_argb, uint8_t* dst_rgb, int width) =
933 ARGBToRAWRow_C;
934 if (!src_argb || !dst_raw || width <= 0 || height == 0) {
935 return -1;
936 }
937 if (height < 0) {
938 height = -height;
939 src_argb = src_argb + (height - 1) * src_stride_argb;
940 src_stride_argb = -src_stride_argb;
941 }
942 // Coalesce rows.
943 if (src_stride_argb == width * 4 && dst_stride_raw == width * 3) {
944 width *= height;
945 height = 1;
946 src_stride_argb = dst_stride_raw = 0;
947 }
948 #if defined(HAS_ARGBTORAWROW_SSSE3)
949 if (TestCpuFlag(kCpuHasSSSE3)) {
950 ARGBToRAWRow = ARGBToRAWRow_Any_SSSE3;
951 if (IS_ALIGNED(width, 16)) {
952 ARGBToRAWRow = ARGBToRAWRow_SSSE3;
953 }
954 }
955 #endif
956 #if defined(HAS_ARGBTORAWROW_AVX2)
957 if (TestCpuFlag(kCpuHasAVX2)) {
958 ARGBToRAWRow = ARGBToRAWRow_Any_AVX2;
959 if (IS_ALIGNED(width, 32)) {
960 ARGBToRAWRow = ARGBToRAWRow_AVX2;
961 }
962 }
963 #endif
964 #if defined(HAS_ARGBTORAWROW_NEON)
965 if (TestCpuFlag(kCpuHasNEON)) {
966 ARGBToRAWRow = ARGBToRAWRow_Any_NEON;
967 if (IS_ALIGNED(width, 8)) {
968 ARGBToRAWRow = ARGBToRAWRow_NEON;
969 }
970 }
971 #endif
972 #if defined(HAS_ARGBTORAWROW_MSA)
973 if (TestCpuFlag(kCpuHasMSA)) {
974 ARGBToRAWRow = ARGBToRAWRow_Any_MSA;
975 if (IS_ALIGNED(width, 16)) {
976 ARGBToRAWRow = ARGBToRAWRow_MSA;
977 }
978 }
979 #endif
980
981 for (y = 0; y < height; ++y) {
982 ARGBToRAWRow(src_argb, dst_raw, width);
983 src_argb += src_stride_argb;
984 dst_raw += dst_stride_raw;
985 }
986 return 0;
987 }
988
989 // Ordered 4x4 dither for 888 to 565. Values from 0 to 7.
990 static const uint8_t kDither565_4x4[16] = {
991 0, 4, 1, 5, 6, 2, 7, 3, 1, 5, 0, 4, 7, 3, 6, 2,
992 };
993
994 // Convert ARGB To RGB565 with 4x4 dither matrix (16 bytes).
995 LIBYUV_API
996 int ARGBToRGB565Dither(const uint8_t* src_argb,
997 int src_stride_argb,
998 uint8_t* dst_rgb565,
999 int dst_stride_rgb565,
1000 const uint8_t* dither4x4,
1001 int width,
1002 int height) {
1003 int y;
1004 void (*ARGBToRGB565DitherRow)(const uint8_t* src_argb, uint8_t* dst_rgb,
1005 const uint32_t dither4, int width) =
1006 ARGBToRGB565DitherRow_C;
1007 if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
1008 return -1;
1009 }
1010 if (height < 0) {
1011 height = -height;
1012 src_argb = src_argb + (height - 1) * src_stride_argb;
1013 src_stride_argb = -src_stride_argb;
1014 }
1015 if (!dither4x4) {
1016 dither4x4 = kDither565_4x4;
1017 }
1018 #if defined(HAS_ARGBTORGB565DITHERROW_SSE2)
1019 if (TestCpuFlag(kCpuHasSSE2)) {
1020 ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_SSE2;
1021 if (IS_ALIGNED(width, 4)) {
1022 ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_SSE2;
1023 }
1024 }
1025 #endif
1026 #if defined(HAS_ARGBTORGB565DITHERROW_AVX2)
1027 if (TestCpuFlag(kCpuHasAVX2)) {
1028 ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_AVX2;
1029 if (IS_ALIGNED(width, 8)) {
1030 ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_AVX2;
1031 }
1032 }
1033 #endif
1034 #if defined(HAS_ARGBTORGB565DITHERROW_NEON)
1035 if (TestCpuFlag(kCpuHasNEON)) {
1036 ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_NEON;
1037 if (IS_ALIGNED(width, 8)) {
1038 ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_NEON;
1039 }
1040 }
1041 #endif
1042 #if defined(HAS_ARGBTORGB565DITHERROW_MSA)
1043 if (TestCpuFlag(kCpuHasMSA)) {
1044 ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_MSA;
1045 if (IS_ALIGNED(width, 8)) {
1046 ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_MSA;
1047 }
1048 }
1049 #endif
1050
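  // Each output row y uses row (y & 3) of the 4x4 dither table, passed to the
  // row function as a packed 32-bit value.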
1051 for (y = 0; y < height; ++y) {
1052 ARGBToRGB565DitherRow(src_argb, dst_rgb565,
1053 *(const uint32_t*)(dither4x4 + ((y & 3) << 2)),
1054 width);
1055 src_argb += src_stride_argb;
1056 dst_rgb565 += dst_stride_rgb565;
1057 }
1058 return 0;
1059 }
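
// Example (illustrative sketch, not part of the library): converting ARGB to
// RGB565 with the built-in dither table by passing NULL for dither4x4. The
// names w, h, argb and rgb565 are assumptions for this example.
//
//   const int w = 64, h = 48;
//   std::vector<uint8_t> argb(w * h * 4), rgb565(w * h * 2);
//   ARGBToRGB565Dither(argb.data(), w * 4, rgb565.data(), w * 2,
//                      NULL /* use default kDither565_4x4 */, w, h);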
1060
1061 // Convert ARGB To RGB565.
1062 // TODO(fbarchard): Consider using dither function low level with zeros.
1063 LIBYUV_API
1064 int ARGBToRGB565(const uint8_t* src_argb,
1065 int src_stride_argb,
1066 uint8_t* dst_rgb565,
1067 int dst_stride_rgb565,
1068 int width,
1069 int height) {
1070 int y;
1071 void (*ARGBToRGB565Row)(const uint8_t* src_argb, uint8_t* dst_rgb,
1072 int width) = ARGBToRGB565Row_C;
1073 if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
1074 return -1;
1075 }
1076 if (height < 0) {
1077 height = -height;
1078 src_argb = src_argb + (height - 1) * src_stride_argb;
1079 src_stride_argb = -src_stride_argb;
1080 }
1081 // Coalesce rows.
1082 if (src_stride_argb == width * 4 && dst_stride_rgb565 == width * 2) {
1083 width *= height;
1084 height = 1;
1085 src_stride_argb = dst_stride_rgb565 = 0;
1086 }
1087 #if defined(HAS_ARGBTORGB565ROW_SSE2)
1088 if (TestCpuFlag(kCpuHasSSE2)) {
1089 ARGBToRGB565Row = ARGBToRGB565Row_Any_SSE2;
1090 if (IS_ALIGNED(width, 4)) {
1091 ARGBToRGB565Row = ARGBToRGB565Row_SSE2;
1092 }
1093 }
1094 #endif
1095 #if defined(HAS_ARGBTORGB565ROW_AVX2)
1096 if (TestCpuFlag(kCpuHasAVX2)) {
1097 ARGBToRGB565Row = ARGBToRGB565Row_Any_AVX2;
1098 if (IS_ALIGNED(width, 8)) {
1099 ARGBToRGB565Row = ARGBToRGB565Row_AVX2;
1100 }
1101 }
1102 #endif
1103 #if defined(HAS_ARGBTORGB565ROW_NEON)
1104 if (TestCpuFlag(kCpuHasNEON)) {
1105 ARGBToRGB565Row = ARGBToRGB565Row_Any_NEON;
1106 if (IS_ALIGNED(width, 8)) {
1107 ARGBToRGB565Row = ARGBToRGB565Row_NEON;
1108 }
1109 }
1110 #endif
1111 #if defined(HAS_ARGBTORGB565ROW_MSA)
1112 if (TestCpuFlag(kCpuHasMSA)) {
1113 ARGBToRGB565Row = ARGBToRGB565Row_Any_MSA;
1114 if (IS_ALIGNED(width, 8)) {
1115 ARGBToRGB565Row = ARGBToRGB565Row_MSA;
1116 }
1117 }
1118 #endif
1119
1120 for (y = 0; y < height; ++y) {
1121 ARGBToRGB565Row(src_argb, dst_rgb565, width);
1122 src_argb += src_stride_argb;
1123 dst_rgb565 += dst_stride_rgb565;
1124 }
1125 return 0;
1126 }
1127
1128 // Convert ARGB To ARGB1555.
1129 LIBYUV_API
1130 int ARGBToARGB1555(const uint8_t* src_argb,
1131 int src_stride_argb,
1132 uint8_t* dst_argb1555,
1133 int dst_stride_argb1555,
1134 int width,
1135 int height) {
1136 int y;
1137 void (*ARGBToARGB1555Row)(const uint8_t* src_argb, uint8_t* dst_rgb,
1138 int width) = ARGBToARGB1555Row_C;
1139 if (!src_argb || !dst_argb1555 || width <= 0 || height == 0) {
1140 return -1;
1141 }
1142 if (height < 0) {
1143 height = -height;
1144 src_argb = src_argb + (height - 1) * src_stride_argb;
1145 src_stride_argb = -src_stride_argb;
1146 }
1147 // Coalesce rows.
1148 if (src_stride_argb == width * 4 && dst_stride_argb1555 == width * 2) {
1149 width *= height;
1150 height = 1;
1151 src_stride_argb = dst_stride_argb1555 = 0;
1152 }
1153 #if defined(HAS_ARGBTOARGB1555ROW_SSE2)
1154 if (TestCpuFlag(kCpuHasSSE2)) {
1155 ARGBToARGB1555Row = ARGBToARGB1555Row_Any_SSE2;
1156 if (IS_ALIGNED(width, 4)) {
1157 ARGBToARGB1555Row = ARGBToARGB1555Row_SSE2;
1158 }
1159 }
1160 #endif
1161 #if defined(HAS_ARGBTOARGB1555ROW_AVX2)
1162 if (TestCpuFlag(kCpuHasAVX2)) {
1163 ARGBToARGB1555Row = ARGBToARGB1555Row_Any_AVX2;
1164 if (IS_ALIGNED(width, 8)) {
1165 ARGBToARGB1555Row = ARGBToARGB1555Row_AVX2;
1166 }
1167 }
1168 #endif
1169 #if defined(HAS_ARGBTOARGB1555ROW_NEON)
1170 if (TestCpuFlag(kCpuHasNEON)) {
1171 ARGBToARGB1555Row = ARGBToARGB1555Row_Any_NEON;
1172 if (IS_ALIGNED(width, 8)) {
1173 ARGBToARGB1555Row = ARGBToARGB1555Row_NEON;
1174 }
1175 }
1176 #endif
1177 #if defined(HAS_ARGBTOARGB1555ROW_MSA)
1178 if (TestCpuFlag(kCpuHasMSA)) {
1179 ARGBToARGB1555Row = ARGBToARGB1555Row_Any_MSA;
1180 if (IS_ALIGNED(width, 8)) {
1181 ARGBToARGB1555Row = ARGBToARGB1555Row_MSA;
1182 }
1183 }
1184 #endif
1185
1186 for (y = 0; y < height; ++y) {
1187 ARGBToARGB1555Row(src_argb, dst_argb1555, width);
1188 src_argb += src_stride_argb;
1189 dst_argb1555 += dst_stride_argb1555;
1190 }
1191 return 0;
1192 }
1193
1194 // Convert ARGB To ARGB4444.
1195 LIBYUV_API
1196 int ARGBToARGB4444(const uint8_t* src_argb,
1197 int src_stride_argb,
1198 uint8_t* dst_argb4444,
1199 int dst_stride_argb4444,
1200 int width,
1201 int height) {
1202 int y;
1203 void (*ARGBToARGB4444Row)(const uint8_t* src_argb, uint8_t* dst_rgb,
1204 int width) = ARGBToARGB4444Row_C;
1205 if (!src_argb || !dst_argb4444 || width <= 0 || height == 0) {
1206 return -1;
1207 }
1208 if (height < 0) {
1209 height = -height;
1210 src_argb = src_argb + (height - 1) * src_stride_argb;
1211 src_stride_argb = -src_stride_argb;
1212 }
1213 // Coalesce rows.
1214 if (src_stride_argb == width * 4 && dst_stride_argb4444 == width * 2) {
1215 width *= height;
1216 height = 1;
1217 src_stride_argb = dst_stride_argb4444 = 0;
1218 }
1219 #if defined(HAS_ARGBTOARGB4444ROW_SSE2)
1220 if (TestCpuFlag(kCpuHasSSE2)) {
1221 ARGBToARGB4444Row = ARGBToARGB4444Row_Any_SSE2;
1222 if (IS_ALIGNED(width, 4)) {
1223 ARGBToARGB4444Row = ARGBToARGB4444Row_SSE2;
1224 }
1225 }
1226 #endif
1227 #if defined(HAS_ARGBTOARGB4444ROW_AVX2)
1228 if (TestCpuFlag(kCpuHasAVX2)) {
1229 ARGBToARGB4444Row = ARGBToARGB4444Row_Any_AVX2;
1230 if (IS_ALIGNED(width, 8)) {
1231 ARGBToARGB4444Row = ARGBToARGB4444Row_AVX2;
1232 }
1233 }
1234 #endif
1235 #if defined(HAS_ARGBTOARGB4444ROW_NEON)
1236 if (TestCpuFlag(kCpuHasNEON)) {
1237 ARGBToARGB4444Row = ARGBToARGB4444Row_Any_NEON;
1238 if (IS_ALIGNED(width, 8)) {
1239 ARGBToARGB4444Row = ARGBToARGB4444Row_NEON;
1240 }
1241 }
1242 #endif
1243 #if defined(HAS_ARGBTOARGB4444ROW_MSA)
1244 if (TestCpuFlag(kCpuHasMSA)) {
1245 ARGBToARGB4444Row = ARGBToARGB4444Row_Any_MSA;
1246 if (IS_ALIGNED(width, 8)) {
1247 ARGBToARGB4444Row = ARGBToARGB4444Row_MSA;
1248 }
1249 }
1250 #endif
1251
1252 for (y = 0; y < height; ++y) {
1253 ARGBToARGB4444Row(src_argb, dst_argb4444, width);
1254 src_argb += src_stride_argb;
1255 dst_argb4444 += dst_stride_argb4444;
1256 }
1257 return 0;
1258 }
1259
1260 // Convert ABGR To AR30.
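// AR30 packs 10-bit R, G and B channels plus a 2-bit alpha into each 32-bit
// little endian pixel.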
1261 LIBYUV_API
1262 int ABGRToAR30(const uint8_t* src_abgr,
1263 int src_stride_abgr,
1264 uint8_t* dst_ar30,
1265 int dst_stride_ar30,
1266 int width,
1267 int height) {
1268 int y;
1269 void (*ABGRToAR30Row)(const uint8_t* src_abgr, uint8_t* dst_rgb, int width) =
1270 ABGRToAR30Row_C;
1271 if (!src_abgr || !dst_ar30 || width <= 0 || height == 0) {
1272 return -1;
1273 }
1274 if (height < 0) {
1275 height = -height;
1276 src_abgr = src_abgr + (height - 1) * src_stride_abgr;
1277 src_stride_abgr = -src_stride_abgr;
1278 }
1279 // Coalesce rows.
1280 if (src_stride_abgr == width * 4 && dst_stride_ar30 == width * 4) {
1281 width *= height;
1282 height = 1;
1283 src_stride_abgr = dst_stride_ar30 = 0;
1284 }
1285 #if defined(HAS_ABGRTOAR30ROW_SSSE3)
1286 if (TestCpuFlag(kCpuHasSSSE3)) {
1287 ABGRToAR30Row = ABGRToAR30Row_Any_SSSE3;
1288 if (IS_ALIGNED(width, 4)) {
1289 ABGRToAR30Row = ABGRToAR30Row_SSSE3;
1290 }
1291 }
1292 #endif
1293 #if defined(HAS_ABGRTOAR30ROW_AVX2)
1294 if (TestCpuFlag(kCpuHasAVX2)) {
1295 ABGRToAR30Row = ABGRToAR30Row_Any_AVX2;
1296 if (IS_ALIGNED(width, 8)) {
1297 ABGRToAR30Row = ABGRToAR30Row_AVX2;
1298 }
1299 }
1300 #endif
1301 for (y = 0; y < height; ++y) {
1302 ABGRToAR30Row(src_abgr, dst_ar30, width);
1303 src_abgr += src_stride_abgr;
1304 dst_ar30 += dst_stride_ar30;
1305 }
1306 return 0;
1307 }
1308
1309 // Convert ARGB To AR30.
1310 LIBYUV_API
1311 int ARGBToAR30(const uint8_t* src_argb,
1312 int src_stride_argb,
1313 uint8_t* dst_ar30,
1314 int dst_stride_ar30,
1315 int width,
1316 int height) {
1317 int y;
1318 void (*ARGBToAR30Row)(const uint8_t* src_argb, uint8_t* dst_rgb, int width) =
1319 ARGBToAR30Row_C;
1320 if (!src_argb || !dst_ar30 || width <= 0 || height == 0) {
1321 return -1;
1322 }
1323 if (height < 0) {
1324 height = -height;
1325 src_argb = src_argb + (height - 1) * src_stride_argb;
1326 src_stride_argb = -src_stride_argb;
1327 }
1328 // Coalesce rows.
1329 if (src_stride_argb == width * 4 && dst_stride_ar30 == width * 4) {
1330 width *= height;
1331 height = 1;
1332 src_stride_argb = dst_stride_ar30 = 0;
1333 }
1334 #if defined(HAS_ARGBTOAR30ROW_SSSE3)
1335 if (TestCpuFlag(kCpuHasSSSE3)) {
1336 ARGBToAR30Row = ARGBToAR30Row_Any_SSSE3;
1337 if (IS_ALIGNED(width, 4)) {
1338 ARGBToAR30Row = ARGBToAR30Row_SSSE3;
1339 }
1340 }
1341 #endif
1342 #if defined(HAS_ARGBTOAR30ROW_AVX2)
1343 if (TestCpuFlag(kCpuHasAVX2)) {
1344 ARGBToAR30Row = ARGBToAR30Row_Any_AVX2;
1345 if (IS_ALIGNED(width, 8)) {
1346 ARGBToAR30Row = ARGBToAR30Row_AVX2;
1347 }
1348 }
1349 #endif
1350 for (y = 0; y < height; ++y) {
1351 ARGBToAR30Row(src_argb, dst_ar30, width);
1352 src_argb += src_stride_argb;
1353 dst_ar30 += dst_stride_ar30;
1354 }
1355 return 0;
1356 }
1357
1358 // Convert ARGB to J420. (JPEG full range I420).
1359 LIBYUV_API
1360 int ARGBToJ420(const uint8_t* src_argb,
1361 int src_stride_argb,
1362 uint8_t* dst_yj,
1363 int dst_stride_yj,
1364 uint8_t* dst_u,
1365 int dst_stride_u,
1366 uint8_t* dst_v,
1367 int dst_stride_v,
1368 int width,
1369 int height) {
1370 int y;
1371 void (*ARGBToUVJRow)(const uint8_t* src_argb0, int src_stride_argb,
1372 uint8_t* dst_u, uint8_t* dst_v, int width) =
1373 ARGBToUVJRow_C;
1374 void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) =
1375 ARGBToYJRow_C;
1376 if (!src_argb || !dst_yj || !dst_u || !dst_v || width <= 0 || height == 0) {
1377 return -1;
1378 }
1379 // Negative height means invert the image.
1380 if (height < 0) {
1381 height = -height;
1382 src_argb = src_argb + (height - 1) * src_stride_argb;
1383 src_stride_argb = -src_stride_argb;
1384 }
1385 #if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
1386 if (TestCpuFlag(kCpuHasSSSE3)) {
1387 ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
1388 ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
1389 if (IS_ALIGNED(width, 16)) {
1390 ARGBToUVJRow = ARGBToUVJRow_SSSE3;
1391 ARGBToYJRow = ARGBToYJRow_SSSE3;
1392 }
1393 }
1394 #endif
1395 #if defined(HAS_ARGBTOYJROW_AVX2)
1396 if (TestCpuFlag(kCpuHasAVX2)) {
1397 ARGBToYJRow = ARGBToYJRow_Any_AVX2;
1398 if (IS_ALIGNED(width, 32)) {
1399 ARGBToYJRow = ARGBToYJRow_AVX2;
1400 }
1401 }
1402 #endif
1403 #if defined(HAS_ARGBTOYJROW_NEON)
1404 if (TestCpuFlag(kCpuHasNEON)) {
1405 ARGBToYJRow = ARGBToYJRow_Any_NEON;
1406 if (IS_ALIGNED(width, 8)) {
1407 ARGBToYJRow = ARGBToYJRow_NEON;
1408 }
1409 }
1410 #endif
1411 #if defined(HAS_ARGBTOUVJROW_NEON)
1412 if (TestCpuFlag(kCpuHasNEON)) {
1413 ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
1414 if (IS_ALIGNED(width, 16)) {
1415 ARGBToUVJRow = ARGBToUVJRow_NEON;
1416 }
1417 }
1418 #endif
1419 #if defined(HAS_ARGBTOYJROW_MSA)
1420 if (TestCpuFlag(kCpuHasMSA)) {
1421 ARGBToYJRow = ARGBToYJRow_Any_MSA;
1422 if (IS_ALIGNED(width, 16)) {
1423 ARGBToYJRow = ARGBToYJRow_MSA;
1424 }
1425 }
1426 #endif
1427 #if defined(HAS_ARGBTOUVJROW_MSA)
1428 if (TestCpuFlag(kCpuHasMSA)) {
1429 ARGBToUVJRow = ARGBToUVJRow_Any_MSA;
1430 if (IS_ALIGNED(width, 32)) {
1431 ARGBToUVJRow = ARGBToUVJRow_MSA;
1432 }
1433 }
1434 #endif
1435
1436 for (y = 0; y < height - 1; y += 2) {
1437 ARGBToUVJRow(src_argb, src_stride_argb, dst_u, dst_v, width);
1438 ARGBToYJRow(src_argb, dst_yj, width);
1439 ARGBToYJRow(src_argb + src_stride_argb, dst_yj + dst_stride_yj, width);
1440 src_argb += src_stride_argb * 2;
1441 dst_yj += dst_stride_yj * 2;
1442 dst_u += dst_stride_u;
1443 dst_v += dst_stride_v;
1444 }
1445 if (height & 1) {
1446 ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
1447 ARGBToYJRow(src_argb, dst_yj, width);
1448 }
1449 return 0;
1450 }
1451
1452 // Convert ARGB to J422. (JPEG full range I422).
1453 LIBYUV_API
1454 int ARGBToJ422(const uint8_t* src_argb,
1455 int src_stride_argb,
1456 uint8_t* dst_yj,
1457 int dst_stride_yj,
1458 uint8_t* dst_u,
1459 int dst_stride_u,
1460 uint8_t* dst_v,
1461 int dst_stride_v,
1462 int width,
1463 int height) {
1464 int y;
1465 void (*ARGBToUVJRow)(const uint8_t* src_argb0, int src_stride_argb,
1466 uint8_t* dst_u, uint8_t* dst_v, int width) =
1467 ARGBToUVJRow_C;
1468 void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) =
1469 ARGBToYJRow_C;
1470 if (!src_argb || !dst_yj || !dst_u || !dst_v || width <= 0 || height == 0) {
1471 return -1;
1472 }
1473 // Negative height means invert the image.
1474 if (height < 0) {
1475 height = -height;
1476 src_argb = src_argb + (height - 1) * src_stride_argb;
1477 src_stride_argb = -src_stride_argb;
1478 }
1479 // Coalesce rows.
1480 if (src_stride_argb == width * 4 && dst_stride_yj == width &&
1481 dst_stride_u * 2 == width && dst_stride_v * 2 == width) {
1482 width *= height;
1483 height = 1;
1484 src_stride_argb = dst_stride_yj = dst_stride_u = dst_stride_v = 0;
1485 }
1486 #if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
1487 if (TestCpuFlag(kCpuHasSSSE3)) {
1488 ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
1489 ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
1490 if (IS_ALIGNED(width, 16)) {
1491 ARGBToUVJRow = ARGBToUVJRow_SSSE3;
1492 ARGBToYJRow = ARGBToYJRow_SSSE3;
1493 }
1494 }
1495 #endif
1496 #if defined(HAS_ARGBTOYJROW_AVX2)
1497 if (TestCpuFlag(kCpuHasAVX2)) {
1498 ARGBToYJRow = ARGBToYJRow_Any_AVX2;
1499 if (IS_ALIGNED(width, 32)) {
1500 ARGBToYJRow = ARGBToYJRow_AVX2;
1501 }
1502 }
1503 #endif
1504 #if defined(HAS_ARGBTOYJROW_NEON)
1505 if (TestCpuFlag(kCpuHasNEON)) {
1506 ARGBToYJRow = ARGBToYJRow_Any_NEON;
1507 if (IS_ALIGNED(width, 8)) {
1508 ARGBToYJRow = ARGBToYJRow_NEON;
1509 }
1510 }
1511 #endif
1512 #if defined(HAS_ARGBTOUVJROW_NEON)
1513 if (TestCpuFlag(kCpuHasNEON)) {
1514 ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
1515 if (IS_ALIGNED(width, 16)) {
1516 ARGBToUVJRow = ARGBToUVJRow_NEON;
1517 }
1518 }
1519 #endif
1520 #if defined(HAS_ARGBTOYJROW_MSA)
1521 if (TestCpuFlag(kCpuHasMSA)) {
1522 ARGBToYJRow = ARGBToYJRow_Any_MSA;
1523 if (IS_ALIGNED(width, 16)) {
1524 ARGBToYJRow = ARGBToYJRow_MSA;
1525 }
1526 }
1527 #endif
1528 #if defined(HAS_ARGBTOUVJROW_MSA)
1529 if (TestCpuFlag(kCpuHasMSA)) {
1530 ARGBToUVJRow = ARGBToUVJRow_Any_MSA;
1531 if (IS_ALIGNED(width, 32)) {
1532 ARGBToUVJRow = ARGBToUVJRow_MSA;
1533 }
1534 }
1535 #endif
1536
1537 for (y = 0; y < height; ++y) {
1538 ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
1539 ARGBToYJRow(src_argb, dst_yj, width);
1540 src_argb += src_stride_argb;
1541 dst_yj += dst_stride_yj;
1542 dst_u += dst_stride_u;
1543 dst_v += dst_stride_v;
1544 }
1545 return 0;
1546 }
1547
1548 // Convert ARGB to J400.
1549 LIBYUV_API
1550 int ARGBToJ400(const uint8_t* src_argb,
1551 int src_stride_argb,
1552 uint8_t* dst_yj,
1553 int dst_stride_yj,
1554 int width,
1555 int height) {
1556 int y;
1557 void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) =
1558 ARGBToYJRow_C;
1559 if (!src_argb || !dst_yj || width <= 0 || height == 0) {
1560 return -1;
1561 }
1562 if (height < 0) {
1563 height = -height;
1564 src_argb = src_argb + (height - 1) * src_stride_argb;
1565 src_stride_argb = -src_stride_argb;
1566 }
1567 // Coalesce rows.
1568 if (src_stride_argb == width * 4 && dst_stride_yj == width) {
1569 width *= height;
1570 height = 1;
1571 src_stride_argb = dst_stride_yj = 0;
1572 }
1573 #if defined(HAS_ARGBTOYJROW_SSSE3)
1574 if (TestCpuFlag(kCpuHasSSSE3)) {
1575 ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
1576 if (IS_ALIGNED(width, 16)) {
1577 ARGBToYJRow = ARGBToYJRow_SSSE3;
1578 }
1579 }
1580 #endif
1581 #if defined(HAS_ARGBTOYJROW_AVX2)
1582 if (TestCpuFlag(kCpuHasAVX2)) {
1583 ARGBToYJRow = ARGBToYJRow_Any_AVX2;
1584 if (IS_ALIGNED(width, 32)) {
1585 ARGBToYJRow = ARGBToYJRow_AVX2;
1586 }
1587 }
1588 #endif
1589 #if defined(HAS_ARGBTOYJROW_NEON)
1590 if (TestCpuFlag(kCpuHasNEON)) {
1591 ARGBToYJRow = ARGBToYJRow_Any_NEON;
1592 if (IS_ALIGNED(width, 8)) {
1593 ARGBToYJRow = ARGBToYJRow_NEON;
1594 }
1595 }
1596 #endif
1597 #if defined(HAS_ARGBTOYJROW_MSA)
1598 if (TestCpuFlag(kCpuHasMSA)) {
1599 ARGBToYJRow = ARGBToYJRow_Any_MSA;
1600 if (IS_ALIGNED(width, 16)) {
1601 ARGBToYJRow = ARGBToYJRow_MSA;
1602 }
1603 }
1604 #endif
1605
1606 for (y = 0; y < height; ++y) {
1607 ARGBToYJRow(src_argb, dst_yj, width);
1608 src_argb += src_stride_argb;
1609 dst_yj += dst_stride_yj;
1610 }
1611 return 0;
1612 }
1613
1614 #ifdef __cplusplus
1615 } // extern "C"
1616 } // namespace libyuv
1617 #endif
1618