1 /*
2 * Copyright 2012 The LibYuv Project Authors. All rights reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "libyuv/convert_from_argb.h"
12
13 #include "libyuv/basic_types.h"
14 #include "libyuv/cpu_id.h"
15 #include "libyuv/planar_functions.h"
16 #include "libyuv/row.h"
17
18 #ifdef __cplusplus
19 namespace libyuv {
20 extern "C" {
21 #endif
22
23 // ARGB little endian (bgra in memory) to I444
24 LIBYUV_API
ARGBToI444(const uint8_t * src_argb,int src_stride_argb,uint8_t * dst_y,int dst_stride_y,uint8_t * dst_u,int dst_stride_u,uint8_t * dst_v,int dst_stride_v,int width,int height)25 int ARGBToI444(const uint8_t* src_argb,
26 int src_stride_argb,
27 uint8_t* dst_y,
28 int dst_stride_y,
29 uint8_t* dst_u,
30 int dst_stride_u,
31 uint8_t* dst_v,
32 int dst_stride_v,
33 int width,
34 int height) {
35 int y;
36 void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
37 ARGBToYRow_C;
38 void (*ARGBToUV444Row)(const uint8_t* src_argb, uint8_t* dst_u,
39 uint8_t* dst_v, int width) = ARGBToUV444Row_C;
40 if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
41 return -1;
42 }
43 if (height < 0) {
44 height = -height;
45 src_argb = src_argb + (height - 1) * src_stride_argb;
46 src_stride_argb = -src_stride_argb;
47 }
48 // Coalesce rows.
49 if (src_stride_argb == width * 4 && dst_stride_y == width &&
50 dst_stride_u == width && dst_stride_v == width) {
51 width *= height;
52 height = 1;
53 src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
54 }
55 #if defined(HAS_ARGBTOUV444ROW_SSSE3)
56 if (TestCpuFlag(kCpuHasSSSE3)) {
57 ARGBToUV444Row = ARGBToUV444Row_Any_SSSE3;
58 if (IS_ALIGNED(width, 16)) {
59 ARGBToUV444Row = ARGBToUV444Row_SSSE3;
60 }
61 }
62 #endif
63 #if defined(HAS_ARGBTOUV444ROW_NEON)
64 if (TestCpuFlag(kCpuHasNEON)) {
65 ARGBToUV444Row = ARGBToUV444Row_Any_NEON;
66 if (IS_ALIGNED(width, 8)) {
67 ARGBToUV444Row = ARGBToUV444Row_NEON;
68 }
69 }
70 #endif
71 #if defined(HAS_ARGBTOUV444ROW_MMI)
72 if (TestCpuFlag(kCpuHasMMI)) {
73 ARGBToUV444Row = ARGBToUV444Row_Any_MMI;
74 if (IS_ALIGNED(width, 8)) {
75 ARGBToUV444Row = ARGBToUV444Row_MMI;
76 }
77 }
78 #endif
79 #if defined(HAS_ARGBTOUV444ROW_MSA)
80 if (TestCpuFlag(kCpuHasMSA)) {
81 ARGBToUV444Row = ARGBToUV444Row_Any_MSA;
82 if (IS_ALIGNED(width, 16)) {
83 ARGBToUV444Row = ARGBToUV444Row_MSA;
84 }
85 }
86 #endif
87 #if defined(HAS_ARGBTOYROW_SSSE3)
88 if (TestCpuFlag(kCpuHasSSSE3)) {
89 ARGBToYRow = ARGBToYRow_Any_SSSE3;
90 if (IS_ALIGNED(width, 16)) {
91 ARGBToYRow = ARGBToYRow_SSSE3;
92 }
93 }
94 #endif
95 #if defined(HAS_ARGBTOYROW_AVX2)
96 if (TestCpuFlag(kCpuHasAVX2)) {
97 ARGBToYRow = ARGBToYRow_Any_AVX2;
98 if (IS_ALIGNED(width, 32)) {
99 ARGBToYRow = ARGBToYRow_AVX2;
100 }
101 }
102 #endif
103 #if defined(HAS_ARGBTOYROW_NEON)
104 if (TestCpuFlag(kCpuHasNEON)) {
105 ARGBToYRow = ARGBToYRow_Any_NEON;
106 if (IS_ALIGNED(width, 8)) {
107 ARGBToYRow = ARGBToYRow_NEON;
108 }
109 }
110 #endif
111 #if defined(HAS_ARGBTOYROW_MMI)
112 if (TestCpuFlag(kCpuHasMMI)) {
113 ARGBToYRow = ARGBToYRow_Any_MMI;
114 if (IS_ALIGNED(width, 8)) {
115 ARGBToYRow = ARGBToYRow_MMI;
116 }
117 }
118 #endif
119 #if defined(HAS_ARGBTOYROW_MSA)
120 if (TestCpuFlag(kCpuHasMSA)) {
121 ARGBToYRow = ARGBToYRow_Any_MSA;
122 if (IS_ALIGNED(width, 16)) {
123 ARGBToYRow = ARGBToYRow_MSA;
124 }
125 }
126 #endif
127
128 for (y = 0; y < height; ++y) {
129 ARGBToUV444Row(src_argb, dst_u, dst_v, width);
130 ARGBToYRow(src_argb, dst_y, width);
131 src_argb += src_stride_argb;
132 dst_y += dst_stride_y;
133 dst_u += dst_stride_u;
134 dst_v += dst_stride_v;
135 }
136 return 0;
137 }
138
139 // ARGB little endian (bgra in memory) to I422
140 LIBYUV_API
ARGBToI422(const uint8_t * src_argb,int src_stride_argb,uint8_t * dst_y,int dst_stride_y,uint8_t * dst_u,int dst_stride_u,uint8_t * dst_v,int dst_stride_v,int width,int height)141 int ARGBToI422(const uint8_t* src_argb,
142 int src_stride_argb,
143 uint8_t* dst_y,
144 int dst_stride_y,
145 uint8_t* dst_u,
146 int dst_stride_u,
147 uint8_t* dst_v,
148 int dst_stride_v,
149 int width,
150 int height) {
151 int y;
152 void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
153 uint8_t* dst_u, uint8_t* dst_v, int width) =
154 ARGBToUVRow_C;
155 void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
156 ARGBToYRow_C;
157 if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
158 return -1;
159 }
160 // Negative height means invert the image.
161 if (height < 0) {
162 height = -height;
163 src_argb = src_argb + (height - 1) * src_stride_argb;
164 src_stride_argb = -src_stride_argb;
165 }
166 // Coalesce rows.
167 if (src_stride_argb == width * 4 && dst_stride_y == width &&
168 dst_stride_u * 2 == width && dst_stride_v * 2 == width) {
169 width *= height;
170 height = 1;
171 src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
172 }
173 #if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
174 if (TestCpuFlag(kCpuHasSSSE3)) {
175 ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
176 ARGBToYRow = ARGBToYRow_Any_SSSE3;
177 if (IS_ALIGNED(width, 16)) {
178 ARGBToUVRow = ARGBToUVRow_SSSE3;
179 ARGBToYRow = ARGBToYRow_SSSE3;
180 }
181 }
182 #endif
183 #if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
184 if (TestCpuFlag(kCpuHasAVX2)) {
185 ARGBToUVRow = ARGBToUVRow_Any_AVX2;
186 ARGBToYRow = ARGBToYRow_Any_AVX2;
187 if (IS_ALIGNED(width, 32)) {
188 ARGBToUVRow = ARGBToUVRow_AVX2;
189 ARGBToYRow = ARGBToYRow_AVX2;
190 }
191 }
192 #endif
193 #if defined(HAS_ARGBTOYROW_NEON)
194 if (TestCpuFlag(kCpuHasNEON)) {
195 ARGBToYRow = ARGBToYRow_Any_NEON;
196 if (IS_ALIGNED(width, 8)) {
197 ARGBToYRow = ARGBToYRow_NEON;
198 }
199 }
200 #endif
201 #if defined(HAS_ARGBTOUVROW_NEON)
202 if (TestCpuFlag(kCpuHasNEON)) {
203 ARGBToUVRow = ARGBToUVRow_Any_NEON;
204 if (IS_ALIGNED(width, 16)) {
205 ARGBToUVRow = ARGBToUVRow_NEON;
206 }
207 }
208 #endif
209
210 #if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI)
211 if (TestCpuFlag(kCpuHasMMI)) {
212 ARGBToYRow = ARGBToYRow_Any_MMI;
213 ARGBToUVRow = ARGBToUVRow_Any_MMI;
214 if (IS_ALIGNED(width, 8)) {
215 ARGBToYRow = ARGBToYRow_MMI;
216 }
217 if (IS_ALIGNED(width, 16)) {
218 ARGBToUVRow = ARGBToUVRow_MMI;
219 }
220 }
221 #endif
222
223 #if defined(HAS_ARGBTOYROW_MSA) && defined(HAS_ARGBTOUVROW_MSA)
224 if (TestCpuFlag(kCpuHasMSA)) {
225 ARGBToYRow = ARGBToYRow_Any_MSA;
226 ARGBToUVRow = ARGBToUVRow_Any_MSA;
227 if (IS_ALIGNED(width, 16)) {
228 ARGBToYRow = ARGBToYRow_MSA;
229 }
230 if (IS_ALIGNED(width, 32)) {
231 ARGBToUVRow = ARGBToUVRow_MSA;
232 }
233 }
234 #endif
235
236 for (y = 0; y < height; ++y) {
237 ARGBToUVRow(src_argb, 0, dst_u, dst_v, width);
238 ARGBToYRow(src_argb, dst_y, width);
239 src_argb += src_stride_argb;
240 dst_y += dst_stride_y;
241 dst_u += dst_stride_u;
242 dst_v += dst_stride_v;
243 }
244 return 0;
245 }
246
// Convert ARGB to NV12 (full-resolution Y plane + half-resolution
// interleaved UV plane).
LIBYUV_API
int ARGBToNV12(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_uv,
               int dst_stride_uv,
               int width,
               int height) {
  int y;
  // Chroma is subsampled 2x horizontally; round up for odd widths.
  int halfwidth = (width + 1) >> 1;
  // Row workers default to the portable C implementations and are upgraded
  // below based on detected CPU features (later checks override earlier).
  void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
                      uint8_t* dst_uv, int width) = MergeUVRow_C;
  if (!src_argb || !dst_y || !dst_uv || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA) && defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    MergeUVRow_ = MergeUVRow_Any_MMI;
    if (IS_ALIGNED(halfwidth, 8)) {
      MergeUVRow_ = MergeUVRow_MMI;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow_ = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_MSA;
    }
  }
#endif
  {
    // Allocate temporary rows for the deinterleaved U and V samples; each
    // half-width plane is rounded up to a 32-byte multiple.
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8_t* row_v = row_u + ((halfwidth + 31) & ~31);

    // Process two source rows per iteration: one UV output row per pair
    // (ARGBToUVRow reaches the second row via src_stride_argb).
    for (y = 0; y < height - 1; y += 2) {
      ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);  // U first, then V: NV12.
      ARGBToYRow(src_argb, dst_y, width);
      ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
      src_argb += src_stride_argb * 2;
      dst_y += dst_stride_y * 2;
      dst_uv += dst_stride_uv;
    }
    if (height & 1) {
      // Odd height: the last row has no partner; stride 0 makes the UV
      // worker read the same row for both samples.
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}
397
// Same as NV12 but U and V swapped: the chroma plane is interleaved VU.
LIBYUV_API
int ARGBToNV21(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_vu,
               int dst_stride_vu,
               int width,
               int height) {
  int y;
  // Chroma is subsampled 2x horizontally; round up for odd widths.
  int halfwidth = (width + 1) >> 1;
  // Row workers default to the portable C implementations and are upgraded
  // below based on detected CPU features (later checks override earlier).
  void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
                      uint8_t* dst_vu, int width) = MergeUVRow_C;
  if (!src_argb || !dst_y || !dst_vu || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA) && defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    MergeUVRow_ = MergeUVRow_Any_MMI;
    if (IS_ALIGNED(halfwidth, 8)) {
      MergeUVRow_ = MergeUVRow_MMI;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow_ = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_MSA;
    }
  }
#endif
  {
    // Allocate temporary rows for the deinterleaved U and V samples; each
    // half-width plane is rounded up to a 32-byte multiple.
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8_t* row_v = row_u + ((halfwidth + 31) & ~31);

    // Process two source rows per iteration: one VU output row per pair
    // (ARGBToUVRow reaches the second row via src_stride_argb).
    for (y = 0; y < height - 1; y += 2) {
      ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_vu, halfwidth);  // V first, then U: NV21.
      ARGBToYRow(src_argb, dst_y, width);
      ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
      src_argb += src_stride_argb * 2;
      dst_y += dst_stride_y * 2;
      dst_vu += dst_stride_vu;
    }
    if (height & 1) {
      // Odd height: the last row has no partner; stride 0 makes the UV
      // worker read the same row for both samples.
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}
549
// Convert ABGR to NV12 (full-resolution Y plane + half-resolution
// interleaved UV plane). Mirrors ARGBToNV12 but uses the ABGR row workers.
LIBYUV_API
int ABGRToNV12(const uint8_t* src_abgr,
               int src_stride_abgr,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_uv,
               int dst_stride_uv,
               int width,
               int height) {
  int y;
  // Chroma is subsampled 2x horizontally; round up for odd widths.
  int halfwidth = (width + 1) >> 1;
  // Row workers default to the portable C implementations and are upgraded
  // below based on detected CPU features (later checks override earlier).
  void (*ABGRToUVRow)(const uint8_t* src_abgr0, int src_stride_abgr,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ABGRToUVRow_C;
  void (*ABGRToYRow)(const uint8_t* src_abgr, uint8_t* dst_y, int width) =
      ABGRToYRow_C;
  void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
                      uint8_t* dst_uv, int width) = MergeUVRow_C;
  if (!src_abgr || !dst_y || !dst_uv || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_abgr = src_abgr + (height - 1) * src_stride_abgr;
    src_stride_abgr = -src_stride_abgr;
  }
#if defined(HAS_ABGRTOYROW_SSSE3) && defined(HAS_ABGRTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ABGRToUVRow = ABGRToUVRow_Any_SSSE3;
    ABGRToYRow = ABGRToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_SSSE3;
      ABGRToYRow = ABGRToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_AVX2) && defined(HAS_ABGRTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ABGRToUVRow = ABGRToUVRow_Any_AVX2;
    ABGRToYRow = ABGRToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ABGRToUVRow = ABGRToUVRow_AVX2;
      ABGRToYRow = ABGRToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ABGRToYRow = ABGRToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ABGRToYRow = ABGRToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ABGRTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ABGRToUVRow = ABGRToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_MMI) && defined(HAS_ABGRTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ABGRToYRow = ABGRToYRow_Any_MMI;
    ABGRToUVRow = ABGRToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ABGRToYRow = ABGRToYRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_MSA) && defined(HAS_ABGRTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ABGRToYRow = ABGRToYRow_Any_MSA;
    ABGRToUVRow = ABGRToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ABGRToYRow = ABGRToYRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ABGRToUVRow = ABGRToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    MergeUVRow_ = MergeUVRow_Any_MMI;
    if (IS_ALIGNED(halfwidth, 8)) {
      MergeUVRow_ = MergeUVRow_MMI;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow_ = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_MSA;
    }
  }
#endif
  {
    // Allocate temporary rows for the deinterleaved U and V samples; each
    // half-width plane is rounded up to a 32-byte multiple.
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8_t* row_v = row_u + ((halfwidth + 31) & ~31);

    // Process two source rows per iteration: one UV output row per pair
    // (ABGRToUVRow reaches the second row via src_stride_abgr).
    for (y = 0; y < height - 1; y += 2) {
      ABGRToUVRow(src_abgr, src_stride_abgr, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);  // U first, then V: NV12.
      ABGRToYRow(src_abgr, dst_y, width);
      ABGRToYRow(src_abgr + src_stride_abgr, dst_y + dst_stride_y, width);
      src_abgr += src_stride_abgr * 2;
      dst_y += dst_stride_y * 2;
      dst_uv += dst_stride_uv;
    }
    if (height & 1) {
      // Odd height: the last row has no partner; stride 0 makes the UV
      // worker read the same row for both samples.
      ABGRToUVRow(src_abgr, 0, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ABGRToYRow(src_abgr, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}
700
// Same as NV12 but U and V swapped: the chroma plane is interleaved VU.
LIBYUV_API
int ABGRToNV21(const uint8_t* src_abgr,
               int src_stride_abgr,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_vu,
               int dst_stride_vu,
               int width,
               int height) {
  int y;
  // Chroma is subsampled 2x horizontally; round up for odd widths.
  int halfwidth = (width + 1) >> 1;
  // Row workers default to the portable C implementations and are upgraded
  // below based on detected CPU features (later checks override earlier).
  void (*ABGRToUVRow)(const uint8_t* src_abgr0, int src_stride_abgr,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ABGRToUVRow_C;
  void (*ABGRToYRow)(const uint8_t* src_abgr, uint8_t* dst_y, int width) =
      ABGRToYRow_C;
  void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
                      uint8_t* dst_vu, int width) = MergeUVRow_C;
  if (!src_abgr || !dst_y || !dst_vu || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_abgr = src_abgr + (height - 1) * src_stride_abgr;
    src_stride_abgr = -src_stride_abgr;
  }
#if defined(HAS_ABGRTOYROW_SSSE3) && defined(HAS_ABGRTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ABGRToUVRow = ABGRToUVRow_Any_SSSE3;
    ABGRToYRow = ABGRToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_SSSE3;
      ABGRToYRow = ABGRToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_AVX2) && defined(HAS_ABGRTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ABGRToUVRow = ABGRToUVRow_Any_AVX2;
    ABGRToYRow = ABGRToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ABGRToUVRow = ABGRToUVRow_AVX2;
      ABGRToYRow = ABGRToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ABGRToYRow = ABGRToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ABGRToYRow = ABGRToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ABGRTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ABGRToUVRow = ABGRToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_MMI) && defined(HAS_ABGRTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ABGRToYRow = ABGRToYRow_Any_MMI;
    ABGRToUVRow = ABGRToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ABGRToYRow = ABGRToYRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_MSA) && defined(HAS_ABGRTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ABGRToYRow = ABGRToYRow_Any_MSA;
    ABGRToUVRow = ABGRToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ABGRToYRow = ABGRToYRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ABGRToUVRow = ABGRToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    MergeUVRow_ = MergeUVRow_Any_MMI;
    if (IS_ALIGNED(halfwidth, 8)) {
      MergeUVRow_ = MergeUVRow_MMI;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow_ = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_MSA;
    }
  }
#endif
  {
    // Allocate temporary rows for the deinterleaved U and V samples; each
    // half-width plane is rounded up to a 32-byte multiple.
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8_t* row_v = row_u + ((halfwidth + 31) & ~31);

    // Process two source rows per iteration: one VU output row per pair
    // (ABGRToUVRow reaches the second row via src_stride_abgr).
    for (y = 0; y < height - 1; y += 2) {
      ABGRToUVRow(src_abgr, src_stride_abgr, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_vu, halfwidth);  // V first, then U: NV21.
      ABGRToYRow(src_abgr, dst_y, width);
      ABGRToYRow(src_abgr + src_stride_abgr, dst_y + dst_stride_y, width);
      src_abgr += src_stride_abgr * 2;
      dst_y += dst_stride_y * 2;
      dst_vu += dst_stride_vu;
    }
    if (height & 1) {
      // Odd height: the last row has no partner; stride 0 makes the UV
      // worker read the same row for both samples.
      ABGRToUVRow(src_abgr, 0, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
      ABGRToYRow(src_abgr, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}
852
// Convert ARGB to YUY2 (packed 4:2:2).
LIBYUV_API
int ARGBToYUY2(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_yuy2,
               int dst_stride_yuy2,
               int width,
               int height) {
  int y;
  // Row workers default to the portable C implementations and are upgraded
  // below based on detected CPU features (later checks override earlier).
  void (*ARGBToUVRow)(const uint8_t* src_argb, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  void (*I422ToYUY2Row)(const uint8_t* src_y, const uint8_t* src_u,
                        const uint8_t* src_v, uint8_t* dst_yuy2, int width) =
      I422ToYUY2Row_C;

  if (!src_argb || !dst_yuy2 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image. Note the inversion is applied
  // on the destination here, unlike the planar converters above.
  if (height < 0) {
    height = -height;
    dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2;
    dst_stride_yuy2 = -dst_stride_yuy2;
  }
  // Coalesce rows: YUY2 packs 2 bytes per pixel, hence width * 2.
  if (src_stride_argb == width * 4 && dst_stride_yuy2 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yuy2 = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA) && defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      I422ToYUY2Row = I422ToYUY2Row_SSE2;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      I422ToYUY2Row = I422ToYUY2Row_AVX2;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      I422ToYUY2Row = I422ToYUY2Row_NEON;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      I422ToYUY2Row = I422ToYUY2Row_MMI;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      I422ToYUY2Row = I422ToYUY2Row_MSA;
    }
  }
#endif

  {
    // Allocate temporary rows of Y, U and V: two 64-rounded-width chunks —
    // the first holds Y; the second is split in half between U and V.
    align_buffer_64(row_y, ((width + 63) & ~63) * 2);
    uint8_t* row_u = row_y + ((width + 63) & ~63);
    uint8_t* row_v = row_u + ((width + 63) & ~63) / 2;

    // 4:2:2 has no vertical chroma subsampling, so each row is converted
    // independently (UV stride 0 samples only the current row), then
    // packed into YUY2.
    for (y = 0; y < height; ++y) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      ARGBToYRow(src_argb, row_y, width);
      I422ToYUY2Row(row_y, row_u, row_v, dst_yuy2, width);
      src_argb += src_stride_argb;
      dst_yuy2 += dst_stride_yuy2;
    }

    free_aligned_buffer_64(row_y);
  }
  return 0;
}
1005
1006 // Convert ARGB to UYVY.
1007 LIBYUV_API
// Convert ARGB to UYVY (packed 4:2:2 YUV, byte order U0 Y0 V0 Y1).
// src_argb: 4 bytes/pixel; dst_uyvy: 2 bytes/pixel.
// width must be > 0; a negative height inverts the image vertically.
// Returns 0 on success, -1 on invalid arguments.
int ARGBToUYVY(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_uyvy,
               int dst_stride_uyvy,
               int width,
               int height) {
  int y;
  // Row workers default to portable C; the #if blocks below upgrade them to
  // the best SIMD variant available.  Later blocks intentionally override
  // earlier ones (e.g. AVX2 replaces SSSE3), so their order matters.
  void (*ARGBToUVRow)(const uint8_t* src_argb, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  void (*I422ToUYVYRow)(const uint8_t* src_y, const uint8_t* src_u,
                        const uint8_t* src_v, uint8_t* dst_uyvy, int width) =
      I422ToUYVYRow_C;

  if (!src_argb || !dst_uyvy || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_uyvy = dst_uyvy + (height - 1) * dst_stride_uyvy;
    dst_stride_uyvy = -dst_stride_uyvy;
  }
  // Coalesce rows: if both buffers are fully contiguous, treat the image as
  // one very wide row to reduce per-row overhead.
  if (src_stride_argb == width * 4 && dst_stride_uyvy == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_uyvy = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA) && defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      I422ToUYVYRow = I422ToUYVYRow_SSE2;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      I422ToUYVYRow = I422ToUYVYRow_AVX2;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      I422ToUYVYRow = I422ToUYVYRow_NEON;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      I422ToUYVYRow = I422ToUYVYRow_MMI;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      I422ToUYVYRow = I422ToUYVYRow_MSA;
    }
  }
#endif

  {
    // Allocate one temporary row of Y plus half-width rows of U and V.
    // ((width + 63) & ~63) rounds each plane row up to a multiple of 64
    // bytes so SIMD row functions may safely overwrite past 'width'.
    align_buffer_64(row_y, ((width + 63) & ~63) * 2);
    uint8_t* row_u = row_y + ((width + 63) & ~63);
    uint8_t* row_v = row_u + ((width + 63) & ~63) / 2;

    for (y = 0; y < height; ++y) {
      // Stride 0: UV is sampled from this single row only (4:2:2 has no
      // vertical chroma subsampling).
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      ARGBToYRow(src_argb, row_y, width);
      I422ToUYVYRow(row_y, row_u, row_v, dst_uyvy, width);
      src_argb += src_stride_argb;
      dst_uyvy += dst_stride_uyvy;
    }

    free_aligned_buffer_64(row_y);
  }
  return 0;
}
1158
// Convert ARGB to I400 (8-bit luma plane only; chroma is discarded).
// width must be > 0; a negative height inverts the image vertically.
// Returns 0 on success, -1 on invalid arguments.
LIBYUV_API
int ARGBToI400(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_y,
               int dst_stride_y,
               int width,
               int height) {
  int y;
  // Portable C row worker; upgraded below to the best available SIMD
  // variant.  Later #if blocks intentionally override earlier ones.
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  if (!src_argb || !dst_y || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows when both buffers are contiguous.
  if (src_stride_argb == width * 4 && dst_stride_y == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
  }
  return 0;
}
1232
// Shuffle table for converting ARGB to RGBA: within each 4-byte pixel the
// alpha byte (index 3) is moved to the front — {3,0,1,2} repeated per pixel.
static const uvec8 kShuffleMaskARGBToRGBA = {
    3u, 0u, 1u, 2u, 7u, 4u, 5u, 6u, 11u, 8u, 9u, 10u, 15u, 12u, 13u, 14u};
1236
1237 // Convert ARGB to RGBA.
1238 LIBYUV_API
ARGBToRGBA(const uint8_t * src_argb,int src_stride_argb,uint8_t * dst_rgba,int dst_stride_rgba,int width,int height)1239 int ARGBToRGBA(const uint8_t* src_argb,
1240 int src_stride_argb,
1241 uint8_t* dst_rgba,
1242 int dst_stride_rgba,
1243 int width,
1244 int height) {
1245 return ARGBShuffle(src_argb, src_stride_argb, dst_rgba, dst_stride_rgba,
1246 (const uint8_t*)(&kShuffleMaskARGBToRGBA), width, height);
1247 }
1248
// Convert ARGB (4 bytes/pixel) To RGB24 (3 bytes/pixel, alpha dropped).
// width must be > 0; a negative height inverts the image vertically.
// Returns 0 on success, -1 on invalid arguments.
LIBYUV_API
int ARGBToRGB24(const uint8_t* src_argb,
                int src_stride_argb,
                uint8_t* dst_rgb24,
                int dst_stride_rgb24,
                int width,
                int height) {
  int y;
  // Portable C row worker; upgraded below.  Later #if blocks intentionally
  // override earlier ones (AVX2 over SSSE3, AVX512VBMI over AVX2).
  void (*ARGBToRGB24Row)(const uint8_t* src_argb, uint8_t* dst_rgb, int width) =
      ARGBToRGB24Row_C;
  if (!src_argb || !dst_rgb24 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows when both buffers are contiguous.
  if (src_stride_argb == width * 4 && dst_stride_rgb24 == width * 3) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_rgb24 = 0;
  }
#if defined(HAS_ARGBTORGB24ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRGB24Row = ARGBToRGB24Row_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToRGB24Row = ARGBToRGB24Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_AVX512VBMI)
  if (TestCpuFlag(kCpuHasAVX512VBMI)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_AVX512VBMI;
    if (IS_ALIGNED(width, 32)) {
      ARGBToRGB24Row = ARGBToRGB24Row_AVX512VBMI;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB24Row = ARGBToRGB24Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB24Row = ARGBToRGB24Row_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRGB24Row = ARGBToRGB24Row_MSA;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToRGB24Row(src_argb, dst_rgb24, width);
    src_argb += src_stride_argb;
    dst_rgb24 += dst_stride_rgb24;
  }
  return 0;
}
1330
// Convert ARGB To RAW (3 bytes/pixel, RGB24 with R and B swapped; alpha
// dropped).  width must be > 0; a negative height inverts the image.
// Returns 0 on success, -1 on invalid arguments.
LIBYUV_API
int ARGBToRAW(const uint8_t* src_argb,
              int src_stride_argb,
              uint8_t* dst_raw,
              int dst_stride_raw,
              int width,
              int height) {
  int y;
  // Portable C row worker; upgraded below to the best available SIMD
  // variant.  Later #if blocks intentionally override earlier ones.
  void (*ARGBToRAWRow)(const uint8_t* src_argb, uint8_t* dst_rgb, int width) =
      ARGBToRAWRow_C;
  if (!src_argb || !dst_raw || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows when both buffers are contiguous.
  if (src_stride_argb == width * 4 && dst_stride_raw == width * 3) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_raw = 0;
  }
#if defined(HAS_ARGBTORAWROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRAWRow = ARGBToRAWRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToRAWRow = ARGBToRAWRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRAWRow = ARGBToRAWRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRAWRow = ARGBToRAWRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRAWRow = ARGBToRAWRow_MSA;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToRAWRow(src_argb, dst_raw, width);
    src_argb += src_stride_argb;
    dst_raw += dst_stride_raw;
  }
  return 0;
}
1404
// Ordered 4x4 dither matrix for 888 to 565 conversion.  Values from 0 to 7.
// (The original comment said 8x8, but the table is 16 entries used as 4 rows
// of 4 — ARGBToRGB565Dither selects a row with ((y & 3) << 2).)
static const uint8_t kDither565_4x4[16] = {
    0, 4, 1, 5, 6, 2, 7, 3, 1, 5, 0, 4, 7, 3, 6, 2,
};
1409
// Convert ARGB To RGB565 with 4x4 dither matrix (16 bytes).
// dither4x4 may be NULL, in which case the built-in kDither565_4x4 is used.
// width must be > 0; a negative height inverts the image vertically.
// Returns 0 on success, -1 on invalid arguments.
LIBYUV_API
int ARGBToRGB565Dither(const uint8_t* src_argb,
                       int src_stride_argb,
                       uint8_t* dst_rgb565,
                       int dst_stride_rgb565,
                       const uint8_t* dither4x4,
                       int width,
                       int height) {
  int y;
  // Portable C row worker; upgraded below.  Later #if blocks intentionally
  // override earlier ones.
  void (*ARGBToRGB565DitherRow)(const uint8_t* src_argb, uint8_t* dst_rgb,
                                const uint32_t dither4, int width) =
      ARGBToRGB565DitherRow_C;
  if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  if (!dither4x4) {
    dither4x4 = kDither565_4x4;
  }
  // NOTE: no row coalescing here — the dither row is selected by (y & 3)
  // below, so each row's identity must be preserved.
#if defined(HAS_ARGBTORGB565DITHERROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_MSA;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    // Pass the 4 dither bytes for this row (rows cycle every 4) packed into
    // a uint32_t.
    ARGBToRGB565DitherRow(src_argb, dst_rgb565,
                          *(const uint32_t*)(dither4x4 + ((y & 3) << 2)),
                          width);
    src_argb += src_stride_argb;
    dst_rgb565 += dst_stride_rgb565;
  }
  return 0;
}
1484
// Convert ARGB To RGB565 (2 bytes/pixel, no dithering; alpha dropped).
// width must be > 0; a negative height inverts the image vertically.
// Returns 0 on success, -1 on invalid arguments.
// TODO(fbarchard): Consider using dither function low level with zeros.
LIBYUV_API
int ARGBToRGB565(const uint8_t* src_argb,
                 int src_stride_argb,
                 uint8_t* dst_rgb565,
                 int dst_stride_rgb565,
                 int width,
                 int height) {
  int y;
  // Portable C row worker; upgraded below.  Later #if blocks intentionally
  // override earlier ones.
  void (*ARGBToRGB565Row)(const uint8_t* src_argb, uint8_t* dst_rgb,
                          int width) = ARGBToRGB565Row_C;
  if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows when both buffers are contiguous.
  if (src_stride_argb == width * 4 && dst_stride_rgb565 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_rgb565 = 0;
  }
#if defined(HAS_ARGBTORGB565ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565Row = ARGBToRGB565Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565Row = ARGBToRGB565Row_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_MSA;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToRGB565Row(src_argb, dst_rgb565, width);
    src_argb += src_stride_argb;
    dst_rgb565 += dst_stride_rgb565;
  }
  return 0;
}
1559
// Convert ARGB To ARGB1555 (2 bytes/pixel: 1-bit alpha, 5 bits per channel).
// width must be > 0; a negative height inverts the image vertically.
// Returns 0 on success, -1 on invalid arguments.
LIBYUV_API
int ARGBToARGB1555(const uint8_t* src_argb,
                   int src_stride_argb,
                   uint8_t* dst_argb1555,
                   int dst_stride_argb1555,
                   int width,
                   int height) {
  int y;
  // Portable C row worker; upgraded below.  Later #if blocks intentionally
  // override earlier ones.
  void (*ARGBToARGB1555Row)(const uint8_t* src_argb, uint8_t* dst_rgb,
                            int width) = ARGBToARGB1555Row_C;
  if (!src_argb || !dst_argb1555 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows when both buffers are contiguous.
  if (src_stride_argb == width * 4 && dst_stride_argb1555 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_argb1555 = 0;
  }
#if defined(HAS_ARGBTOARGB1555ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_MSA;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToARGB1555Row(src_argb, dst_argb1555, width);
    src_argb += src_stride_argb;
    dst_argb1555 += dst_stride_argb1555;
  }
  return 0;
}
1633
// Convert ARGB To ARGB4444 (2 bytes/pixel: 4 bits per channel).
// width must be > 0; a negative height inverts the image vertically.
// Returns 0 on success, -1 on invalid arguments.
LIBYUV_API
int ARGBToARGB4444(const uint8_t* src_argb,
                   int src_stride_argb,
                   uint8_t* dst_argb4444,
                   int dst_stride_argb4444,
                   int width,
                   int height) {
  int y;
  // Portable C row worker; upgraded below.  Later #if blocks intentionally
  // override earlier ones.
  void (*ARGBToARGB4444Row)(const uint8_t* src_argb, uint8_t* dst_rgb,
                            int width) = ARGBToARGB4444Row_C;
  if (!src_argb || !dst_argb4444 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows when both buffers are contiguous.
  if (src_stride_argb == width * 4 && dst_stride_argb4444 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_argb4444 = 0;
  }
#if defined(HAS_ARGBTOARGB4444ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_MSA;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToARGB4444Row(src_argb, dst_argb4444, width);
    src_argb += src_stride_argb;
    dst_argb4444 += dst_stride_argb4444;
  }
  return 0;
}
1707
1708 // Convert ABGR To AR30.
1709 LIBYUV_API
ABGRToAR30(const uint8_t * src_abgr,int src_stride_abgr,uint8_t * dst_ar30,int dst_stride_ar30,int width,int height)1710 int ABGRToAR30(const uint8_t* src_abgr,
1711 int src_stride_abgr,
1712 uint8_t* dst_ar30,
1713 int dst_stride_ar30,
1714 int width,
1715 int height) {
1716 int y;
1717 void (*ABGRToAR30Row)(const uint8_t* src_abgr, uint8_t* dst_rgb, int width) =
1718 ABGRToAR30Row_C;
1719 if (!src_abgr || !dst_ar30 || width <= 0 || height == 0) {
1720 return -1;
1721 }
1722 if (height < 0) {
1723 height = -height;
1724 src_abgr = src_abgr + (height - 1) * src_stride_abgr;
1725 src_stride_abgr = -src_stride_abgr;
1726 }
1727 // Coalesce rows.
1728 if (src_stride_abgr == width * 4 && dst_stride_ar30 == width * 4) {
1729 width *= height;
1730 height = 1;
1731 src_stride_abgr = dst_stride_ar30 = 0;
1732 }
1733 #if defined(HAS_ABGRTOAR30ROW_SSSE3)
1734 if (TestCpuFlag(kCpuHasSSSE3)) {
1735 ABGRToAR30Row = ABGRToAR30Row_Any_SSSE3;
1736 if (IS_ALIGNED(width, 4)) {
1737 ABGRToAR30Row = ABGRToAR30Row_SSSE3;
1738 }
1739 }
1740 #endif
1741 #if defined(HAS_ABGRTOAR30ROW_AVX2)
1742 if (TestCpuFlag(kCpuHasAVX2)) {
1743 ABGRToAR30Row = ABGRToAR30Row_Any_AVX2;
1744 if (IS_ALIGNED(width, 8)) {
1745 ABGRToAR30Row = ABGRToAR30Row_AVX2;
1746 }
1747 }
1748 #endif
1749 for (y = 0; y < height; ++y) {
1750 ABGRToAR30Row(src_abgr, dst_ar30, width);
1751 src_abgr += src_stride_abgr;
1752 dst_ar30 += dst_stride_ar30;
1753 }
1754 return 0;
1755 }
1756
1757 // Convert ARGB To AR30.
1758 LIBYUV_API
ARGBToAR30(const uint8_t * src_argb,int src_stride_argb,uint8_t * dst_ar30,int dst_stride_ar30,int width,int height)1759 int ARGBToAR30(const uint8_t* src_argb,
1760 int src_stride_argb,
1761 uint8_t* dst_ar30,
1762 int dst_stride_ar30,
1763 int width,
1764 int height) {
1765 int y;
1766 void (*ARGBToAR30Row)(const uint8_t* src_argb, uint8_t* dst_rgb, int width) =
1767 ARGBToAR30Row_C;
1768 if (!src_argb || !dst_ar30 || width <= 0 || height == 0) {
1769 return -1;
1770 }
1771 if (height < 0) {
1772 height = -height;
1773 src_argb = src_argb + (height - 1) * src_stride_argb;
1774 src_stride_argb = -src_stride_argb;
1775 }
1776 // Coalesce rows.
1777 if (src_stride_argb == width * 4 && dst_stride_ar30 == width * 4) {
1778 width *= height;
1779 height = 1;
1780 src_stride_argb = dst_stride_ar30 = 0;
1781 }
1782 #if defined(HAS_ARGBTOAR30ROW_SSSE3)
1783 if (TestCpuFlag(kCpuHasSSSE3)) {
1784 ARGBToAR30Row = ARGBToAR30Row_Any_SSSE3;
1785 if (IS_ALIGNED(width, 4)) {
1786 ARGBToAR30Row = ARGBToAR30Row_SSSE3;
1787 }
1788 }
1789 #endif
1790 #if defined(HAS_ARGBTOAR30ROW_AVX2)
1791 if (TestCpuFlag(kCpuHasAVX2)) {
1792 ARGBToAR30Row = ARGBToAR30Row_Any_AVX2;
1793 if (IS_ALIGNED(width, 8)) {
1794 ARGBToAR30Row = ARGBToAR30Row_AVX2;
1795 }
1796 }
1797 #endif
1798 for (y = 0; y < height; ++y) {
1799 ARGBToAR30Row(src_argb, dst_ar30, width);
1800 src_argb += src_stride_argb;
1801 dst_ar30 += dst_stride_ar30;
1802 }
1803 return 0;
1804 }
1805
// Convert ARGB to J420 (JPeg full range I420: Y plane plus 2x2-subsampled
// U and V planes).  width must be > 0; a negative height inverts the image.
// Returns 0 on success, -1 on invalid arguments.
LIBYUV_API
int ARGBToJ420(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_yj,
               int dst_stride_yj,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  // Row workers default to portable C; the #if blocks below upgrade them.
  // Later blocks intentionally override earlier ones.
  void (*ARGBToUVJRow)(const uint8_t* src_argb0, int src_stride_argb,
                       uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVJRow_C;
  void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb || !dst_yj || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // NOTE: no row coalescing — the main loop consumes two source rows per
  // iteration for the vertically subsampled chroma.
#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_SSSE3;
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MMI) && defined(HAS_ARGBTOUVJROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYJRow = ARGBToYJRow_Any_MMI;
    ARGBToUVJRow = ARGBToUVJRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MSA) && defined(HAS_ARGBTOUVJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYJRow = ARGBToYJRow_Any_MSA;
    ARGBToUVJRow = ARGBToUVJRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVJRow = ARGBToUVJRow_MSA;
    }
  }
#endif

  // Process two rows at a time: one UV row is produced per pair of source
  // rows (the stride argument gives the UVJ row function access to the
  // second row), and one Y row per source row.
  for (y = 0; y < height - 1; y += 2) {
    ARGBToUVJRow(src_argb, src_stride_argb, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
    ARGBToYJRow(src_argb + src_stride_argb, dst_yj + dst_stride_yj, width);
    src_argb += src_stride_argb * 2;
    dst_yj += dst_stride_yj * 2;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  // Trailing odd row: UV from a single row (stride 0 — no second row).
  if (height & 1) {
    ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
  }
  return 0;
}
1907
// Convert ARGB to J422 (JPeg full range I422: Y plane plus horizontally
// 2x-subsampled U and V planes).  width must be > 0; a negative height
// inverts the image.  Returns 0 on success, -1 on invalid arguments.
LIBYUV_API
int ARGBToJ422(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_yj,
               int dst_stride_yj,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  // Row workers default to portable C; the #if blocks below upgrade them.
  // Later blocks intentionally override earlier ones.
  void (*ARGBToUVJRow)(const uint8_t* src_argb0, int src_stride_argb,
                       uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVJRow_C;
  void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb || !dst_yj || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows (U/V rows are half width, hence the * 2 comparison).
  if (src_stride_argb == width * 4 && dst_stride_yj == width &&
      dst_stride_u * 2 == width && dst_stride_v * 2 == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yj = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_SSSE3;
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MMI) && defined(HAS_ARGBTOUVJROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYJRow = ARGBToYJRow_Any_MMI;
    ARGBToUVJRow = ARGBToUVJRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MSA) && defined(HAS_ARGBTOUVJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYJRow = ARGBToYJRow_Any_MSA;
    ARGBToUVJRow = ARGBToUVJRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVJRow = ARGBToUVJRow_MSA;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    // Stride 0: UV is sampled from this row only (no vertical subsampling
    // in 4:2:2).
    ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
    src_argb += src_stride_argb;
    dst_yj += dst_stride_yj;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}
2011
// Convert ARGB to J400 (JPeg full range luma plane only; chroma discarded).
// width must be > 0; a negative height inverts the image vertically.
// Returns 0 on success, -1 on invalid arguments.
LIBYUV_API
int ARGBToJ400(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_yj,
               int dst_stride_yj,
               int width,
               int height) {
  int y;
  // Portable C row worker; upgraded below.  Later #if blocks intentionally
  // override earlier ones.
  void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb || !dst_yj || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows when both buffers are contiguous.
  if (src_stride_argb == width * 4 && dst_stride_yj == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yj = 0;
  }
#if defined(HAS_ARGBTOYJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYJRow = ARGBToYJRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYJRow = ARGBToYJRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_MSA;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToYJRow(src_argb, dst_yj, width);
    src_argb += src_stride_argb;
    dst_yj += dst_stride_yj;
  }
  return 0;
}
2085
// Convert RGBA to J400 (JPeg full range luma plane only; chroma discarded).
// Mirrors ARGBToJ400 but uses RGBA row kernels for the different byte order.
// width must be > 0; a negative height inverts the image vertically.
// Returns 0 on success, -1 on invalid arguments.
LIBYUV_API
int RGBAToJ400(const uint8_t* src_rgba,
               int src_stride_rgba,
               uint8_t* dst_yj,
               int dst_stride_yj,
               int width,
               int height) {
  int y;
  // Portable C row worker; upgraded below.  Later #if blocks intentionally
  // override earlier ones.
  void (*RGBAToYJRow)(const uint8_t* src_rgba, uint8_t* dst_yj, int width) =
      RGBAToYJRow_C;
  if (!src_rgba || !dst_yj || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_rgba = src_rgba + (height - 1) * src_stride_rgba;
    src_stride_rgba = -src_stride_rgba;
  }
  // Coalesce rows when both buffers are contiguous.
  if (src_stride_rgba == width * 4 && dst_stride_yj == width) {
    width *= height;
    height = 1;
    src_stride_rgba = dst_stride_yj = 0;
  }
#if defined(HAS_RGBATOYJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    RGBAToYJRow = RGBAToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      RGBAToYJRow = RGBAToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_RGBATOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    RGBAToYJRow = RGBAToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      RGBAToYJRow = RGBAToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_RGBATOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    RGBAToYJRow = RGBAToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      RGBAToYJRow = RGBAToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_RGBATOYJROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    RGBAToYJRow = RGBAToYJRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      RGBAToYJRow = RGBAToYJRow_MMI;
    }
  }
#endif
#if defined(HAS_RGBATOYJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    RGBAToYJRow = RGBAToYJRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      RGBAToYJRow = RGBAToYJRow_MSA;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    RGBAToYJRow(src_rgba, dst_yj, width);
    src_rgba += src_stride_rgba;
    dst_yj += dst_stride_yj;
  }
  return 0;
}
2159
2160 #ifdef __cplusplus
2161 } // extern "C"
2162 } // namespace libyuv
2163 #endif
2164