1 /*
2 * Copyright 2011 The LibYuv Project Authors. All rights reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include <stdlib.h>
12 #include <time.h>
13
14 #include "libyuv/basic_types.h"
15 #include "libyuv/compare.h"
16 #include "libyuv/convert.h"
17 #include "libyuv/convert_argb.h"
18 #include "libyuv/convert_from.h"
19 #include "libyuv/convert_from_argb.h"
20 #include "libyuv/cpu_id.h"
21 #ifdef HAVE_JPEG
22 #include "libyuv/mjpeg_decoder.h"
23 #endif
24 #include "../unit_test/unit_test.h"
25 #include "libyuv/planar_functions.h"
26 #include "libyuv/rotate.h"
27 #include "libyuv/video_common.h"
28
29 namespace libyuv {
30
31 #define SUBSAMPLE(v, a) ((((v) + (a)-1)) / (a))
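// SUBSAMPLE is a round-up divide, so subsampled plane dimensions still cover
// odd frame sizes, e.g. SUBSAMPLE(5, 2) == 3 and SUBSAMPLE(4, 2) == 2.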
32
33 #define TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
34 FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF) \
35 TEST_F(LibYUVConvertTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
36 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
37 const int kHeight = benchmark_height_; \
38 align_buffer_page_end(src_y, kWidth* kHeight + OFF); \
39 align_buffer_page_end(src_u, SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
40 SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + \
41 OFF); \
42 align_buffer_page_end(src_v, SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
43 SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + \
44 OFF); \
45 align_buffer_page_end(dst_y_c, kWidth* kHeight); \
46 align_buffer_page_end(dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X) * \
47 SUBSAMPLE(kHeight, SUBSAMP_Y)); \
48 align_buffer_page_end(dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X) * \
49 SUBSAMPLE(kHeight, SUBSAMP_Y)); \
50 align_buffer_page_end(dst_y_opt, kWidth* kHeight); \
51 align_buffer_page_end(dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * \
52 SUBSAMPLE(kHeight, SUBSAMP_Y)); \
53 align_buffer_page_end(dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * \
54 SUBSAMPLE(kHeight, SUBSAMP_Y)); \
55 for (int i = 0; i < kHeight; ++i) \
56 for (int j = 0; j < kWidth; ++j) \
57 src_y[i * kWidth + j + OFF] = (fastrand() & 0xff); \
58 for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) { \
59 for (int j = 0; j < SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) { \
60 src_u[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
61 (fastrand() & 0xff); \
62 src_v[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
63 (fastrand() & 0xff); \
64 } \
65 } \
66 memset(dst_y_c, 1, kWidth* kHeight); \
67 memset(dst_u_c, 2, \
68 SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
69 memset(dst_v_c, 3, \
70 SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
71 memset(dst_y_opt, 101, kWidth* kHeight); \
72 memset(dst_u_opt, 102, \
73 SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
74 memset(dst_v_opt, 103, \
75 SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
76 MaskCpuFlags(disable_cpu_flags_); \
77 SRC_FMT_PLANAR##To##FMT_PLANAR( \
78 src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
79 src_v + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), dst_y_c, kWidth, \
80 dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X), dst_v_c, \
81 SUBSAMPLE(kWidth, SUBSAMP_X), kWidth, NEG kHeight); \
82 MaskCpuFlags(benchmark_cpu_info_); \
83 for (int i = 0; i < benchmark_iterations_; ++i) { \
84 SRC_FMT_PLANAR##To##FMT_PLANAR( \
85 src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
86 src_v + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), dst_y_opt, kWidth, \
87 dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X), dst_v_opt, \
88 SUBSAMPLE(kWidth, SUBSAMP_X), kWidth, NEG kHeight); \
89 } \
90 int max_diff = 0; \
91 for (int i = 0; i < kHeight; ++i) { \
92 for (int j = 0; j < kWidth; ++j) { \
93 int abs_diff = abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
94 static_cast<int>(dst_y_opt[i * kWidth + j])); \
95 if (abs_diff > max_diff) { \
96 max_diff = abs_diff; \
97 } \
98 } \
99 } \
100 EXPECT_EQ(0, max_diff); \
101 for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
102 for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
103 int abs_diff = abs( \
104 static_cast<int>(dst_u_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
105 static_cast<int>( \
106 dst_u_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
107 if (abs_diff > max_diff) { \
108 max_diff = abs_diff; \
109 } \
110 } \
111 } \
112 EXPECT_LE(max_diff, 3); \
113 for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
114 for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
115 int abs_diff = abs( \
116 static_cast<int>(dst_v_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
117 static_cast<int>( \
118 dst_v_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
119 if (abs_diff > max_diff) { \
120 max_diff = abs_diff; \
121 } \
122 } \
123 } \
124 EXPECT_LE(max_diff, 3); \
125 free_aligned_buffer_page_end(dst_y_c); \
126 free_aligned_buffer_page_end(dst_u_c); \
127 free_aligned_buffer_page_end(dst_v_c); \
128 free_aligned_buffer_page_end(dst_y_opt); \
129 free_aligned_buffer_page_end(dst_u_opt); \
130 free_aligned_buffer_page_end(dst_v_opt); \
131 free_aligned_buffer_page_end(src_y); \
132 free_aligned_buffer_page_end(src_u); \
133 free_aligned_buffer_page_end(src_v); \
134 }
135
136 #define TESTPLANARTOP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
137 FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
138 TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, \
139 SUBSAMP_X, SUBSAMP_Y, benchmark_width_ - 4, _Any, +, 0) \
140 TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, \
141 SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Unaligned, +, 1) \
142 TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, \
143 SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Invert, -, 0) \
144 TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, \
145 SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Opt, +, 0)
146
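// Each TESTPLANARTOP expansion generates four tests: _Any shrinks the width
// by 4 so the any-width remainder code runs, _Unaligned offsets the source
// buffers by one byte, _Invert passes a negative height to request an
// inverted copy, and _Opt runs the aligned full-width path.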
147 TESTPLANARTOP(I420, 2, 2, I420, 2, 2)
148 TESTPLANARTOP(I422, 2, 1, I420, 2, 2)
149 TESTPLANARTOP(I444, 1, 1, I420, 2, 2)
150 TESTPLANARTOP(I420, 2, 2, I422, 2, 1)
151 TESTPLANARTOP(I420, 2, 2, I444, 1, 1)
152 TESTPLANARTOP(I420, 2, 2, I420Mirror, 2, 2)
153 TESTPLANARTOP(I422, 2, 1, I422, 2, 1)
154 TESTPLANARTOP(I444, 1, 1, I444, 1, 1)
155
156 // Test Android 420 to I420
157 #define TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, \
158 SRC_SUBSAMP_Y, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
159 W1280, N, NEG, OFF, PN, OFF_U, OFF_V) \
160 TEST_F(LibYUVConvertTest, SRC_FMT_PLANAR##To##FMT_PLANAR##_##PN##N) { \
161 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
162 const int kHeight = benchmark_height_; \
163 const int kSizeUV = \
164 SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); \
165 align_buffer_page_end(src_y, kWidth* kHeight + OFF); \
166 align_buffer_page_end(src_uv, \
167 kSizeUV*((PIXEL_STRIDE == 3) ? 3 : 2) + OFF); \
168 align_buffer_page_end(dst_y_c, kWidth* kHeight); \
169 align_buffer_page_end(dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X) * \
170 SUBSAMPLE(kHeight, SUBSAMP_Y)); \
171 align_buffer_page_end(dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X) * \
172 SUBSAMPLE(kHeight, SUBSAMP_Y)); \
173 align_buffer_page_end(dst_y_opt, kWidth* kHeight); \
174 align_buffer_page_end(dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * \
175 SUBSAMPLE(kHeight, SUBSAMP_Y)); \
176 align_buffer_page_end(dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * \
177 SUBSAMPLE(kHeight, SUBSAMP_Y)); \
178 uint8* src_u = src_uv + OFF_U; \
179 uint8* src_v = src_uv + (PIXEL_STRIDE == 1 ? kSizeUV : OFF_V); \
180 int src_stride_uv = SUBSAMPLE(kWidth, SUBSAMP_X) * PIXEL_STRIDE; \
181 for (int i = 0; i < kHeight; ++i) \
182 for (int j = 0; j < kWidth; ++j) \
183 src_y[i * kWidth + j + OFF] = (fastrand() & 0xff); \
184 for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) { \
185 for (int j = 0; j < SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) { \
186 src_u[(i * src_stride_uv) + j * PIXEL_STRIDE + OFF] = \
187 (fastrand() & 0xff); \
188 src_v[(i * src_stride_uv) + j * PIXEL_STRIDE + OFF] = \
189 (fastrand() & 0xff); \
190 } \
191 } \
192 memset(dst_y_c, 1, kWidth* kHeight); \
193 memset(dst_u_c, 2, \
194 SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
195 memset(dst_v_c, 3, \
196 SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
197 memset(dst_y_opt, 101, kWidth* kHeight); \
198 memset(dst_u_opt, 102, \
199 SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
200 memset(dst_v_opt, 103, \
201 SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
202 MaskCpuFlags(disable_cpu_flags_); \
203 SRC_FMT_PLANAR##To##FMT_PLANAR( \
204 src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
205 src_v + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), PIXEL_STRIDE, dst_y_c, \
206 kWidth, dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X), dst_v_c, \
207 SUBSAMPLE(kWidth, SUBSAMP_X), kWidth, NEG kHeight); \
208 MaskCpuFlags(benchmark_cpu_info_); \
209 for (int i = 0; i < benchmark_iterations_; ++i) { \
210 SRC_FMT_PLANAR##To##FMT_PLANAR( \
211 src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
212 src_v + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), PIXEL_STRIDE, \
213 dst_y_opt, kWidth, dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
214 dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X), kWidth, NEG kHeight); \
215 } \
216 int max_diff = 0; \
217 for (int i = 0; i < kHeight; ++i) { \
218 for (int j = 0; j < kWidth; ++j) { \
219 int abs_diff = abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
220 static_cast<int>(dst_y_opt[i * kWidth + j])); \
221 if (abs_diff > max_diff) { \
222 max_diff = abs_diff; \
223 } \
224 } \
225 } \
226 EXPECT_EQ(0, max_diff); \
227 for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
228 for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
229 int abs_diff = abs( \
230 static_cast<int>(dst_u_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
231 static_cast<int>( \
232 dst_u_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
233 if (abs_diff > max_diff) { \
234 max_diff = abs_diff; \
235 } \
236 } \
237 } \
238 EXPECT_LE(max_diff, 3); \
239 for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
240 for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
241 int abs_diff = abs( \
242 static_cast<int>(dst_v_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
243 static_cast<int>( \
244 dst_v_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
245 if (abs_diff > max_diff) { \
246 max_diff = abs_diff; \
247 } \
248 } \
249 } \
250 EXPECT_LE(max_diff, 3); \
251 free_aligned_buffer_page_end(dst_y_c); \
252 free_aligned_buffer_page_end(dst_u_c); \
253 free_aligned_buffer_page_end(dst_v_c); \
254 free_aligned_buffer_page_end(dst_y_opt); \
255 free_aligned_buffer_page_end(dst_u_opt); \
256 free_aligned_buffer_page_end(dst_v_opt); \
257 free_aligned_buffer_page_end(src_y); \
258 free_aligned_buffer_page_end(src_uv); \
259 }
260
261 #define TESTAPLANARTOP(SRC_FMT_PLANAR, PN, PIXEL_STRIDE, OFF_U, OFF_V, \
262 SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, SUBSAMP_X, \
263 SUBSAMP_Y) \
264 TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
265 FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, benchmark_width_ - 4, \
266 _Any, +, 0, PN, OFF_U, OFF_V) \
267 TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
268 FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, benchmark_width_, \
269 _Unaligned, +, 1, PN, OFF_U, OFF_V) \
270 TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
271 FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Invert, \
272 -, 0, PN, OFF_U, OFF_V) \
273 TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
274 FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Opt, +, \
275 0, PN, OFF_U, OFF_V)
276
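// Android420ToI420 takes a chroma pixel stride: 1 is fully planar (I420-style
// U and V planes), while a stride of 2 with OFF_V 1 or OFF_U 1 describes
// interleaved NV12 or NV21 chroma respectively.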
277 TESTAPLANARTOP(Android420, I420, 1, 0, 0, 2, 2, I420, 2, 2)
278 TESTAPLANARTOP(Android420, NV12, 2, 0, 1, 2, 2, I420, 2, 2)
279 TESTAPLANARTOP(Android420, NV21, 2, 1, 0, 2, 2, I420, 2, 2)
280
281 #define TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
282 FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF) \
283 TEST_F(LibYUVConvertTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
284 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
285 const int kHeight = benchmark_height_; \
286 align_buffer_page_end(src_y, kWidth* kHeight + OFF); \
287 align_buffer_page_end(src_u, SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
288 SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + \
289 OFF); \
290 align_buffer_page_end(src_v, SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
291 SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + \
292 OFF); \
293 align_buffer_page_end(dst_y_c, kWidth* kHeight); \
294 align_buffer_page_end(dst_uv_c, SUBSAMPLE(kWidth * 2, SUBSAMP_X) * \
295 SUBSAMPLE(kHeight, SUBSAMP_Y)); \
296 align_buffer_page_end(dst_y_opt, kWidth* kHeight); \
297 align_buffer_page_end(dst_uv_opt, SUBSAMPLE(kWidth * 2, SUBSAMP_X) * \
298 SUBSAMPLE(kHeight, SUBSAMP_Y)); \
299 for (int i = 0; i < kHeight; ++i) \
300 for (int j = 0; j < kWidth; ++j) \
301 src_y[i * kWidth + j + OFF] = (fastrand() & 0xff); \
302 for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) { \
303 for (int j = 0; j < SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) { \
304 src_u[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
305 (fastrand() & 0xff); \
306 src_v[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
307 (fastrand() & 0xff); \
308 } \
309 } \
310 memset(dst_y_c, 1, kWidth* kHeight); \
311 memset(dst_uv_c, 2, \
312 SUBSAMPLE(kWidth * 2, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
313 memset(dst_y_opt, 101, kWidth* kHeight); \
314 memset(dst_uv_opt, 102, \
315 SUBSAMPLE(kWidth * 2, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
316 MaskCpuFlags(disable_cpu_flags_); \
317 SRC_FMT_PLANAR##To##FMT_PLANAR( \
318 src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
319 src_v + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), dst_y_c, kWidth, \
320 dst_uv_c, SUBSAMPLE(kWidth * 2, SUBSAMP_X), kWidth, NEG kHeight); \
321 MaskCpuFlags(benchmark_cpu_info_); \
322 for (int i = 0; i < benchmark_iterations_; ++i) { \
323 SRC_FMT_PLANAR##To##FMT_PLANAR( \
324 src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
325 src_v + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), dst_y_opt, kWidth, \
326 dst_uv_opt, SUBSAMPLE(kWidth * 2, SUBSAMP_X), kWidth, NEG kHeight); \
327 } \
328 int max_diff = 0; \
329 for (int i = 0; i < kHeight; ++i) { \
330 for (int j = 0; j < kWidth; ++j) { \
331 int abs_diff = abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
332 static_cast<int>(dst_y_opt[i * kWidth + j])); \
333 if (abs_diff > max_diff) { \
334 max_diff = abs_diff; \
335 } \
336 } \
337 } \
338 EXPECT_LE(max_diff, 1); \
339 for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
340 for (int j = 0; j < SUBSAMPLE(kWidth * 2, SUBSAMP_X); ++j) { \
341 int abs_diff = \
342 abs(static_cast<int>( \
343 dst_uv_c[i * SUBSAMPLE(kWidth * 2, SUBSAMP_X) + j]) - \
344 static_cast<int>( \
345 dst_uv_opt[i * SUBSAMPLE(kWidth * 2, SUBSAMP_X) + j])); \
346 if (abs_diff > max_diff) { \
347 max_diff = abs_diff; \
348 } \
349 } \
350 } \
351 EXPECT_LE(max_diff, 1); \
352 free_aligned_buffer_page_end(dst_y_c); \
353 free_aligned_buffer_page_end(dst_uv_c); \
354 free_aligned_buffer_page_end(dst_y_opt); \
355 free_aligned_buffer_page_end(dst_uv_opt); \
356 free_aligned_buffer_page_end(src_y); \
357 free_aligned_buffer_page_end(src_u); \
358 free_aligned_buffer_page_end(src_v); \
359 }
360
361 #define TESTPLANARTOBP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
362 FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
363 TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, \
364 SUBSAMP_X, SUBSAMP_Y, benchmark_width_ - 4, _Any, +, 0) \
365 TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, \
366 SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Unaligned, +, 1) \
367 TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, \
368 SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Invert, -, 0) \
369 TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, \
370 SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Opt, +, 0)
371
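// "BP" converters write a biplanar destination: a full Y plane plus a single
// interleaved UV (or VU) plane, as in NV12 and NV21.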
372 TESTPLANARTOBP(I420, 2, 2, NV12, 2, 2)
373 TESTPLANARTOBP(I420, 2, 2, NV21, 2, 2)
374
375 #define TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
376 FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF, \
377 DOY) \
378 TEST_F(LibYUVConvertTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
379 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
380 const int kHeight = benchmark_height_; \
381 align_buffer_page_end(src_y, kWidth* kHeight + OFF); \
382 align_buffer_page_end(src_uv, 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
383 SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + \
384 OFF); \
385 align_buffer_page_end(dst_y_c, kWidth* kHeight); \
386 align_buffer_page_end(dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X) * \
387 SUBSAMPLE(kHeight, SUBSAMP_Y)); \
388 align_buffer_page_end(dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X) * \
389 SUBSAMPLE(kHeight, SUBSAMP_Y)); \
390 align_buffer_page_end(dst_y_opt, kWidth* kHeight); \
391 align_buffer_page_end(dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * \
392 SUBSAMPLE(kHeight, SUBSAMP_Y)); \
393 align_buffer_page_end(dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * \
394 SUBSAMPLE(kHeight, SUBSAMP_Y)); \
395 for (int i = 0; i < kHeight; ++i) \
396 for (int j = 0; j < kWidth; ++j) \
397 src_y[i * kWidth + j + OFF] = (fastrand() & 0xff); \
398 for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) { \
399 for (int j = 0; j < 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) { \
400 src_uv[(i * 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
401 (fastrand() & 0xff); \
402 } \
403 } \
404 memset(dst_y_c, 1, kWidth* kHeight); \
405 memset(dst_u_c, 2, \
406 SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
407 memset(dst_v_c, 3, \
408 SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
409 memset(dst_y_opt, 101, kWidth* kHeight); \
410 memset(dst_u_opt, 102, \
411 SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
412 memset(dst_v_opt, 103, \
413 SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
414 MaskCpuFlags(disable_cpu_flags_); \
415 SRC_FMT_PLANAR##To##FMT_PLANAR( \
416 src_y + OFF, kWidth, src_uv + OFF, \
417 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X), DOY ? dst_y_c : NULL, kWidth, \
418 dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X), dst_v_c, \
419 SUBSAMPLE(kWidth, SUBSAMP_X), kWidth, NEG kHeight); \
420 MaskCpuFlags(benchmark_cpu_info_); \
421 for (int i = 0; i < benchmark_iterations_; ++i) { \
422 SRC_FMT_PLANAR##To##FMT_PLANAR( \
423 src_y + OFF, kWidth, src_uv + OFF, \
424 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X), DOY ? dst_y_opt : NULL, \
425 kWidth, dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X), dst_v_opt, \
426 SUBSAMPLE(kWidth, SUBSAMP_X), kWidth, NEG kHeight); \
427 } \
428 int max_diff = 0; \
429 if (DOY) { \
430 for (int i = 0; i < kHeight; ++i) { \
431 for (int j = 0; j < kWidth; ++j) { \
432 int abs_diff = abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
433 static_cast<int>(dst_y_opt[i * kWidth + j])); \
434 if (abs_diff > max_diff) { \
435 max_diff = abs_diff; \
436 } \
437 } \
438 } \
439 EXPECT_LE(max_diff, 1); \
440 } \
441 for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
442 for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
443 int abs_diff = abs( \
444 static_cast<int>(dst_u_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
445 static_cast<int>( \
446 dst_u_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
447 if (abs_diff > max_diff) { \
448 max_diff = abs_diff; \
449 } \
450 } \
451 } \
452 EXPECT_LE(max_diff, 1); \
453 for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
454 for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
455 int abs_diff = abs( \
456 static_cast<int>(dst_v_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
457 static_cast<int>( \
458 dst_v_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
459 if (abs_diff > max_diff) { \
460 max_diff = abs_diff; \
461 } \
462 } \
463 } \
464 EXPECT_LE(max_diff, 1); \
465 free_aligned_buffer_page_end(dst_y_c); \
466 free_aligned_buffer_page_end(dst_u_c); \
467 free_aligned_buffer_page_end(dst_v_c); \
468 free_aligned_buffer_page_end(dst_y_opt); \
469 free_aligned_buffer_page_end(dst_u_opt); \
470 free_aligned_buffer_page_end(dst_v_opt); \
471 free_aligned_buffer_page_end(src_y); \
472 free_aligned_buffer_page_end(src_uv); \
473 }
474
475 #define TESTBIPLANARTOP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
476 FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
477 TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, \
478 SUBSAMP_X, SUBSAMP_Y, benchmark_width_ - 4, _Any, +, 0, 1) \
479 TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, \
480 SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Unaligned, +, 1, \
481 1) \
482 TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, \
483 SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Invert, -, 0, 1) \
484 TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, \
485 SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Opt, +, 0, 1) \
486 TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, \
487 SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _NullY, +, 0, 0)
488
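// The extra _NullY case passes NULL for the Y destination so the Y plane is
// skipped and only the UV de-interleave is checked.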
489 TESTBIPLANARTOP(NV12, 2, 2, I420, 2, 2)
490 TESTBIPLANARTOP(NV21, 2, 2, I420, 2, 2)
491
492 #define ALIGNINT(V, ALIGN) (((V) + (ALIGN)-1) / (ALIGN) * (ALIGN))
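// ALIGNINT rounds V up to the next multiple of ALIGN, e.g.
// ALIGNINT(17, 16) == 32 and ALIGNINT(16, 16) == 16.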
493
494 #define TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
495 YALIGN, W1280, DIFF, N, NEG, OFF, FMT_C, BPP_C) \
496 TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##N) { \
497 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
498 const int kHeight = ALIGNINT(benchmark_height_, YALIGN); \
499 const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN); \
500 const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
501 const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y); \
502 align_buffer_page_end(src_y, kWidth* kHeight + OFF); \
503 align_buffer_page_end(src_u, kSizeUV + OFF); \
504 align_buffer_page_end(src_v, kSizeUV + OFF); \
505 align_buffer_page_end(dst_argb_c, kStrideB* kHeight + OFF); \
506 align_buffer_page_end(dst_argb_opt, kStrideB* kHeight + OFF); \
507 for (int i = 0; i < kWidth * kHeight; ++i) { \
508 src_y[i + OFF] = (fastrand() & 0xff); \
509 } \
510 for (int i = 0; i < kSizeUV; ++i) { \
511 src_u[i + OFF] = (fastrand() & 0xff); \
512 src_v[i + OFF] = (fastrand() & 0xff); \
513 } \
514 memset(dst_argb_c + OFF, 1, kStrideB * kHeight); \
515 memset(dst_argb_opt + OFF, 101, kStrideB * kHeight); \
516 MaskCpuFlags(disable_cpu_flags_); \
517 FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_u + OFF, kStrideUV, \
518 src_v + OFF, kStrideUV, dst_argb_c + OFF, kStrideB, \
519 kWidth, NEG kHeight); \
520 MaskCpuFlags(benchmark_cpu_info_); \
521 for (int i = 0; i < benchmark_iterations_; ++i) { \
522 FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_u + OFF, kStrideUV, \
523 src_v + OFF, kStrideUV, dst_argb_opt + OFF, \
524 kStrideB, kWidth, NEG kHeight); \
525 } \
526 int max_diff = 0; \
527 /* Convert to ARGB so 565 is expanded to bytes that can be compared. */ \
528 align_buffer_page_end(dst_argb32_c, kWidth* BPP_C* kHeight); \
529 align_buffer_page_end(dst_argb32_opt, kWidth* BPP_C* kHeight); \
530 memset(dst_argb32_c, 2, kWidth* BPP_C* kHeight); \
531 memset(dst_argb32_opt, 102, kWidth* BPP_C* kHeight); \
532 FMT_B##To##FMT_C(dst_argb_c + OFF, kStrideB, dst_argb32_c, kWidth * BPP_C, \
533 kWidth, kHeight); \
534 FMT_B##To##FMT_C(dst_argb_opt + OFF, kStrideB, dst_argb32_opt, \
535 kWidth * BPP_C, kWidth, kHeight); \
536 for (int i = 0; i < kWidth * BPP_C * kHeight; ++i) { \
537 int abs_diff = abs(static_cast<int>(dst_argb32_c[i]) - \
538 static_cast<int>(dst_argb32_opt[i])); \
539 if (abs_diff > max_diff) { \
540 max_diff = abs_diff; \
541 } \
542 } \
543 EXPECT_LE(max_diff, DIFF); \
544 free_aligned_buffer_page_end(src_y); \
545 free_aligned_buffer_page_end(src_u); \
546 free_aligned_buffer_page_end(src_v); \
547 free_aligned_buffer_page_end(dst_argb_c); \
548 free_aligned_buffer_page_end(dst_argb_opt); \
549 free_aligned_buffer_page_end(dst_argb32_c); \
550 free_aligned_buffer_page_end(dst_argb32_opt); \
551 }
552
553 #define TESTPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
554 YALIGN, DIFF, FMT_C, BPP_C) \
555 TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
556 YALIGN, benchmark_width_ - 4, DIFF, _Any, +, 0, FMT_C, BPP_C) \
557 TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
558 YALIGN, benchmark_width_, DIFF, _Unaligned, +, 1, FMT_C, \
559 BPP_C) \
560 TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
561 YALIGN, benchmark_width_, DIFF, _Invert, -, 0, FMT_C, BPP_C) \
562 TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
563 YALIGN, benchmark_width_, DIFF, _Opt, +, 0, FMT_C, BPP_C)
564
565 TESTPLANARTOB(I420, 2, 2, ARGB, 4, 4, 1, 2, ARGB, 4)
566 TESTPLANARTOB(J420, 2, 2, ARGB, 4, 4, 1, 2, ARGB, 4)
567 TESTPLANARTOB(J420, 2, 2, ABGR, 4, 4, 1, 2, ARGB, 4)
568 TESTPLANARTOB(H420, 2, 2, ARGB, 4, 4, 1, 2, ARGB, 4)
569 TESTPLANARTOB(H420, 2, 2, ABGR, 4, 4, 1, 2, ARGB, 4)
570 TESTPLANARTOB(I420, 2, 2, BGRA, 4, 4, 1, 2, ARGB, 4)
571 TESTPLANARTOB(I420, 2, 2, ABGR, 4, 4, 1, 2, ARGB, 4)
572 TESTPLANARTOB(I420, 2, 2, RGBA, 4, 4, 1, 2, ARGB, 4)
573 TESTPLANARTOB(I420, 2, 2, RAW, 3, 3, 1, 2, ARGB, 4)
574 TESTPLANARTOB(I420, 2, 2, RGB24, 3, 3, 1, 2, ARGB, 4)
575 TESTPLANARTOB(I420, 2, 2, RGB565, 2, 2, 1, 9, ARGB, 4)
576 TESTPLANARTOB(I420, 2, 2, ARGB1555, 2, 2, 1, 9, ARGB, 4)
577 TESTPLANARTOB(I420, 2, 2, ARGB4444, 2, 2, 1, 17, ARGB, 4)
578 TESTPLANARTOB(I422, 2, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
579 TESTPLANARTOB(I422, 2, 1, RGB565, 2, 2, 1, 9, ARGB, 4)
580 TESTPLANARTOB(J422, 2, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
581 TESTPLANARTOB(J422, 2, 1, ABGR, 4, 4, 1, 2, ARGB, 4)
582 TESTPLANARTOB(H422, 2, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
583 TESTPLANARTOB(H422, 2, 1, ABGR, 4, 4, 1, 2, ARGB, 4)
584 TESTPLANARTOB(I422, 2, 1, BGRA, 4, 4, 1, 2, ARGB, 4)
585 TESTPLANARTOB(I422, 2, 1, ABGR, 4, 4, 1, 2, ARGB, 4)
586 TESTPLANARTOB(I422, 2, 1, RGBA, 4, 4, 1, 2, ARGB, 4)
587 TESTPLANARTOB(I444, 1, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
588 TESTPLANARTOB(J444, 1, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
589 TESTPLANARTOB(I444, 1, 1, ABGR, 4, 4, 1, 2, ARGB, 4)
590 TESTPLANARTOB(I420, 2, 2, YUY2, 2, 4, 1, 1, ARGB, 4)
591 TESTPLANARTOB(I420, 2, 2, UYVY, 2, 4, 1, 1, ARGB, 4)
592 TESTPLANARTOB(I422, 2, 1, YUY2, 2, 4, 1, 0, ARGB, 4)
593 TESTPLANARTOB(I422, 2, 1, UYVY, 2, 4, 1, 0, ARGB, 4)
594 TESTPLANARTOB(I420, 2, 2, I400, 1, 1, 1, 0, ARGB, 4)
595 TESTPLANARTOB(J420, 2, 2, J400, 1, 1, 1, 0, ARGB, 4)
596
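// The I420Alpha converters below take a fourth (alpha) plane plus an
// attenuate flag; the _Premult variant passes attenuate = 1 so the color
// channels are premultiplied by alpha.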
597 #define TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
598 YALIGN, W1280, DIFF, N, NEG, OFF, ATTEN) \
599 TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##N) { \
600 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
601 const int kHeight = ALIGNINT(benchmark_height_, YALIGN); \
602 const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN); \
603 const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
604 const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y); \
605 align_buffer_page_end(src_y, kWidth* kHeight + OFF); \
606 align_buffer_page_end(src_u, kSizeUV + OFF); \
607 align_buffer_page_end(src_v, kSizeUV + OFF); \
608 align_buffer_page_end(src_a, kWidth* kHeight + OFF); \
609 align_buffer_page_end(dst_argb_c, kStrideB* kHeight + OFF); \
610 align_buffer_page_end(dst_argb_opt, kStrideB* kHeight + OFF); \
611 for (int i = 0; i < kWidth * kHeight; ++i) { \
612 src_y[i + OFF] = (fastrand() & 0xff); \
613 src_a[i + OFF] = (fastrand() & 0xff); \
614 } \
615 for (int i = 0; i < kSizeUV; ++i) { \
616 src_u[i + OFF] = (fastrand() & 0xff); \
617 src_v[i + OFF] = (fastrand() & 0xff); \
618 } \
619 memset(dst_argb_c + OFF, 1, kStrideB * kHeight); \
620 memset(dst_argb_opt + OFF, 101, kStrideB * kHeight); \
621 MaskCpuFlags(disable_cpu_flags_); \
622 FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_u + OFF, kStrideUV, \
623 src_v + OFF, kStrideUV, src_a + OFF, kWidth, \
624 dst_argb_c + OFF, kStrideB, kWidth, NEG kHeight, \
625 ATTEN); \
626 MaskCpuFlags(benchmark_cpu_info_); \
627 for (int i = 0; i < benchmark_iterations_; ++i) { \
628 FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_u + OFF, kStrideUV, \
629 src_v + OFF, kStrideUV, src_a + OFF, kWidth, \
630 dst_argb_opt + OFF, kStrideB, kWidth, NEG kHeight, \
631 ATTEN); \
632 } \
633 int max_diff = 0; \
634 for (int i = 0; i < kWidth * BPP_B * kHeight; ++i) { \
635 int abs_diff = abs(static_cast<int>(dst_argb_c[i + OFF]) - \
636 static_cast<int>(dst_argb_opt[i + OFF])); \
637 if (abs_diff > max_diff) { \
638 max_diff = abs_diff; \
639 } \
640 } \
641 EXPECT_LE(max_diff, DIFF); \
642 free_aligned_buffer_page_end(src_y); \
643 free_aligned_buffer_page_end(src_u); \
644 free_aligned_buffer_page_end(src_v); \
645 free_aligned_buffer_page_end(src_a); \
646 free_aligned_buffer_page_end(dst_argb_c); \
647 free_aligned_buffer_page_end(dst_argb_opt); \
648 }
649
650 #define TESTQPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
651 YALIGN, DIFF) \
652 TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
653 YALIGN, benchmark_width_ - 4, DIFF, _Any, +, 0, 0) \
654 TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
655 YALIGN, benchmark_width_, DIFF, _Unaligned, +, 1, 0) \
656 TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
657 YALIGN, benchmark_width_, DIFF, _Invert, -, 0, 0) \
658 TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
659 YALIGN, benchmark_width_, DIFF, _Opt, +, 0, 0) \
660 TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
661 YALIGN, benchmark_width_, DIFF, _Premult, +, 0, 1)
662
663 TESTQPLANARTOB(I420Alpha, 2, 2, ARGB, 4, 4, 1, 2)
664 TESTQPLANARTOB(I420Alpha, 2, 2, ABGR, 4, 4, 1, 2)
665
666 #define TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
667 W1280, DIFF, N, NEG, OFF) \
668 TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##N) { \
669 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
670 const int kHeight = benchmark_height_; \
671 const int kStrideB = kWidth * BPP_B; \
672 const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
673 align_buffer_page_end(src_y, kWidth* kHeight + OFF); \
674 align_buffer_page_end(src_uv, \
675 kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y) * 2 + OFF); \
676 align_buffer_page_end(dst_argb_c, kStrideB* kHeight); \
677 align_buffer_page_end(dst_argb_opt, kStrideB* kHeight); \
678 for (int i = 0; i < kHeight; ++i) \
679 for (int j = 0; j < kWidth; ++j) \
680 src_y[i * kWidth + j + OFF] = (fastrand() & 0xff); \
681 for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
682 for (int j = 0; j < kStrideUV * 2; ++j) { \
683 src_uv[i * kStrideUV * 2 + j + OFF] = (fastrand() & 0xff); \
684 } \
685 } \
686 memset(dst_argb_c, 1, kStrideB* kHeight); \
687 memset(dst_argb_opt, 101, kStrideB* kHeight); \
688 MaskCpuFlags(disable_cpu_flags_); \
689 FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_uv + OFF, kStrideUV * 2, \
690 dst_argb_c, kWidth * BPP_B, kWidth, NEG kHeight); \
691 MaskCpuFlags(benchmark_cpu_info_); \
692 for (int i = 0; i < benchmark_iterations_; ++i) { \
693 FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_uv + OFF, kStrideUV * 2, \
694 dst_argb_opt, kWidth * BPP_B, kWidth, \
695 NEG kHeight); \
696 } \
697 /* Convert to ARGB so 565 is expanded to bytes that can be compared. */ \
698 align_buffer_page_end(dst_argb32_c, kWidth * 4 * kHeight); \
699 align_buffer_page_end(dst_argb32_opt, kWidth * 4 * kHeight); \
700 memset(dst_argb32_c, 2, kWidth * 4 * kHeight); \
701 memset(dst_argb32_opt, 102, kWidth * 4 * kHeight); \
702 FMT_B##ToARGB(dst_argb_c, kStrideB, dst_argb32_c, kWidth * 4, kWidth, \
703 kHeight); \
704 FMT_B##ToARGB(dst_argb_opt, kStrideB, dst_argb32_opt, kWidth * 4, kWidth, \
705 kHeight); \
706 int max_diff = 0; \
707 for (int i = 0; i < kHeight; ++i) { \
708 for (int j = 0; j < kWidth * 4; ++j) { \
709 int abs_diff = \
710 abs(static_cast<int>(dst_argb32_c[i * kWidth * 4 + j]) - \
711 static_cast<int>(dst_argb32_opt[i * kWidth * 4 + j])); \
712 if (abs_diff > max_diff) { \
713 max_diff = abs_diff; \
714 } \
715 } \
716 } \
717 EXPECT_LE(max_diff, DIFF); \
718 free_aligned_buffer_page_end(src_y); \
719 free_aligned_buffer_page_end(src_uv); \
720 free_aligned_buffer_page_end(dst_argb_c); \
721 free_aligned_buffer_page_end(dst_argb_opt); \
722 free_aligned_buffer_page_end(dst_argb32_c); \
723 free_aligned_buffer_page_end(dst_argb32_opt); \
724 }
725
726 #define TESTBIPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, DIFF) \
727 TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
728 benchmark_width_ - 4, DIFF, _Any, +, 0) \
729 TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
730 benchmark_width_, DIFF, _Unaligned, +, 1) \
731 TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
732 benchmark_width_, DIFF, _Invert, -, 0) \
733 TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
734 benchmark_width_, DIFF, _Opt, +, 0)
735
736 TESTBIPLANARTOB(NV12, 2, 2, ARGB, 4, 2)
737 TESTBIPLANARTOB(NV21, 2, 2, ARGB, 4, 2)
738 TESTBIPLANARTOB(NV12, 2, 2, RGB565, 2, 9)
739
740 #ifdef DO_THREE_PLANES
741 // Do 3 allocations for YUV. Conventional but slower.
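// In the default (#else) version further down, U and V instead share a single
// dst_uv allocation: the V rows start kStrideUV bytes in and both planes use
// a row stride of kStrideUV * 2.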
742 #define TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
743 W1280, DIFF, N, NEG, OFF) \
744 TEST_F(LibYUVConvertTest, FMT_A##To##FMT_PLANAR##N) { \
745 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
746 const int kHeight = ALIGNINT(benchmark_height_, YALIGN); \
747 const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
748 const int kStride = (kStrideUV * SUBSAMP_X * 8 * BPP_A + 7) / 8; \
749 align_buffer_page_end(src_argb, kStride* kHeight + OFF); \
750 align_buffer_page_end(dst_y_c, kWidth* kHeight); \
751 align_buffer_page_end(dst_u_c, kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y)); \
752 align_buffer_page_end(dst_v_c, kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y)); \
753 align_buffer_page_end(dst_y_opt, kWidth* kHeight); \
754 align_buffer_page_end(dst_u_opt, \
755 kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y)); \
756 align_buffer_page_end(dst_v_opt, \
757 kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y)); \
758 memset(dst_y_c, 1, kWidth* kHeight); \
759 memset(dst_u_c, 2, kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y)); \
760 memset(dst_v_c, 3, kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y)); \
761 memset(dst_y_opt, 101, kWidth* kHeight); \
762 memset(dst_u_opt, 102, kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y)); \
763 memset(dst_v_opt, 103, kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y)); \
764 for (int i = 0; i < kHeight; ++i) \
765 for (int j = 0; j < kStride; ++j) \
766 src_argb[(i * kStride) + j + OFF] = (fastrand() & 0xff); \
767 MaskCpuFlags(disable_cpu_flags_); \
768 FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, dst_y_c, kWidth, dst_u_c, \
769 kStrideUV, dst_v_c, kStrideUV, kWidth, NEG kHeight); \
770 MaskCpuFlags(benchmark_cpu_info_); \
771 for (int i = 0; i < benchmark_iterations_; ++i) { \
772 FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, dst_y_opt, kWidth, \
773 dst_u_opt, kStrideUV, dst_v_opt, kStrideUV, \
774 kWidth, NEG kHeight); \
775 } \
776 for (int i = 0; i < kHeight; ++i) { \
777 for (int j = 0; j < kWidth; ++j) { \
778 EXPECT_NEAR(static_cast<int>(dst_y_c[i * kWidth + j]), \
779 static_cast<int>(dst_y_opt[i * kWidth + j]), DIFF); \
780 } \
781 } \
782 for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
783 for (int j = 0; j < kStrideUV; ++j) { \
784 EXPECT_NEAR(static_cast<int>(dst_u_c[i * kStrideUV + j]), \
785 static_cast<int>(dst_u_opt[i * kStrideUV + j]), DIFF); \
786 } \
787 } \
788 for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
789 for (int j = 0; j < kStrideUV; ++j) { \
790 EXPECT_NEAR(static_cast<int>(dst_v_c[i * kStrideUV + j]), \
791 static_cast<int>(dst_v_opt[i * kStrideUV + j]), DIFF); \
792 } \
793 } \
794 free_aligned_buffer_page_end(dst_y_c); \
795 free_aligned_buffer_page_end(dst_u_c); \
796 free_aligned_buffer_page_end(dst_v_c); \
797 free_aligned_buffer_page_end(dst_y_opt); \
798 free_aligned_buffer_page_end(dst_u_opt); \
799 free_aligned_buffer_page_end(dst_v_opt); \
800 free_aligned_buffer_page_end(src_argb); \
801 }
802 #else
803 #define TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
804 W1280, DIFF, N, NEG, OFF) \
805 TEST_F(LibYUVConvertTest, FMT_A##To##FMT_PLANAR##N) { \
806 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
807 const int kHeight = ALIGNINT(benchmark_height_, YALIGN); \
808 const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
809 const int kStride = (kStrideUV * SUBSAMP_X * 8 * BPP_A + 7) / 8; \
810 align_buffer_page_end(src_argb, kStride* kHeight + OFF); \
811 align_buffer_page_end(dst_y_c, kWidth* kHeight); \
812 align_buffer_page_end(dst_uv_c, \
813 kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
814 align_buffer_page_end(dst_y_opt, kWidth* kHeight); \
815 align_buffer_page_end(dst_uv_opt, \
816 kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
817 memset(dst_y_c, 1, kWidth* kHeight); \
818 memset(dst_uv_c, 2, kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
819 memset(dst_y_opt, 101, kWidth* kHeight); \
820 memset(dst_uv_opt, 102, kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
821 for (int i = 0; i < kHeight; ++i) \
822 for (int j = 0; j < kStride; ++j) \
823 src_argb[(i * kStride) + j + OFF] = (fastrand() & 0xff); \
824 MaskCpuFlags(disable_cpu_flags_); \
825 FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, dst_y_c, kWidth, dst_uv_c, \
826 kStrideUV * 2, dst_uv_c + kStrideUV, kStrideUV * 2, \
827 kWidth, NEG kHeight); \
828 MaskCpuFlags(benchmark_cpu_info_); \
829 for (int i = 0; i < benchmark_iterations_; ++i) { \
830 FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, dst_y_opt, kWidth, \
831 dst_uv_opt, kStrideUV * 2, dst_uv_opt + kStrideUV, \
832 kStrideUV * 2, kWidth, NEG kHeight); \
833 } \
834 for (int i = 0; i < kHeight; ++i) { \
835 for (int j = 0; j < kWidth; ++j) { \
836 EXPECT_NEAR(static_cast<int>(dst_y_c[i * kWidth + j]), \
837 static_cast<int>(dst_y_opt[i * kWidth + j]), DIFF); \
838 } \
839 } \
840 for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y) * 2; ++i) { \
841 for (int j = 0; j < kStrideUV; ++j) { \
842 EXPECT_NEAR(static_cast<int>(dst_uv_c[i * kStrideUV + j]), \
843 static_cast<int>(dst_uv_opt[i * kStrideUV + j]), DIFF); \
844 } \
845 } \
846 free_aligned_buffer_page_end(dst_y_c); \
847 free_aligned_buffer_page_end(dst_uv_c); \
848 free_aligned_buffer_page_end(dst_y_opt); \
849 free_aligned_buffer_page_end(dst_uv_opt); \
850 free_aligned_buffer_page_end(src_argb); \
851 }
852 #endif
853
854 #define TESTATOPLANAR(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
855 DIFF) \
856 TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
857 benchmark_width_ - 4, DIFF, _Any, +, 0) \
858 TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
859 benchmark_width_, DIFF, _Unaligned, +, 1) \
860 TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
861 benchmark_width_, DIFF, _Invert, -, 0) \
862 TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
863 benchmark_width_, DIFF, _Opt, +, 0)
864
865 TESTATOPLANAR(ARGB, 4, 1, I420, 2, 2, 4)
866 #if defined(__arm__) || defined(__aarch64__)
867 // The arm version subsamples by summing 4 pixels, then multiplying by a matrix
868 // with 4x smaller coefficients that are rounded to the nearest integer.
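// That rounding differs slightly from the C path, so the J420/J422 cases
// allow a per-pixel difference of 4 on arm but require an exact match
// elsewhere.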
869 TESTATOPLANAR(ARGB, 4, 1, J420, 2, 2, 4)
870 TESTATOPLANAR(ARGB, 4, 1, J422, 2, 1, 4)
871 #else
872 TESTATOPLANAR(ARGB, 4, 1, J420, 2, 2, 0)
873 TESTATOPLANAR(ARGB, 4, 1, J422, 2, 1, 0)
874 #endif
875 TESTATOPLANAR(BGRA, 4, 1, I420, 2, 2, 4)
876 TESTATOPLANAR(ABGR, 4, 1, I420, 2, 2, 4)
877 TESTATOPLANAR(RGBA, 4, 1, I420, 2, 2, 4)
878 TESTATOPLANAR(RAW, 3, 1, I420, 2, 2, 4)
879 TESTATOPLANAR(RGB24, 3, 1, I420, 2, 2, 4)
880 TESTATOPLANAR(RGB565, 2, 1, I420, 2, 2, 5)
881 // TODO(fbarchard): Make the 1555 NEON path match the C code and reduce the diff to 9.
882 TESTATOPLANAR(ARGB1555, 2, 1, I420, 2, 2, 15)
883 TESTATOPLANAR(ARGB4444, 2, 1, I420, 2, 2, 17)
884 TESTATOPLANAR(ARGB, 4, 1, I422, 2, 1, 2)
885 TESTATOPLANAR(ARGB, 4, 1, I444, 1, 1, 2)
886 TESTATOPLANAR(YUY2, 2, 1, I420, 2, 2, 2)
887 TESTATOPLANAR(UYVY, 2, 1, I420, 2, 2, 2)
888 TESTATOPLANAR(YUY2, 2, 1, I422, 2, 1, 2)
889 TESTATOPLANAR(UYVY, 2, 1, I422, 2, 1, 2)
890 TESTATOPLANAR(I400, 1, 1, I420, 2, 2, 2)
891 TESTATOPLANAR(J400, 1, 1, J420, 2, 2, 2)
892
893 #define TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, \
894 SUBSAMP_Y, W1280, N, NEG, OFF) \
895 TEST_F(LibYUVConvertTest, FMT_A##To##FMT_PLANAR##N) { \
896 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
897 const int kHeight = benchmark_height_; \
898 const int kStride = SUBSAMPLE(kWidth, SUB_A) * BPP_A; \
899 const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
900 align_buffer_page_end(src_argb, kStride* kHeight + OFF); \
901 align_buffer_page_end(dst_y_c, kWidth* kHeight); \
902 align_buffer_page_end(dst_uv_c, \
903 kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
904 align_buffer_page_end(dst_y_opt, kWidth* kHeight); \
905 align_buffer_page_end(dst_uv_opt, \
906 kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
907 for (int i = 0; i < kHeight; ++i) \
908 for (int j = 0; j < kStride; ++j) \
909 src_argb[(i * kStride) + j + OFF] = (fastrand() & 0xff); \
910 memset(dst_y_c, 1, kWidth* kHeight); \
911 memset(dst_uv_c, 2, kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
912 memset(dst_y_opt, 101, kWidth* kHeight); \
913 memset(dst_uv_opt, 102, kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
914 MaskCpuFlags(disable_cpu_flags_); \
915 FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, dst_y_c, kWidth, dst_uv_c, \
916 kStrideUV * 2, kWidth, NEG kHeight); \
917 MaskCpuFlags(benchmark_cpu_info_); \
918 for (int i = 0; i < benchmark_iterations_; ++i) { \
919 FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, dst_y_opt, kWidth, \
920 dst_uv_opt, kStrideUV * 2, kWidth, NEG kHeight); \
921 } \
922 int max_diff = 0; \
923 for (int i = 0; i < kHeight; ++i) { \
924 for (int j = 0; j < kWidth; ++j) { \
925 int abs_diff = abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
926 static_cast<int>(dst_y_opt[i * kWidth + j])); \
927 if (abs_diff > max_diff) { \
928 max_diff = abs_diff; \
929 } \
930 } \
931 } \
932 EXPECT_LE(max_diff, 4); \
933 for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
934 for (int j = 0; j < kStrideUV * 2; ++j) { \
935 int abs_diff = \
936 abs(static_cast<int>(dst_uv_c[i * kStrideUV * 2 + j]) - \
937 static_cast<int>(dst_uv_opt[i * kStrideUV * 2 + j])); \
938 if (abs_diff > max_diff) { \
939 max_diff = abs_diff; \
940 } \
941 } \
942 } \
943 EXPECT_LE(max_diff, 4); \
944 free_aligned_buffer_page_end(dst_y_c); \
945 free_aligned_buffer_page_end(dst_uv_c); \
946 free_aligned_buffer_page_end(dst_y_opt); \
947 free_aligned_buffer_page_end(dst_uv_opt); \
948 free_aligned_buffer_page_end(src_argb); \
949 }
950
951 #define TESTATOBIPLANAR(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
952 TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
953 benchmark_width_ - 4, _Any, +, 0) \
954 TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
955 benchmark_width_, _Unaligned, +, 1) \
956 TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
957 benchmark_width_, _Invert, -, 0) \
958 TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
959 benchmark_width_, _Opt, +, 0)
960
961 TESTATOBIPLANAR(ARGB, 1, 4, NV12, 2, 2)
962 TESTATOBIPLANAR(ARGB, 1, 4, NV21, 2, 2)
963 TESTATOBIPLANAR(YUY2, 2, 4, NV12, 2, 2)
964 TESTATOBIPLANAR(UYVY, 2, 4, NV12, 2, 2)
965
966 #define TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \
967 HEIGHT_B, W1280, DIFF, N, NEG, OFF) \
968 TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##N) { \
969 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
970 const int kHeight = benchmark_height_; \
971 const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \
972 const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \
973 const int kStrideA = \
974 (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \
975 const int kStrideB = \
976 (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \
977 align_buffer_page_end(src_argb, kStrideA* kHeightA + OFF); \
978 align_buffer_page_end(dst_argb_c, kStrideB* kHeightB); \
979 align_buffer_page_end(dst_argb_opt, kStrideB* kHeightB); \
980 for (int i = 0; i < kStrideA * kHeightA; ++i) { \
981 src_argb[i + OFF] = (fastrand() & 0xff); \
982 } \
983 memset(dst_argb_c, 1, kStrideB* kHeightB); \
984 memset(dst_argb_opt, 101, kStrideB* kHeightB); \
985 MaskCpuFlags(disable_cpu_flags_); \
986 FMT_A##To##FMT_B(src_argb + OFF, kStrideA, dst_argb_c, kStrideB, kWidth, \
987 NEG kHeight); \
988 MaskCpuFlags(benchmark_cpu_info_); \
989 for (int i = 0; i < benchmark_iterations_; ++i) { \
990 FMT_A##To##FMT_B(src_argb + OFF, kStrideA, dst_argb_opt, kStrideB, \
991 kWidth, NEG kHeight); \
992 } \
993 int max_diff = 0; \
994 for (int i = 0; i < kStrideB * kHeightB; ++i) { \
995 int abs_diff = abs(static_cast<int>(dst_argb_c[i]) - \
996 static_cast<int>(dst_argb_opt[i])); \
997 if (abs_diff > max_diff) { \
998 max_diff = abs_diff; \
999 } \
1000 } \
1001 EXPECT_LE(max_diff, DIFF); \
1002 free_aligned_buffer_page_end(src_argb); \
1003 free_aligned_buffer_page_end(dst_argb_c); \
1004 free_aligned_buffer_page_end(dst_argb_opt); \
1005 }
1006
1007 #define TESTATOBRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, \
1008 STRIDE_B, HEIGHT_B, DIFF) \
1009 TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##_Random) { \
1010 for (int times = 0; times < benchmark_iterations_; ++times) { \
1011 const int kWidth = (fastrand() & 63) + 1; \
1012 const int kHeight = (fastrand() & 31) + 1; \
1013 const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \
1014 const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \
1015 const int kStrideA = \
1016 (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \
1017 const int kStrideB = \
1018 (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \
1019 align_buffer_page_end(src_argb, kStrideA* kHeightA); \
1020 align_buffer_page_end(dst_argb_c, kStrideB* kHeightB); \
1021 align_buffer_page_end(dst_argb_opt, kStrideB* kHeightB); \
1022 for (int i = 0; i < kStrideA * kHeightA; ++i) { \
1023 src_argb[i] = (fastrand() & 0xff); \
1024 } \
1025 memset(dst_argb_c, 123, kStrideB* kHeightB); \
1026 memset(dst_argb_opt, 123, kStrideB* kHeightB); \
1027 MaskCpuFlags(disable_cpu_flags_); \
1028 FMT_A##To##FMT_B(src_argb, kStrideA, dst_argb_c, kStrideB, kWidth, \
1029 kHeight); \
1030 MaskCpuFlags(benchmark_cpu_info_); \
1031 FMT_A##To##FMT_B(src_argb, kStrideA, dst_argb_opt, kStrideB, kWidth, \
1032 kHeight); \
1033 int max_diff = 0; \
1034 for (int i = 0; i < kStrideB * kHeightB; ++i) { \
1035 int abs_diff = abs(static_cast<int>(dst_argb_c[i]) - \
1036 static_cast<int>(dst_argb_opt[i])); \
1037 if (abs_diff > max_diff) { \
1038 max_diff = abs_diff; \
1039 } \
1040 } \
1041 EXPECT_LE(max_diff, DIFF); \
1042 free_aligned_buffer_page_end(src_argb); \
1043 free_aligned_buffer_page_end(dst_argb_c); \
1044 free_aligned_buffer_page_end(dst_argb_opt); \
1045 } \
1046 }
1047
1048 #define TESTATOB(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \
1049 HEIGHT_B, DIFF) \
1050 TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \
1051 HEIGHT_B, benchmark_width_ - 4, DIFF, _Any, +, 0) \
1052 TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \
1053 HEIGHT_B, benchmark_width_, DIFF, _Unaligned, +, 1) \
1054 TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \
1055 HEIGHT_B, benchmark_width_, DIFF, _Invert, -, 0) \
1056 TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \
1057 HEIGHT_B, benchmark_width_, DIFF, _Opt, +, 0) \
1058 TESTATOBRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \
1059 HEIGHT_B, DIFF)
1060
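// Each TESTATOB expansion adds the usual _Any/_Unaligned/_Invert/_Opt cases
// plus a _Random case that repeats the conversion at random small sizes.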
1061 TESTATOB(ARGB, 4, 4, 1, ARGB, 4, 4, 1, 0)
1062 TESTATOB(ARGB, 4, 4, 1, BGRA, 4, 4, 1, 0)
1063 TESTATOB(ARGB, 4, 4, 1, ABGR, 4, 4, 1, 0)
1064 TESTATOB(ARGB, 4, 4, 1, RGBA, 4, 4, 1, 0)
1065 TESTATOB(ARGB, 4, 4, 1, RAW, 3, 3, 1, 0)
1066 TESTATOB(ARGB, 4, 4, 1, RGB24, 3, 3, 1, 0)
1067 TESTATOB(ARGB, 4, 4, 1, RGB565, 2, 2, 1, 0)
1068 TESTATOB(ARGB, 4, 4, 1, ARGB1555, 2, 2, 1, 0)
1069 TESTATOB(ARGB, 4, 4, 1, ARGB4444, 2, 2, 1, 0)
1070 TESTATOB(ARGB, 4, 4, 1, YUY2, 2, 4, 1, 4)
1071 TESTATOB(ARGB, 4, 4, 1, UYVY, 2, 4, 1, 4)
1072 TESTATOB(ARGB, 4, 4, 1, I400, 1, 1, 1, 2)
1073 TESTATOB(ARGB, 4, 4, 1, J400, 1, 1, 1, 2)
1074 TESTATOB(BGRA, 4, 4, 1, ARGB, 4, 4, 1, 0)
1075 TESTATOB(ABGR, 4, 4, 1, ARGB, 4, 4, 1, 0)
1076 TESTATOB(RGBA, 4, 4, 1, ARGB, 4, 4, 1, 0)
1077 TESTATOB(RAW, 3, 3, 1, ARGB, 4, 4, 1, 0)
1078 TESTATOB(RAW, 3, 3, 1, RGB24, 3, 3, 1, 0)
1079 TESTATOB(RGB24, 3, 3, 1, ARGB, 4, 4, 1, 0)
1080 TESTATOB(RGB565, 2, 2, 1, ARGB, 4, 4, 1, 0)
1081 TESTATOB(ARGB1555, 2, 2, 1, ARGB, 4, 4, 1, 0)
1082 TESTATOB(ARGB4444, 2, 2, 1, ARGB, 4, 4, 1, 0)
1083 TESTATOB(YUY2, 2, 4, 1, ARGB, 4, 4, 1, 4)
1084 TESTATOB(UYVY, 2, 4, 1, ARGB, 4, 4, 1, 4)
1085 TESTATOB(YUY2, 2, 4, 1, Y, 1, 1, 1, 0)
1086 TESTATOB(I400, 1, 1, 1, ARGB, 4, 4, 1, 0)
1087 TESTATOB(J400, 1, 1, 1, ARGB, 4, 4, 1, 0)
1088 TESTATOB(I400, 1, 1, 1, I400, 1, 1, 1, 0)
1089 TESTATOB(J400, 1, 1, 1, J400, 1, 1, 1, 0)
1090 TESTATOB(I400, 1, 1, 1, I400Mirror, 1, 1, 1, 0)
1091 TESTATOB(ARGB, 4, 4, 1, ARGBMirror, 4, 4, 1, 0)
1092
1093 #define TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \
1094 HEIGHT_B, W1280, DIFF, N, NEG, OFF) \
1095 TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##Dither##N) { \
1096 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
1097 const int kHeight = benchmark_height_; \
1098 const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \
1099 const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \
1100 const int kStrideA = \
1101 (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \
1102 const int kStrideB = \
1103 (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \
1104 align_buffer_page_end(src_argb, kStrideA* kHeightA + OFF); \
1105 align_buffer_page_end(dst_argb_c, kStrideB* kHeightB); \
1106 align_buffer_page_end(dst_argb_opt, kStrideB* kHeightB); \
1107 for (int i = 0; i < kStrideA * kHeightA; ++i) { \
1108 src_argb[i + OFF] = (fastrand() & 0xff); \
1109 } \
1110 memset(dst_argb_c, 1, kStrideB* kHeightB); \
1111 memset(dst_argb_opt, 101, kStrideB* kHeightB); \
1112 MaskCpuFlags(disable_cpu_flags_); \
1113 FMT_A##To##FMT_B##Dither(src_argb + OFF, kStrideA, dst_argb_c, kStrideB, \
1114 NULL, kWidth, NEG kHeight); \
1115 MaskCpuFlags(benchmark_cpu_info_); \
1116 for (int i = 0; i < benchmark_iterations_; ++i) { \
1117 FMT_A##To##FMT_B##Dither(src_argb + OFF, kStrideA, dst_argb_opt, \
1118 kStrideB, NULL, kWidth, NEG kHeight); \
1119 } \
1120 int max_diff = 0; \
1121 for (int i = 0; i < kStrideB * kHeightB; ++i) { \
1122 int abs_diff = abs(static_cast<int>(dst_argb_c[i]) - \
1123 static_cast<int>(dst_argb_opt[i])); \
1124 if (abs_diff > max_diff) { \
1125 max_diff = abs_diff; \
1126 } \
1127 } \
1128 EXPECT_LE(max_diff, DIFF); \
1129 free_aligned_buffer_page_end(src_argb); \
1130 free_aligned_buffer_page_end(dst_argb_c); \
1131 free_aligned_buffer_page_end(dst_argb_opt); \
1132 }
1133
1134 #define TESTATOBDRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, \
1135 STRIDE_B, HEIGHT_B, DIFF) \
1136 TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##Dither_Random) { \
1137 for (int times = 0; times < benchmark_iterations_; ++times) { \
1138 const int kWidth = (fastrand() & 63) + 1; \
1139 const int kHeight = (fastrand() & 31) + 1; \
1140 const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \
1141 const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \
1142 const int kStrideA = \
1143 (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \
1144 const int kStrideB = \
1145 (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \
1146 align_buffer_page_end(src_argb, kStrideA* kHeightA); \
1147 align_buffer_page_end(dst_argb_c, kStrideB* kHeightB); \
1148 align_buffer_page_end(dst_argb_opt, kStrideB* kHeightB); \
1149 for (int i = 0; i < kStrideA * kHeightA; ++i) { \
1150 src_argb[i] = (fastrand() & 0xff); \
1151 } \
1152 memset(dst_argb_c, 123, kStrideB* kHeightB); \
1153 memset(dst_argb_opt, 123, kStrideB* kHeightB); \
1154 MaskCpuFlags(disable_cpu_flags_); \
1155 FMT_A##To##FMT_B##Dither(src_argb, kStrideA, dst_argb_c, kStrideB, NULL, \
1156 kWidth, kHeight); \
1157 MaskCpuFlags(benchmark_cpu_info_); \
1158 FMT_A##To##FMT_B##Dither(src_argb, kStrideA, dst_argb_opt, kStrideB, \
1159 NULL, kWidth, kHeight); \
1160 int max_diff = 0; \
1161 for (int i = 0; i < kStrideB * kHeightB; ++i) { \
1162 int abs_diff = abs(static_cast<int>(dst_argb_c[i]) - \
1163 static_cast<int>(dst_argb_opt[i])); \
1164 if (abs_diff > max_diff) { \
1165 max_diff = abs_diff; \
1166 } \
1167 } \
1168 EXPECT_LE(max_diff, DIFF); \
1169 free_aligned_buffer_page_end(src_argb); \
1170 free_aligned_buffer_page_end(dst_argb_c); \
1171 free_aligned_buffer_page_end(dst_argb_opt); \
1172 } \
1173 }
1174
1175 #define TESTATOBD(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \
1176 HEIGHT_B, DIFF) \
1177 TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \
1178 HEIGHT_B, benchmark_width_ - 4, DIFF, _Any, +, 0) \
1179 TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \
1180 HEIGHT_B, benchmark_width_, DIFF, _Unaligned, +, 1) \
1181 TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \
1182 HEIGHT_B, benchmark_width_, DIFF, _Invert, -, 0) \
1183 TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \
1184 HEIGHT_B, benchmark_width_, DIFF, _Opt, +, 0) \
1185 TESTATOBDRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \
1186 HEIGHT_B, DIFF)
1187
1188 TESTATOBD(ARGB, 4, 4, 1, RGB565, 2, 2, 1, 0)
1189
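// Symmetric conversions such as ARGB <-> ABGR are their own inverse: applying
// the same conversion twice should reproduce the original pixels exactly,
// which is what the second in-place pass below checks.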
1190 #define TESTSYMI(FMT_ATOB, BPP_A, STRIDE_A, HEIGHT_A, W1280, N, NEG, OFF) \
1191 TEST_F(LibYUVConvertTest, FMT_ATOB##_Symetric##N) { \
1192 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
1193 const int kHeight = benchmark_height_; \
1194 const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \
1195 const int kStrideA = \
1196 (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \
1197 align_buffer_page_end(src_argb, kStrideA* kHeightA + OFF); \
1198 align_buffer_page_end(dst_argb_c, kStrideA* kHeightA); \
1199 align_buffer_page_end(dst_argb_opt, kStrideA* kHeightA); \
1200 for (int i = 0; i < kStrideA * kHeightA; ++i) { \
1201 src_argb[i + OFF] = (fastrand() & 0xff); \
1202 } \
1203 memset(dst_argb_c, 1, kStrideA* kHeightA); \
1204 memset(dst_argb_opt, 101, kStrideA* kHeightA); \
1205 MaskCpuFlags(disable_cpu_flags_); \
1206 FMT_ATOB(src_argb + OFF, kStrideA, dst_argb_c, kStrideA, kWidth, \
1207 NEG kHeight); \
1208 MaskCpuFlags(benchmark_cpu_info_); \
1209 for (int i = 0; i < benchmark_iterations_; ++i) { \
1210 FMT_ATOB(src_argb + OFF, kStrideA, dst_argb_opt, kStrideA, kWidth, \
1211 NEG kHeight); \
1212 } \
1213 MaskCpuFlags(disable_cpu_flags_); \
1214 FMT_ATOB(dst_argb_c, kStrideA, dst_argb_c, kStrideA, kWidth, NEG kHeight); \
1215 MaskCpuFlags(benchmark_cpu_info_); \
1216 FMT_ATOB(dst_argb_opt, kStrideA, dst_argb_opt, kStrideA, kWidth, \
1217 NEG kHeight); \
1218 for (int i = 0; i < kStrideA * kHeightA; ++i) { \
1219 EXPECT_EQ(src_argb[i + OFF], dst_argb_opt[i]); \
1220 EXPECT_EQ(dst_argb_c[i], dst_argb_opt[i]); \
1221 } \
1222 free_aligned_buffer_page_end(src_argb); \
1223 free_aligned_buffer_page_end(dst_argb_c); \
1224 free_aligned_buffer_page_end(dst_argb_opt); \
1225 }
1226
1227 #define TESTSYM(FMT_ATOB, BPP_A, STRIDE_A, HEIGHT_A) \
1228 TESTSYMI(FMT_ATOB, BPP_A, STRIDE_A, HEIGHT_A, benchmark_width_ - 4, _Any, +, \
1229 0) \
1230 TESTSYMI(FMT_ATOB, BPP_A, STRIDE_A, HEIGHT_A, benchmark_width_, _Unaligned, \
1231 +, 1) \
1232 TESTSYMI(FMT_ATOB, BPP_A, STRIDE_A, HEIGHT_A, benchmark_width_, _Opt, +, 0)
1233
1234 TESTSYM(ARGBToARGB, 4, 4, 1)
1235 TESTSYM(ARGBToBGRA, 4, 4, 1)
1236 TESTSYM(ARGBToABGR, 4, 4, 1)
1237 TESTSYM(BGRAToARGB, 4, 4, 1)
1238 TESTSYM(ABGRToARGB, 4, 4, 1)
1239
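// Spot-check RGB565 packing: a 256-entry gray ramp is converted once and the
// output compared against a known HashDjb2 checksum.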
1240 TEST_F(LibYUVConvertTest, Test565) {
1241 SIMD_ALIGNED(uint8 orig_pixels[256][4]);
1242 SIMD_ALIGNED(uint8 pixels565[256][2]);
1243
1244 for (int i = 0; i < 256; ++i) {
1245 for (int j = 0; j < 4; ++j) {
1246 orig_pixels[i][j] = i;
1247 }
1248 }
1249 ARGBToRGB565(&orig_pixels[0][0], 0, &pixels565[0][0], 0, 256, 1);
1250 uint32 checksum = HashDjb2(&pixels565[0][0], sizeof(pixels565), 5381);
1251 EXPECT_EQ(610919429u, checksum);
1252 }
1253
1254 #ifdef HAVE_JPEG
1255 TEST_F(LibYUVConvertTest, ValidateJpeg) {
1256 const int kOff = 10;
1257 const int kMinJpeg = 64;
1258 const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg
1259 ? benchmark_width_ * benchmark_height_
1260 : kMinJpeg;
1261 const int kSize = kImageSize + kOff;
1262 align_buffer_page_end(orig_pixels, kSize);
1263
1264 // No SOI or EOI. Expect fail.
1265 memset(orig_pixels, 0, kSize);
1266 EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));
1267
1268 // Test special value that matches marker start.
1269 memset(orig_pixels, 0xff, kSize);
1270 EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));
1271
1272 // EOI, SOI. Expect pass.
1273 orig_pixels[0] = 0xff;
1274 orig_pixels[1] = 0xd8; // SOI.
1275 orig_pixels[kSize - kOff + 0] = 0xff;
1276 orig_pixels[kSize - kOff + 1] = 0xd9; // EOI.
1277 for (int times = 0; times < benchmark_iterations_; ++times) {
1278 EXPECT_TRUE(ValidateJpeg(orig_pixels, kSize));
1279 }
1280 free_aligned_buffer_page_end(orig_pixels);
1281 }
1282
1283 TEST_F(LibYUVConvertTest, ValidateJpegLarge) {
1284 const int kOff = 10;
1285 const int kMinJpeg = 64;
1286 const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg
1287 ? benchmark_width_ * benchmark_height_
1288 : kMinJpeg;
1289 const int kSize = kImageSize + kOff;
1290 const int kMultiple = 10;
1291 const int kBufSize = kImageSize * kMultiple + kOff;
1292 align_buffer_page_end(orig_pixels, kBufSize);
1293
1294 // No SOI or EOI. Expect fail.
1295 memset(orig_pixels, 0, kBufSize);
1296 EXPECT_FALSE(ValidateJpeg(orig_pixels, kBufSize));
1297
1298 // EOI, SOI. Expect pass.
1299 orig_pixels[0] = 0xff;
1300 orig_pixels[1] = 0xd8; // SOI.
1301 orig_pixels[kSize - kOff + 0] = 0xff;
1302 orig_pixels[kSize - kOff + 1] = 0xd9; // EOI.
1303 for (int times = 0; times < benchmark_iterations_; ++times) {
1304 EXPECT_TRUE(ValidateJpeg(orig_pixels, kBufSize));
1305 }
1306 free_aligned_buffer_page_end(orig_pixels);
1307 }
1308
1309 TEST_F(LibYUVConvertTest, InvalidateJpeg) {
1310 const int kOff = 10;
1311 const int kMinJpeg = 64;
1312 const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg
1313 ? benchmark_width_ * benchmark_height_
1314 : kMinJpeg;
1315 const int kSize = kImageSize + kOff;
1316 align_buffer_page_end(orig_pixels, kSize);
1317
1318 // NULL pointer. Expect fail.
1319 EXPECT_FALSE(ValidateJpeg(NULL, kSize));
1320
1321 // Negative size. Expect fail.
1322 EXPECT_FALSE(ValidateJpeg(orig_pixels, -1));
1323
1324 // Too large size. Expect fail.
1325 EXPECT_FALSE(ValidateJpeg(orig_pixels, 0xfb000000ull));
1326
1327 // No SOI or EOI. Expect fail.
1328 memset(orig_pixels, 0, kSize);
1329 EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));
1330
1331 // SOI but no EOI. Expect fail.
1332 orig_pixels[0] = 0xff;
1333 orig_pixels[1] = 0xd8; // SOI.
1334 for (int times = 0; times < benchmark_iterations_; ++times) {
1335 EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));
1336 }
1337
1338 // EOI but no SOI. Expect fail.
1339 orig_pixels[0] = 0;
1340 orig_pixels[1] = 0;
1341 orig_pixels[kSize - kOff + 0] = 0xff;
1342 orig_pixels[kSize - kOff + 1] = 0xd9; // EOI.
1343 EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));
1344
1345 free_aligned_buffer_page_end(orig_pixels);
1346 }
1347
1348 TEST_F(LibYUVConvertTest, FuzzJpeg) {
1349 // SOI but no EOI. Expect fail.
1350 for (int times = 0; times < benchmark_iterations_; ++times) {
1351 const int kSize = fastrand() % 5000 + 2;
1352 align_buffer_page_end(orig_pixels, kSize);
1353 MemRandomize(orig_pixels, kSize);
1354
1355 // Add SOI so frame will be scanned.
1356 orig_pixels[0] = 0xff;
1357 orig_pixels[1] = 0xd8; // SOI.
1358 orig_pixels[kSize - 1] = 0xff;
1359 ValidateJpeg(orig_pixels, kSize); // Failure normally expected.
1360 free_aligned_buffer_page_end(orig_pixels);
1361 }
1362 }
1363
1364 TEST_F(LibYUVConvertTest, MJPGToI420) {
1365 const int kOff = 10;
1366 const int kMinJpeg = 64;
1367 const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg
1368 ? benchmark_width_ * benchmark_height_
1369 : kMinJpeg;
1370 const int kSize = kImageSize + kOff;
1371 align_buffer_page_end(orig_pixels, kSize);
1372 align_buffer_page_end(dst_y_opt, benchmark_width_ * benchmark_height_);
1373 align_buffer_page_end(dst_u_opt, SUBSAMPLE(benchmark_width_, 2) *
1374 SUBSAMPLE(benchmark_height_, 2));
1375 align_buffer_page_end(dst_v_opt, SUBSAMPLE(benchmark_width_, 2) *
1376 SUBSAMPLE(benchmark_height_, 2));
1377
1378 // EOI, SOI to make MJPG appear valid.
1379 memset(orig_pixels, 0, kSize);
1380 orig_pixels[0] = 0xff;
1381 orig_pixels[1] = 0xd8; // SOI.
1382 orig_pixels[kSize - kOff + 0] = 0xff;
1383 orig_pixels[kSize - kOff + 1] = 0xd9; // EOI.
1384
1385 for (int times = 0; times < benchmark_iterations_; ++times) {
1386 int ret =
1387 MJPGToI420(orig_pixels, kSize, dst_y_opt, benchmark_width_, dst_u_opt,
1388 SUBSAMPLE(benchmark_width_, 2), dst_v_opt,
1389 SUBSAMPLE(benchmark_width_, 2), benchmark_width_,
1390 benchmark_height_, benchmark_width_, benchmark_height_);
1391 // Expect failure because image is not really valid.
1392 EXPECT_EQ(1, ret);
1393 }
1394
1395 free_aligned_buffer_page_end(dst_y_opt);
1396 free_aligned_buffer_page_end(dst_u_opt);
1397 free_aligned_buffer_page_end(dst_v_opt);
1398 free_aligned_buffer_page_end(orig_pixels);
1399 }
1400
1401 TEST_F(LibYUVConvertTest, MJPGToARGB) {
1402 const int kOff = 10;
1403 const int kMinJpeg = 64;
1404 const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg
1405 ? benchmark_width_ * benchmark_height_
1406 : kMinJpeg;
1407 const int kSize = kImageSize + kOff;
1408 align_buffer_page_end(orig_pixels, kSize);
1409 align_buffer_page_end(dst_argb_opt, benchmark_width_ * benchmark_height_ * 4);
1410
1411 // EOI, SOI to make MJPG appear valid.
1412 memset(orig_pixels, 0, kSize);
1413 orig_pixels[0] = 0xff;
1414 orig_pixels[1] = 0xd8; // SOI.
1415 orig_pixels[kSize - kOff + 0] = 0xff;
1416 orig_pixels[kSize - kOff + 1] = 0xd9; // EOI.
1417
1418 for (int times = 0; times < benchmark_iterations_; ++times) {
1419 int ret = MJPGToARGB(orig_pixels, kSize, dst_argb_opt, benchmark_width_ * 4,
1420 benchmark_width_, benchmark_height_, benchmark_width_,
1421 benchmark_height_);
1422 // Expect failure because image is not really valid.
1423 EXPECT_EQ(1, ret);
1424 }
1425
1426 free_aligned_buffer_page_end(dst_argb_opt);
1427 free_aligned_buffer_page_end(orig_pixels);
1428 }
1429
1430 #endif // HAVE_JPEG
1431
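// Crop the top and bottom of an NV12 frame via ConvertToI420 and verify the result matches
// NV12ToI420 called on manually offset plane pointers.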
1432 TEST_F(LibYUVConvertTest, NV12Crop) {
1433 const int SUBSAMP_X = 2;
1434 const int SUBSAMP_Y = 2;
1435 const int kWidth = benchmark_width_;
1436 const int kHeight = benchmark_height_;
1437 const int crop_y =
1438 ((benchmark_height_ - (benchmark_height_ * 360 / 480)) / 2 + 1) & ~1;
1439 const int kDestWidth = benchmark_width_;
1440 const int kDestHeight = benchmark_height_ - crop_y * 2;
1441 const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);
1442 const int sample_size =
1443 kWidth * kHeight + kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y) * 2;
1444 align_buffer_page_end(src_y, sample_size);
1445 uint8* src_uv = src_y + kWidth * kHeight;
1446
1447 align_buffer_page_end(dst_y, kDestWidth * kDestHeight);
1448 align_buffer_page_end(dst_u, SUBSAMPLE(kDestWidth, SUBSAMP_X) *
1449 SUBSAMPLE(kDestHeight, SUBSAMP_Y));
1450 align_buffer_page_end(dst_v, SUBSAMPLE(kDestWidth, SUBSAMP_X) *
1451 SUBSAMPLE(kDestHeight, SUBSAMP_Y));
1452
1453 align_buffer_page_end(dst_y_2, kDestWidth * kDestHeight);
1454 align_buffer_page_end(dst_u_2, SUBSAMPLE(kDestWidth, SUBSAMP_X) *
1455 SUBSAMPLE(kDestHeight, SUBSAMP_Y));
1456 align_buffer_page_end(dst_v_2, SUBSAMPLE(kDestWidth, SUBSAMP_X) *
1457 SUBSAMPLE(kDestHeight, SUBSAMP_Y));
1458
1459 for (int i = 0; i < kHeight * kWidth; ++i) {
1460 src_y[i] = (fastrand() & 0xff);
1461 }
1462 for (int i = 0; i < (SUBSAMPLE(kHeight, SUBSAMP_Y) * kStrideUV) * 2; ++i) {
1463 src_uv[i] = (fastrand() & 0xff);
1464 }
1465 memset(dst_y, 1, kDestWidth * kDestHeight);
1466 memset(dst_u, 2,
1467 SUBSAMPLE(kDestWidth, SUBSAMP_X) * SUBSAMPLE(kDestHeight, SUBSAMP_Y));
1468 memset(dst_v, 3,
1469 SUBSAMPLE(kDestWidth, SUBSAMP_X) * SUBSAMPLE(kDestHeight, SUBSAMP_Y));
1470 memset(dst_y_2, 1, kDestWidth * kDestHeight);
1471 memset(dst_u_2, 2,
1472 SUBSAMPLE(kDestWidth, SUBSAMP_X) * SUBSAMPLE(kDestHeight, SUBSAMP_Y));
1473 memset(dst_v_2, 3,
1474 SUBSAMPLE(kDestWidth, SUBSAMP_X) * SUBSAMPLE(kDestHeight, SUBSAMP_Y));
1475
1476 ConvertToI420(src_y, sample_size, dst_y_2, kDestWidth, dst_u_2,
1477 SUBSAMPLE(kDestWidth, SUBSAMP_X), dst_v_2,
1478 SUBSAMPLE(kDestWidth, SUBSAMP_X), 0, crop_y, kWidth, kHeight,
1479 kDestWidth, kDestHeight, libyuv::kRotate0, libyuv::FOURCC_NV12);
1480
1481 NV12ToI420(src_y + crop_y * kWidth, kWidth,
1482 src_uv + (crop_y / 2) * kStrideUV * 2, kStrideUV * 2, dst_y,
1483 kDestWidth, dst_u, SUBSAMPLE(kDestWidth, SUBSAMP_X), dst_v,
1484 SUBSAMPLE(kDestWidth, SUBSAMP_X), kDestWidth, kDestHeight);
1485
1486 for (int i = 0; i < kDestHeight; ++i) {
1487 for (int j = 0; j < kDestWidth; ++j) {
1488 EXPECT_EQ(dst_y[i * kDestWidth + j], dst_y_2[i * kDestWidth + j]);
1489 }
1490 }
1491 for (int i = 0; i < SUBSAMPLE(kDestHeight, SUBSAMP_Y); ++i) {
1492 for (int j = 0; j < SUBSAMPLE(kDestWidth, SUBSAMP_X); ++j) {
1493 EXPECT_EQ(dst_u[i * SUBSAMPLE(kDestWidth, SUBSAMP_X) + j],
1494 dst_u_2[i * SUBSAMPLE(kDestWidth, SUBSAMP_X) + j]);
1495 }
1496 }
1497 for (int i = 0; i < SUBSAMPLE(kDestHeight, SUBSAMP_Y); ++i) {
1498 for (int j = 0; j < SUBSAMPLE(kDestWidth, SUBSAMP_X); ++j) {
1499 EXPECT_EQ(dst_v[i * SUBSAMPLE(kDestWidth, SUBSAMP_X) + j],
1500 dst_v_2[i * SUBSAMPLE(kDestWidth, SUBSAMP_X) + j]);
1501 }
1502 }
1503 free_aligned_buffer_page_end(dst_y);
1504 free_aligned_buffer_page_end(dst_u);
1505 free_aligned_buffer_page_end(dst_v);
1506 free_aligned_buffer_page_end(dst_y_2);
1507 free_aligned_buffer_page_end(dst_u_2);
1508 free_aligned_buffer_page_end(dst_v_2);
1509 free_aligned_buffer_page_end(src_y);
1510 }
1511
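// YToARGB should expand luma to gray ARGB: each color channel equals (Y - 16) * 1.164, rounded.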
1512 TEST_F(LibYUVConvertTest, TestYToARGB) {
1513 uint8 y[32];
1514 uint8 expectedg[32];
1515 for (int i = 0; i < 32; ++i) {
1516 y[i] = i * 5 + 17;
1517 expectedg[i] = static_cast<int>((y[i] - 16) * 1.164f + 0.5f);
1518 }
1519 uint8 argb[32 * 4];
1520 YToARGB(y, 0, argb, 0, 32, 1);
1521
1522 for (int i = 0; i < 32; ++i) {
1523 printf("%2d %d: %d <-> %d,%d,%d,%d\n", i, y[i], expectedg[i],
1524 argb[i * 4 + 0], argb[i * 4 + 1], argb[i * 4 + 2], argb[i * 4 + 3]);
1525 }
1526 for (int i = 0; i < 32; ++i) {
1527 EXPECT_EQ(expectedg[i], argb[i * 4 + 0]);
1528 }
1529 }
1530
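// An all-zero dither table; dithered output should match the undithered conversion exactly.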
1531 static const uint8 kNoDither4x4[16] = {
1532 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1533 };
1534
1535 TEST_F(LibYUVConvertTest, TestNoDither) {
1536 align_buffer_page_end(src_argb, benchmark_width_ * benchmark_height_ * 4);
1537 align_buffer_page_end(dst_rgb565, benchmark_width_ * benchmark_height_ * 2);
1538 align_buffer_page_end(dst_rgb565dither,
1539 benchmark_width_ * benchmark_height_ * 2);
1540 MemRandomize(src_argb, benchmark_width_ * benchmark_height_ * 4);
1541 MemRandomize(dst_rgb565, benchmark_width_ * benchmark_height_ * 2);
1542 MemRandomize(dst_rgb565dither, benchmark_width_ * benchmark_height_ * 2);
1543 ARGBToRGB565(src_argb, benchmark_width_ * 4, dst_rgb565, benchmark_width_ * 2,
1544 benchmark_width_, benchmark_height_);
1545 ARGBToRGB565Dither(src_argb, benchmark_width_ * 4, dst_rgb565dither,
1546 benchmark_width_ * 2, kNoDither4x4, benchmark_width_,
1547 benchmark_height_);
1548 for (int i = 0; i < benchmark_width_ * benchmark_height_ * 2; ++i) {
1549 EXPECT_EQ(dst_rgb565[i], dst_rgb565dither[i]);
1550 }
1551
1552 free_aligned_buffer_page_end(src_argb);
1553 free_aligned_buffer_page_end(dst_rgb565);
1554 free_aligned_buffer_page_end(dst_rgb565dither);
1555 }
1556
1557 // Ordered 4x4 dither for 888 to 565. Values from 0 to 7.
1558 static const uint8 kDither565_4x4[16] = {
1559 0, 4, 1, 5, 6, 2, 7, 3, 1, 5, 0, 4, 7, 3, 6, 2,
1560 };
1561
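// Dither values of 0 to 7 shift RGB565 rounding; after expanding both results back to ARGB the
// per-channel difference is expected to stay within 9.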
1562 TEST_F(LibYUVConvertTest, TestDither) {
1563 align_buffer_page_end(src_argb, benchmark_width_ * benchmark_height_ * 4);
1564 align_buffer_page_end(dst_rgb565, benchmark_width_ * benchmark_height_ * 2);
1565 align_buffer_page_end(dst_rgb565dither,
1566 benchmark_width_ * benchmark_height_ * 2);
1567 align_buffer_page_end(dst_argb, benchmark_width_ * benchmark_height_ * 4);
1568 align_buffer_page_end(dst_argbdither,
1569 benchmark_width_ * benchmark_height_ * 4);
1570 MemRandomize(src_argb, benchmark_width_ * benchmark_height_ * 4);
1571 MemRandomize(dst_rgb565, benchmark_width_ * benchmark_height_ * 2);
1572 MemRandomize(dst_rgb565dither, benchmark_width_ * benchmark_height_ * 2);
1573 MemRandomize(dst_argb, benchmark_width_ * benchmark_height_ * 4);
1574 MemRandomize(dst_argbdither, benchmark_width_ * benchmark_height_ * 4);
1575 ARGBToRGB565(src_argb, benchmark_width_ * 4, dst_rgb565, benchmark_width_ * 2,
1576 benchmark_width_, benchmark_height_);
1577 ARGBToRGB565Dither(src_argb, benchmark_width_ * 4, dst_rgb565dither,
1578 benchmark_width_ * 2, kDither565_4x4, benchmark_width_,
1579 benchmark_height_);
1580 RGB565ToARGB(dst_rgb565, benchmark_width_ * 2, dst_argb, benchmark_width_ * 4,
1581 benchmark_width_, benchmark_height_);
1582 RGB565ToARGB(dst_rgb565dither, benchmark_width_ * 2, dst_argbdither,
1583 benchmark_width_ * 4, benchmark_width_, benchmark_height_);
1584
1585 for (int i = 0; i < benchmark_width_ * benchmark_height_ * 4; ++i) {
1586 EXPECT_NEAR(dst_argb[i], dst_argbdither[i], 9);
1587 }
1588 free_aligned_buffer_page_end(src_argb);
1589 free_aligned_buffer_page_end(dst_rgb565);
1590 free_aligned_buffer_page_end(dst_rgb565dither);
1591 free_aligned_buffer_page_end(dst_argb);
1592 free_aligned_buffer_page_end(dst_argbdither);
1593 }
1594
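// Convert planar YUV to RGB565 with dithering (NULL table) on the C and optimized paths, expand
// both to FMT_C, and require the maximum per-byte difference to be within DIFF.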
1595 #define TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
1596 YALIGN, W1280, DIFF, N, NEG, OFF, FMT_C, BPP_C) \
1597 TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##Dither##N) { \
1598 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
1599 const int kHeight = ALIGNINT(benchmark_height_, YALIGN); \
1600 const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN); \
1601 const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
1602 const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y); \
1603 align_buffer_page_end(src_y, kWidth* kHeight + OFF); \
1604 align_buffer_page_end(src_u, kSizeUV + OFF); \
1605 align_buffer_page_end(src_v, kSizeUV + OFF); \
1606 align_buffer_page_end(dst_argb_c, kStrideB* kHeight + OFF); \
1607 align_buffer_page_end(dst_argb_opt, kStrideB* kHeight + OFF); \
1608 for (int i = 0; i < kWidth * kHeight; ++i) { \
1609 src_y[i + OFF] = (fastrand() & 0xff); \
1610 } \
1611 for (int i = 0; i < kSizeUV; ++i) { \
1612 src_u[i + OFF] = (fastrand() & 0xff); \
1613 src_v[i + OFF] = (fastrand() & 0xff); \
1614 } \
1615 memset(dst_argb_c + OFF, 1, kStrideB * kHeight); \
1616 memset(dst_argb_opt + OFF, 101, kStrideB * kHeight); \
1617 MaskCpuFlags(disable_cpu_flags_); \
1618 FMT_PLANAR##To##FMT_B##Dither(src_y + OFF, kWidth, src_u + OFF, kStrideUV, \
1619 src_v + OFF, kStrideUV, dst_argb_c + OFF, \
1620 kStrideB, NULL, kWidth, NEG kHeight); \
1621 MaskCpuFlags(benchmark_cpu_info_); \
1622 for (int i = 0; i < benchmark_iterations_; ++i) { \
1623 FMT_PLANAR##To##FMT_B##Dither( \
1624 src_y + OFF, kWidth, src_u + OFF, kStrideUV, src_v + OFF, kStrideUV, \
1625 dst_argb_opt + OFF, kStrideB, NULL, kWidth, NEG kHeight); \
1626 } \
1627 int max_diff = 0; \
1628 /* Convert to ARGB so 565 is expanded to bytes that can be compared. */ \
1629 align_buffer_page_end(dst_argb32_c, kWidth* BPP_C* kHeight); \
1630 align_buffer_page_end(dst_argb32_opt, kWidth* BPP_C* kHeight); \
1631 memset(dst_argb32_c, 2, kWidth* BPP_C* kHeight); \
1632 memset(dst_argb32_opt, 102, kWidth* BPP_C* kHeight); \
1633 FMT_B##To##FMT_C(dst_argb_c + OFF, kStrideB, dst_argb32_c, kWidth * BPP_C, \
1634 kWidth, kHeight); \
1635 FMT_B##To##FMT_C(dst_argb_opt + OFF, kStrideB, dst_argb32_opt, \
1636 kWidth * BPP_C, kWidth, kHeight); \
1637 for (int i = 0; i < kWidth * BPP_C * kHeight; ++i) { \
1638 int abs_diff = abs(static_cast<int>(dst_argb32_c[i]) - \
1639 static_cast<int>(dst_argb32_opt[i])); \
1640 if (abs_diff > max_diff) { \
1641 max_diff = abs_diff; \
1642 } \
1643 } \
1644 EXPECT_LE(max_diff, DIFF); \
1645 free_aligned_buffer_page_end(src_y); \
1646 free_aligned_buffer_page_end(src_u); \
1647 free_aligned_buffer_page_end(src_v); \
1648 free_aligned_buffer_page_end(dst_argb_c); \
1649 free_aligned_buffer_page_end(dst_argb_opt); \
1650 free_aligned_buffer_page_end(dst_argb32_c); \
1651 free_aligned_buffer_page_end(dst_argb32_opt); \
1652 }
1653
1654 #define TESTPLANARTOBD(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
1655 YALIGN, DIFF, FMT_C, BPP_C) \
1656 TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
1657 YALIGN, benchmark_width_ - 4, DIFF, _Any, +, 0, FMT_C, \
1658 BPP_C) \
1659 TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
1660 YALIGN, benchmark_width_, DIFF, _Unaligned, +, 1, FMT_C, \
1661 BPP_C) \
1662 TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
1663 YALIGN, benchmark_width_, DIFF, _Invert, -, 0, FMT_C, BPP_C) \
1664 TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
1665 YALIGN, benchmark_width_, DIFF, _Opt, +, 0, FMT_C, BPP_C)
1666
1667 TESTPLANARTOBD(I420, 2, 2, RGB565, 2, 2, 1, 9, ARGB, 4)
1668
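// Convert packed YUY2/UYVY to NV12 in one step and compare against a two-step reference
// (packed to I420, then I420 to NV12).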
1669 #define TESTPTOB(NAME, UYVYTOI420, UYVYTONV12) \
1670 TEST_F(LibYUVConvertTest, NAME) { \
1671 const int kWidth = benchmark_width_; \
1672 const int kHeight = benchmark_height_; \
1673 \
1674 align_buffer_page_end(orig_uyvy, 4 * SUBSAMPLE(kWidth, 2) * kHeight); \
1675 align_buffer_page_end(orig_y, kWidth* kHeight); \
1676 align_buffer_page_end(orig_u, \
1677 SUBSAMPLE(kWidth, 2) * SUBSAMPLE(kHeight, 2)); \
1678 align_buffer_page_end(orig_v, \
1679 SUBSAMPLE(kWidth, 2) * SUBSAMPLE(kHeight, 2)); \
1680 \
1681 align_buffer_page_end(dst_y_orig, kWidth* kHeight); \
1682 align_buffer_page_end(dst_uv_orig, \
1683 2 * SUBSAMPLE(kWidth, 2) * SUBSAMPLE(kHeight, 2)); \
1684 \
1685 align_buffer_page_end(dst_y, kWidth* kHeight); \
1686 align_buffer_page_end(dst_uv, \
1687 2 * SUBSAMPLE(kWidth, 2) * SUBSAMPLE(kHeight, 2)); \
1688 \
1689 MemRandomize(orig_uyvy, 4 * SUBSAMPLE(kWidth, 2) * kHeight); \
1690 \
1691 /* Convert UYVY to NV12 in 2 steps for reference */ \
1692 libyuv::UYVYTOI420(orig_uyvy, 4 * SUBSAMPLE(kWidth, 2), orig_y, kWidth, \
1693 orig_u, SUBSAMPLE(kWidth, 2), orig_v, \
1694 SUBSAMPLE(kWidth, 2), kWidth, kHeight); \
1695 libyuv::I420ToNV12(orig_y, kWidth, orig_u, SUBSAMPLE(kWidth, 2), orig_v, \
1696 SUBSAMPLE(kWidth, 2), dst_y_orig, kWidth, dst_uv_orig, \
1697 2 * SUBSAMPLE(kWidth, 2), kWidth, kHeight); \
1698 \
1699 /* Convert to NV12 */ \
1700 for (int i = 0; i < benchmark_iterations_; ++i) { \
1701 libyuv::UYVYTONV12(orig_uyvy, 4 * SUBSAMPLE(kWidth, 2), dst_y, kWidth, \
1702 dst_uv, 2 * SUBSAMPLE(kWidth, 2), kWidth, kHeight); \
1703 } \
1704 \
1705 for (int i = 0; i < kWidth * kHeight; ++i) { \
1706 EXPECT_EQ(orig_y[i], dst_y[i]); \
1707 } \
1708 for (int i = 0; i < kWidth * kHeight; ++i) { \
1709 EXPECT_EQ(dst_y_orig[i], dst_y[i]); \
1710 } \
1711 for (int i = 0; i < 2 * SUBSAMPLE(kWidth, 2) * SUBSAMPLE(kHeight, 2); \
1712 ++i) { \
1713 EXPECT_EQ(dst_uv_orig[i], dst_uv[i]); \
1714 } \
1715 \
1716 free_aligned_buffer_page_end(orig_uyvy); \
1717 free_aligned_buffer_page_end(orig_y); \
1718 free_aligned_buffer_page_end(orig_u); \
1719 free_aligned_buffer_page_end(orig_v); \
1720 free_aligned_buffer_page_end(dst_y_orig); \
1721 free_aligned_buffer_page_end(dst_uv_orig); \
1722 free_aligned_buffer_page_end(dst_y); \
1723 free_aligned_buffer_page_end(dst_uv); \
1724 }
1725
1726 TESTPTOB(TestYUY2ToNV12, YUY2ToI420, YUY2ToNV12)
1727 TESTPTOB(TestUYVYToNV12, UYVYToI420, UYVYToNV12)
1728
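// Convert planar YUV to format B, then B to format C, and require the result to match a direct
// planar-to-C conversion byte for byte.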
1729 #define TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
1730 W1280, N, NEG, OFF, FMT_C, BPP_C) \
1731 TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##_##FMT_C##N) { \
1732 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
1733 const int kHeight = benchmark_height_; \
1734 const int kStrideB = SUBSAMPLE(kWidth, SUB_B) * BPP_B; \
1735 const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
1736 const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y); \
1737 align_buffer_page_end(src_y, kWidth* kHeight + OFF); \
1738 align_buffer_page_end(src_u, kSizeUV + OFF); \
1739 align_buffer_page_end(src_v, kSizeUV + OFF); \
1740 align_buffer_page_end(dst_argb_b, kStrideB* kHeight + OFF); \
1741 for (int i = 0; i < kWidth * kHeight; ++i) { \
1742 src_y[i + OFF] = (fastrand() & 0xff); \
1743 } \
1744 for (int i = 0; i < kSizeUV; ++i) { \
1745 src_u[i + OFF] = (fastrand() & 0xff); \
1746 src_v[i + OFF] = (fastrand() & 0xff); \
1747 } \
1748 memset(dst_argb_b + OFF, 1, kStrideB * kHeight); \
1749 for (int i = 0; i < benchmark_iterations_; ++i) { \
1750 FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_u + OFF, kStrideUV, \
1751 src_v + OFF, kStrideUV, dst_argb_b + OFF, \
1752 kStrideB, kWidth, NEG kHeight); \
1753 } \
1754 /* Convert to a 3rd format in 1 step and 2 steps and compare */ \
1755 const int kStrideC = kWidth * BPP_C; \
1756 align_buffer_page_end(dst_argb_c, kStrideC* kHeight + OFF); \
1757 align_buffer_page_end(dst_argb_bc, kStrideC* kHeight + OFF); \
1758 memset(dst_argb_c + OFF, 2, kStrideC * kHeight); \
1759 memset(dst_argb_bc + OFF, 3, kStrideC * kHeight); \
1760 FMT_PLANAR##To##FMT_C(src_y + OFF, kWidth, src_u + OFF, kStrideUV, \
1761 src_v + OFF, kStrideUV, dst_argb_c + OFF, kStrideC, \
1762 kWidth, NEG kHeight); \
1763 /* Convert B to C */ \
1764 FMT_B##To##FMT_C(dst_argb_b + OFF, kStrideB, dst_argb_bc + OFF, kStrideC, \
1765 kWidth, kHeight); \
1766 for (int i = 0; i < kStrideC * kHeight; ++i) { \
1767 EXPECT_EQ(dst_argb_c[i + OFF], dst_argb_bc[i + OFF]); \
1768 } \
1769 free_aligned_buffer_page_end(src_y); \
1770 free_aligned_buffer_page_end(src_u); \
1771 free_aligned_buffer_page_end(src_v); \
1772 free_aligned_buffer_page_end(dst_argb_b); \
1773 free_aligned_buffer_page_end(dst_argb_c); \
1774 free_aligned_buffer_page_end(dst_argb_bc); \
1775 }
1776
1777 #define TESTPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
1778 FMT_C, BPP_C) \
1779 TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
1780 benchmark_width_ - 4, _Any, +, 0, FMT_C, BPP_C) \
1781 TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
1782 benchmark_width_, _Unaligned, +, 1, FMT_C, BPP_C) \
1783 TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
1784 benchmark_width_, _Invert, -, 0, FMT_C, BPP_C) \
1785 TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
1786 benchmark_width_, _Opt, +, 0, FMT_C, BPP_C)
1787
1788 TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ABGR, 4)
1789 TESTPLANARTOE(J420, 2, 2, ARGB, 1, 4, ARGB, 4)
1790 TESTPLANARTOE(J420, 2, 2, ABGR, 1, 4, ARGB, 4)
1791 TESTPLANARTOE(H420, 2, 2, ARGB, 1, 4, ARGB, 4)
1792 TESTPLANARTOE(H420, 2, 2, ABGR, 1, 4, ARGB, 4)
1793 TESTPLANARTOE(I420, 2, 2, BGRA, 1, 4, ARGB, 4)
1794 TESTPLANARTOE(I420, 2, 2, ABGR, 1, 4, ARGB, 4)
1795 TESTPLANARTOE(I420, 2, 2, RGBA, 1, 4, ARGB, 4)
1796 TESTPLANARTOE(I420, 2, 2, RGB24, 1, 3, ARGB, 4)
1797 TESTPLANARTOE(I420, 2, 2, RAW, 1, 3, RGB24, 3)
1798 TESTPLANARTOE(I420, 2, 2, RGB24, 1, 3, RAW, 3)
1799 TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RAW, 3)
1800 TESTPLANARTOE(I420, 2, 2, RAW, 1, 3, ARGB, 4)
1801 TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RGB565, 2)
1802 TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ARGB1555, 2)
1803 TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ARGB4444, 2)
1804 TESTPLANARTOE(I422, 2, 1, ARGB, 1, 4, RGB565, 2)
1805 TESTPLANARTOE(J422, 2, 1, ARGB, 1, 4, ARGB, 4)
1806 TESTPLANARTOE(J422, 2, 1, ABGR, 1, 4, ARGB, 4)
1807 TESTPLANARTOE(H422, 2, 1, ARGB, 1, 4, ARGB, 4)
1808 TESTPLANARTOE(H422, 2, 1, ABGR, 1, 4, ARGB, 4)
1809 TESTPLANARTOE(I422, 2, 1, BGRA, 1, 4, ARGB, 4)
1810 TESTPLANARTOE(I422, 2, 1, ABGR, 1, 4, ARGB, 4)
1811 TESTPLANARTOE(I422, 2, 1, RGBA, 1, 4, ARGB, 4)
1812 TESTPLANARTOE(I444, 1, 1, ARGB, 1, 4, ARGB, 4)
1813 TESTPLANARTOE(J444, 1, 1, ARGB, 1, 4, ARGB, 4)
1814 TESTPLANARTOE(I444, 1, 1, ABGR, 1, 4, ARGB, 4)
1815 TESTPLANARTOE(I420, 2, 2, YUY2, 2, 4, ARGB, 4)
1816 TESTPLANARTOE(I420, 2, 2, UYVY, 2, 4, ARGB, 4)
1817 TESTPLANARTOE(I422, 2, 1, YUY2, 2, 4, ARGB, 4)
1818 TESTPLANARTOE(I422, 2, 1, UYVY, 2, 4, ARGB, 4)
1819
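// Alpha-plane variant of the above: I420Alpha to B to C versus direct planar-to-C, with the
// attenuation flag (ATTEN) exercised by the _Premult variant.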
1820 #define TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
1821 W1280, N, NEG, OFF, FMT_C, BPP_C, ATTEN) \
1822 TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##_##FMT_C##N) { \
1823 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
1824 const int kHeight = benchmark_height_; \
1825 const int kStrideB = SUBSAMPLE(kWidth, SUB_B) * BPP_B; \
1826 const int kSizeUV = \
1827 SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y); \
1828 align_buffer_page_end(src_y, kWidth* kHeight + OFF); \
1829 align_buffer_page_end(src_u, kSizeUV + OFF); \
1830 align_buffer_page_end(src_v, kSizeUV + OFF); \
1831 align_buffer_page_end(src_a, kWidth* kHeight + OFF); \
1832 align_buffer_page_end(dst_argb_b, kStrideB* kHeight + OFF); \
1833 for (int i = 0; i < kWidth * kHeight; ++i) { \
1834 src_y[i + OFF] = (fastrand() & 0xff); \
1835 src_a[i + OFF] = (fastrand() & 0xff); \
1836 } \
1837 for (int i = 0; i < kSizeUV; ++i) { \
1838 src_u[i + OFF] = (fastrand() & 0xff); \
1839 src_v[i + OFF] = (fastrand() & 0xff); \
1840 } \
1841 memset(dst_argb_b + OFF, 1, kStrideB * kHeight); \
1842 for (int i = 0; i < benchmark_iterations_; ++i) { \
1843 FMT_PLANAR##To##FMT_B( \
1844 src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \
1845 src_v + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), src_a + OFF, kWidth, \
1846 dst_argb_b + OFF, kStrideB, kWidth, NEG kHeight, ATTEN); \
1847 } \
1848 /* Convert to a 3rd format in 1 step and 2 steps and compare */ \
1849 const int kStrideC = kWidth * BPP_C; \
1850 align_buffer_page_end(dst_argb_c, kStrideC* kHeight + OFF); \
1851 align_buffer_page_end(dst_argb_bc, kStrideC* kHeight + OFF); \
1852 memset(dst_argb_c + OFF, 2, kStrideC * kHeight); \
1853 memset(dst_argb_bc + OFF, 3, kStrideC * kHeight); \
1854 FMT_PLANAR##To##FMT_C( \
1855 src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \
1856 src_v + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), src_a + OFF, kWidth, \
1857 dst_argb_c + OFF, kStrideC, kWidth, NEG kHeight, ATTEN); \
1858 /* Convert B to C */ \
1859 FMT_B##To##FMT_C(dst_argb_b + OFF, kStrideB, dst_argb_bc + OFF, kStrideC, \
1860 kWidth, kHeight); \
1861 for (int i = 0; i < kStrideC * kHeight; ++i) { \
1862 EXPECT_EQ(dst_argb_c[i + OFF], dst_argb_bc[i + OFF]); \
1863 } \
1864 free_aligned_buffer_page_end(src_y); \
1865 free_aligned_buffer_page_end(src_u); \
1866 free_aligned_buffer_page_end(src_v); \
1867 free_aligned_buffer_page_end(src_a); \
1868 free_aligned_buffer_page_end(dst_argb_b); \
1869 free_aligned_buffer_page_end(dst_argb_c); \
1870 free_aligned_buffer_page_end(dst_argb_bc); \
1871 }
1872
1873 #define TESTQPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
1874 FMT_C, BPP_C) \
1875 TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
1876 benchmark_width_ - 4, _Any, +, 0, FMT_C, BPP_C, 0) \
1877 TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
1878 benchmark_width_, _Unaligned, +, 1, FMT_C, BPP_C, 0) \
1879 TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
1880 benchmark_width_, _Invert, -, 0, FMT_C, BPP_C, 0) \
1881 TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
1882 benchmark_width_, _Opt, +, 0, FMT_C, BPP_C, 0) \
1883 TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
1884 benchmark_width_, _Premult, +, 0, FMT_C, BPP_C, 1)
1885
1886 TESTQPLANARTOE(I420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4)
1887 TESTQPLANARTOE(I420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4)
1888
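// ConvertToARGB with kRotate90 on a 2x2 ARGB source: pixels should be rotated with no color
// conversion applied.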
1889 TEST_F(LibYUVConvertTest, RotateWithARGBSource) {
1890 // 2x2 frames
1891 uint32_t src[4];
1892 uint32_t dst[4];
1893 // some random input
1894 src[0] = 0x11000000;
1895 src[1] = 0x00450000;
1896 src[2] = 0x00009f00;
1897 src[3] = 0x000000ff;
1898 // zeros on destination
1899 dst[0] = 0x00000000;
1900 dst[1] = 0x00000000;
1901 dst[2] = 0x00000000;
1902 dst[3] = 0x00000000;
1903
1904 int r = ConvertToARGB(reinterpret_cast<uint8_t*>(src),
1905 16, // input size
1906 reinterpret_cast<uint8_t*>(dst),
1907 8, // destination stride
1908 0, // crop_x
1909 0, // crop_y
1910 2, // width
1911 2, // height
1912 2, // crop width
1913 2, // crop height
1914 kRotate90, FOURCC_ARGB);
1915
1916 EXPECT_EQ(r, 0);
1917 // 90 degrees rotation, no conversion
1918 EXPECT_EQ(dst[0], src[2]);
1919 EXPECT_EQ(dst[1], src[0]);
1920 EXPECT_EQ(dst[2], src[3]);
1921 EXPECT_EQ(dst[3], src[1]);
1922 }
1923
1924 } // namespace libyuv
1925