1 /*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "include/private/SkNx.h"
9 #include "include/utils/SkRandom.h"
10 #include "src/core/Sk4px.h"
11 #include "tests/Test.h"
12
13 template <int N>
test_Nf(skiatest::Reporter * r)14 static void test_Nf(skiatest::Reporter* r) {
15
16 auto assert_nearly_eq = [&](float eps, const SkNx<N, float>& v,
17 float a, float b, float c, float d) {
18 auto close = [=](float a, float b) { return fabsf(a-b) <= eps; };
19 float vals[4];
20 v.store(vals);
21 bool ok = close(vals[0], a) && close(vals[1], b)
22 && close( v[0], a) && close( v[1], b);
23 REPORTER_ASSERT(r, ok);
24 if (N == 4) {
25 ok = close(vals[2], c) && close(vals[3], d)
26 && close( v[2], c) && close( v[3], d);
27 REPORTER_ASSERT(r, ok);
28 }
29 };
30 auto assert_eq = [&](const SkNx<N, float>& v, float a, float b, float c, float d) {
31 return assert_nearly_eq(0, v, a,b,c,d);
32 };
33
34 float vals[] = {3, 4, 5, 6};
35 SkNx<N,float> a = SkNx<N,float>::Load(vals),
36 b(a),
37 c = a;
38 SkNx<N,float> d;
39 d = a;
40
41 assert_eq(a, 3, 4, 5, 6);
42 assert_eq(b, 3, 4, 5, 6);
43 assert_eq(c, 3, 4, 5, 6);
44 assert_eq(d, 3, 4, 5, 6);
45
46 assert_eq(a+b, 6, 8, 10, 12);
47 assert_eq(a*b, 9, 16, 25, 36);
48 assert_eq(a*b-b, 6, 12, 20, 30);
49 assert_eq((a*b).sqrt(), 3, 4, 5, 6);
50 assert_eq(a/b, 1, 1, 1, 1);
51 assert_eq(SkNx<N,float>(0)-a, -3, -4, -5, -6);
52
53 SkNx<N,float> fours(4);
54
55 assert_eq(fours.sqrt(), 2,2,2,2);
56
57 assert_eq(SkNx<N,float>::Min(a, fours), 3, 4, 4, 4);
58 assert_eq(SkNx<N,float>::Max(a, fours), 4, 4, 5, 6);
59
60 // Test some comparisons. This is not exhaustive.
61 REPORTER_ASSERT(r, (a == b).allTrue());
62 REPORTER_ASSERT(r, (a+b == a*b-b).anyTrue());
63 REPORTER_ASSERT(r, !(a+b == a*b-b).allTrue());
64 REPORTER_ASSERT(r, !(a+b == a*b).anyTrue());
65 REPORTER_ASSERT(r, !(a != b).anyTrue());
66 REPORTER_ASSERT(r, (a < fours).anyTrue());
67 REPORTER_ASSERT(r, (a <= fours).anyTrue());
68 REPORTER_ASSERT(r, !(a > fours).allTrue());
69 REPORTER_ASSERT(r, !(a >= fours).allTrue());
70 }
71
// Run the float tests at both supported widths.
DEF_TEST(SkNf, r) {
    test_Nf<2>(r);
    test_Nf<4>(r);
}
76
77 template <int N, typename T>
test_Ni(skiatest::Reporter * r)78 void test_Ni(skiatest::Reporter* r) {
79 auto assert_eq = [&](const SkNx<N,T>& v, T a, T b, T c, T d, T e, T f, T g, T h) {
80 T vals[8];
81 v.store(vals);
82
83 switch (N) {
84 case 8:
85 REPORTER_ASSERT(r, vals[4] == e && vals[5] == f && vals[6] == g && vals[7] == h);
86 [[fallthrough]];
87 case 4:
88 REPORTER_ASSERT(r, vals[2] == c && vals[3] == d);
89 [[fallthrough]];
90 case 2:
91 REPORTER_ASSERT(r, vals[0] == a && vals[1] == b);
92 }
93 switch (N) {
94 case 8:
95 REPORTER_ASSERT(r, v[4] == e && v[5] == f && v[6] == g && v[7] == h);
96 [[fallthrough]];
97 case 4:
98 REPORTER_ASSERT(r, v[2] == c && v[3] == d);
99 [[fallthrough]];
100 case 2:
101 REPORTER_ASSERT(r, v[0] == a && v[1] == b);
102 }
103 };
104
105 T vals[] = { 1,2,3,4,5,6,7,8 };
106 SkNx<N,T> a = SkNx<N,T>::Load(vals),
107 b(a),
108 c = a;
109 SkNx<N,T> d;
110 d = a;
111
112 assert_eq(a, 1,2,3,4,5,6,7,8);
113 assert_eq(b, 1,2,3,4,5,6,7,8);
114 assert_eq(c, 1,2,3,4,5,6,7,8);
115 assert_eq(d, 1,2,3,4,5,6,7,8);
116
117 assert_eq(a+a, 2,4,6,8,10,12,14,16);
118 assert_eq(a*a, 1,4,9,16,25,36,49,64);
119 assert_eq(a*a-a, 0,2,6,12,20,30,42,56);
120
121 assert_eq(a >> 2, 0,0,0,1,1,1,1,2);
122 assert_eq(a << 1, 2,4,6,8,10,12,14,16);
123
124 REPORTER_ASSERT(r, a[1] == 2);
125 }
126
// Run the integer tests for each supported width and element type.
DEF_TEST(SkNx, r) {
    test_Ni<2, uint16_t>(r);
    test_Ni<4, uint16_t>(r);
    test_Ni<8, uint16_t>(r);

    test_Ni<2, int>(r);
    test_Ni<4, int>(r);
    test_Ni<8, int>(r);
}
136
// Checks Sk16b::Min and operator< exhaustively over 8-bit inputs, and
// Sk16h::Min over 16-bit inputs (randomly sampled in debug builds,
// exhaustive in release builds).
DEF_TEST(SkNi_min_lt, r) {
    // Exhaustively check the 8x8 bit space.
    for (int a = 0; a < (1<<8); a++) {
    for (int b = 0; b < (1<<8); b++) {
        Sk16b aw(a), bw(b);
        REPORTER_ASSERT(r, Sk16b::Min(aw, bw)[0] == std::min(a, b));
        // Comparison lanes are mask values; !x normalizes both sides to bool.
        REPORTER_ASSERT(r, !(aw < bw)[0] == !(a < b));
    }}

    // Exhausting the 16x16 bit space is kind of slow, so only do that in release builds.
#ifdef SK_DEBUG
    SkRandom rand;
    for (int i = 0; i < (1<<16); i++) {
        // nextU() is 32 bits; keep the high 16 as a uniform uint16_t.
        uint16_t a = rand.nextU() >> 16,
                 b = rand.nextU() >> 16;
        REPORTER_ASSERT(r, Sk16h::Min(Sk16h(a), Sk16h(b))[0] == std::min(a, b));
    }
#else
    for (int a = 0; a < (1<<16); a++) {
    for (int b = 0; b < (1<<16); b++) {
        REPORTER_ASSERT(r, Sk16h::Min(Sk16h(a), Sk16h(b))[0] == std::min(a, b));
    }}
#endif
}
161
DEF_TEST(SkNi_saturatedAdd, r) {
    // saturatedAdd() clamps the 8-bit sum at 255 instead of wrapping.
    for (int x = 0; x < (1<<8); x++) {
        for (int y = 0; y < (1<<8); y++) {
            int want = x + y;
            if (want > 255) { want = 255; }
            if (want < 0)   { want = 0; }

            REPORTER_ASSERT(r, Sk16b(x).saturatedAdd(Sk16b(y))[0] == want);
        }
    }
}
173
DEF_TEST(SkNi_mulHi, r) {
    // mulHi keeps the high 32 bits of each 32x32 product.  The inputs are
    // the first 8 primes, each shifted up 16 bits, so each product's high
    // word is simply the product of the two primes.
    Sk4u lhs{ 0x00020000, 0x00030000, 0x00050000, 0x00070000 };
    Sk4u rhs{ 0x000b0000, 0x000d0000, 0x00110000, 0x00130000 };

    Sk4u want{22, 39, 85, 133};  // 2*11, 3*13, 5*17, 7*19

    Sk4u got = lhs.mulHi(rhs);
    for (int i = 0; i < 4; i++) {
        REPORTER_ASSERT(r, got[i] == want[i]);
    }
}
187
DEF_TEST(Sk4px_muldiv255round, r) {
    // For every 8-bit pair (x,y), compare the SIMD mul-then-divide-by-255
    // paths against the exact rounding formula (x*y+127)/255.
    for (int x = 0; x < (1<<8); x++) {
        for (int y = 0; y < (1<<8); y++) {
            const int want = (x*y+127)/255;

            // Splat x and y into all 16 byte lanes.
            Sk4px vx = Sk16b(x),
                  vy = Sk16b(y);

            // div255() must round exactly.
            const int got = (vx * vy).div255()[0];
            REPORTER_ASSERT(r, got == want);

            // approxMulDiv255() may be off by at most 1, but must be exact
            // when either input is 0 or 255.
            const int approx = vx.approxMulDiv255(vy)[0];
            REPORTER_ASSERT(r, approx-want >= -1 && approx-want <= 1);
            if (x == 0 || x == 255 || y == 0 || y == 255) {
                REPORTER_ASSERT(r, approx == want);
            }
        }
    }
}
210
DEF_TEST(SkNx_abs, r) {
    // abs() clears the sign bit; note -0.0f must come out equal to 0.0f.
    const float want[4] = {0.0f, 0.0f, 2.0f, 4.0f};

    auto wide = Sk4f(0.0f, -0.0f, 2.0f, -4.0f).abs();
    for (int i = 0; i < 4; i++) {
        REPORTER_ASSERT(r, wide[i] == want[i]);
    }

    // Same values split across two 2-wide vectors.
    auto firstPair  = Sk2f(0.0f, -0.0f).abs();
    auto secondPair = Sk2f(2.0f, -4.0f).abs();
    REPORTER_ASSERT(r, firstPair[0]  == want[0]);
    REPORTER_ASSERT(r, firstPair[1]  == want[1]);
    REPORTER_ASSERT(r, secondPair[0] == want[2]);
    REPORTER_ASSERT(r, secondPair[1] == want[3]);
}
224
DEF_TEST(Sk4i_abs, r) {
    // Integer abs(); -2147483647 negates cleanly (one above INT_MIN).
    auto got = Sk4i(0, -1, 2, -2147483647).abs();
    const int want[4] = {0, 1, 2, 2147483647};
    for (int i = 0; i < 4; i++) {
        REPORTER_ASSERT(r, got[i] == want[i]);
    }
}
232
DEF_TEST(Sk4i_minmax, r) {
    // Lane-wise Min/Max must agree with scalar std::min/std::max.
    auto lhs = Sk4i(0, 2, 4, 6);
    auto rhs = Sk4i(1, 1, 3, 7);
    auto laneMin = Sk4i::Min(lhs, rhs);
    auto laneMax = Sk4i::Max(lhs, rhs);
    for (int i = 0; i < 4; ++i) {
        REPORTER_ASSERT(r, laneMin[i] == std::min(lhs[i], rhs[i]));
        REPORTER_ASSERT(r, laneMax[i] == std::max(lhs[i], rhs[i]));
    }
}
243
DEF_TEST(SkNx_floor, r) {
    // floor() rounds toward negative infinity, so -0.4f and -0.6f both hit -1.
    auto wide = Sk4f(0.4f, -0.4f, 0.6f, -0.6f).floor();
    const float want[4] = {0.0f, -1.0f, 0.0f, -1.0f};
    for (int i = 0; i < 4; i++) {
        REPORTER_ASSERT(r, wide[i] == want[i]);
    }

    auto belowHalf = Sk2f(0.4f, -0.4f).floor();
    REPORTER_ASSERT(r, belowHalf[0] == 0.0f);
    REPORTER_ASSERT(r, belowHalf[1] == -1.0f);

    auto aboveHalf = Sk2f(0.6f, -0.6f).floor();
    REPORTER_ASSERT(r, aboveHalf[0] == 0.0f);
    REPORTER_ASSERT(r, aboveHalf[1] == -1.0f);
}
259
DEF_TEST(SkNx_shuffle, r) {
    // SkNx_shuffle picks lanes by index and can also narrow or widen.
    Sk4f wide(0,10,20,30);

    // Narrow 4 -> 2, selecting lanes 2 and 1.
    Sk2f narrow = SkNx_shuffle<2,1>(wide);
    REPORTER_ASSERT(r, narrow[0] == 20);
    REPORTER_ASSERT(r, narrow[1] == 10);

    // Widen 2 -> 4 with a mirrored pattern.
    wide = SkNx_shuffle<0,1,1,0>(narrow);
    const float want[4] = {20, 10, 10, 20};
    for (int i = 0; i < 4; i++) {
        REPORTER_ASSERT(r, wide[i] == want[i]);
    }
}
273
DEF_TEST(SkNx_int_float, r) {
    // float -> int casts truncate toward zero.
    Sk4f f(-2.3f, 1.0f, 0.45f, 0.6f);

    Sk4i i = SkNx_cast<int>(f);
    const int wantInt[4] = {-2, 1, 0, 0};
    for (int lane = 0; lane < 4; lane++) {
        REPORTER_ASSERT(r, i[lane] == wantInt[lane]);
    }

    // ...and those small ints convert back to float exactly.
    f = SkNx_cast<float>(i);
    const float wantFloat[4] = {-2.0f, 1.0f, 0.0f, 0.0f};
    for (int lane = 0; lane < 4; lane++) {
        REPORTER_ASSERT(r, f[lane] == wantFloat[lane]);
    }
}
289
290 #include "include/utils/SkRandom.h"
291
DEF_TEST(SkNx_u16_float, r) {
    {
        // u16 -> float is exact across the 16-bit range, including 65535.
        auto f4 = SkNx_cast<float>(Sk4h(15, 17, 257, 65535));
        REPORTER_ASSERT(r, f4[0] == 15.0f);
        REPORTER_ASSERT(r, f4[1] == 17.0f);
        REPORTER_ASSERT(r, f4[2] == 257.0f);
        REPORTER_ASSERT(r, f4[3] == 65535.0f);
    }
    {
        // float -> u16, again including the top value 65535.
        auto h4 = SkNx_cast<uint16_t>(Sk4f(15, 17, 257, 65535));
        REPORTER_ASSERT(r, h4[0] == 15);
        REPORTER_ASSERT(r, h4[1] == 17);
        REPORTER_ASSERT(r, h4[2] == 257);
        REPORTER_ASSERT(r, h4[3] == 65535);
    }

    // Any u16 value must survive a round-trip through float unchanged.
    SkRandom rand;
    for (int i = 0; i < 10000; ++i) {
        // nextU() is 32 bits; the high 16 give a uniform uint16_t.
        const uint16_t src[4] {
            (uint16_t)(rand.nextU() >> 16), (uint16_t)(rand.nextU() >> 16),
            (uint16_t)(rand.nextU() >> 16), (uint16_t)(rand.nextU() >> 16),
        };
        auto roundTripped = SkNx_cast<uint16_t>(SkNx_cast<float>(Sk4h::Load(src)));
        uint16_t dst[4];
        roundTripped.store(dst);
        REPORTER_ASSERT(r, !memcmp(src, dst, sizeof(src)));
    }
}
328
329 // The SSE2 implementation of SkNx_cast<uint16_t>(Sk4i) is non-trivial, so worth a test.
DEF_TEST(SkNx_int_u16, r) {
    // These are pretty hard to get wrong.
    for (int i = 0; i <= 0x7fff; i++) {
        uint16_t expected = (uint16_t)i;
        uint16_t actual = SkNx_cast<uint16_t>(Sk4i(i))[0];

        REPORTER_ASSERT(r, expected == actual);
    }

    // A naive implementation with _mm_packs_epi32 would succeed up to 0x7fff but fail here:
    // (dropped a stray "(1) && " no-op from this loop condition)
    for (int i = 0x8000; i <= 0xffff; i++) {
        uint16_t expected = (uint16_t)i;
        uint16_t actual = SkNx_cast<uint16_t>(Sk4i(i))[0];

        REPORTER_ASSERT(r, expected == actual);
    }
}
347
DEF_TEST(SkNx_4fLoad4Store4, r) {
    // Load4/Store4 transpose between row-major 4x4 memory and four
    // per-component vectors: vector j holds element j of each row.
    float src[] = {
         0.0f,  1.0f,  2.0f,  3.0f,
         4.0f,  5.0f,  6.0f,  7.0f,
         8.0f,  9.0f, 10.0f, 11.0f,
        12.0f, 13.0f, 14.0f, 15.0f
    };

    Sk4f a, b, c, d;
    Sk4f::Load4(src, &a, &b, &c, &d);
    const Sk4f* cols[4] = {&a, &b, &c, &d};
    for (int j = 0; j < 4; j++) {
        for (int i = 0; i < 4; i++) {
            REPORTER_ASSERT(r, (*cols[j])[i] == src[4*i + j]);
        }
    }

    // Store4 is the inverse transpose, so we get the original bytes back.
    float dst[16];
    Sk4f::Store4(dst, a, b, c, d);
    REPORTER_ASSERT(r, 0 == memcmp(dst, src, 16 * sizeof(float)));
}
379
DEF_TEST(SkNx_neg, r) {
    // Unary minus; -(-0.0f) must compare equal to 0.0f.
    auto wide = -Sk4f(0.0f, -0.0f, 2.0f, -4.0f);
    const float want[4] = {0.0f, 0.0f, -2.0f, 4.0f};
    for (int i = 0; i < 4; i++) {
        REPORTER_ASSERT(r, wide[i] == want[i]);
    }

    // Same values split across two 2-wide vectors.
    auto firstPair  = -Sk2f(0.0f, -0.0f);
    auto secondPair = -Sk2f(2.0f, -4.0f);
    REPORTER_ASSERT(r, firstPair[0]  == want[0]);
    REPORTER_ASSERT(r, firstPair[1]  == want[1]);
    REPORTER_ASSERT(r, secondPair[0] == want[2]);
    REPORTER_ASSERT(r, secondPair[1] == want[3]);
}
393
DEF_TEST(SkNx_thenElse, r) {
    // mask.thenElse(t, e) selects t where the comparison was true, e elsewhere.
    // Note -0.0f < 0 is false, so those lanes take the else value.
    auto sel4 = (Sk4f(0.0f, -0.0f, 2.0f, -4.0f) < 0).thenElse(-1, 1);
    const float want[4] = {1, 1, 1, -1};
    for (int i = 0; i < 4; i++) {
        REPORTER_ASSERT(r, sel4[i] == want[i]);
    }

    auto selA = (Sk2f(0.0f, -0.0f) < 0).thenElse(-1, 1);
    auto selB = (Sk2f(2.0f, -4.0f) < 0).thenElse(-1, 1);
    REPORTER_ASSERT(r, selA[0] == 1);
    REPORTER_ASSERT(r, selA[1] == 1);
    REPORTER_ASSERT(r, selB[0] == 1);
    REPORTER_ASSERT(r, selB[1] == -1);
}
407
DEF_TEST(Sk4f_Load2, r) {
    // Load2 de-interleaves {x0,y0,x1,y1,...} into separate x and y vectors.
    float xy[8] = { 0,1,2,3,4,5,6,7 };

    Sk4f x,y;
    Sk4f::Load2(xy, &x,&y);

    for (int i = 0; i < 4; i++) {
        REPORTER_ASSERT(r, x[i] == 2*i);      // even slots
    }
    for (int i = 0; i < 4; i++) {
        REPORTER_ASSERT(r, y[i] == 2*i + 1);  // odd slots
    }
}
424
DEF_TEST(Sk2f_Load2, r) {
    // Two-wide Load2: de-interleave {x0,y0,x1,y1}.
    float xy[4] = { 0,1,2,3 };

    Sk2f x,y;
    Sk2f::Load2(xy, &x,&y);

    for (int i = 0; i < 2; i++) {
        REPORTER_ASSERT(r, x[i] == 2*i);      // even slots
    }
    for (int i = 0; i < 2; i++) {
        REPORTER_ASSERT(r, y[i] == 2*i + 1);  // odd slots
    }
}
437
DEF_TEST(Sk2f_Store2, r) {
    // Store2 interleaves two 2-wide vectors: {a[0], b[0], a[1], b[1]}.
    Sk2f evens{0, 2};
    Sk2f odds{1, 3};
    float dst[4];
    Sk2f::Store2(dst, evens, odds);
    for (int i = 0; i < 4; i++) {
        REPORTER_ASSERT(r, dst[i] == i);
    }
}
448
DEF_TEST(Sk2f_Store3, r) {
    // Store3 interleaves three 2-wide vectors:
    // {a[0], b[0], c[0], a[1], b[1], c[1]}.
    Sk2f a{0, 3};
    Sk2f b{1, 4};
    Sk2f c{2, 5};
    float dst[6];
    Sk2f::Store3(dst, a, b, c);
    for (int i = 0; i < 6; i++) {
        REPORTER_ASSERT(r, dst[i] == i);
    }
}
462
DEF_TEST(Sk2f_Store4, r) {
    // Store4 interleaves four 2-wide vectors into 8 consecutive floats.
    Sk2f p0{0, 4};
    Sk2f p1{1, 5};
    Sk2f p2{2, 6};
    Sk2f p3{3, 7};

    float dst[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
    Sk2f::Store4(dst, p0, p1, p2, p3);
    for (int i = 0; i < 8; i++) {
        REPORTER_ASSERT(r, dst[i] == i);
    }

    // Ensure transposing directly into a pair of Sk4f works too.
    Sk4f dst4f[2] = {{-1, -1, -1, -1}, {-1, -1, -1, -1}};
    Sk2f::Store4(dst4f, p0, p1, p2, p3);
    for (int j = 0; j < 2; j++) {
        for (int i = 0; i < 4; i++) {
            REPORTER_ASSERT(r, dst4f[j][i] == 4*j + i);
        }
    }
}
493
DEF_TEST(Sk4f_minmax, r) {
    // Horizontal min()/max() reduce across all four lanes, regardless of
    // which lane holds the extreme value.
    Sk4f cases[4] = {
        Sk4f(0,1,2,3),
        Sk4f(1,-5,2,-1),
        Sk4f(-2,-1,-6,-3),
        Sk4f(3,2,1,0),
    };
    const float wantMax[4] = {3, 2, -1, 3};
    const float wantMin[4] = {0, -5, -6, 0};

    for (int i = 0; i < 4; i++) {
        REPORTER_ASSERT(r, wantMax[i] == cases[i].max());
    }
    for (int i = 0; i < 4; i++) {
        REPORTER_ASSERT(r, wantMin[i] == cases[i].min());
    }
}
505
DEF_TEST(SkNf_anyTrue_allTrue, r) {
    // anyTrue(): at least one lane true.  allTrue(): every lane true.
    auto both2 = Sk2f{1,2} < Sk2f{3,4};   // true in both lanes
    REPORTER_ASSERT(r, both2.anyTrue());
    REPORTER_ASSERT(r, both2.allTrue());
    auto mixed2 = Sk2f{3,2} < Sk2f{1,4};  // true in one lane only
    REPORTER_ASSERT(r, mixed2.anyTrue());
    REPORTER_ASSERT(r, !mixed2.allTrue());
    auto none2 = Sk2f{3,4} < Sk2f{1,2};   // false in both lanes
    REPORTER_ASSERT(r, !none2.anyTrue());

    auto all4 = Sk4f{1,2,3,4} < Sk4f{3,4,5,6};   // true in all four lanes
    REPORTER_ASSERT(r, all4.anyTrue());
    REPORTER_ASSERT(r, all4.allTrue());
    auto some4 = Sk4f{1,2,3,4} < Sk4f{1,4,1,1};  // true in one lane only
    REPORTER_ASSERT(r, some4.anyTrue());
    REPORTER_ASSERT(r, !some4.allTrue());
    auto none4 = Sk4f{3,4,5,6} < Sk4f{1,2,3,4};  // false in every lane
    REPORTER_ASSERT(r, !none4.anyTrue());
}
519