/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkPoint.h"
#include "include/core/SkScalar.h"
#include "include/private/SkVx.h"
#include "tests/Test.h"
#include <limits>
#include <math.h>
#include <numeric>

using float2 = skvx::Vec<2,float>;
using float4 = skvx::Vec<4,float>;
using float8 = skvx::Vec<8,float>;

using double2 = skvx::Vec<2,double>;
using double4 = skvx::Vec<4,double>;
using double8 = skvx::Vec<8,double>;

using byte2  = skvx::Vec< 2,uint8_t>;
using byte4  = skvx::Vec< 4,uint8_t>;
using byte8  = skvx::Vec< 8,uint8_t>;
using byte16 = skvx::Vec<16,uint8_t>;

using int2 = skvx::Vec<2,int32_t>;
using int4 = skvx::Vec<4,int32_t>;
using int8 = skvx::Vec<8,int32_t>;

using uint2 = skvx::Vec<2,uint32_t>;
using uint4 = skvx::Vec<4,uint32_t>;
using uint8 = skvx::Vec<8,uint32_t>;

using long2 = skvx::Vec<2,int64_t>;
using long4 = skvx::Vec<4,int64_t>;
using long8 = skvx::Vec<8,int64_t>;

DEF_TEST(SkVx, r) {
    static_assert(sizeof(float2) ==  8, "");
    static_assert(sizeof(float4) == 16, "");
    static_assert(sizeof(float8) == 32, "");

    static_assert(sizeof(byte2) == 2, "");
    static_assert(sizeof(byte4) == 4, "");
    static_assert(sizeof(byte8) == 8, "");

    {
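        // Comparisons produce an integer mask vector of matching lane width:
        // all bits set (-1) in lanes where the comparison is true, 0 where it is false.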
        int4 mask = float4{1,2,3,4} < float4{1,2,4,8};
        REPORTER_ASSERT(r, mask[0] == int32_t( 0));
        REPORTER_ASSERT(r, mask[1] == int32_t( 0));
        REPORTER_ASSERT(r, mask[2] == int32_t(-1));
        REPORTER_ASSERT(r, mask[3] == int32_t(-1));

        REPORTER_ASSERT(r,  any(mask));
        REPORTER_ASSERT(r, !all(mask));
    }

    {
        long4 mask = double4{1,2,3,4} < double4{1,2,4,8};
        REPORTER_ASSERT(r, mask[0] == int64_t( 0));
        REPORTER_ASSERT(r, mask[1] == int64_t( 0));
        REPORTER_ASSERT(r, mask[2] == int64_t(-1));
        REPORTER_ASSERT(r, mask[3] == int64_t(-1));

        REPORTER_ASSERT(r,  any(mask));
        REPORTER_ASSERT(r, !all(mask));
    }

    REPORTER_ASSERT(r, min(float4{1,2,3,4}) == 1);
    REPORTER_ASSERT(r, max(float4{1,2,3,4}) == 4);

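    // Construction semantics checked below: brace-init ignores extra values and zero-pads any
    // missing lanes, while constructing from a single scalar (e.g. int4(1)) splats it to all lanes.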
    REPORTER_ASSERT(r, all(int4{1,2,3,4,5} == int4{1,2,3,4}));
    REPORTER_ASSERT(r, all(int4{1,2,3,4} == int4{1,2,3,4}));
    REPORTER_ASSERT(r, all(int4{1,2,3} == int4{1,2,3,0}));
    REPORTER_ASSERT(r, all(int4{1,2} == int4{1,2,0,0}));
    REPORTER_ASSERT(r, all(int4{1} == int4{1,0,0,0}));
    REPORTER_ASSERT(r, all(int4(1) == int4{1,1,1,1}));
    REPORTER_ASSERT(r, all(int4{} == int4{0,0,0,0}));
    REPORTER_ASSERT(r, all(int4() == int4{0,0,0,0}));

    REPORTER_ASSERT(r, all(int4{1,2,2,1} == min(int4{1,2,3,4}, int4{4,3,2,1})));
    REPORTER_ASSERT(r, all(int4{4,3,3,4} == max(int4{1,2,3,4}, int4{4,3,2,1})));

    REPORTER_ASSERT(r, all(if_then_else(float4{1,2,3,2} <= float4{2,2,2,2}, float4(42), float4(47))
                           == float4{42,42,47,42}));

    REPORTER_ASSERT(r, all(floor(float4{-1.5f,1.5f,1.0f,-1.0f}) == float4{-2.0f,1.0f,1.0f,-1.0f}));
    REPORTER_ASSERT(r, all( ceil(float4{-1.5f,1.5f,1.0f,-1.0f}) == float4{-1.0f,2.0f,1.0f,-1.0f}));
    REPORTER_ASSERT(r, all(trunc(float4{-1.5f,1.5f,1.0f,-1.0f}) == float4{-1.0f,1.0f,1.0f,-1.0f}));
    REPORTER_ASSERT(r, all(round(float4{-1.5f,1.5f,1.0f,-1.0f}) == float4{-2.0f,2.0f,1.0f,-1.0f}));


    REPORTER_ASSERT(r, all(abs(float4{-2,-1,0,1}) == float4{2,1,0,1}));

    // TODO(mtklein): these tests could be made less loose.
    REPORTER_ASSERT(r, all( sqrt(float4{2,3,4,5}) < float4{2,2,3,3}));
    REPORTER_ASSERT(r, all( sqrt(float2{2,3}) < float2{2,2}));

    REPORTER_ASSERT(r, all(skvx::cast<int>(float4{-1.5f,0.5f,1.0f,1.5f}) == int4{-1,0,1,1}));

    float buf[] = {1,2,3,4,5,6};
    REPORTER_ASSERT(r, all(float4::Load(buf) == float4{1,2,3,4}));
    float4{2,3,4,5}.store(buf);
    REPORTER_ASSERT(r, buf[0] == 2
                    && buf[1] == 3
                    && buf[2] == 4
                    && buf[3] == 5
                    && buf[4] == 5
                    && buf[5] == 6);
    REPORTER_ASSERT(r, all(float4::Load(buf+0) == float4{2,3,4,5}));
    REPORTER_ASSERT(r, all(float4::Load(buf+2) == float4{4,5,5,6}));

    REPORTER_ASSERT(r, all(skvx::shuffle<2,1,0,3>        (float4{1,2,3,4}) == float4{3,2,1,4}));
    REPORTER_ASSERT(r, all(skvx::shuffle<2,1>            (float4{1,2,3,4}) == float2{3,2}));
    REPORTER_ASSERT(r, all(skvx::shuffle<3,3,3,3>        (float4{1,2,3,4}) == float4{4,4,4,4}));
    REPORTER_ASSERT(r, all(skvx::shuffle<2,1,2,1,2,1,2,1>(float4{1,2,3,4})
                           == float8{3,2,3,2,3,2,3,2}));

    // Test that mixed types can be used where they make sense. Mostly about ergonomics.
    REPORTER_ASSERT(r, all(float4{1,2,3,4} < 5));
    REPORTER_ASSERT(r, all( byte4{1,2,3,4} < 5));
    REPORTER_ASSERT(r, all(  int4{1,2,3,4} < 5.0f));
    float4 five = 5;
    REPORTER_ASSERT(r, all(five == 5.0f));
    REPORTER_ASSERT(r, all(five == 5));

    REPORTER_ASSERT(r, all(max(2, min(float4{1,2,3,4}, 3)) == float4{2,2,3,3}));

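    // div255(x*y) must equal x*y/255 rounded to nearest for every 8-bit x and y;
    // approx_scale() may be off by one, but must be exact when either input is 0 or 255.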
    for (int x = 0; x < 256; x++)
    for (int y = 0; y < 256; y++) {
        uint8_t want = (uint8_t)( 255*(x/255.0 * y/255.0) + 0.5 );

        {
            uint8_t got = skvx::div255(skvx::Vec<8, uint16_t>(x) *
                                       skvx::Vec<8, uint16_t>(y) )[0];
            REPORTER_ASSERT(r, got == want);
        }

        {
            uint8_t got = skvx::approx_scale(skvx::Vec<8,uint8_t>(x),
                                             skvx::Vec<8,uint8_t>(y))[0];

            REPORTER_ASSERT(r, got == want-1 ||
                               got == want   ||
                               got == want+1);
            if (x == 0 || y == 0 || x == 255 || y == 255) {
                REPORTER_ASSERT(r, got == want);
            }
        }
    }

    for (int x = 0; x < 256; x++)
    for (int y = 0; y < 256; y++) {
        uint16_t xy = x*y;

        // Make sure to cover implementation cases N=8, N<8, and N>8.
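        // mull() returns the full-width 16-bit product, so x*y is exact all the way up to 255*255.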
        REPORTER_ASSERT(r, all(mull(byte2 (x), byte2 (y)) == xy));
        REPORTER_ASSERT(r, all(mull(byte4 (x), byte4 (y)) == xy));
        REPORTER_ASSERT(r, all(mull(byte8 (x), byte8 (y)) == xy));
        REPORTER_ASSERT(r, all(mull(byte16(x), byte16(y)) == xy));
    }

    {
        // Intentionally not testing -0, as we don't care if it's 0x0000 or 0x8000.
        float8 fs = {+0.0f,+0.5f,+1.0f,+2.0f,
                     -4.0f,-0.5f,-1.0f,-2.0f};
        skvx::Vec<8,uint16_t> hs = {0x0000,0x3800,0x3c00,0x4000,
                                    0xc400,0xb800,0xbc00,0xc000};
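        // Worked example of the half encoding (1 sign, 5 exponent, 10 mantissa bits, bias 15):
        // +0.5f = 1.0 * 2^-1 -> 0|01110|0000000000 = 0x3800, and
        // -2.0f = -1.0 * 2^1 -> 1|10000|0000000000 = 0xc000.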
        REPORTER_ASSERT(r, all(skvx:: to_half(fs) == hs));
        REPORTER_ASSERT(r, all(skvx::from_half(hs) == fs));
    }
}

DEF_TEST(SkVx_xy, r) {
    float2 f = float2(1,2);
    REPORTER_ASSERT(r, all(f == float2{1,2}));
    REPORTER_ASSERT(r, f.x() == 1);
    REPORTER_ASSERT(r, f.y() == 2);
    f.y() = 9;
    REPORTER_ASSERT(r, all(f == float2{1,9}));
    f.x() = 0;
    REPORTER_ASSERT(r, all(f == float2(0,9)));
    f[0] = 8;
    REPORTER_ASSERT(r, f.x() == 8);
    f[1] = 6;
    REPORTER_ASSERT(r, f.y() == 6);
    REPORTER_ASSERT(r, all(f == float2(8,6)));
    f = f.yx();
    REPORTER_ASSERT(r, all(f == float2(6,8)));
    REPORTER_ASSERT(r, skvx::bit_pun<SkPoint>(f) == SkPoint::Make(6,8));
    SkPoint p;
    f.store(&p);
    REPORTER_ASSERT(r, p == SkPoint::Make(6,8));
    f.yx().store(&p);
    REPORTER_ASSERT(r, p == SkPoint::Make(8,6));
    REPORTER_ASSERT(r, all(f.xyxy() == float4(6,8,6,8)));
    REPORTER_ASSERT(r, all(f.xyxy() == float4(f,f)));
    REPORTER_ASSERT(r, all(skvx::join(f,f) == f.xyxy()));
    REPORTER_ASSERT(r, all(skvx::join(f.yx(),f) == float4(f.y(),f.x(),f)));
    REPORTER_ASSERT(r, all(skvx::join(f.yx(),f) == float4(f.yx(),f.x(),f.y())));
    REPORTER_ASSERT(r, all(skvx::join(f,f.yx()) == float4(f.x(),f.y(),f.yx())));
    REPORTER_ASSERT(r, all(skvx::join(f.yx(),f.yx()) == float4(f.yx(),f.yx())));
}

DEF_TEST(SkVx_xyzw, r) {
    float4 f = float4{1,2,3,4};
    REPORTER_ASSERT(r, all(f == float4(1,2,3,4)));
    REPORTER_ASSERT(r, all(f == float4(1,2,float2(3,4))));
    REPORTER_ASSERT(r, all(f == float4(float2(1,2),3,4)));
    REPORTER_ASSERT(r, all(f == float4(float2(1,2),float2(3,4))));
    f.xy() = float2(9,8);
    REPORTER_ASSERT(r, all(f == float4(9,8,3,4)));
    f.zw().x() = 7;
    f.zw().y() = 6;
    REPORTER_ASSERT(r, all(f == float4(9,8,7,6)));
    f.x() = 5;
    f.y() = 4;
    f.z() = 3;
    f.w() = 2;
    REPORTER_ASSERT(r, all(f == float4(5,4,3,2)));
    f[0] = 0;
    REPORTER_ASSERT(r, f.x() == 0);
    f[1] = 1;
    REPORTER_ASSERT(r, f.y() == 1);
    f[2] = 2;
    REPORTER_ASSERT(r, f.z() == 2);
    f[3] = 3;
    REPORTER_ASSERT(r, f.w() == 3);
    REPORTER_ASSERT(r, skvx::all(f.xy() == float2(0,1)));
    REPORTER_ASSERT(r, skvx::all(f.zw() == float2{2,3}));
    REPORTER_ASSERT(r, all(f == float4(0,1,2,3)));
    REPORTER_ASSERT(r, all(f.yxwz().lo == skvx::shuffle<1,0>(f)));
    REPORTER_ASSERT(r, all(f.yxwz().hi == skvx::shuffle<3,2>(f)));
    REPORTER_ASSERT(r, all(f.zwxy().lo.lo == f.z()));
    REPORTER_ASSERT(r, all(f.zwxy().lo.hi == f.w()));
    REPORTER_ASSERT(r, all(f.zwxy().hi.lo == f.x()));
    REPORTER_ASSERT(r, all(f.zwxy().hi.hi == f.y()));
    REPORTER_ASSERT(r, f.yxwz().lo.lo.val == f.y());
    REPORTER_ASSERT(r, f.yxwz().lo.hi.val == f.x());
    REPORTER_ASSERT(r, f.yxwz().hi.lo.val == f.w());
    REPORTER_ASSERT(r, f.yxwz().hi.hi.val == f.z());

    REPORTER_ASSERT(r, all(skvx::naive_if_then_else(int2(0,~0),
                                                    skvx::shuffle<3,2>(float4(0,1,2,3)),
                                                    float4(4,5,6,7).xy()) == float2(4,2)));
    REPORTER_ASSERT(r, all(skvx::if_then_else(int2(0,~0),
                                              skvx::shuffle<3,2>(float4(0,1,2,3)),
                                              float4(4,5,6,7).xy()) == float2(4,2)));
    REPORTER_ASSERT(r, all(skvx::naive_if_then_else(int2(0,~0).xyxy(),
                                                    float4(0,1,2,3).zwxy(),
                                                    float4(4,5,6,7)) == float4(4,3,6,1)));
    REPORTER_ASSERT(r, all(skvx::if_then_else(int2(0,~0).xyxy(),
                                              float4(0,1,2,3).zwxy(),
                                              float4(4,5,6,7)) == float4(4,3,6,1)));

    REPORTER_ASSERT(r, all(skvx::pin(float4(0,1,2,3).yxwz(),
                                     float2(1).xyxy(),
                                     float2(2).xyxy()) == float4(1,1,2,2)));
}

static bool check_approx_acos(skiatest::Reporter* r, float x, float approx_acos_x) {
    float acosf_x = acosf(x);
    float error = acosf_x - approx_acos_x;
    if (!(fabsf(error) <= SKVX_APPROX_ACOS_MAX_ERROR)) {
        ERRORF(r, "Larger-than-expected error from skvx::approx_acos\n"
                  " x= %f\n"
                  " approx_acos_x= %f (%f degrees)\n"
                  " acosf_x= %f (%f degrees)\n"
                  " error= %f (%f degrees)\n"
                  " tolerance= %f (%f degrees)\n\n",
                  x, approx_acos_x, SkRadiansToDegrees(approx_acos_x), acosf_x,
                  SkRadiansToDegrees(acosf_x), error, SkRadiansToDegrees(error),
                  SKVX_APPROX_ACOS_MAX_ERROR, SkRadiansToDegrees(SKVX_APPROX_ACOS_MAX_ERROR));
        return false;
    }
    return true;
}

DEF_TEST(SkVx_approx_acos, r) {
    float4 boundaries = skvx::approx_acos(float4{-1, 0, 1, 0});
    check_approx_acos(r, -1, boundaries[0]);
    check_approx_acos(r,  0, boundaries[1]);
    check_approx_acos(r, +1, boundaries[2]);

    // Select a distribution of starting points around which to begin testing approx_acos. These
    // fall roughly around the known minimum and maximum errors. No need to include -1, 0, or 1
    // since those were just tested above. (Those are tricky because 0 is an inflection and the
    // derivative is infinite at 1 and -1.)
    float8 x = {-.99f, -.8f, -.4f, -.2f, .2f, .4f, .8f, .99f};

    // Converge at the various local minima and maxima of "approx_acos(x) - acosf(x)" and verify
    // that approx_acos is always within SKVX_APPROX_ACOS_MAX_ERROR of the expected answer.
    float8 err_;
    for (int iter = 0; iter < 10; ++iter) {
        // Evaluate the inverse cosine approximation at the current x values.
        auto approx_acos_x = skvx::approx_acos(x);

        // Find d/dx(error)
        //    = d/dx(approx_acos(x) - acos(x))
        //    = (f'g - fg')/gg + 1/sqrt(1 - x^2), [where f = bx^3 + ax, g = dx^4 + cx^2 + 1]
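        // (Quotient rule on approx_acos(x) = f(x)/g(x) + constant gives the (f'g - fg')/gg term;
        //  subtracting d/dx(acos(x)) = -1/sqrt(1 - x^2) contributes the +1/sqrt(1 - x^2) term.)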
        float8 xx = x*x;
        float8 a = -0.939115566365855f;
        float8 b = 0.9217841528914573f;
        float8 c = -1.2845906244690837f;
        float8 d = 0.295624144969963174f;
        float8 f = (b*xx + a)*x;
        float8 f_ = 3*b*xx + a;
        float8 g = (d*xx + c)*xx + 1;
        float8 g_ = (4*d*xx + 2*c)*x;
        float8 gg = g*g;
        float8 q = skvx::sqrt(1 - xx);
        err_ = (f_*g - f*g_)/gg + 1/q;

        // Find d^2/dx^2(error)
        //    = ((f''g - fg'')g^2 - (f'g - fg')2gg') / g^4 + x(1 - x^2)^(-3/2)
        //    = ((f''g - fg'')g - (f'g - fg')2g') / g^3 + x(1 - x^2)^(-3/2)
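        // (Below, x(1 - x^2)^(-3/2) is written as x/((1 - xx)*q), since q = sqrt(1 - xx).)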
        float8 f__ = 6*b*x;
        float8 g__ = 12*d*xx + 2*c;
        float8 err__ = ((f__*g - f*g__)*g - (f_*g - f*g_)*2*g_) / (gg*g) + x/((1 - xx)*q);

#if 0
        SkDebugf("\n\niter %i\n", iter);
#endif
        // Ensure each lane's approximation is within maximum error.
        for (int j = 0; j < 8; ++j) {
#if 0
            SkDebugf("x=%f err=%f err'=%f err''=%f\n",
                     x[j], SkRadiansToDegrees(approx_acos_x[j] - acosf(x[j])),
                     SkRadiansToDegrees(err_[j]), SkRadiansToDegrees(err__[j]));
#endif
            if (!check_approx_acos(r, x[j], approx_acos_x[j])) {
                return;
            }
        }

        // Use Newton's method to update the x values to locations closer to their local minimum or
        // maximum. (This is where d/dx(error) == 0.)
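        // (Newton step on the first derivative: x <- x - err'(x)/err''(x).)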
        x -= err_/err__;
        x = skvx::pin<8,float>(x, -.99f, .99f);
    }

    // Ensure each lane converged to a local minimum or maximum.
    for (int j = 0; j < 8; ++j) {
        REPORTER_ASSERT(r, SkScalarNearlyZero(err_[j]));
    }

    // Make sure we found all the actual known locations of local min/max error.
    for (float knownRoot : {-0.983536f, -0.867381f, -0.410923f, 0.410923f, 0.867381f, 0.983536f}) {
        REPORTER_ASSERT(r, skvx::any(skvx::abs(x - knownRoot) < SK_ScalarNearlyZero));
    }
}

template<int N, typename T> void check_strided_loads(skiatest::Reporter* r) {
    using Vec = skvx::Vec<N,T>;
    T values[N*4];
    std::iota(values, values + N*4, 0);
    Vec a, b, c, d;
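    // strided_load2() de-interleaves {a0,b0,a1,b1,...} into a and b; strided_load4() does the
    // same for groups of four, as the index checks below verify.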
    skvx::strided_load2(values, a, b);
    for (int i = 0; i < N; ++i) {
        REPORTER_ASSERT(r, a[i] == values[i*2]);
        REPORTER_ASSERT(r, b[i] == values[i*2 + 1]);
    }
    skvx::strided_load4(values, a, b, c, d);
    for (int i = 0; i < N; ++i) {
        REPORTER_ASSERT(r, a[i] == values[i*4]);
        REPORTER_ASSERT(r, b[i] == values[i*4 + 1]);
        REPORTER_ASSERT(r, c[i] == values[i*4 + 2]);
        REPORTER_ASSERT(r, d[i] == values[i*4 + 3]);
    }
}

template<typename T> void check_strided_loads(skiatest::Reporter* r) {
    check_strided_loads<1,T>(r);
    check_strided_loads<2,T>(r);
    check_strided_loads<4,T>(r);
    check_strided_loads<8,T>(r);
    check_strided_loads<16,T>(r);
    check_strided_loads<32,T>(r);
}

DEF_TEST(SkVx_strided_loads, r) {
    check_strided_loads<uint32_t>(r);
    check_strided_loads<uint16_t>(r);
    check_strided_loads<uint8_t>(r);
    check_strided_loads<int32_t>(r);
    check_strided_loads<int16_t>(r);
    check_strided_loads<int8_t>(r);
    check_strided_loads<float>(r);
}

DEF_TEST(SkVM_ScaledDividerU32, r) {
    static constexpr uint32_t kMax = std::numeric_limits<uint32_t>::max();

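    // divide() is allowed to be off by at most one from the exactly-rounded quotient;
    // errorBounds() checks that, clamping the acceptable window at 0 and kMax.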
    auto errorBounds = [&](uint32_t actual, uint32_t expected) {
        uint32_t lowerLimit = expected == 0 ? 0 : expected - 1,
                 upperLimit = expected == kMax ? kMax : expected + 1;
        return lowerLimit <= actual && actual <= upperLimit;
    };

    auto test = [&](uint32_t denom) {
        // half == 1, so the max to check is kMax - 1.
        skvx::ScaledDividerU32 d(denom);
        uint32_t maxCheck = static_cast<uint32_t>(floor((double)(kMax - d.half()) / denom + 0.5));
        REPORTER_ASSERT(r, errorBounds(d.divide(kMax)[0], maxCheck));
        for (uint32_t i = 0; i < kMax - d.half(); i += 65535) {
            uint32_t expected = static_cast<uint32_t>(floor((double)i / denom + 0.5));
            auto actual = d.divide(i + d.half());
            if (!errorBounds(actual[0], expected)) {
                SkDebugf("i: %u expected: %u actual: %u\n", i, expected, actual[0]);
            }
            // Make sure all the lanes are the same.
            for (int e = 1; e < 4; e++) {
                SkASSERT(actual[0] == actual[e]);
            }
        }
    };

    test(2);
    test(3);
    test(5);
    test(7);
    test(27);
    test(65'535);
    test(15'485'863);
    test(512'927'377);
}