/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SKVX_DEFINED
#define SKVX_DEFINED

// skvx::Vec<N,T> are SIMD vectors of N T's, a v1.5 successor to SkNx<N,T>.
//
// This time we're leaning a bit less on platform-specific intrinsics and a bit
// more on Clang/GCC vector extensions, but still keeping the option open to
// drop in platform-specific intrinsics, actually more easily than before.
//
// We've also fixed a few of the caveats that used to make SkNx awkward to work
// with across translation units.  skvx::Vec<N,T> always has N*sizeof(T) size
// and alignment and is safe to use across translation units freely.
// (Ideally we'd only align to T, but that tanks ARMv7 NEON codegen.)

// Please try to keep this file independent of Skia headers.
#include <algorithm>         // std::min, std::max
#include <cassert>           // assert()
#include <cmath>             // ceilf, floorf, truncf, roundf, sqrtf, etc.
#include <cstdint>           // intXX_t
#include <cstring>           // memcpy()
#include <initializer_list>  // std::initializer_list
#include <limits>            // std::numeric_limits
#include <type_traits>       // std::enable_if_t, std::is_convertible, std::is_unsigned_v
#include <utility>           // std::index_sequence

// Users may disable SIMD with SKNX_NO_SIMD, which may be set via compiler flags.
// The gn build has no option which sets SKNX_NO_SIMD.
// Use SKVX_USE_SIMD internally to avoid confusing double negation.
// Do not use 'defined' in a macro expansion.
#if !defined(SKNX_NO_SIMD)
    #define SKVX_USE_SIMD 1
#else
    #define SKVX_USE_SIMD 0
#endif

#if SKVX_USE_SIMD
    #if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__)
        #include <immintrin.h>
    #elif defined(__ARM_NEON)
        #include <arm_neon.h>
    #elif defined(__wasm_simd128__)
        #include <wasm_simd128.h>
    #endif
#endif

// To avoid ODR violations, all methods must be force-inlined...
#if defined(_MSC_VER)
    #define SKVX_ALWAYS_INLINE __forceinline
#else
    #define SKVX_ALWAYS_INLINE __attribute__((always_inline))
#endif

// ... and all standalone functions must be static.  Please use these helpers:
#define SI    static inline
#define SIT   template <       typename T> SI
#define SIN   template <int N            > SI
#define SINT  template <int N, typename T> SI
#define SINTU template <int N, typename T, typename U, \
                        typename=std::enable_if_t<std::is_convertible<U,T>::value>> SI

namespace skvx {

template <int N, typename T>
struct alignas(N*sizeof(T)) Vec;

template <int... Ix, int N, typename T>
SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>&);

template <typename D, typename S>
SI D bit_pun(const S&);

// All Vec have the same simple memory layout, the same as `T vec[N]`.
template <int N, typename T>
struct alignas(N*sizeof(T)) VecStorage {
    SKVX_ALWAYS_INLINE VecStorage() = default;
    SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {}

    Vec<N/2,T> lo, hi;
};

template <typename T>
struct VecStorage<4,T> {
    SKVX_ALWAYS_INLINE VecStorage() = default;
    SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {}
    SKVX_ALWAYS_INLINE VecStorage(T x, T y, T z, T w) : lo(x,y), hi(z,w) {}
    SKVX_ALWAYS_INLINE VecStorage(Vec<2,T> xy, T z, T w) : lo(xy), hi(z,w) {}
    SKVX_ALWAYS_INLINE VecStorage(T x, T y, Vec<2,T> zw) : lo(x,y), hi(zw) {}
    SKVX_ALWAYS_INLINE VecStorage(Vec<2,T> xy, Vec<2,T> zw) : lo(xy), hi(zw) {}

    SKVX_ALWAYS_INLINE Vec<2,T>& xy() { return lo; }
    SKVX_ALWAYS_INLINE Vec<2,T>& zw() { return hi; }
    SKVX_ALWAYS_INLINE T& x() { return lo.lo.val; }
    SKVX_ALWAYS_INLINE T& y() { return lo.hi.val; }
    SKVX_ALWAYS_INLINE T& z() { return hi.lo.val; }
    SKVX_ALWAYS_INLINE T& w() { return hi.hi.val; }

    SKVX_ALWAYS_INLINE Vec<2,T> xy() const { return lo; }
    SKVX_ALWAYS_INLINE Vec<2,T> zw() const { return hi; }
    SKVX_ALWAYS_INLINE T x() const { return lo.lo.val; }
    SKVX_ALWAYS_INLINE T y() const { return lo.hi.val; }
    SKVX_ALWAYS_INLINE T z() const { return hi.lo.val; }
    SKVX_ALWAYS_INLINE T w() const { return hi.hi.val; }

    // Exchange-based swizzles. These should take 1 cycle on NEON and 3 (pipelined) cycles on SSE.
    SKVX_ALWAYS_INLINE Vec<4,T> yxwz() const { return shuffle<1,0,3,2>(bit_pun<Vec<4,T>>(*this)); }
    SKVX_ALWAYS_INLINE Vec<4,T> zwxy() const { return shuffle<2,3,0,1>(bit_pun<Vec<4,T>>(*this)); }

    Vec<2,T> lo, hi;
};

template <typename T>
struct VecStorage<2,T> {
    SKVX_ALWAYS_INLINE VecStorage() = default;
    SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {}
    SKVX_ALWAYS_INLINE VecStorage(T x, T y) : lo(x), hi(y) {}

    SKVX_ALWAYS_INLINE T& x() { return lo.val; }
    SKVX_ALWAYS_INLINE T& y() { return hi.val; }

    SKVX_ALWAYS_INLINE T x() const { return lo.val; }
    SKVX_ALWAYS_INLINE T y() const { return hi.val; }

    // This exchange-based swizzle should take 1 cycle on NEON and 3 (pipelined) cycles on SSE.
    SKVX_ALWAYS_INLINE Vec<2,T> yx() const { return shuffle<1,0>(bit_pun<Vec<2,T>>(*this)); }

    SKVX_ALWAYS_INLINE Vec<4,T> xyxy() const {
        return Vec<4,T>(bit_pun<Vec<2,T>>(*this), bit_pun<Vec<2,T>>(*this));
    }

    Vec<1,T> lo, hi;
};

template <int N, typename T>
struct alignas(N*sizeof(T)) Vec : public VecStorage<N,T> {
    static_assert((N & (N-1)) == 0,        "N must be a power of 2.");
    static_assert(sizeof(T) >= alignof(T), "What kind of unusual T is this?");

    // Methods belong here in the class declaration of Vec only if:
    //   - they must be here, like constructors or operator[];
    //   - they'll definitely never want a specialized implementation.
    // Other operations on Vec should be defined outside the type.

    SKVX_ALWAYS_INLINE Vec() = default;

    using VecStorage<N,T>::VecStorage;

    // NOTE: Vec{x} produces x000..., whereas Vec(x) produces xxxx..., since this constructor fills
    // unspecified lanes with 0s, whereas the single T constructor fills all lanes with the value.
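    // For example (illustrative):
    //     Vec<4,int> a = {3};   // {3,0,0,0}
    //     Vec<4,int> b(3);      // {3,3,3,3}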
    SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) {
        T vals[N] = {0};
        memcpy(vals, xs.begin(), std::min(xs.size(), (size_t)N)*sizeof(T));

        this->lo = Vec<N/2,T>::Load(vals +   0);
        this->hi = Vec<N/2,T>::Load(vals + N/2);
    }

    SKVX_ALWAYS_INLINE T  operator[](int i) const { return i<N/2 ? this->lo[i] : this->hi[i-N/2]; }
    SKVX_ALWAYS_INLINE T& operator[](int i)       { return i<N/2 ? this->lo[i] : this->hi[i-N/2]; }

    SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
        Vec v;
        memcpy(&v, ptr, sizeof(Vec));
        return v;
    }
    SKVX_ALWAYS_INLINE void store(void* ptr) const {
        memcpy(ptr, this, sizeof(Vec));
    }
};
174 
175 template <typename T>
176 struct Vec<1,T> {
177     T val;
178 
179     SKVX_ALWAYS_INLINE Vec() = default;
180 
181     Vec(T s) : val(s) {}
182 
183     SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) : val(xs.size() ? *xs.begin() : 0) {}
184 
185     SKVX_ALWAYS_INLINE T  operator[](int) const { return val; }
186     SKVX_ALWAYS_INLINE T& operator[](int)       { return val; }
187 
188     SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
189         Vec v;
190         memcpy(&v, ptr, sizeof(Vec));
191         return v;
192     }
193     SKVX_ALWAYS_INLINE void store(void* ptr) const {
194         memcpy(ptr, this, sizeof(Vec));
195     }
196 };
197 
198 template <typename D, typename S>
199 SI D bit_pun(const S& s) {
200     static_assert(sizeof(D) == sizeof(S));
201     D d;
202     memcpy(&d, &s, sizeof(D));
203     return d;
204 }
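
// For instance (illustrative), bit_pun() reinterprets a float vector's bits as integers:
//     Vec<4,float>   f = ...;
//     Vec<4,int32_t> i = bit_pun<Vec<4,int32_t>>(f);   // same 128 bits, viewed as ints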

// Translate from a value type T to its corresponding Mask, the result of a comparison.
template <typename T> struct Mask { using type = T; };
template <> struct Mask<float > { using type = int32_t; };
template <> struct Mask<double> { using type = int64_t; };
template <typename T> using M = typename Mask<T>::type;
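
// So comparing float vectors yields integer masks, e.g. (illustrative):
//     Vec<4,float> x, y;
//     Vec<4,int32_t> m = (x < y);   // each lane is ~0 (all bits set) if true, 0 if false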

// Join two Vec<N,T> into one Vec<2N,T>.
SINT Vec<2*N,T> join(const Vec<N,T>& lo, const Vec<N,T>& hi) {
    Vec<2*N,T> v;
    v.lo = lo;
    v.hi = hi;
    return v;
}
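
// e.g. join(Vec<2,float>{1,2}, Vec<2,float>{3,4}) ~> Vec<4,float>{1,2,3,4}.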

// We have three strategies for implementing Vec operations:
//    1) lean on Clang/GCC vector extensions when available;
//    2) use map() to apply a scalar function lane-wise;
//    3) recurse on lo/hi to scalar portable implementations.
// We can slot in platform-specific implementations as overloads for particular Vec<N,T>,
// or often integrate them directly into the recursion of style 3), allowing fine control.

#if SKVX_USE_SIMD && (defined(__clang__) || defined(__GNUC__))

    // VExt<N,T> types have the same size as Vec<N,T> and support most operations directly.
    #if defined(__clang__)
        template <int N, typename T>
        using VExt = T __attribute__((ext_vector_type(N)));

    #elif defined(__GNUC__)
        template <int N, typename T>
        struct VExtHelper {
            typedef T __attribute__((vector_size(N*sizeof(T)))) type;
        };

        template <int N, typename T>
        using VExt = typename VExtHelper<N,T>::type;

        // For some reason some (new!) versions of GCC cannot seem to deduce N in the generic
        // to_vec<N,T>() below for N=4 and T=float.  This workaround seems to help...
        SI Vec<4,float> to_vec(VExt<4,float> v) { return bit_pun<Vec<4,float>>(v); }
    #endif

    SINT VExt<N,T> to_vext(const Vec<N,T>& v) { return bit_pun<VExt<N,T>>(v); }
    SINT Vec <N,T> to_vec(const VExt<N,T>& v) { return bit_pun<Vec <N,T>>(v); }

    SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) {
        return to_vec<N,T>(to_vext(x) + to_vext(y));
    }
    SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) {
        return to_vec<N,T>(to_vext(x) - to_vext(y));
    }
    SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) {
        return to_vec<N,T>(to_vext(x) * to_vext(y));
    }
    SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) {
        return to_vec<N,T>(to_vext(x) / to_vext(y));
    }

    SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) {
        return to_vec<N,T>(to_vext(x) ^ to_vext(y));
    }
    SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) {
        return to_vec<N,T>(to_vext(x) & to_vext(y));
    }
    SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) {
        return to_vec<N,T>(to_vext(x) | to_vext(y));
    }

    SINT Vec<N,T> operator!(const Vec<N,T>& x) { return to_vec<N,T>(!to_vext(x)); }
    SINT Vec<N,T> operator-(const Vec<N,T>& x) { return to_vec<N,T>(-to_vext(x)); }
    SINT Vec<N,T> operator~(const Vec<N,T>& x) { return to_vec<N,T>(~to_vext(x)); }

    SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) << k); }
    SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) >> k); }

    SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) {
        return bit_pun<Vec<N,M<T>>>(to_vext(x) == to_vext(y));
    }
    SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) {
        return bit_pun<Vec<N,M<T>>>(to_vext(x) != to_vext(y));
    }
    SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) {
        return bit_pun<Vec<N,M<T>>>(to_vext(x) <= to_vext(y));
    }
    SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) {
        return bit_pun<Vec<N,M<T>>>(to_vext(x) >= to_vext(y));
    }
    SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) {
        return bit_pun<Vec<N,M<T>>>(to_vext(x) <  to_vext(y));
    }
    SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) {
        return bit_pun<Vec<N,M<T>>>(to_vext(x) >  to_vext(y));
    }

#else

    // Either SKNX_NO_SIMD is defined, or Clang/GCC vector extensions are not available.
    // We'll implement things portably with N==1 scalar implementations and recursion onto them.

    // N == 1 scalar implementations.
    SIT Vec<1,T> operator+(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val + y.val; }
    SIT Vec<1,T> operator-(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val - y.val; }
    SIT Vec<1,T> operator*(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val * y.val; }
    SIT Vec<1,T> operator/(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val / y.val; }

    SIT Vec<1,T> operator^(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val ^ y.val; }
    SIT Vec<1,T> operator&(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val & y.val; }
    SIT Vec<1,T> operator|(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val | y.val; }

    SIT Vec<1,T> operator!(const Vec<1,T>& x) { return !x.val; }
    SIT Vec<1,T> operator-(const Vec<1,T>& x) { return -x.val; }
    SIT Vec<1,T> operator~(const Vec<1,T>& x) { return ~x.val; }

    SIT Vec<1,T> operator<<(const Vec<1,T>& x, int k) { return x.val << k; }
    SIT Vec<1,T> operator>>(const Vec<1,T>& x, int k) { return x.val >> k; }

    SIT Vec<1,M<T>> operator==(const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val == y.val ? ~0 : 0;
    }
    SIT Vec<1,M<T>> operator!=(const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val != y.val ? ~0 : 0;
    }
    SIT Vec<1,M<T>> operator<=(const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val <= y.val ? ~0 : 0;
    }
    SIT Vec<1,M<T>> operator>=(const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val >= y.val ? ~0 : 0;
    }
    SIT Vec<1,M<T>> operator< (const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val <  y.val ? ~0 : 0;
    }
    SIT Vec<1,M<T>> operator> (const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val >  y.val ? ~0 : 0;
    }

    // Recurse on lo/hi down to N==1 scalar implementations.
    SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo + y.lo, x.hi + y.hi);
    }
    SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo - y.lo, x.hi - y.hi);
    }
    SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo * y.lo, x.hi * y.hi);
    }
    SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo / y.lo, x.hi / y.hi);
    }

    SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo ^ y.lo, x.hi ^ y.hi);
    }
    SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo & y.lo, x.hi & y.hi);
    }
    SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo | y.lo, x.hi | y.hi);
    }

    SINT Vec<N,T> operator!(const Vec<N,T>& x) { return join(!x.lo, !x.hi); }
    SINT Vec<N,T> operator-(const Vec<N,T>& x) { return join(-x.lo, -x.hi); }
    SINT Vec<N,T> operator~(const Vec<N,T>& x) { return join(~x.lo, ~x.hi); }

    SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return join(x.lo << k, x.hi << k); }
    SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return join(x.lo >> k, x.hi >> k); }

    SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo == y.lo, x.hi == y.hi);
    }
    SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo != y.lo, x.hi != y.hi);
    }
    SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo <= y.lo, x.hi <= y.hi);
    }
    SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo >= y.lo, x.hi >= y.hi);
    }
    SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo <  y.lo, x.hi <  y.hi);
    }
    SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo >  y.lo, x.hi >  y.hi);
    }
#endif

// Scalar/vector operations splat the scalar to a vector.
SINTU Vec<N,T>    operator+ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) +  y; }
SINTU Vec<N,T>    operator- (U x, const Vec<N,T>& y) { return Vec<N,T>(x) -  y; }
SINTU Vec<N,T>    operator* (U x, const Vec<N,T>& y) { return Vec<N,T>(x) *  y; }
SINTU Vec<N,T>    operator/ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) /  y; }
SINTU Vec<N,T>    operator^ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) ^  y; }
SINTU Vec<N,T>    operator& (U x, const Vec<N,T>& y) { return Vec<N,T>(x) &  y; }
SINTU Vec<N,T>    operator| (U x, const Vec<N,T>& y) { return Vec<N,T>(x) |  y; }
SINTU Vec<N,M<T>> operator==(U x, const Vec<N,T>& y) { return Vec<N,T>(x) == y; }
SINTU Vec<N,M<T>> operator!=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) != y; }
SINTU Vec<N,M<T>> operator<=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) <= y; }
SINTU Vec<N,M<T>> operator>=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) >= y; }
SINTU Vec<N,M<T>> operator< (U x, const Vec<N,T>& y) { return Vec<N,T>(x) <  y; }
SINTU Vec<N,M<T>> operator> (U x, const Vec<N,T>& y) { return Vec<N,T>(x) >  y; }

SINTU Vec<N,T>    operator+ (const Vec<N,T>& x, U y) { return x +  Vec<N,T>(y); }
SINTU Vec<N,T>    operator- (const Vec<N,T>& x, U y) { return x -  Vec<N,T>(y); }
SINTU Vec<N,T>    operator* (const Vec<N,T>& x, U y) { return x *  Vec<N,T>(y); }
SINTU Vec<N,T>    operator/ (const Vec<N,T>& x, U y) { return x /  Vec<N,T>(y); }
SINTU Vec<N,T>    operator^ (const Vec<N,T>& x, U y) { return x ^  Vec<N,T>(y); }
SINTU Vec<N,T>    operator& (const Vec<N,T>& x, U y) { return x &  Vec<N,T>(y); }
SINTU Vec<N,T>    operator| (const Vec<N,T>& x, U y) { return x |  Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator==(const Vec<N,T>& x, U y) { return x == Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator!=(const Vec<N,T>& x, U y) { return x != Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator<=(const Vec<N,T>& x, U y) { return x <= Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator>=(const Vec<N,T>& x, U y) { return x >= Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator< (const Vec<N,T>& x, U y) { return x <  Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator> (const Vec<N,T>& x, U y) { return x >  Vec<N,T>(y); }

SINT Vec<N,T>& operator+=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x + y); }
SINT Vec<N,T>& operator-=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x - y); }
SINT Vec<N,T>& operator*=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x * y); }
SINT Vec<N,T>& operator/=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x / y); }
SINT Vec<N,T>& operator^=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x ^ y); }
SINT Vec<N,T>& operator&=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x & y); }
SINT Vec<N,T>& operator|=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x | y); }

SINTU Vec<N,T>& operator+=(Vec<N,T>& x, U y) { return (x = x + Vec<N,T>(y)); }
SINTU Vec<N,T>& operator-=(Vec<N,T>& x, U y) { return (x = x - Vec<N,T>(y)); }
SINTU Vec<N,T>& operator*=(Vec<N,T>& x, U y) { return (x = x * Vec<N,T>(y)); }
SINTU Vec<N,T>& operator/=(Vec<N,T>& x, U y) { return (x = x / Vec<N,T>(y)); }
SINTU Vec<N,T>& operator^=(Vec<N,T>& x, U y) { return (x = x ^ Vec<N,T>(y)); }
SINTU Vec<N,T>& operator&=(Vec<N,T>& x, U y) { return (x = x & Vec<N,T>(y)); }
SINTU Vec<N,T>& operator|=(Vec<N,T>& x, U y) { return (x = x | Vec<N,T>(y)); }

SINT Vec<N,T>& operator<<=(Vec<N,T>& x, int bits) { return (x = x << bits); }
SINT Vec<N,T>& operator>>=(Vec<N,T>& x, int bits) { return (x = x >> bits); }

// Some operations we want are not expressible with Clang/GCC vector extensions.

// Clang can reason about naive_if_then_else() and optimize through it better
// than if_then_else(), so it's sometimes useful to call it directly when we
// think an entire expression should optimize away, e.g. min()/max().
SINT Vec<N,T> naive_if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {
    return bit_pun<Vec<N,T>>(( cond & bit_pun<Vec<N, M<T>>>(t)) |
                             (~cond & bit_pun<Vec<N, M<T>>>(e)) );
}

SIT Vec<1,T> if_then_else(const Vec<1,M<T>>& cond, const Vec<1,T>& t, const Vec<1,T>& e) {
    // In practice this scalar implementation is unlikely to be used.  See next if_then_else().
    return bit_pun<Vec<1,T>>(( cond & bit_pun<Vec<1, M<T>>>(t)) |
                             (~cond & bit_pun<Vec<1, M<T>>>(e)) );
}
SINT Vec<N,T> if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {
    // Specializations inline here so they can generalize what types they apply to.
#if SKVX_USE_SIMD && defined(__AVX2__)
    if constexpr (N*sizeof(T) == 32) {
        return bit_pun<Vec<N,T>>(_mm256_blendv_epi8(bit_pun<__m256i>(e),
                                                    bit_pun<__m256i>(t),
                                                    bit_pun<__m256i>(cond)));
    }
#endif
#if SKVX_USE_SIMD && defined(__SSE4_1__)
    if constexpr (N*sizeof(T) == 16) {
        return bit_pun<Vec<N,T>>(_mm_blendv_epi8(bit_pun<__m128i>(e),
                                                 bit_pun<__m128i>(t),
                                                 bit_pun<__m128i>(cond)));
    }
#endif
#if SKVX_USE_SIMD && defined(__ARM_NEON)
    if constexpr (N*sizeof(T) == 16) {
        return bit_pun<Vec<N,T>>(vbslq_u8(bit_pun<uint8x16_t>(cond),
                                          bit_pun<uint8x16_t>(t),
                                          bit_pun<uint8x16_t>(e)));
    }
#endif
    // Recurse for large vectors to try to hit the specializations above.
    if constexpr (N*sizeof(T) > 16) {
        return join(if_then_else(cond.lo, t.lo, e.lo),
                    if_then_else(cond.hi, t.hi, e.hi));
    }
    // This default can lead to better code than recursing onto scalars.
    return naive_if_then_else(cond, t, e);
}
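
// e.g. clamping negative lanes to zero (illustrative):
//     Vec<4,float> v = ...;
//     v = if_then_else(v < 0.0f, Vec<4,float>(0.0f), v);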

SIT  bool any(const Vec<1,T>& x) { return x.val != 0; }
SINT bool any(const Vec<N,T>& x) {
    // For any(), the _mm_testz intrinsics are correct and don't require comparing 'x' to 0, so it's
    // lower latency compared to _mm_movemask + _mm_cmpneq on plain SSE.
#if SKVX_USE_SIMD && defined(__AVX2__)
    if constexpr (N*sizeof(T) == 32) {
        return !_mm256_testz_si256(bit_pun<__m256i>(x), _mm256_set1_epi32(-1));
    }
#endif
#if SKVX_USE_SIMD && defined(__SSE4_1__)
    if constexpr (N*sizeof(T) == 16) {
        return !_mm_testz_si128(bit_pun<__m128i>(x), _mm_set1_epi32(-1));
    }
#endif
#if SKVX_USE_SIMD && defined(__SSE__)
    if constexpr (N*sizeof(T) == 16) {
        // On SSE, movemask checks only the MSB in each lane, which is fine if the lanes were set
        // directly from a comparison op (which sets all bits to 1 when true), but skvx::Vec<>
        // treats any non-zero value as true, so we have to compare 'x' to 0 before calling movemask.
        return _mm_movemask_ps(_mm_cmpneq_ps(bit_pun<__m128>(x), _mm_set1_ps(0))) != 0b0000;
    }
#endif
#if SKVX_USE_SIMD && defined(__aarch64__)
    // On 64-bit NEON, take the max across lanes, which will be non-zero if any lane was true.
    // The specific lane-size doesn't really matter in this case since it's really any set bit
    // that we're looking for.
    if constexpr (N*sizeof(T) == 8 ) { return vmaxv_u8 (bit_pun<uint8x8_t> (x)) > 0; }
    if constexpr (N*sizeof(T) == 16) { return vmaxvq_u8(bit_pun<uint8x16_t>(x)) > 0; }
#endif
#if SKVX_USE_SIMD && defined(__wasm_simd128__)
    if constexpr (N == 4 && sizeof(T) == 4) {
        return wasm_i32x4_any_true(bit_pun<VExt<4,int>>(x));
    }
#endif
    return any(x.lo)
        || any(x.hi);
}

SIT  bool all(const Vec<1,T>& x) { return x.val != 0; }
SINT bool all(const Vec<N,T>& x) {
    // Unlike any(), we have to respect the lane layout, or we'll miss cases where a
    // true lane has a mix of 0 and 1 bits.
#if SKVX_USE_SIMD && defined(__SSE__)
    // Unfortunately, the _mm_testc intrinsics don't let us avoid the comparison to 0 for all()'s
    // correctness, so always just use the plain SSE version.
    if constexpr (N == 4 && sizeof(T) == 4) {
        return _mm_movemask_ps(_mm_cmpneq_ps(bit_pun<__m128>(x), _mm_set1_ps(0))) == 0b1111;
    }
#endif
#if SKVX_USE_SIMD && defined(__aarch64__)
    // On 64-bit NEON, take the min across the lanes, which will be non-zero if all lanes are != 0.
    if constexpr (sizeof(T)==1 && N==8)  {return vminv_u8  (bit_pun<uint8x8_t> (x)) > 0;}
    if constexpr (sizeof(T)==1 && N==16) {return vminvq_u8 (bit_pun<uint8x16_t>(x)) > 0;}
    if constexpr (sizeof(T)==2 && N==4)  {return vminv_u16 (bit_pun<uint16x4_t>(x)) > 0;}
    if constexpr (sizeof(T)==2 && N==8)  {return vminvq_u16(bit_pun<uint16x8_t>(x)) > 0;}
    if constexpr (sizeof(T)==4 && N==2)  {return vminv_u32 (bit_pun<uint32x2_t>(x)) > 0;}
    if constexpr (sizeof(T)==4 && N==4)  {return vminvq_u32(bit_pun<uint32x4_t>(x)) > 0;}
#endif
#if SKVX_USE_SIMD && defined(__wasm_simd128__)
    if constexpr (N == 4 && sizeof(T) == 4) {
        return wasm_i32x4_all_true(bit_pun<VExt<4,int>>(x));
    }
#endif
    return all(x.lo)
        && all(x.hi);
}
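
// e.g. any()/all() pair naturally with the comparison operators (illustrative):
//     if (all(x >= 0.0f)) { /* every lane is non-negative */ }
//     if (any(x != y))    { /* at least one lane differs */ }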

// cast() Vec<N,S> to Vec<N,D>, as if applying a C-cast to each lane.
// TODO: implement with map()?
template <typename D, typename S>
SI Vec<1,D> cast(const Vec<1,S>& src) { return (D)src.val; }

template <typename D, int N, typename S>
SI Vec<N,D> cast(const Vec<N,S>& src) {
#if SKVX_USE_SIMD && defined(__clang__)
    return to_vec(__builtin_convertvector(to_vext(src), VExt<N,D>));
#else
    return join(cast<D>(src.lo), cast<D>(src.hi));
#endif
}
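
// e.g. cast<int>(Vec<4,float>{1.5f, -2.5f, 3.0f, 4.0f}) ~> {1, -2, 3, 4}, truncating like a C-cast.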

// min/max match the logic of std::min/std::max, which is important when NaN is involved.
SIT  T min(const Vec<1,T>& x) { return x.val; }
SIT  T max(const Vec<1,T>& x) { return x.val; }
SINT T min(const Vec<N,T>& x) { return std::min(min(x.lo), min(x.hi)); }
SINT T max(const Vec<N,T>& x) { return std::max(max(x.lo), max(x.hi)); }

SINT Vec<N,T> min(const Vec<N,T>& x, const Vec<N,T>& y) { return naive_if_then_else(y < x, y, x); }
SINT Vec<N,T> max(const Vec<N,T>& x, const Vec<N,T>& y) { return naive_if_then_else(x < y, y, x); }

SINTU Vec<N,T> min(const Vec<N,T>& x, U y) { return min(x, Vec<N,T>(y)); }
SINTU Vec<N,T> max(const Vec<N,T>& x, U y) { return max(x, Vec<N,T>(y)); }
SINTU Vec<N,T> min(U x, const Vec<N,T>& y) { return min(Vec<N,T>(x), y); }
SINTU Vec<N,T> max(U x, const Vec<N,T>& y) { return max(Vec<N,T>(x), y); }

// pin matches the logic of SkTPin, which is important when NaN is involved. It always returns
// values in the range lo..hi, and if x is NaN, it returns lo.
SINT Vec<N,T> pin(const Vec<N,T>& x, const Vec<N,T>& lo, const Vec<N,T>& hi) {
    return max(lo, min(x, hi));
}
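
// e.g. pin() clamps lane-wise and maps NaN lanes to lo (illustrative):
//     pin(Vec<4,float>{-1.0f, 0.5f, 2.0f, NAN},
//         Vec<4,float>(0.0f), Vec<4,float>(1.0f))   ~> {0, 0.5f, 1, 0}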

// Shuffle values from a vector pretty arbitrarily:
//    skvx::Vec<4,float> rgba = {R,G,B,A};
//    shuffle<2,1,0,3>        (rgba) ~> {B,G,R,A}
//    shuffle<2,1>            (rgba) ~> {B,G}
//    shuffle<2,1,2,1,2,1,2,1>(rgba) ~> {B,G,B,G,B,G,B,G}
//    shuffle<3,3,3,3>        (rgba) ~> {A,A,A,A}
// The only real restriction is that the output also be a legal N=power-of-two skvx::Vec.
template <int... Ix, int N, typename T>
SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>& x) {
#if SKVX_USE_SIMD && defined(__clang__)
    // TODO: can we just always use { x[Ix]... }?
    return to_vec<sizeof...(Ix),T>(__builtin_shufflevector(to_vext(x), to_vext(x), Ix...));
#else
    return { x[Ix]... };
#endif
}

// Call map(fn, x) for a vector with fn() applied to each lane of x, { fn(x[0]), fn(x[1]), ... },
// or map(fn, x,y) for a vector of fn(x[i], y[i]), etc.

template <typename Fn, typename... Args, size_t... I>
SI auto map(std::index_sequence<I...>,
            Fn&& fn, const Args&... args) -> skvx::Vec<sizeof...(I), decltype(fn(args[0]...))> {
    auto lane = [&](size_t i)
#if defined(__clang__)
    // CFI, specifically -fsanitize=cfi-icall, seems to give a false positive here,
    // with errors like "control flow integrity check for type 'float (float)
    // noexcept' failed during indirect function call... note: sqrtf.cfi_jt defined
    // here".  But we can be quite sure fn is the right type: it's all inferred!
    // So, stifle CFI in this function.
    __attribute__((no_sanitize("cfi")))
#endif
    { return fn(args[static_cast<int>(i)]...); };

    return { lane(I)... };
}

template <typename Fn, int N, typename T, typename... Rest>
auto map(Fn&& fn, const Vec<N,T>& first, const Rest&... rest) {
    // Derive a {0...N-1} index_sequence from the size of the first arg: N lanes in, N lanes out.
    return map(std::make_index_sequence<N>{}, fn, first, rest...);
}
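
// e.g. applying an arbitrary scalar function lane-wise (illustrative):
//     Vec<4,float> deg = ...;
//     Vec<4,float> rad = map([](float d) { return d * (3.14159265f/180); }, deg);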

SIN Vec<N,float>  ceil(const Vec<N,float>& x) { return map( ceilf, x); }
SIN Vec<N,float> floor(const Vec<N,float>& x) { return map(floorf, x); }
SIN Vec<N,float> trunc(const Vec<N,float>& x) { return map(truncf, x); }
SIN Vec<N,float> round(const Vec<N,float>& x) { return map(roundf, x); }
SIN Vec<N,float>  sqrt(const Vec<N,float>& x) { return map( sqrtf, x); }
SIN Vec<N,float>   abs(const Vec<N,float>& x) { return map( fabsf, x); }
SIN Vec<N,float>   fma(const Vec<N,float>& x,
                       const Vec<N,float>& y,
                       const Vec<N,float>& z) {
    // I don't understand why Clang's codegen is terrible if we write map(fmaf, x,y,z) directly.
    auto fn = [](float x, float y, float z) { return fmaf(x,y,z); };
    return map(fn, x,y,z);
}

SI Vec<1,int> lrint(const Vec<1,float>& x) {
    return (int)lrintf(x.val);
}
SIN Vec<N,int> lrint(const Vec<N,float>& x) {
#if SKVX_USE_SIMD && defined(__AVX__)
    if constexpr (N == 8) {
        return bit_pun<Vec<N,int>>(_mm256_cvtps_epi32(bit_pun<__m256>(x)));
    }
#endif
#if SKVX_USE_SIMD && defined(__SSE__)
    if constexpr (N == 4) {
        return bit_pun<Vec<N,int>>(_mm_cvtps_epi32(bit_pun<__m128>(x)));
    }
#endif
    return join(lrint(x.lo),
                lrint(x.hi));
}

SIN Vec<N,float> fract(const Vec<N,float>& x) { return x - floor(x); }

// Assumes inputs are finite, and treats/flushes denorm half floats as/to zero.
// Key constants to watch for:
//    - a float is 32-bit, 1-8-23 sign-exponent-mantissa, with 127 exponent bias;
//    - a half  is 16-bit, 1-5-10 sign-exponent-mantissa, with  15 exponent bias.
SIN Vec<N,uint16_t> to_half_finite_ftz(const Vec<N,float>& x) {
    Vec<N,uint32_t> sem = bit_pun<Vec<N,uint32_t>>(x),
                    s   = sem & 0x8000'0000,
                     em = sem ^ s,
                is_norm =  em > 0x387f'd000, // halfway between largest f16 denorm and smallest norm
                   norm = (em>>13) - ((127-15)<<10);
    return cast<uint16_t>((s>>16) | (is_norm & norm));
}
SIN Vec<N,float> from_half_finite_ftz(const Vec<N,uint16_t>& x) {
    Vec<N,uint32_t> wide = cast<uint32_t>(x),
                      s  = wide & 0x8000,
                      em = wide ^ s,
                 is_norm =   em > 0x3ff,
                    norm = (em<<13) + ((127-15)<<23);
    return bit_pun<Vec<N,float>>((s<<16) | (is_norm & norm));
}
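
// e.g. 1.0f has bits 0x3f80'0000, and (0x3f80'0000 >> 13) - ((127-15) << 10) == 0x3c00,
// which is exactly 1.0 as a half.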

// Like if_then_else(), these N=1 base cases won't actually be used unless explicitly called.
SI Vec<1,uint16_t> to_half(const Vec<1,float>&    x) { return   to_half_finite_ftz(x); }
SI Vec<1,float>  from_half(const Vec<1,uint16_t>& x) { return from_half_finite_ftz(x); }

SIN Vec<N,uint16_t> to_half(const Vec<N,float>& x) {
#if SKVX_USE_SIMD && defined(__F16C__)
    if constexpr (N == 8) {
        return bit_pun<Vec<N,uint16_t>>(_mm256_cvtps_ph(bit_pun<__m256>(x),
                                                        _MM_FROUND_TO_NEAREST_INT));
    }
#endif
#if SKVX_USE_SIMD && defined(__aarch64__)
    if constexpr (N == 4) {
        return bit_pun<Vec<N,uint16_t>>(vcvt_f16_f32(bit_pun<float32x4_t>(x)));
    }
#endif
    if constexpr (N > 4) {
        return join(to_half(x.lo),
                    to_half(x.hi));
    }
    return to_half_finite_ftz(x);
}

SIN Vec<N,float> from_half(const Vec<N,uint16_t>& x) {
#if SKVX_USE_SIMD && defined(__F16C__)
    if constexpr (N == 8) {
        return bit_pun<Vec<N,float>>(_mm256_cvtph_ps(bit_pun<__m128i>(x)));
    }
#endif
#if SKVX_USE_SIMD && defined(__aarch64__)
    if constexpr (N == 4) {
        return bit_pun<Vec<N,float>>(vcvt_f32_f16(bit_pun<float16x4_t>(x)));
    }
#endif
    if constexpr (N > 4) {
        return join(from_half(x.lo),
                    from_half(x.hi));
    }
    return from_half_finite_ftz(x);
}

// div255(x) = (x + 127) / 255 is a bit-exact rounding divide-by-255, packing down to 8-bit.
SIN Vec<N,uint8_t> div255(const Vec<N,uint16_t>& x) {
    return cast<uint8_t>( (x+127)/255 );
}
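
// e.g. div255(Vec<8,uint16_t>(255*255)) ~> 255, since (65025 + 127)/255 == 255 exactly.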

// approx_scale(x,y) approximates div255(cast<uint16_t>(x)*cast<uint16_t>(y)) within a bit,
// and is always perfect when x or y is 0 or 255.
SIN Vec<N,uint8_t> approx_scale(const Vec<N,uint8_t>& x, const Vec<N,uint8_t>& y) {
    // All of (x*y+x)/256, (x*y+y)/256, and (x*y+255)/256 meet the criteria above.
    // We happen to have historically picked (x*y+x)/256.
    auto X = cast<uint16_t>(x),
         Y = cast<uint16_t>(y);
    return cast<uint8_t>( (X*Y+X)/256 );
}
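
// e.g. approx_scale(x, 255) == x exactly: (x*255 + x)/256 == (x*256)/256 == x.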

// saturated_add(x,y) sums values and clamps to the maximum value instead of overflowing.
SINT std::enable_if_t<std::is_unsigned_v<T>, Vec<N,T>> saturated_add(const Vec<N,T>& x,
                                                                     const Vec<N,T>& y) {
#if SKVX_USE_SIMD && (defined(__SSE__) || defined(__ARM_NEON))
    // Both SSE and ARM have 16-lane saturated adds, so use intrinsics for those and recurse down
    // or join up to take advantage.
    if constexpr (N == 16 && sizeof(T) == 1) {
        #if defined(__SSE__)
        return bit_pun<Vec<N,T>>(_mm_adds_epu8(bit_pun<__m128i>(x), bit_pun<__m128i>(y)));
        #else  // __ARM_NEON
        return bit_pun<Vec<N,T>>(vqaddq_u8(bit_pun<uint8x16_t>(x), bit_pun<uint8x16_t>(y)));
        #endif
    } else if constexpr (N < 16 && sizeof(T) == 1) {
        return saturated_add(join(x,x), join(y,y)).lo;
    } else if constexpr (sizeof(T) == 1) {
        return join(saturated_add(x.lo, y.lo), saturated_add(x.hi, y.hi));
    }
#endif
    // Otherwise saturate manually: unsigned overflow wraps, so the sum is less than x iff it overflowed.
    auto sum = x + y;
    return if_then_else(sum < x, Vec<N,T>(std::numeric_limits<T>::max()), sum);
}
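
// e.g. with T=uint8_t, saturated_add(200, 100) ~> 255 rather than wrapping to 44.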

// ScaledDividerU32 takes a divisor > 1 and creates a function divide(numerator) that
// calculates numerator / divisor. For this to be rounded properly, the numerator should
// have half added in:
//    divide(numerator + half) == floor(numerator/divisor + 1/2).
//
// This gives an answer within +/- 1 of the true value.
//
// Derivation of half:
//    numerator/divisor + 1/2 = (numerator + half) / divisor
//    numerator + divisor / 2 = numerator + half
//    half = divisor / 2.
//
// Because half is itself an integer division by 2, that division must also be rounded:
//    half = divisor / 2 = (divisor + 1) / 2.
//
// The divisorFactor is just a scaled value:
//    divisorFactor = (1 / divisor) * 2 ^ 32.
// The maximum that can be divided and rounded is UINT_MAX - half.
class ScaledDividerU32 {
public:
    explicit ScaledDividerU32(uint32_t divisor)
            : fDivisorFactor{(uint32_t)(std::round((1.0 / divisor) * (1ull << 32)))}
            , fHalf{(divisor + 1) >> 1} {
        assert(divisor > 1);
    }

    Vec<4, uint32_t> divide(const Vec<4, uint32_t>& numerator) const {
#if SKVX_USE_SIMD && defined(__ARM_NEON)
        uint64x2_t hi = vmull_n_u32(vget_high_u32(to_vext(numerator)), fDivisorFactor);
        uint64x2_t lo = vmull_n_u32(vget_low_u32(to_vext(numerator)),  fDivisorFactor);

        return to_vec<4, uint32_t>(vcombine_u32(vshrn_n_u64(lo,32), vshrn_n_u64(hi,32)));
#else
        return cast<uint32_t>((cast<uint64_t>(numerator) * fDivisorFactor) >> 32);
#endif
    }

    uint32_t half() const { return fHalf; }

private:
    const uint32_t fDivisorFactor;
    const uint32_t fHalf;
};
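
// e.g. rounded division by 3 (illustrative):
//     ScaledDividerU32 div3(3);
//     div3.divide(Vec<4,uint32_t>(10) + div3.half())   ~> {3,3,3,3}, i.e. round(10/3)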

SIN Vec<N,uint16_t> mull(const Vec<N,uint8_t>& x,
                         const Vec<N,uint8_t>& y) {
#if SKVX_USE_SIMD && defined(__ARM_NEON)
    // With NEON we can do eight u8*u8 -> u16 in one instruction, vmull_u8 (read, mul-long).
    if constexpr (N == 8) {
        return to_vec<8,uint16_t>(vmull_u8(to_vext(x), to_vext(y)));
    } else if constexpr (N < 8) {
        return mull(join(x,x), join(y,y)).lo;
    } else { // N > 8
        return join(mull(x.lo, y.lo), mull(x.hi, y.hi));
    }
#else
    return cast<uint16_t>(x) * cast<uint16_t>(y);
#endif
}

SIN Vec<N,uint32_t> mull(const Vec<N,uint16_t>& x,
                         const Vec<N,uint16_t>& y) {
#if SKVX_USE_SIMD && defined(__ARM_NEON)
    // NEON can do four u16*u16 -> u32 in one instruction, vmull_u16.
    if constexpr (N == 4) {
        return to_vec<4,uint32_t>(vmull_u16(to_vext(x), to_vext(y)));
    } else if constexpr (N < 4) {
        return mull(join(x,x), join(y,y)).lo;
    } else { // N > 4
        return join(mull(x.lo, y.lo), mull(x.hi, y.hi));
    }
#else
    return cast<uint32_t>(x) * cast<uint32_t>(y);
#endif
}

SIN Vec<N,uint16_t> mulhi(const Vec<N,uint16_t>& x,
                          const Vec<N,uint16_t>& y) {
#if SKVX_USE_SIMD && defined(__SSE__)
    // Use _mm_mulhi_epu16 for 8xuint16_t and join or split to get there.
    if constexpr (N == 8) {
        return bit_pun<Vec<8,uint16_t>>(_mm_mulhi_epu16(bit_pun<__m128i>(x), bit_pun<__m128i>(y)));
    } else if constexpr (N < 8) {
        return mulhi(join(x,x), join(y,y)).lo;
    } else { // N > 8
        return join(mulhi(x.lo, y.lo), mulhi(x.hi, y.hi));
    }
#else
    return skvx::cast<uint16_t>(mull(x, y) >> 16);
#endif
}
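
// e.g. mulhi(0x8000, 0x8000) ~> 0x4000, the high 16 bits of 0x4000'0000.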

SINT T dot(const Vec<N, T>& a, const Vec<N, T>& b) {
    // While dot is a "horizontal" operation like any() or all(), it needs to remain
    // in floating point and there aren't really any good SIMD instructions that make it faster.
    // The constexpr cases remove the for loop in the only cases we realistically call.
    auto ab = a*b;
    if constexpr (N == 2) {
        return ab[0] + ab[1];
    } else if constexpr (N == 4) {
        return ab[0] + ab[1] + ab[2] + ab[3];
    } else {
        T sum = ab[0];
        for (int i = 1; i < N; ++i) {
            sum += ab[i];
        }
        return sum;
    }
}

SIT T cross(const Vec<2, T>& a, const Vec<2, T>& b) {
    auto x = a * shuffle<1,0>(b);
    return x[0] - x[1];
}
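
// e.g. cross({1,0}, {0,1}) == 1*1 - 0*0 == 1, the scalar z of the 2-D cross product.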

SIN float length(const Vec<N, float>& v) {
    return std::sqrt(dot(v, v));
}

SIN double length(const Vec<N, double>& v) {
    return std::sqrt(dot(v, v));
}

SIN Vec<N, float> normalize(const Vec<N, float>& v) {
    return v / length(v);
}

SIN Vec<N, double> normalize(const Vec<N, double>& v) {
    return v / length(v);
}

SINT bool isfinite(const Vec<N, T>& v) {
    // Multiply all values together with 0. If they were all finite, the output is
    // 0 (also finite). If any were not, we'll get NaN.
    return std::isfinite(dot(v, Vec<N, T>(0)));
}

// De-interleaving load of 4 vectors.
//
// WARNING: These are really only supported well on NEON. Consider restructuring your data before
// resorting to these methods.
SIT void strided_load4(const T* v,
                       Vec<1,T>& a,
                       Vec<1,T>& b,
                       Vec<1,T>& c,
                       Vec<1,T>& d) {
    a.val = v[0];
    b.val = v[1];
    c.val = v[2];
    d.val = v[3];
}
SINT void strided_load4(const T* v,
                        Vec<N,T>& a,
                        Vec<N,T>& b,
                        Vec<N,T>& c,
                        Vec<N,T>& d) {
    strided_load4(v, a.lo, b.lo, c.lo, d.lo);
    strided_load4(v + 4*(N/2), a.hi, b.hi, c.hi, d.hi);
}
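
// e.g. for xyzw-interleaved data v = {x0,y0,z0,w0, x1,y1,z1,w1, ...}, strided_load4
// de-interleaves to a ~> {x0,x1,...}, b ~> {y0,y1,...}, c ~> {z0,...}, d ~> {w0,...}.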
#if SKVX_USE_SIMD && defined(__ARM_NEON)
#define IMPL_LOAD4_TRANSPOSED(N, T, VLD) \
SI void strided_load4(const T* v, \
                      Vec<N,T>& a, \
                      Vec<N,T>& b, \
                      Vec<N,T>& c, \
                      Vec<N,T>& d) { \
    auto mat = VLD(v); \
    a = bit_pun<Vec<N,T>>(mat.val[0]); \
    b = bit_pun<Vec<N,T>>(mat.val[1]); \
    c = bit_pun<Vec<N,T>>(mat.val[2]); \
    d = bit_pun<Vec<N,T>>(mat.val[3]); \
}
IMPL_LOAD4_TRANSPOSED(2, uint32_t, vld4_u32)
IMPL_LOAD4_TRANSPOSED(4, uint16_t, vld4_u16)
IMPL_LOAD4_TRANSPOSED(8, uint8_t, vld4_u8)
IMPL_LOAD4_TRANSPOSED(2, int32_t, vld4_s32)
IMPL_LOAD4_TRANSPOSED(4, int16_t, vld4_s16)
IMPL_LOAD4_TRANSPOSED(8, int8_t, vld4_s8)
IMPL_LOAD4_TRANSPOSED(2, float, vld4_f32)
IMPL_LOAD4_TRANSPOSED(4, uint32_t, vld4q_u32)
IMPL_LOAD4_TRANSPOSED(8, uint16_t, vld4q_u16)
IMPL_LOAD4_TRANSPOSED(16, uint8_t, vld4q_u8)
IMPL_LOAD4_TRANSPOSED(4, int32_t, vld4q_s32)
IMPL_LOAD4_TRANSPOSED(8, int16_t, vld4q_s16)
IMPL_LOAD4_TRANSPOSED(16, int8_t, vld4q_s8)
IMPL_LOAD4_TRANSPOSED(4, float, vld4q_f32)
#undef IMPL_LOAD4_TRANSPOSED

#elif SKVX_USE_SIMD && defined(__SSE__)

SI void strided_load4(const float* v,
                      Vec<4,float>& a,
                      Vec<4,float>& b,
                      Vec<4,float>& c,
                      Vec<4,float>& d) {
    __m128 a_ = _mm_loadu_ps(v);
    __m128 b_ = _mm_loadu_ps(v+4);
    __m128 c_ = _mm_loadu_ps(v+8);
    __m128 d_ = _mm_loadu_ps(v+12);
    _MM_TRANSPOSE4_PS(a_, b_, c_, d_);
    a = bit_pun<Vec<4,float>>(a_);
    b = bit_pun<Vec<4,float>>(b_);
    c = bit_pun<Vec<4,float>>(c_);
    d = bit_pun<Vec<4,float>>(d_);
}
#endif

// De-interleaving load of 2 vectors.
//
// WARNING: These are really only supported well on NEON. Consider restructuring your data before
// resorting to these methods.
SIT void strided_load2(const T* v, Vec<1,T>& a, Vec<1,T>& b) {
    a.val = v[0];
    b.val = v[1];
}
SINT void strided_load2(const T* v, Vec<N,T>& a, Vec<N,T>& b) {
    strided_load2(v, a.lo, b.lo);
    strided_load2(v + 2*(N/2), a.hi, b.hi);
}
#if SKVX_USE_SIMD && defined(__ARM_NEON)
#define IMPL_LOAD2_TRANSPOSED(N, T, VLD) \
SI void strided_load2(const T* v, Vec<N,T>& a, Vec<N,T>& b) { \
    auto mat = VLD(v); \
    a = bit_pun<Vec<N,T>>(mat.val[0]); \
    b = bit_pun<Vec<N,T>>(mat.val[1]); \
}
IMPL_LOAD2_TRANSPOSED(2, uint32_t, vld2_u32)
IMPL_LOAD2_TRANSPOSED(4, uint16_t, vld2_u16)
IMPL_LOAD2_TRANSPOSED(8, uint8_t, vld2_u8)
IMPL_LOAD2_TRANSPOSED(2, int32_t, vld2_s32)
IMPL_LOAD2_TRANSPOSED(4, int16_t, vld2_s16)
IMPL_LOAD2_TRANSPOSED(8, int8_t, vld2_s8)
IMPL_LOAD2_TRANSPOSED(2, float, vld2_f32)
IMPL_LOAD2_TRANSPOSED(4, uint32_t, vld2q_u32)
IMPL_LOAD2_TRANSPOSED(8, uint16_t, vld2q_u16)
IMPL_LOAD2_TRANSPOSED(16, uint8_t, vld2q_u8)
IMPL_LOAD2_TRANSPOSED(4, int32_t, vld2q_s32)
IMPL_LOAD2_TRANSPOSED(8, int16_t, vld2q_s16)
IMPL_LOAD2_TRANSPOSED(16, int8_t, vld2q_s8)
IMPL_LOAD2_TRANSPOSED(4, float, vld2q_f32)
#undef IMPL_LOAD2_TRANSPOSED
#endif

// Define commonly used aliases
using float2  = Vec< 2, float>;
using float4  = Vec< 4, float>;
using float8  = Vec< 8, float>;

using double2 = Vec< 2, double>;
using double4 = Vec< 4, double>;
using double8 = Vec< 8, double>;

using byte2   = Vec< 2, uint8_t>;
using byte4   = Vec< 4, uint8_t>;
using byte8   = Vec< 8, uint8_t>;
using byte16  = Vec<16, uint8_t>;

using int2    = Vec< 2, int32_t>;
using int4    = Vec< 4, int32_t>;
using int8    = Vec< 8, int32_t>;

using uint2   = Vec< 2, uint32_t>;
using uint4   = Vec< 4, uint32_t>;
using uint8   = Vec< 8, uint32_t>;

using long2   = Vec< 2, int64_t>;
using long4   = Vec< 4, int64_t>;
using long8   = Vec< 8, int64_t>;

// Use with from_half and to_half to convert between floatX, and use these for storage.
using half2   = Vec< 2, uint16_t>;
using half4   = Vec< 4, uint16_t>;
using half8   = Vec< 8, uint16_t>;

}  // namespace skvx

#undef SINTU
#undef SINT
#undef SIN
#undef SIT
#undef SI
#undef SKVX_ALWAYS_INLINE
#undef SKVX_USE_SIMD

#endif//SKVX_DEFINED