1 /*
2  * Copyright 2019 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifndef SKVX_DEFINED
9 #define SKVX_DEFINED
10 
11 // skvx::Vec<N,T> are SIMD vectors of N T's, a v1.5 successor to SkNx<N,T>.
12 //
13 // This time we're leaning a bit less on platform-specific intrinsics and a bit
14 // more on Clang/GCC vector extensions, but still keeping the option open to
15 // drop in platform-specific intrinsics, actually more easily than before.
16 //
17 // We've also fixed a few of the caveats that used to make SkNx awkward to work
18 // with across translation units.  skvx::Vec<N,T> always has N*sizeof(T) size
19 // and alignment and is safe to use across translation units freely.
20 // (Ideally we'd only align to T, but that tanks ARMv7 NEON codegen.)
21 
22 // Please try to keep this file independent of Skia headers.
23 #include <algorithm>         // std::min, std::max
24 #include <cassert>           // assert()
25 #include <cmath>             // ceilf, floorf, truncf, roundf, sqrtf, etc.
26 #include <cstdint>           // intXX_t
27 #include <cstring>           // memcpy()
28 #include <initializer_list>  // std::initializer_list
29 #include <utility>           // std::index_sequence
30 
31 // Users may disable SIMD with SKNX_NO_SIMD, which may be set via compiler flags.
32 // The gn build has no option which sets SKNX_NO_SIMD.
33 // Use SKVX_USE_SIMD internally to avoid confusing double negation.
34 // Do not use 'defined' in a macro expansion.
35 #if !defined(SKNX_NO_SIMD)
36     #define SKVX_USE_SIMD 1
37 #else
38     #define SKVX_USE_SIMD 0
39 #endif
40 
41 #if SKVX_USE_SIMD
42     #if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__)
43         #include <immintrin.h>
44     #elif defined(__ARM_NEON)
45         #include <arm_neon.h>
46     #elif defined(__wasm_simd128__)
47         #include <wasm_simd128.h>
48     #endif
49 #endif
50 
51 // To avoid ODR violations, all methods must be force-inlined...
52 #if defined(_MSC_VER)
53     #define SKVX_ALWAYS_INLINE __forceinline
54 #else
55     #define SKVX_ALWAYS_INLINE __attribute__((always_inline))
56 #endif
57 
58 // ... and all standalone functions must be static.  Please use these helpers:
59 #define SI    static inline
60 #define SIT   template <       typename T> SI
61 #define SIN   template <int N            > SI
62 #define SINT  template <int N, typename T> SI
63 #define SINTU template <int N, typename T, typename U, \
64                         typename=std::enable_if_t<std::is_convertible<U,T>::value>> SI
65 
66 namespace skvx {
67 
68 template <int N, typename T>
69 struct alignas(N*sizeof(T)) Vec;
70 
71 template <int... Ix, int N, typename T>
72 SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>&);
73 
74 template <typename D, typename S>
75 SI D bit_pun(const S&);
76 
77 // All Vec have the same simple memory layout, the same as `T vec[N]`.
78 template <int N, typename T>
79 struct alignas(N*sizeof(T)) VecStorage {
80     SKVX_ALWAYS_INLINE VecStorage() = default;
81     SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {}
82 
83     Vec<N/2,T> lo, hi;
84 };
85 
86 template <typename T>
87 struct VecStorage<4,T> {
88     SKVX_ALWAYS_INLINE VecStorage() = default;
89     SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {}
90     SKVX_ALWAYS_INLINE VecStorage(T x, T y, T z, T w) : lo(x,y), hi(z, w) {}
91     SKVX_ALWAYS_INLINE VecStorage(Vec<2,T> xy, T z, T w) : lo(xy), hi(z,w) {}
92     SKVX_ALWAYS_INLINE VecStorage(T x, T y, Vec<2,T> zw) : lo(x,y), hi(zw) {}
93     SKVX_ALWAYS_INLINE VecStorage(Vec<2,T> xy, Vec<2,T> zw) : lo(xy), hi(zw) {}
94 
95     SKVX_ALWAYS_INLINE Vec<2,T>& xy() { return lo; }
96     SKVX_ALWAYS_INLINE Vec<2,T>& zw() { return hi; }
97     SKVX_ALWAYS_INLINE T& x() { return lo.lo.val; }
98     SKVX_ALWAYS_INLINE T& y() { return lo.hi.val; }
99     SKVX_ALWAYS_INLINE T& z() { return hi.lo.val; }
100     SKVX_ALWAYS_INLINE T& w() { return hi.hi.val; }
101 
102     SKVX_ALWAYS_INLINE Vec<2,T> xy() const { return lo; }
103     SKVX_ALWAYS_INLINE Vec<2,T> zw() const { return hi; }
104     SKVX_ALWAYS_INLINE T x() const { return lo.lo.val; }
105     SKVX_ALWAYS_INLINE T y() const { return lo.hi.val; }
106     SKVX_ALWAYS_INLINE T z() const { return hi.lo.val; }
107     SKVX_ALWAYS_INLINE T w() const { return hi.hi.val; }
108 
109     // Exchange-based swizzles. These should take 1 cycle on NEON and 3 (pipelined) cycles on SSE.
110     SKVX_ALWAYS_INLINE Vec<4,T> yxwz() const { return shuffle<1,0,3,2>(bit_pun<Vec<4,T>>(*this)); }
111     SKVX_ALWAYS_INLINE Vec<4,T> zwxy() const { return shuffle<2,3,0,1>(bit_pun<Vec<4,T>>(*this)); }
112 
113     Vec<2,T> lo, hi;
114 };
115 
116 template <typename T>
117 struct VecStorage<2,T> {
118     SKVX_ALWAYS_INLINE VecStorage() = default;
119     SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {}
120     SKVX_ALWAYS_INLINE VecStorage(T x, T y) : lo(x), hi(y) {}
121 
122     SKVX_ALWAYS_INLINE T& x() { return lo.val; }
123     SKVX_ALWAYS_INLINE T& y() { return hi.val; }
124 
125     SKVX_ALWAYS_INLINE T x() const { return lo.val; }
126     SKVX_ALWAYS_INLINE T y() const { return hi.val; }
127 
128     // This exchange-based swizzle should take 1 cycle on NEON and 3 (pipelined) cycles on SSE.
129     SKVX_ALWAYS_INLINE Vec<2,T> yx() const { return shuffle<1,0>(bit_pun<Vec<2,T>>(*this)); }
130 
131     SKVX_ALWAYS_INLINE Vec<4,T> xyxy() const {
132         return Vec<4,T>(bit_pun<Vec<2,T>>(*this), bit_pun<Vec<2,T>>(*this));
133     }
134 
135     Vec<1,T> lo, hi;
136 };
137 
138 template <int N, typename T>
139 struct alignas(N*sizeof(T)) Vec : public VecStorage<N,T> {
140     static_assert((N & (N-1)) == 0,        "N must be a power of 2.");
141     static_assert(sizeof(T) >= alignof(T), "What kind of unusual T is this?");
142 
143     // Methods belong here in the class declaration of Vec only if:
144     //   - they must be here, like constructors or operator[];
145     //   - they'll definitely never want a specialized implementation.
146     // Other operations on Vec should be defined outside the type.
147 
148     SKVX_ALWAYS_INLINE Vec() = default;
149 
150     using VecStorage<N,T>::VecStorage;
151 
152     SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) {
153         T vals[N] = {0};
154         memcpy(vals, xs.begin(), std::min(xs.size(), (size_t)N)*sizeof(T));
155 
156         this->lo = Vec<N/2,T>::Load(vals +   0);
157         this->hi = Vec<N/2,T>::Load(vals + N/2);
158     }
159 
160     SKVX_ALWAYS_INLINE T  operator[](int i) const { return i<N/2 ? this->lo[i] : this->hi[i-N/2]; }
161     SKVX_ALWAYS_INLINE T& operator[](int i)       { return i<N/2 ? this->lo[i] : this->hi[i-N/2]; }
162 
163     SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
164         Vec v;
165         memcpy(&v, ptr, sizeof(Vec));
166         return v;
167     }
168     SKVX_ALWAYS_INLINE void store(void* ptr) const {
169         memcpy(ptr, this, sizeof(Vec));
170     }
171 };
172 
173 template <typename T>
174 struct Vec<1,T> {
175     T val;
176 
177     SKVX_ALWAYS_INLINE Vec() = default;
178 
179     Vec(T s) : val(s) {}
180 
181     SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) : val(xs.size() ? *xs.begin() : 0) {}
182 
183     SKVX_ALWAYS_INLINE T  operator[](int) const { return val; }
184     SKVX_ALWAYS_INLINE T& operator[](int)       { return val; }
185 
186     SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
187         Vec v;
188         memcpy(&v, ptr, sizeof(Vec));
189         return v;
190     }
191     SKVX_ALWAYS_INLINE void store(void* ptr) const {
192         memcpy(ptr, this, sizeof(Vec));
193     }
194 };
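
// A quick usage sketch (illustrative only; the buffer and variable names are hypothetical).
// Construction, indexing, and the Load()/store() round trip look like:
//     float buf[4] = {1, 2, 3, 4};
//     auto v = skvx::Vec<4,float>::Load(buf);   // v == {1,2,3,4}
//     v[0] = 5;                                 // v == {5,2,3,4}
//     skvx::Vec<4,float> w = {1, 2};            // short init lists zero-fill: w == {1,2,0,0}
//     v.store(buf);                             // writes 5,2,3,4 back to buf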
195 
196 // Ideally we'd only use bit_pun(), but until this file is always built as C++17 with constexpr if,
197 // we'll sometimes need to use unchecked_bit_pun().  Please do check the call sites yourself!
198 template <typename D, typename S>
199 SI D unchecked_bit_pun(const S& s) {
200     D d;
201     memcpy(&d, &s, sizeof(D));
202     return d;
203 }
204 
205 template <typename D, typename S>
206 SI D bit_pun(const S& s) {
207     static_assert(sizeof(D) == sizeof(S), "");
208     return unchecked_bit_pun<D>(s);
209 }
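
// For example (an illustrative sketch), bit_pun() reinterprets float lanes as their bit patterns:
//     skvx::Vec<4,float> f    = {1.0f, -0.0f, 0.5f, 2.0f};
//     auto               bits = skvx::bit_pun<skvx::Vec<4,uint32_t>>(f);
//     // bits == {0x3f80'0000, 0x8000'0000, 0x3f00'0000, 0x4000'0000}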
210 
211 // Translate from a value type T to its corresponding Mask, the result of a comparison.
212 template <typename T> struct Mask { using type = T; };
213 template <> struct Mask<float > { using type = int32_t; };
214 template <> struct Mask<double> { using type = int64_t; };
215 template <typename T> using M = typename Mask<T>::type;
216 
217 // Join two Vec<N,T> into one Vec<2N,T>.
218 SINT Vec<2*N,T> join(const Vec<N,T>& lo, const Vec<N,T>& hi) {
219     Vec<2*N,T> v;
220     v.lo = lo;
221     v.hi = hi;
222     return v;
223 }
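
// For instance (hypothetical values):
//     skvx::Vec<2,float> lo = {1,2}, hi = {3,4};
//     skvx::Vec<4,float> v  = skvx::join(lo, hi);   // v == {1,2,3,4}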
224 
225 // We have three strategies for implementing Vec operations:
226 //    1) lean on Clang/GCC vector extensions when available;
227 //    2) use map() to apply a scalar function lane-wise;
228 //    3) recurse on lo/hi to scalar portable implementations.
229 // We can slot in platform-specific implementations as overloads for particular Vec<N,T>,
230 // or often integrate them directly into the recursion of style 3), allowing fine control.
231 
232 #if SKVX_USE_SIMD && (defined(__clang__) || defined(__GNUC__))
233 
234     // VExt<N,T> types have the same size as Vec<N,T> and support most operations directly.
235     #if defined(__clang__)
236         template <int N, typename T>
237         using VExt = T __attribute__((ext_vector_type(N)));
238 
239     #elif defined(__GNUC__)
240         template <int N, typename T>
241         struct VExtHelper {
242             typedef T __attribute__((vector_size(N*sizeof(T)))) type;
243         };
244 
245         template <int N, typename T>
246         using VExt = typename VExtHelper<N,T>::type;
247 
248         // For some reason some (new!) versions of GCC cannot seem to deduce N in the generic
249         // to_vec<N,T>() below for N=4 and T=float.  This workaround seems to help...
250         SI Vec<4,float> to_vec(VExt<4,float> v) { return bit_pun<Vec<4,float>>(v); }
251     #endif
252 
253     SINT VExt<N,T> to_vext(const Vec<N,T>& v) { return bit_pun<VExt<N,T>>(v); }
254     SINT Vec <N,T> to_vec(const VExt<N,T>& v) { return bit_pun<Vec <N,T>>(v); }
255 
256     SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) {
257         return to_vec<N,T>(to_vext(x) + to_vext(y));
258     }
259     SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) {
260         return to_vec<N,T>(to_vext(x) - to_vext(y));
261     }
262     SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) {
263         return to_vec<N,T>(to_vext(x) * to_vext(y));
264     }
265     SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) {
266         return to_vec<N,T>(to_vext(x) / to_vext(y));
267     }
268 
269     SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) {
270         return to_vec<N,T>(to_vext(x) ^ to_vext(y));
271     }
272     SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) {
273         return to_vec<N,T>(to_vext(x) & to_vext(y));
274     }
275     SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) {
276         return to_vec<N,T>(to_vext(x) | to_vext(y));
277     }
278 
279     SINT Vec<N,T> operator!(const Vec<N,T>& x) { return to_vec<N,T>(!to_vext(x)); }
280     SINT Vec<N,T> operator-(const Vec<N,T>& x) { return to_vec<N,T>(-to_vext(x)); }
281     SINT Vec<N,T> operator~(const Vec<N,T>& x) { return to_vec<N,T>(~to_vext(x)); }
282 
283     SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) << k); }
284     SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) >> k); }
285 
286     SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) {
287         return bit_pun<Vec<N,M<T>>>(to_vext(x) == to_vext(y));
288     }
289     SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) {
290         return bit_pun<Vec<N,M<T>>>(to_vext(x) != to_vext(y));
291     }
292     SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) {
293         return bit_pun<Vec<N,M<T>>>(to_vext(x) <= to_vext(y));
294     }
295     SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) {
296         return bit_pun<Vec<N,M<T>>>(to_vext(x) >= to_vext(y));
297     }
298     SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) {
299         return bit_pun<Vec<N,M<T>>>(to_vext(x) <  to_vext(y));
300     }
301     SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) {
302         return bit_pun<Vec<N,M<T>>>(to_vext(x) >  to_vext(y));
303     }
304 
305 #else
306 
307     // Either SKNX_NO_SIMD is defined, or Clang/GCC vector extensions are not available.
308     // We'll implement things portably with N==1 scalar implementations and recursion onto them.
309 
310     // N == 1 scalar implementations.
311     SIT Vec<1,T> operator+(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val + y.val; }
312     SIT Vec<1,T> operator-(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val - y.val; }
313     SIT Vec<1,T> operator*(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val * y.val; }
314     SIT Vec<1,T> operator/(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val / y.val; }
315 
316     SIT Vec<1,T> operator^(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val ^ y.val; }
317     SIT Vec<1,T> operator&(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val & y.val; }
318     SIT Vec<1,T> operator|(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val | y.val; }
319 
320     SIT Vec<1,T> operator!(const Vec<1,T>& x) { return !x.val; }
321     SIT Vec<1,T> operator-(const Vec<1,T>& x) { return -x.val; }
322     SIT Vec<1,T> operator~(const Vec<1,T>& x) { return ~x.val; }
323 
324     SIT Vec<1,T> operator<<(const Vec<1,T>& x, int k) { return x.val << k; }
325     SIT Vec<1,T> operator>>(const Vec<1,T>& x, int k) { return x.val >> k; }
326 
327     SIT Vec<1,M<T>> operator==(const Vec<1,T>& x, const Vec<1,T>& y) {
328         return x.val == y.val ? ~0 : 0;
329     }
330     SIT Vec<1,M<T>> operator!=(const Vec<1,T>& x, const Vec<1,T>& y) {
331         return x.val != y.val ? ~0 : 0;
332     }
333     SIT Vec<1,M<T>> operator<=(const Vec<1,T>& x, const Vec<1,T>& y) {
334         return x.val <= y.val ? ~0 : 0;
335     }
336     SIT Vec<1,M<T>> operator>=(const Vec<1,T>& x, const Vec<1,T>& y) {
337         return x.val >= y.val ? ~0 : 0;
338     }
339     SIT Vec<1,M<T>> operator< (const Vec<1,T>& x, const Vec<1,T>& y) {
340         return x.val <  y.val ? ~0 : 0;
341     }
342     SIT Vec<1,M<T>> operator> (const Vec<1,T>& x, const Vec<1,T>& y) {
343         return x.val >  y.val ? ~0 : 0;
344     }
345 
346     // Recurse on lo/hi down to N==1 scalar implementations.
347     SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) {
348         return join(x.lo + y.lo, x.hi + y.hi);
349     }
350     SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) {
351         return join(x.lo - y.lo, x.hi - y.hi);
352     }
353     SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) {
354         return join(x.lo * y.lo, x.hi * y.hi);
355     }
356     SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) {
357         return join(x.lo / y.lo, x.hi / y.hi);
358     }
359 
360     SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) {
361         return join(x.lo ^ y.lo, x.hi ^ y.hi);
362     }
363     SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) {
364         return join(x.lo & y.lo, x.hi & y.hi);
365     }
366     SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) {
367         return join(x.lo | y.lo, x.hi | y.hi);
368     }
369 
370     SINT Vec<N,T> operator!(const Vec<N,T>& x) { return join(!x.lo, !x.hi); }
371     SINT Vec<N,T> operator-(const Vec<N,T>& x) { return join(-x.lo, -x.hi); }
372     SINT Vec<N,T> operator~(const Vec<N,T>& x) { return join(~x.lo, ~x.hi); }
373 
374     SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return join(x.lo << k, x.hi << k); }
375     SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return join(x.lo >> k, x.hi >> k); }
376 
377     SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) {
378         return join(x.lo == y.lo, x.hi == y.hi);
379     }
380     SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) {
381         return join(x.lo != y.lo, x.hi != y.hi);
382     }
383     SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) {
384         return join(x.lo <= y.lo, x.hi <= y.hi);
385     }
386     SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) {
387         return join(x.lo >= y.lo, x.hi >= y.hi);
388     }
389     SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) {
390         return join(x.lo <  y.lo, x.hi <  y.hi);
391     }
392     SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) {
393         return join(x.lo >  y.lo, x.hi >  y.hi);
394     }
395 #endif
396 
397 // Scalar/vector operations splat the scalar to a vector.
398 SINTU Vec<N,T>    operator+ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) +  y; }
399 SINTU Vec<N,T>    operator- (U x, const Vec<N,T>& y) { return Vec<N,T>(x) -  y; }
400 SINTU Vec<N,T>    operator* (U x, const Vec<N,T>& y) { return Vec<N,T>(x) *  y; }
401 SINTU Vec<N,T>    operator/ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) /  y; }
402 SINTU Vec<N,T>    operator^ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) ^  y; }
403 SINTU Vec<N,T>    operator& (U x, const Vec<N,T>& y) { return Vec<N,T>(x) &  y; }
404 SINTU Vec<N,T>    operator| (U x, const Vec<N,T>& y) { return Vec<N,T>(x) |  y; }
405 SINTU Vec<N,M<T>> operator==(U x, const Vec<N,T>& y) { return Vec<N,T>(x) == y; }
406 SINTU Vec<N,M<T>> operator!=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) != y; }
407 SINTU Vec<N,M<T>> operator<=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) <= y; }
408 SINTU Vec<N,M<T>> operator>=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) >= y; }
409 SINTU Vec<N,M<T>> operator< (U x, const Vec<N,T>& y) { return Vec<N,T>(x) <  y; }
410 SINTU Vec<N,M<T>> operator> (U x, const Vec<N,T>& y) { return Vec<N,T>(x) >  y; }
411 
412 SINTU Vec<N,T>    operator+ (const Vec<N,T>& x, U y) { return x +  Vec<N,T>(y); }
413 SINTU Vec<N,T>    operator- (const Vec<N,T>& x, U y) { return x -  Vec<N,T>(y); }
414 SINTU Vec<N,T>    operator* (const Vec<N,T>& x, U y) { return x *  Vec<N,T>(y); }
415 SINTU Vec<N,T>    operator/ (const Vec<N,T>& x, U y) { return x /  Vec<N,T>(y); }
416 SINTU Vec<N,T>    operator^ (const Vec<N,T>& x, U y) { return x ^  Vec<N,T>(y); }
417 SINTU Vec<N,T>    operator& (const Vec<N,T>& x, U y) { return x &  Vec<N,T>(y); }
418 SINTU Vec<N,T>    operator| (const Vec<N,T>& x, U y) { return x |  Vec<N,T>(y); }
419 SINTU Vec<N,M<T>> operator==(const Vec<N,T>& x, U y) { return x == Vec<N,T>(y); }
420 SINTU Vec<N,M<T>> operator!=(const Vec<N,T>& x, U y) { return x != Vec<N,T>(y); }
421 SINTU Vec<N,M<T>> operator<=(const Vec<N,T>& x, U y) { return x <= Vec<N,T>(y); }
422 SINTU Vec<N,M<T>> operator>=(const Vec<N,T>& x, U y) { return x >= Vec<N,T>(y); }
423 SINTU Vec<N,M<T>> operator< (const Vec<N,T>& x, U y) { return x <  Vec<N,T>(y); }
424 SINTU Vec<N,M<T>> operator> (const Vec<N,T>& x, U y) { return x >  Vec<N,T>(y); }
425 
426 SINT Vec<N,T>& operator+=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x + y); }
427 SINT Vec<N,T>& operator-=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x - y); }
428 SINT Vec<N,T>& operator*=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x * y); }
429 SINT Vec<N,T>& operator/=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x / y); }
430 SINT Vec<N,T>& operator^=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x ^ y); }
431 SINT Vec<N,T>& operator&=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x & y); }
432 SINT Vec<N,T>& operator|=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x | y); }
433 
434 SINTU Vec<N,T>& operator+=(Vec<N,T>& x, U y) { return (x = x + Vec<N,T>(y)); }
435 SINTU Vec<N,T>& operator-=(Vec<N,T>& x, U y) { return (x = x - Vec<N,T>(y)); }
436 SINTU Vec<N,T>& operator*=(Vec<N,T>& x, U y) { return (x = x * Vec<N,T>(y)); }
437 SINTU Vec<N,T>& operator/=(Vec<N,T>& x, U y) { return (x = x / Vec<N,T>(y)); }
438 SINTU Vec<N,T>& operator^=(Vec<N,T>& x, U y) { return (x = x ^ Vec<N,T>(y)); }
439 SINTU Vec<N,T>& operator&=(Vec<N,T>& x, U y) { return (x = x & Vec<N,T>(y)); }
440 SINTU Vec<N,T>& operator|=(Vec<N,T>& x, U y) { return (x = x | Vec<N,T>(y)); }
441 
442 SINT Vec<N,T>& operator<<=(Vec<N,T>& x, int bits) { return (x = x << bits); }
443 SINT Vec<N,T>& operator>>=(Vec<N,T>& x, int bits) { return (x = x >> bits); }
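
// Operators compose with scalars on either side (an illustrative sketch; names are hypothetical):
//     skvx::Vec<4,float> v = {1,2,3,4};
//     v  = 2.0f * v + 1.0f;   // {3,5,7,9}: each scalar is splatted to a Vec<4,float>
//     v *= 0.5f;              // {1.5f, 2.5f, 3.5f, 4.5f}
//     auto m = (v > 2.0f);    // Vec<4,int32_t> mask, {0, ~0, ~0, ~0}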
444 
445 // Some operations we want are not expressible with Clang/GCC vector extensions.
446 
447 // Clang can reason about naive_if_then_else() and optimize through it better
448 // than if_then_else(), so it's sometimes useful to call it directly when we
449 // think an entire expression should optimize away, e.g. min()/max().
450 SINT Vec<N,T> naive_if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {
451     return bit_pun<Vec<N,T>>(( cond & bit_pun<Vec<N, M<T>>>(t)) |
452                              (~cond & bit_pun<Vec<N, M<T>>>(e)) );
453 }
454 
455 SIT Vec<1,T> if_then_else(const Vec<1,M<T>>& cond, const Vec<1,T>& t, const Vec<1,T>& e) {
456     // In practice this scalar implementation is unlikely to be used.  See next if_then_else().
457     return bit_pun<Vec<1,T>>(( cond & bit_pun<Vec<1, M<T>>>(t)) |
458                              (~cond & bit_pun<Vec<1, M<T>>>(e)) );
459 }
460 SINT Vec<N,T> if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {
461     // Specializations inline here so they can generalize what types they apply to.
462     // (This header is used in C++14 contexts, so we have to kind of fake constexpr if.)
463 #if SKVX_USE_SIMD && defined(__AVX2__)
464     if /*constexpr*/ (N*sizeof(T) == 32) {
465         return unchecked_bit_pun<Vec<N,T>>(_mm256_blendv_epi8(unchecked_bit_pun<__m256i>(e),
466                                                               unchecked_bit_pun<__m256i>(t),
467                                                               unchecked_bit_pun<__m256i>(cond)));
468     }
469 #endif
470 #if SKVX_USE_SIMD && defined(__SSE4_1__)
471     if /*constexpr*/ (N*sizeof(T) == 16) {
472         return unchecked_bit_pun<Vec<N,T>>(_mm_blendv_epi8(unchecked_bit_pun<__m128i>(e),
473                                                            unchecked_bit_pun<__m128i>(t),
474                                                            unchecked_bit_pun<__m128i>(cond)));
475     }
476 #endif
477 #if SKVX_USE_SIMD && defined(__ARM_NEON)
478     if /*constexpr*/ (N*sizeof(T) == 16) {
479         return unchecked_bit_pun<Vec<N,T>>(vbslq_u8(unchecked_bit_pun<uint8x16_t>(cond),
480                                                     unchecked_bit_pun<uint8x16_t>(t),
481                                                     unchecked_bit_pun<uint8x16_t>(e)));
482     }
483 #endif
484     // Recurse for large vectors to try to hit the specializations above.
485     if /*constexpr*/ (N*sizeof(T) > 16) {
486         return join(if_then_else(cond.lo, t.lo, e.lo),
487                     if_then_else(cond.hi, t.hi, e.hi));
488     }
489     // This default can lead to better code than recursing onto scalars.
490     return naive_if_then_else(cond, t, e);
491 }
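
// An illustrative sketch of lane selection with a comparison mask (values hypothetical):
//     skvx::Vec<4,float> x  = {-1, 2, -3, 4};
//     skvx::Vec<4,float> ax = skvx::if_then_else(x < 0.0f, -x, x);   // {1,2,3,4}, i.e. abs(x)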
492 
493 SIT  bool any(const Vec<1,T>& x) { return x.val != 0; }
494 SINT bool any(const Vec<N,T>& x) {
495 #if SKVX_USE_SIMD && defined(__wasm_simd128__)
496     if constexpr (N == 4 && sizeof(T) == 4) {
497         return wasm_i32x4_any_true(unchecked_bit_pun<VExt<4,int>>(x));
498     }
499 #endif
500     return any(x.lo)
501         || any(x.hi);
502 }
503 
504 SIT  bool all(const Vec<1,T>& x) { return x.val != 0; }
505 SINT bool all(const Vec<N,T>& x) {
506 #if SKVX_USE_SIMD && defined(__AVX2__)
507     if /*constexpr*/ (N*sizeof(T) == 32) {
508         return _mm256_testc_si256(unchecked_bit_pun<__m256i>(x),
509                                   _mm256_set1_epi32(-1));
510     }
511 #endif
512 #if SKVX_USE_SIMD && defined(__SSE4_1__)
513     if /*constexpr*/ (N*sizeof(T) == 16) {
514         return _mm_testc_si128(unchecked_bit_pun<__m128i>(x),
515                                _mm_set1_epi32(-1));
516     }
517 #endif
518 #if SKVX_USE_SIMD && defined(__wasm_simd128__)
519     if /*constexpr*/ (N == 4 && sizeof(T) == 4) {
520         return wasm_i32x4_all_true(unchecked_bit_pun<VExt<4,int>>(x));
521     }
522 #endif
523     return all(x.lo)
524         && all(x.hi);
525 }
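
// For example (illustrative): any()/all() reduce lanes by "non-zero", so they pair naturally
// with comparison masks.
//     skvx::Vec<4,int> v = {0, 1, 0, 2};
//     bool someSet = skvx::any(v);        // true
//     bool allSet  = skvx::all(v);        // false
//     bool allPos  = skvx::all(v >= 0);   // true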
526 
527 // cast() Vec<N,S> to Vec<N,D>, as if applying a C-cast to each lane.
528 // TODO: implement with map()?
529 template <typename D, typename S>
530 SI Vec<1,D> cast(const Vec<1,S>& src) { return (D)src.val; }
531 
532 template <typename D, int N, typename S>
533 SI Vec<N,D> cast(const Vec<N,S>& src) {
534 #if SKVX_USE_SIMD && defined(__clang__)
535     return to_vec(__builtin_convertvector(to_vext(src), VExt<N,D>));
536 #else
537     return join(cast<D>(src.lo), cast<D>(src.hi));
538 #endif
539 }
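
// For example (an illustrative sketch), cast() converts lane by lane with C-cast semantics:
//     skvx::Vec<4,float> f = {1.5f, -2.5f, 3.0f, 4.9f};
//     skvx::Vec<4,int>   i = skvx::cast<int>(f);   // truncates toward zero: {1, -2, 3, 4}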
540 
541 // min/max match logic of std::min/std::max, which is important when NaN is involved.
542 SIT  T min(const Vec<1,T>& x) { return x.val; }
543 SIT  T max(const Vec<1,T>& x) { return x.val; }
544 SINT T min(const Vec<N,T>& x) { return std::min(min(x.lo), min(x.hi)); }
545 SINT T max(const Vec<N,T>& x) { return std::max(max(x.lo), max(x.hi)); }
546 
547 SINT Vec<N,T> min(const Vec<N,T>& x, const Vec<N,T>& y) { return naive_if_then_else(y < x, y, x); }
548 SINT Vec<N,T> max(const Vec<N,T>& x, const Vec<N,T>& y) { return naive_if_then_else(x < y, y, x); }
549 
550 SINTU Vec<N,T> min(const Vec<N,T>& x, U y) { return min(x, Vec<N,T>(y)); }
551 SINTU Vec<N,T> max(const Vec<N,T>& x, U y) { return max(x, Vec<N,T>(y)); }
552 SINTU Vec<N,T> min(U x, const Vec<N,T>& y) { return min(Vec<N,T>(x), y); }
553 SINTU Vec<N,T> max(U x, const Vec<N,T>& y) { return max(Vec<N,T>(x), y); }
554 
555 // pin matches the logic of SkTPin, which is important when NaN is involved. It always returns
556 // values in the range lo..hi, and if x is NaN, it returns lo.
557 SINT Vec<N,T> pin(const Vec<N,T>& x, const Vec<N,T>& lo, const Vec<N,T>& hi) {
558     return max(lo, min(x, hi));
559 }
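
// A small illustrative sketch (values hypothetical):
//     skvx::Vec<4,float> x = {-0.5f, 0.25f, 2.0f, NAN};
//     auto p = skvx::pin(x, skvx::Vec<4,float>(0.0f), skvx::Vec<4,float>(1.0f));
//     // p == {0, 0.25f, 1, 0}; the NaN lane pins to lo.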
560 
561 // Shuffle values from a vector pretty arbitrarily:
562 //    skvx::Vec<4,float> rgba = {R,G,B,A};
563 //    shuffle<2,1,0,3>        (rgba) ~> {B,G,R,A}
564 //    shuffle<2,1>            (rgba) ~> {B,G}
565 //    shuffle<2,1,2,1,2,1,2,1>(rgba) ~> {B,G,B,G,B,G,B,G}
566 //    shuffle<3,3,3,3>        (rgba) ~> {A,A,A,A}
567 // The only real restriction is that the output also be a legal N=power-of-two skvx::Vec.
568 template <int... Ix, int N, typename T>
569 SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>& x) {
570 #if SKVX_USE_SIMD && defined(__clang__)
571     // TODO: can we just always use { x[Ix]... }?
572     return to_vec<sizeof...(Ix),T>(__builtin_shufflevector(to_vext(x), to_vext(x), Ix...));
573 #else
574     return { x[Ix]... };
575 #endif
576 }
577 
578 // Call map(fn, x) for a vector with fn() applied to each lane of x, { fn(x[0]), fn(x[1]), ... },
579 // or map(fn, x,y) for a vector of fn(x[i], y[i]), etc.
580 
581 template <typename Fn, typename... Args, size_t... I>
582 SI auto map(std::index_sequence<I...>,
583             Fn&& fn, const Args&... args) -> skvx::Vec<sizeof...(I), decltype(fn(args[0]...))> {
584     auto lane = [&](size_t i)
585 #if defined(__clang__)
586     // CFI, specifically -fsanitize=cfi-icall, seems to give a false positive here,
587     // with errors like "control flow integrity check for type 'float (float)
588     // noexcept' failed during indirect function call... note: sqrtf.cfi_jt defined
589     // here".  But we can be quite sure fn is the right type: it's all inferred!
590     // So, stifle CFI in this function.
591     __attribute__((no_sanitize("cfi")))
592 #endif
593     { return fn(args[i]...); };
594 
595     return { lane(I)... };
596 }
597 
598 template <typename Fn, int N, typename T, typename... Rest>
599 auto map(Fn&& fn, const Vec<N,T>& first, const Rest&... rest) {
600     // Derive an {0...N-1} index_sequence from the size of the first arg: N lanes in, N lanes out.
601     return map(std::make_index_sequence<N>{}, fn, first,rest...);
602 }
603 
604 SIN Vec<N,float>  ceil(const Vec<N,float>& x) { return map( ceilf, x); }
605 SIN Vec<N,float> floor(const Vec<N,float>& x) { return map(floorf, x); }
606 SIN Vec<N,float> trunc(const Vec<N,float>& x) { return map(truncf, x); }
607 SIN Vec<N,float> round(const Vec<N,float>& x) { return map(roundf, x); }
608 SIN Vec<N,float>  sqrt(const Vec<N,float>& x) { return map( sqrtf, x); }
609 SIN Vec<N,float>   abs(const Vec<N,float>& x) { return map( fabsf, x); }
610 SIN Vec<N,float>   fma(const Vec<N,float>& x,
611                        const Vec<N,float>& y,
612                        const Vec<N,float>& z) {
613     // I don't understand why Clang's codegen is terrible if we write map(fmaf, x,y,z) directly.
614     auto fn = [](float x, float y, float z) { return fmaf(x,y,z); };
615     return map(fn, x,y,z);
616 }
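
// A usage sketch for the wrappers above (illustrative; the lambda is hypothetical):
//     skvx::Vec<4,float> x = {1, 4, 9, 16};
//     auto r = skvx::sqrt(x);                                       // {1, 2, 3, 4}
//     auto h = skvx::map([](float v) { return v * 0.5f; }, x);      // {0.5f, 2, 4.5f, 8}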
617 
618 SI Vec<1,int> lrint(const Vec<1,float>& x) {
619     return (int)lrintf(x.val);
620 }
621 SIN Vec<N,int> lrint(const Vec<N,float>& x) {
622 #if SKVX_USE_SIMD && defined(__AVX__)
623     if /*constexpr*/ (N == 8) {
624         return unchecked_bit_pun<Vec<N,int>>(_mm256_cvtps_epi32(unchecked_bit_pun<__m256>(x)));
625     }
626 #endif
627 #if SKVX_USE_SIMD && defined(__SSE__)
628     if /*constexpr*/ (N == 4) {
629         return unchecked_bit_pun<Vec<N,int>>(_mm_cvtps_epi32(unchecked_bit_pun<__m128>(x)));
630     }
631 #endif
632     return join(lrint(x.lo),
633                 lrint(x.hi));
634 }
635 
636 SIN Vec<N,float> fract(const Vec<N,float>& x) { return x - floor(x); }
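
// For example (illustrative values):
//     skvx::lrint(skvx::Vec<2,float>{0.4f, 2.6f});    // rounds to nearest: {0, 3}
//     skvx::fract(skvx::Vec<2,float>{1.25f, -0.25f}); // {0.25f, 0.75f}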
637 
638 // The default logic for to_half/from_half is borrowed from skcms,
639 // and assumes inputs are finite; denormal half floats are treated as (and flushed to) zero.
640 // Key constants to watch for:
641 //    - a float is 32-bit, 1-8-23 sign-exponent-mantissa, with 127 exponent bias;
642 //    - a half  is 16-bit, 1-5-10 sign-exponent-mantissa, with  15 exponent bias.
643 SIN Vec<N,uint16_t> to_half_finite_ftz(const Vec<N,float>& x) {
644     Vec<N,uint32_t> sem = bit_pun<Vec<N,uint32_t>>(x),
645                     s   = sem & 0x8000'0000,
646                      em = sem ^ s,
647               is_denorm =  em < 0x3880'0000;
648     return cast<uint16_t>(if_then_else(is_denorm, Vec<N,uint32_t>(0)
649                                                 , (s>>16) + (em>>13) - ((127-15)<<10)));
650 }
651 SIN Vec<N,float> from_half_finite_ftz(const Vec<N,uint16_t>& x) {
652     Vec<N,uint32_t> wide = cast<uint32_t>(x),
653                       s  = wide & 0x8000,
654                       em = wide ^ s;
655     auto is_denorm = bit_pun<Vec<N,int32_t>>(em < 0x0400);
656     return if_then_else(is_denorm, Vec<N,float>(0)
657                                  , bit_pun<Vec<N,float>>( (s<<16) + (em<<13) + ((127-15)<<23) ));
658 }
659 
660 // Like if_then_else(), these N=1 base cases won't actually be used unless explicitly called.
661 SI Vec<1,uint16_t> to_half(const Vec<1,float>&    x) { return   to_half_finite_ftz(x); }
662 SI Vec<1,float>  from_half(const Vec<1,uint16_t>& x) { return from_half_finite_ftz(x); }
663 
664 SIN Vec<N,uint16_t> to_half(const Vec<N,float>& x) {
665 #if SKVX_USE_SIMD && defined(__F16C__)
666     if /*constexpr*/ (N == 8) {
667         return unchecked_bit_pun<Vec<N,uint16_t>>(_mm256_cvtps_ph(unchecked_bit_pun<__m256>(x),
668                                                                   _MM_FROUND_CUR_DIRECTION));
669     }
670 #endif
671 #if SKVX_USE_SIMD && defined(__aarch64__)
672     if /*constexpr*/ (N == 4) {
673         return unchecked_bit_pun<Vec<N,uint16_t>>(vcvt_f16_f32(unchecked_bit_pun<float32x4_t>(x)));
674 
675     }
676 #endif
677     if /*constexpr*/ (N > 4) {
678         return join(to_half(x.lo),
679                     to_half(x.hi));
680     }
681     return to_half_finite_ftz(x);
682 }
683 
684 SIN Vec<N,float> from_half(const Vec<N,uint16_t>& x) {
685 #if SKVX_USE_SIMD && defined(__F16C__)
686     if /*constexpr*/ (N == 8) {
687         return unchecked_bit_pun<Vec<N,float>>(_mm256_cvtph_ps(unchecked_bit_pun<__m128i>(x)));
688     }
689 #endif
690 #if SKVX_USE_SIMD && defined(__aarch64__)
691     if /*constexpr*/ (N == 4) {
692         return unchecked_bit_pun<Vec<N,float>>(vcvt_f32_f16(unchecked_bit_pun<float16x4_t>(x)));
693     }
694 #endif
695     if /*constexpr*/ (N > 4) {
696         return join(from_half(x.lo),
697                     from_half(x.hi));
698     }
699     return from_half_finite_ftz(x);
700 }
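
// An illustrative round trip (values hypothetical; each is exactly representable as a half):
//     skvx::Vec<4,float>    f = {0.0f, 0.5f, 1.0f, 64.0f};
//     skvx::Vec<4,uint16_t> h = skvx::to_half(f);     // {0x0000, 0x3800, 0x3c00, 0x5400}
//     skvx::Vec<4,float>    g = skvx::from_half(h);   // back to {0, 0.5f, 1, 64}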
701 
702 // div255(x) = (x + 127) / 255 is a bit-exact rounding divide-by-255, packing down to 8-bit.
703 SIN Vec<N,uint8_t> div255(const Vec<N,uint16_t>& x) {
704     return cast<uint8_t>( (x+127)/255 );
705 }
706 
707 // approx_scale(x,y) approximates div255(cast<uint16_t>(x)*cast<uint16_t>(y)) within a bit,
708 // and is always perfect when x or y is 0 or 255.
709 SIN Vec<N,uint8_t> approx_scale(const Vec<N,uint8_t>& x, const Vec<N,uint8_t>& y) {
710     // All of (x*y+x)/256, (x*y+y)/256, and (x*y+255)/256 meet the criteria above.
711     // We happen to have historically picked (x*y+x)/256.
712     auto X = cast<uint16_t>(x),
713          Y = cast<uint16_t>(y);
714     return cast<uint8_t>( (X*Y+X)/256 );
715 }
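
// A typical use (illustrative sketch): scaling 8-bit coverage by 8-bit alpha.
//     skvx::Vec<8,uint8_t> cov(255), alpha(128);
//     auto scaled = skvx::approx_scale(cov, alpha);                  // all lanes 128
//     auto exact  = skvx::div255(skvx::cast<uint16_t>(cov) *
//                                skvx::cast<uint16_t>(alpha));       // also all lanes 128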
716 
717 // The ScaledDividerU32 takes a divisor > 1, and creates a function divide(numerator) that
718 // calculates numerator / divisor. For this to be rounded properly, the numerator should have
719 // half added in:
720 // divide(numerator + half) == floor(numerator/divisor + 1/2).
721 //
722 // This gives an answer within +/- 1 of the true value.
723 //
724 // Derivation of half:
725 //    numerator/divisor + 1/2 = (numerator + half) / divisor
726 //    numerator + divisor / 2 = numerator + half
727 //    half = divisor / 2.
728 //
729 // Because half comes from dividing the divisor by 2, that division must itself be rounded:
730 //    half = divisor / 2 = (divisor + 1) / 2.
731 //
732 // The divisorFactor is just a scaled value:
733 //    divisorFactor = (1 / divisor) * 2 ^ 32.
734 // The maximum that can be divided and rounded is UINT_MAX - half.
735 class ScaledDividerU32 {
736 public:
737     explicit ScaledDividerU32(uint32_t divisor)
738             : fDivisorFactor{(uint32_t)(std::round((1.0 / divisor) * (1ull << 32)))}
739             , fHalf{(divisor + 1) >> 1} {
740         assert(divisor > 1);
741     }
742 
743     Vec<4, uint32_t> divide(const Vec<4, uint32_t>& numerator) const {
744 #if SKVX_USE_SIMD && defined(__ARM_NEON)
745         uint64x2_t hi = vmull_n_u32(vget_high_u32(to_vext(numerator)), fDivisorFactor);
746         uint64x2_t lo = vmull_n_u32(vget_low_u32(to_vext(numerator)),  fDivisorFactor);
747 
748         return to_vec<4, uint32_t>(vcombine_u32(vshrn_n_u64(lo,32), vshrn_n_u64(hi,32)));
749 #else
750         return cast<uint32_t>((cast<uint64_t>(numerator) * fDivisorFactor) >> 32);
751 #endif
752     }
753 
754     uint32_t half() const { return fHalf; }
755 
756 private:
757     const uint32_t fDivisorFactor;
758     const uint32_t fHalf;
759 };
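
// An illustrative sketch of rounded division by a constant (names hypothetical):
//     skvx::ScaledDividerU32 div3(3);
//     skvx::Vec<4,uint32_t>  n = {0, 1, 2, 300};
//     auto q = div3.divide(n + div3.half());   // {0, 0, 1, 100}, each within +/- 1 of n/3 rounded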
760 
761 #if SKVX_USE_SIMD && defined(__ARM_NEON)
762 // With NEON we can do eight u8*u8 -> u16 in one instruction, vmull_u8 (read, mul-long).
763 SI Vec<8,uint16_t> mull(const Vec<8,uint8_t>& x,
764                         const Vec<8,uint8_t>& y) {
765     return to_vec<8,uint16_t>(vmull_u8(to_vext(x),
766                                         to_vext(y)));
767 }
768 
769 SIN std::enable_if_t<(N < 8), Vec<N,uint16_t>> mull(const Vec<N,uint8_t>& x,
770                                                     const Vec<N,uint8_t>& y) {
771     // N < 8 --> double up data until N == 8, returning the part we need.
772     return mull(join(x,x),
773                 join(y,y)).lo;
774 }
775 
776 SIN std::enable_if_t<(N > 8), Vec<N,uint16_t>> mull(const Vec<N,uint8_t>& x,
777                                                     const Vec<N,uint8_t>& y) {
778     // N > 8 --> usual join(lo,hi) strategy to recurse down to N == 8.
779     return join(mull(x.lo, y.lo),
780                 mull(x.hi, y.hi));
781 }
782 
783 #else
784 
785 // Nothing special when we don't have NEON... just cast up to 16-bit and multiply.
786 SIN Vec<N,uint16_t> mull(const Vec<N,uint8_t>& x,
787                             const Vec<N,uint8_t>& y) {
788     return cast<uint16_t>(x)
789             * cast<uint16_t>(y);
790 }
791 #endif
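
// For example (illustrative), mull() widens before multiplying so 8-bit products don't overflow:
//     skvx::Vec<8,uint8_t>  x(200), y(3);
//     skvx::Vec<8,uint16_t> p = skvx::mull(x, y);   // all lanes 600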
792 
793 // Allow floating point contraction. e.g., allow a*x + y to be compiled to a single FMA even though
794 // it introduces LSB differences on platforms that don't have an FMA instruction.
795 #if defined(__clang__)
796 #pragma STDC FP_CONTRACT ON
797 #endif
798 
799 // Approximates the inverse cosine of x within 0.96 degrees using the rational polynomial:
800 //
801 //     acos(x) ~= (bx^3 + ax) / (dx^4 + cx^2 + 1) + pi/2
802 //
803 // See: https://stackoverflow.com/a/36387954
804 //
805 // For a proof of max error, see the "SkVx_approx_acos" unit test.
806 //
807 // NOTE: This function deviates immediately from pi and 0 outside -1 and 1. (The derivatives are
808 // infinite at -1 and 1). So the input must still be clamped between -1 and 1.
809 #define SKVX_APPROX_ACOS_MAX_ERROR SkDegreesToRadians(.96f)
810 SIN Vec<N,float> approx_acos(Vec<N,float> x) {
811     constexpr static float a = -0.939115566365855f;
812     constexpr static float b =  0.9217841528914573f;
813     constexpr static float c = -1.2845906244690837f;
814     constexpr static float d =  0.295624144969963174f;
815     constexpr static float pi_over_2 = 1.5707963267948966f;
816     auto xx = x*x;
817     auto numer = b*xx + a;
818     auto denom = xx*(d*xx + c) + 1;
819     return x * (numer/denom) + pi_over_2;
820 }
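
// A quick sanity check (illustrative):
//     auto t = skvx::approx_acos(skvx::Vec<4,float>{-1.0f, 0.0f, 0.5f, 1.0f});
//     // t is roughly {pi, pi/2, pi/3, 0}, each within SKVX_APPROX_ACOS_MAX_ERROR of the truth.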
821 
822 #if defined(__clang__)
823 #pragma STDC FP_CONTRACT DEFAULT
824 #endif
825 
826 // De-interleaving load of 4 vectors.
827 //
828 // WARNING: These are really only supported well on NEON. Consider restructuring your data before
829 // resorting to these methods.
830 SIT void strided_load4(const T* v,
831                        skvx::Vec<1,T>& a,
832                        skvx::Vec<1,T>& b,
833                        skvx::Vec<1,T>& c,
834                        skvx::Vec<1,T>& d) {
835     a.val = v[0];
836     b.val = v[1];
837     c.val = v[2];
838     d.val = v[3];
839 }
840 SINT void strided_load4(const T* v,
841                         skvx::Vec<N,T>& a,
842                         skvx::Vec<N,T>& b,
843                         skvx::Vec<N,T>& c,
844                         skvx::Vec<N,T>& d) {
845     strided_load4(v, a.lo, b.lo, c.lo, d.lo);
846     strided_load4(v + 4*(N/2), a.hi, b.hi, c.hi, d.hi);
847 }
848 #if SKVX_USE_SIMD && defined(__ARM_NEON)
849 #define IMPL_LOAD4_TRANSPOSED(N, T, VLD) \
850 SI void strided_load4(const T* v, \
851                       skvx::Vec<N,T>& a, \
852                       skvx::Vec<N,T>& b, \
853                       skvx::Vec<N,T>& c, \
854                       skvx::Vec<N,T>& d) { \
855     auto mat = VLD(v); \
856     a = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[0]); \
857     b = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[1]); \
858     c = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[2]); \
859     d = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[3]); \
860 }
861 IMPL_LOAD4_TRANSPOSED(2, uint32_t, vld4_u32)
862 IMPL_LOAD4_TRANSPOSED(4, uint16_t, vld4_u16)
863 IMPL_LOAD4_TRANSPOSED(8, uint8_t, vld4_u8)
864 IMPL_LOAD4_TRANSPOSED(2, int32_t, vld4_s32)
865 IMPL_LOAD4_TRANSPOSED(4, int16_t, vld4_s16)
866 IMPL_LOAD4_TRANSPOSED(8, int8_t, vld4_s8)
867 IMPL_LOAD4_TRANSPOSED(2, float, vld4_f32)
868 IMPL_LOAD4_TRANSPOSED(4, uint32_t, vld4q_u32)
869 IMPL_LOAD4_TRANSPOSED(8, uint16_t, vld4q_u16)
870 IMPL_LOAD4_TRANSPOSED(16, uint8_t, vld4q_u8)
871 IMPL_LOAD4_TRANSPOSED(4, int32_t, vld4q_s32)
872 IMPL_LOAD4_TRANSPOSED(8, int16_t, vld4q_s16)
873 IMPL_LOAD4_TRANSPOSED(16, int8_t, vld4q_s8)
874 IMPL_LOAD4_TRANSPOSED(4, float, vld4q_f32)
875 #undef IMPL_LOAD4_TRANSPOSED
876 
877 #elif SKVX_USE_SIMD && defined(__SSE__)
878 
879 SI void strided_load4(const float* v,
880                       Vec<4,float>& a,
881                       Vec<4,float>& b,
882                       Vec<4,float>& c,
883                       Vec<4,float>& d) {
884     using skvx::bit_pun;
885     __m128 a_ = _mm_loadu_ps(v);
886     __m128 b_ = _mm_loadu_ps(v+4);
887     __m128 c_ = _mm_loadu_ps(v+8);
888     __m128 d_ = _mm_loadu_ps(v+12);
889     _MM_TRANSPOSE4_PS(a_, b_, c_, d_);
890     a = bit_pun<Vec<4,float>>(a_);
891     b = bit_pun<Vec<4,float>>(b_);
892     c = bit_pun<Vec<4,float>>(c_);
893     d = bit_pun<Vec<4,float>>(d_);
894 }
895 #endif
896 
897 // De-interleaving load of 2 vectors.
898 //
899 // WARNING: These are really only supported well on NEON. Consider restructuring your data before
900 // resorting to these methods.
901 SIT void strided_load2(const T* v, skvx::Vec<1,T>& a, skvx::Vec<1,T>& b) {
902     a.val = v[0];
903     b.val = v[1];
904 }
905 SINT void strided_load2(const T* v, skvx::Vec<N,T>& a, skvx::Vec<N,T>& b) {
906     strided_load2(v, a.lo, b.lo);
907     strided_load2(v + 2*(N/2), a.hi, b.hi);
908 }
909 #if SKVX_USE_SIMD && defined(__ARM_NEON)
910 #define IMPL_LOAD2_TRANSPOSED(N, T, VLD) \
911 SI void strided_load2(const T* v, skvx::Vec<N,T>& a, skvx::Vec<N,T>& b) { \
912     auto mat = VLD(v); \
913     a = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[0]); \
914     b = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[1]); \
915 }
916 IMPL_LOAD2_TRANSPOSED(2, uint32_t, vld2_u32)
917 IMPL_LOAD2_TRANSPOSED(4, uint16_t, vld2_u16)
918 IMPL_LOAD2_TRANSPOSED(8, uint8_t, vld2_u8)
919 IMPL_LOAD2_TRANSPOSED(2, int32_t, vld2_s32)
920 IMPL_LOAD2_TRANSPOSED(4, int16_t, vld2_s16)
921 IMPL_LOAD2_TRANSPOSED(8, int8_t, vld2_s8)
922 IMPL_LOAD2_TRANSPOSED(2, float, vld2_f32)
923 IMPL_LOAD2_TRANSPOSED(4, uint32_t, vld2q_u32)
924 IMPL_LOAD2_TRANSPOSED(8, uint16_t, vld2q_u16)
925 IMPL_LOAD2_TRANSPOSED(16, uint8_t, vld2q_u8)
926 IMPL_LOAD2_TRANSPOSED(4, int32_t, vld2q_s32)
927 IMPL_LOAD2_TRANSPOSED(8, int16_t, vld2q_s16)
928 IMPL_LOAD2_TRANSPOSED(16, int8_t, vld2q_s8)
929 IMPL_LOAD2_TRANSPOSED(4, float, vld2q_f32)
930 #undef IMPL_LOAD2_TRANSPOSED
931 #endif
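
// An illustrative sketch of a de-interleaving load (buffer contents hypothetical):
//     float px[16] = {1,2,3,4,  5,6,7,8,  9,10,11,12,  13,14,15,16};  // four interleaved "pixels"
//     skvx::Vec<4,float> r, g, b, a;
//     skvx::strided_load4(px, r, g, b, a);
//     // r=={1,5,9,13}, g=={2,6,10,14}, b=={3,7,11,15}, a=={4,8,12,16}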
932 
933 }  // namespace skvx
934 
935 #undef SINTU
936 #undef SINT
937 #undef SIN
938 #undef SIT
939 #undef SI
940 #undef SKVX_ALWAYS_INLINE
941 #undef SKVX_USE_SIMD
942 
943 #endif//SKVX_DEFINED
944