// Copyright 2016 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef sw_ShaderCore_hpp
#define sw_ShaderCore_hpp

#include "Reactor/Print.hpp"
#include "Reactor/Reactor.hpp"
#include "System/Debug.hpp"

#include <array>
#include <atomic>   // std::memory_order
#include <utility>  // std::pair

namespace sw {

using namespace rr;

class Vector4s
{
public:
    Vector4s();
    Vector4s(unsigned short x, unsigned short y, unsigned short z, unsigned short w);
    Vector4s(const Vector4s &rhs);

    Short4 &operator[](int i);
    Vector4s &operator=(const Vector4s &rhs);

    Short4 x;
    Short4 y;
    Short4 z;
    Short4 w;
};

class Vector4f
{
public:
    Vector4f();
    Vector4f(float x, float y, float z, float w);
    Vector4f(const Vector4f &rhs);

    Float4 &operator[](int i);
    Vector4f &operator=(const Vector4f &rhs);

    Float4 x;
    Float4 y;
    Float4 z;
    Float4 w;
};

class Vector4i
{
public:
    Vector4i();
    Vector4i(int x, int y, int z, int w);
    Vector4i(const Vector4i &rhs);

    Int4 &operator[](int i);
    Vector4i &operator=(const Vector4i &rhs);

    Int4 x;
    Int4 y;
    Int4 z;
    Int4 w;
};

enum class OutOfBoundsBehavior
{
    Nullify,             // Loads become zero, stores are elided.
    RobustBufferAccess,  // As defined by the Vulkan spec (in short: access anywhere within bounds, or zeroing).
    UndefinedValue,      // Only for load operations. Not secure. No program termination.
    UndefinedBehavior,   // Program may terminate.
};
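
// Note: SIMD::Pointer::Load() and Store() below consult these behaviors to decide
// whether out-of-bounds lanes are masked off, and (for loads) whether masked lanes
// must read back as zero.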

// SIMD contains types that represent multiple scalars packed into a single
// vector data type. Types in the SIMD namespace provide a semantic hint
// that the data should be treated as a per-execution-lane scalar instead of
// a typical euclidean-style vector type.
namespace SIMD {

// Width is the number of per-lane scalars packed into each SIMD vector.
static constexpr int Width = 4;

using Float = rr::Float4;
using Int = rr::Int4;
using UInt = rr::UInt4;

struct Pointer
{
    Pointer(rr::Pointer<Byte> base, rr::Int limit);
    Pointer(rr::Pointer<Byte> base, unsigned int limit);
    Pointer(rr::Pointer<Byte> base, rr::Int limit, SIMD::Int offset);
    Pointer(rr::Pointer<Byte> base, unsigned int limit, SIMD::Int offset);

    Pointer &operator+=(Int i);
    Pointer &operator*=(Int i);

    Pointer operator+(SIMD::Int i);
    Pointer operator*(SIMD::Int i);

    Pointer &operator+=(int i);
    Pointer &operator*=(int i);

    Pointer operator+(int i);
    Pointer operator*(int i);

    SIMD::Int offsets() const;

    SIMD::Int isInBounds(unsigned int accessSize, OutOfBoundsBehavior robustness) const;

    bool isStaticallyInBounds(unsigned int accessSize, OutOfBoundsBehavior robustness) const;

    rr::Int limit() const;

    // Returns true if all offsets are sequential
    // (N+0*step, N+1*step, N+2*step, N+3*step)
    rr::Bool hasSequentialOffsets(unsigned int step) const;

    // Returns true if all offsets are compile-time static and
    // sequential (N+0*step, N+1*step, N+2*step, N+3*step)
    bool hasStaticSequentialOffsets(unsigned int step) const;

    // Returns true if all offsets are equal (N, N, N, N)
    rr::Bool hasEqualOffsets() const;

    // Returns true if all offsets are compile-time static and are equal
    // (N, N, N, N)
    bool hasStaticEqualOffsets() const;

    template<typename T>
    inline T Load(OutOfBoundsBehavior robustness, Int mask, bool atomic = false, std::memory_order order = std::memory_order_relaxed, int alignment = sizeof(float));

    template<typename T>
    inline void Store(T val, OutOfBoundsBehavior robustness, Int mask, bool atomic = false, std::memory_order order = std::memory_order_relaxed);

    template<typename T>
    inline void Store(RValue<T> val, OutOfBoundsBehavior robustness, Int mask, bool atomic = false, std::memory_order order = std::memory_order_relaxed);

    // Base address for the pointer, common across all lanes.
    rr::Pointer<rr::Byte> base;

    // Upper (non-inclusive) limit for offsets from base.
    rr::Int dynamicLimit;  // If hasDynamicLimit is false, dynamicLimit is zero.
    unsigned int staticLimit;

    // Per lane offsets from base.
    SIMD::Int dynamicOffsets;  // If hasDynamicOffsets is false, all dynamicOffsets are zero.
    std::array<int32_t, SIMD::Width> staticOffsets;

    bool hasDynamicLimit;    // True if dynamicLimit is non-zero.
    bool hasDynamicOffsets;  // True if any dynamicOffsets are non-zero.
};
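
// Illustrative usage sketch (not part of this header's API): 'data', 'byteLimit',
// and 'activeLanes' are hypothetical values supplied by the surrounding routine.
// Offsets are byte offsets from 'base', so consecutive floats are 4 bytes apart.
//
//   SIMD::Pointer ptr(data, byteLimit, SIMD::Int(0, 4, 8, 12));  // one float per lane
//   SIMD::Float v = ptr.Load<SIMD::Float>(OutOfBoundsBehavior::Nullify, activeLanes);
//   ptr.Store(v, OutOfBoundsBehavior::Nullify, activeLanes);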

template<typename T>
struct Element
{};
template<>
struct Element<Float>
{
    using type = rr::Float;
};
template<>
struct Element<Int>
{
    using type = rr::Int;
};
template<>
struct Element<UInt>
{
    using type = rr::UInt;
};

}  // namespace SIMD

Float4 exponential2(RValue<Float4> x, bool pp = false);
Float4 logarithm2(RValue<Float4> x, bool pp = false);
Float4 exponential(RValue<Float4> x, bool pp = false);
Float4 logarithm(RValue<Float4> x, bool pp = false);
Float4 power(RValue<Float4> x, RValue<Float4> y, bool pp = false);
Float4 reciprocal(RValue<Float4> x, bool pp = false, bool finite = false, bool exactAtPow2 = false);
Float4 reciprocalSquareRoot(RValue<Float4> x, bool abs, bool pp = false);
Float4 modulo(RValue<Float4> x, RValue<Float4> y);
Float4 sine_pi(RValue<Float4> x, bool pp = false);    // limited to [-pi, pi] range
Float4 cosine_pi(RValue<Float4> x, bool pp = false);  // limited to [-pi, pi] range
Float4 sine(RValue<Float4> x, bool pp = false);
Float4 cosine(RValue<Float4> x, bool pp = false);
Float4 tangent(RValue<Float4> x, bool pp = false);
Float4 arccos(RValue<Float4> x, bool pp = false);
Float4 arcsin(RValue<Float4> x, bool pp = false);
Float4 arctan(RValue<Float4> x, bool pp = false);
Float4 arctan(RValue<Float4> y, RValue<Float4> x, bool pp = false);
Float4 sineh(RValue<Float4> x, bool pp = false);
Float4 cosineh(RValue<Float4> x, bool pp = false);
Float4 tangenth(RValue<Float4> x, bool pp = false);
Float4 arccosh(RValue<Float4> x, bool pp = false);  // Limited to x >= 1
Float4 arcsinh(RValue<Float4> x, bool pp = false);
Float4 arctanh(RValue<Float4> x, bool pp = false);  // Limited to ]-1, 1[ range

Float4 dot2(const Vector4f &v0, const Vector4f &v1);
Float4 dot3(const Vector4f &v0, const Vector4f &v1);
Float4 dot4(const Vector4f &v0, const Vector4f &v1);

void transpose4x4(Short4 &row0, Short4 &row1, Short4 &row2, Short4 &row3);
void transpose4x3(Short4 &row0, Short4 &row1, Short4 &row2, Short4 &row3);
void transpose4x4(Float4 &row0, Float4 &row1, Float4 &row2, Float4 &row3);
void transpose4x3(Float4 &row0, Float4 &row1, Float4 &row2, Float4 &row3);
void transpose4x2(Float4 &row0, Float4 &row1, Float4 &row2, Float4 &row3);
void transpose4x1(Float4 &row0, Float4 &row1, Float4 &row2, Float4 &row3);
void transpose2x4(Float4 &row0, Float4 &row1, Float4 &row2, Float4 &row3);
void transpose4xN(Float4 &row0, Float4 &row1, Float4 &row2, Float4 &row3, int N);

sw::SIMD::UInt halfToFloatBits(sw::SIMD::UInt halfBits);
sw::SIMD::UInt floatToHalfBits(sw::SIMD::UInt floatBits, bool storeInUpperBits);
Float4 r11g11b10Unpack(UInt r11g11b10bits);
UInt r11g11b10Pack(const Float4 &value);
Vector4s a2b10g10r10Unpack(const Int4 &value);
Vector4s a2r10g10b10Unpack(const Int4 &value);

rr::RValue<rr::Bool> AnyTrue(rr::RValue<sw::SIMD::Int> const &ints);

rr::RValue<rr::Bool> AnyFalse(rr::RValue<sw::SIMD::Int> const &ints);

template<typename T>
inline rr::RValue<T> AndAll(rr::RValue<T> const &mask);

template<typename T>
inline rr::RValue<T> OrAll(rr::RValue<T> const &mask);

rr::RValue<sw::SIMD::Float> Sign(rr::RValue<sw::SIMD::Float> const &val);

// Returns the <whole, frac> of val.
// Both whole and frac will have the same sign as val.
std::pair<rr::RValue<sw::SIMD::Float>, rr::RValue<sw::SIMD::Float>>
Modf(rr::RValue<sw::SIMD::Float> const &val);
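// For example, a lane holding -1.75 splits into whole = -1.0 and frac = -0.75.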

// Returns the number of 1s in bits, per lane.
sw::SIMD::UInt CountBits(rr::RValue<sw::SIMD::UInt> const &bits);

// Returns 1 << bits.
// If the resulting bit overflows a 32-bit integer, 0 is returned.
rr::RValue<sw::SIMD::UInt> NthBit32(rr::RValue<sw::SIMD::UInt> const &bits);

// Returns bitCount 1's starting from the LSB.
rr::RValue<sw::SIMD::UInt> Bitmask32(rr::RValue<sw::SIMD::UInt> const &bitCount);
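// For example, per lane: NthBit32(5) yields 0x20, and Bitmask32(5) yields 0x1F.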

// Performs a fused-multiply add, returning a * b + c.
rr::RValue<sw::SIMD::Float> FMA(
    rr::RValue<sw::SIMD::Float> const &a,
    rr::RValue<sw::SIMD::Float> const &b,
    rr::RValue<sw::SIMD::Float> const &c);

// Returns the exponent of the floating point number f.
// Assumes IEEE 754
rr::RValue<sw::SIMD::Int> Exponent(rr::RValue<sw::SIMD::Float> f);

// Returns y if y < x; otherwise result is x.
// If one operand is a NaN, the other operand is the result.
// If both operands are NaN, the result is a NaN.
rr::RValue<sw::SIMD::Float> NMin(rr::RValue<sw::SIMD::Float> const &x, rr::RValue<sw::SIMD::Float> const &y);

// Returns y if y > x; otherwise result is x.
// If one operand is a NaN, the other operand is the result.
// If both operands are NaN, the result is a NaN.
rr::RValue<sw::SIMD::Float> NMax(rr::RValue<sw::SIMD::Float> const &x, rr::RValue<sw::SIMD::Float> const &y);

// Returns the determinant of a 2x2 matrix.
rr::RValue<sw::SIMD::Float> Determinant(
    rr::RValue<sw::SIMD::Float> const &a, rr::RValue<sw::SIMD::Float> const &b,
    rr::RValue<sw::SIMD::Float> const &c, rr::RValue<sw::SIMD::Float> const &d);
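// (For the 2x2 case this evaluates to a*d - b*c, computed per lane.)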

// Returns the determinant of a 3x3 matrix.
rr::RValue<sw::SIMD::Float> Determinant(
    rr::RValue<sw::SIMD::Float> const &a, rr::RValue<sw::SIMD::Float> const &b, rr::RValue<sw::SIMD::Float> const &c,
    rr::RValue<sw::SIMD::Float> const &d, rr::RValue<sw::SIMD::Float> const &e, rr::RValue<sw::SIMD::Float> const &f,
    rr::RValue<sw::SIMD::Float> const &g, rr::RValue<sw::SIMD::Float> const &h, rr::RValue<sw::SIMD::Float> const &i);

// Returns the determinant of a 4x4 matrix.
rr::RValue<sw::SIMD::Float> Determinant(
    rr::RValue<sw::SIMD::Float> const &a, rr::RValue<sw::SIMD::Float> const &b, rr::RValue<sw::SIMD::Float> const &c, rr::RValue<sw::SIMD::Float> const &d,
    rr::RValue<sw::SIMD::Float> const &e, rr::RValue<sw::SIMD::Float> const &f, rr::RValue<sw::SIMD::Float> const &g, rr::RValue<sw::SIMD::Float> const &h,
    rr::RValue<sw::SIMD::Float> const &i, rr::RValue<sw::SIMD::Float> const &j, rr::RValue<sw::SIMD::Float> const &k, rr::RValue<sw::SIMD::Float> const &l,
    rr::RValue<sw::SIMD::Float> const &m, rr::RValue<sw::SIMD::Float> const &n, rr::RValue<sw::SIMD::Float> const &o, rr::RValue<sw::SIMD::Float> const &p);

// Returns the inverse of a 2x2 matrix.
std::array<rr::RValue<sw::SIMD::Float>, 4> MatrixInverse(
    rr::RValue<sw::SIMD::Float> const &a, rr::RValue<sw::SIMD::Float> const &b,
    rr::RValue<sw::SIMD::Float> const &c, rr::RValue<sw::SIMD::Float> const &d);

// Returns the inverse of a 3x3 matrix.
std::array<rr::RValue<sw::SIMD::Float>, 9> MatrixInverse(
    rr::RValue<sw::SIMD::Float> const &a, rr::RValue<sw::SIMD::Float> const &b, rr::RValue<sw::SIMD::Float> const &c,
    rr::RValue<sw::SIMD::Float> const &d, rr::RValue<sw::SIMD::Float> const &e, rr::RValue<sw::SIMD::Float> const &f,
    rr::RValue<sw::SIMD::Float> const &g, rr::RValue<sw::SIMD::Float> const &h, rr::RValue<sw::SIMD::Float> const &i);

// Returns the inverse of a 4x4 matrix.
std::array<rr::RValue<sw::SIMD::Float>, 16> MatrixInverse(
    rr::RValue<sw::SIMD::Float> const &a, rr::RValue<sw::SIMD::Float> const &b, rr::RValue<sw::SIMD::Float> const &c, rr::RValue<sw::SIMD::Float> const &d,
    rr::RValue<sw::SIMD::Float> const &e, rr::RValue<sw::SIMD::Float> const &f, rr::RValue<sw::SIMD::Float> const &g, rr::RValue<sw::SIMD::Float> const &h,
    rr::RValue<sw::SIMD::Float> const &i, rr::RValue<sw::SIMD::Float> const &j, rr::RValue<sw::SIMD::Float> const &k, rr::RValue<sw::SIMD::Float> const &l,
    rr::RValue<sw::SIMD::Float> const &m, rr::RValue<sw::SIMD::Float> const &n, rr::RValue<sw::SIMD::Float> const &o, rr::RValue<sw::SIMD::Float> const &p);

////////////////////////////////////////////////////////////////////////////
// Inline functions
////////////////////////////////////////////////////////////////////////////

template<typename T>
inline T SIMD::Pointer::Load(OutOfBoundsBehavior robustness, Int mask, bool atomic /* = false */, std::memory_order order /* = std::memory_order_relaxed */, int alignment /* = sizeof(float) */)
{
    using EL = typename Element<T>::type;

    if(isStaticallyInBounds(sizeof(float), robustness))
    {
        // All elements are statically known to be in-bounds.
        // We can avoid costly conditional on masks.

        if(hasStaticSequentialOffsets(sizeof(float)))
        {
            // Offsets are sequential. Perform regular load.
            return rr::Load(rr::Pointer<T>(base + staticOffsets[0]), alignment, atomic, order);
        }
        if(hasStaticEqualOffsets())
        {
            // Load one, replicate.
            return T(*rr::Pointer<EL>(base + staticOffsets[0], alignment));
        }
    }
    else
    {
        switch(robustness)
        {
        case OutOfBoundsBehavior::Nullify:
        case OutOfBoundsBehavior::RobustBufferAccess:
        case OutOfBoundsBehavior::UndefinedValue:
            mask &= isInBounds(sizeof(float), robustness);  // Disable out-of-bounds reads.
            break;
        case OutOfBoundsBehavior::UndefinedBehavior:
            // Nothing to do. Application/compiler must guarantee no out-of-bounds accesses.
            break;
        }
    }

    auto offs = offsets();

    if(!atomic && order == std::memory_order_relaxed)
    {
        if(hasStaticEqualOffsets())
        {
            // Load one, replicate.
            // Be careful of the case where the post-bounds-check mask
            // is 0, in which case we must not load.
            T out = T(0);
            If(AnyTrue(mask))
            {
                EL el = *rr::Pointer<EL>(base + staticOffsets[0], alignment);
                out = T(el);
            }
            return out;
        }

        bool zeroMaskedLanes = true;
        switch(robustness)
        {
        case OutOfBoundsBehavior::Nullify:
        case OutOfBoundsBehavior::RobustBufferAccess:  // Must either return an in-bounds value, or zero.
            zeroMaskedLanes = true;
            break;
        case OutOfBoundsBehavior::UndefinedValue:
        case OutOfBoundsBehavior::UndefinedBehavior:
            zeroMaskedLanes = false;
            break;
        }

        if(hasStaticSequentialOffsets(sizeof(float)))
        {
            return rr::MaskedLoad(rr::Pointer<T>(base + staticOffsets[0]), mask, alignment, zeroMaskedLanes);
        }

        return rr::Gather(rr::Pointer<EL>(base), offs, mask, alignment, zeroMaskedLanes);
    }
    else
    {
        T out;
        auto anyLanesDisabled = AnyFalse(mask);
        If(hasEqualOffsets() && !anyLanesDisabled)
        {
            // Load one, replicate.
            auto offset = Extract(offs, 0);
            out = T(rr::Load(rr::Pointer<EL>(&base[offset]), alignment, atomic, order));
        }
        Else If(hasSequentialOffsets(sizeof(float)) && !anyLanesDisabled)
        {
            // Load all elements in a single SIMD instruction.
            auto offset = Extract(offs, 0);
            out = rr::Load(rr::Pointer<T>(&base[offset]), alignment, atomic, order);
        }
        Else
        {
            // Divergent offsets or masked lanes.
            out = T(0);
            for(int i = 0; i < SIMD::Width; i++)
            {
                If(Extract(mask, i) != 0)
                {
                    auto offset = Extract(offs, i);
                    auto el = rr::Load(rr::Pointer<EL>(&base[offset]), alignment, atomic, order);
                    out = Insert(out, el, i);
                }
            }
        }
        return out;
    }
}

template<typename T>
inline void SIMD::Pointer::Store(T val, OutOfBoundsBehavior robustness, Int mask, bool atomic /* = false */, std::memory_order order /* = std::memory_order_relaxed */)
{
    using EL = typename Element<T>::type;
    constexpr size_t alignment = sizeof(float);
    auto offs = offsets();

    switch(robustness)
    {
    case OutOfBoundsBehavior::Nullify:
    case OutOfBoundsBehavior::RobustBufferAccess:  // TODO: Allows writing anywhere within bounds. Could be faster than masking.
    case OutOfBoundsBehavior::UndefinedValue:      // Should not be used for store operations. Treat as robust buffer access.
        mask &= isInBounds(sizeof(float), robustness);  // Disable out-of-bounds writes.
        break;
    case OutOfBoundsBehavior::UndefinedBehavior:
        // Nothing to do. Application/compiler must guarantee no out-of-bounds accesses.
        break;
    }

    if(!atomic && order == std::memory_order_relaxed)
    {
        if(hasStaticEqualOffsets())
        {
            If(AnyTrue(mask))
            {
                // All equal. One of these writes will win -- elect the winning lane.
                auto v0111 = SIMD::Int(0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
                auto elect = mask & ~(v0111 & (mask.xxyz | mask.xxxy | mask.xxxx));
                auto maskedVal = As<SIMD::Int>(val) & elect;
                auto scalarVal = Extract(maskedVal, 0) |
                                 Extract(maskedVal, 1) |
                                 Extract(maskedVal, 2) |
                                 Extract(maskedVal, 3);
                *rr::Pointer<EL>(base + staticOffsets[0], alignment) = As<EL>(scalarVal);
            }
        }
        else if(hasStaticSequentialOffsets(sizeof(float)))
        {
            if(isStaticallyInBounds(sizeof(float), robustness))
            {
                // Pointer has no elements OOB, and the store is not atomic.
                // Perform a read-modify-write (RMW).
                auto p = rr::Pointer<SIMD::Int>(base + staticOffsets[0], alignment);
                auto prev = *p;
                *p = (prev & ~mask) | (As<SIMD::Int>(val) & mask);
            }
            else
            {
                rr::MaskedStore(rr::Pointer<T>(base + staticOffsets[0]), val, mask, alignment);
            }
        }
        else
        {
            rr::Scatter(rr::Pointer<EL>(base), val, offs, mask, alignment);
        }
    }
    else
    {
        auto anyLanesDisabled = AnyFalse(mask);
        If(hasSequentialOffsets(sizeof(float)) && !anyLanesDisabled)
        {
            // Store all elements in a single SIMD instruction.
            auto offset = Extract(offs, 0);
            rr::Store(val, rr::Pointer<T>(&base[offset]), alignment, atomic, order);
        }
        Else
        {
            // Divergent offsets or masked lanes.
            for(int i = 0; i < SIMD::Width; i++)
            {
                If(Extract(mask, i) != 0)
                {
                    auto offset = Extract(offs, i);
                    rr::Store(Extract(val, i), rr::Pointer<EL>(&base[offset]), alignment, atomic, order);
                }
            }
        }
    }
}

template<typename T>
inline void SIMD::Pointer::Store(RValue<T> val, OutOfBoundsBehavior robustness, Int mask, bool atomic /* = false */, std::memory_order order /* = std::memory_order_relaxed */)
{
    Store(T(val), robustness, mask, atomic, order);
}

template<typename T>
inline rr::RValue<T> AndAll(rr::RValue<T> const &mask)
{
    T v1 = mask;               // [x] [y] [z] [w]
    T v2 = v1.xzxz & v1.ywyw;  // [xy] [zw] [xy] [zw]
    return v2.xxxx & v2.yyyy;  // [xyzw] [xyzw] [xyzw] [xyzw]
}

template<typename T>
inline rr::RValue<T> OrAll(rr::RValue<T> const &mask)
{
    T v1 = mask;               // [x] [y] [z] [w]
    T v2 = v1.xzxz | v1.ywyw;  // [xy] [zw] [xy] [zw]
    return v2.xxxx | v2.yyyy;  // [xyzw] [xyzw] [xyzw] [xyzw]
}

}  // namespace sw

#ifdef ENABLE_RR_PRINT
namespace rr {
template<>
struct PrintValue::Ty<sw::Vector4f>
{
    static std::string fmt(const sw::Vector4f &v)
    {
        return "[x: " + PrintValue::fmt(v.x) +
               ", y: " + PrintValue::fmt(v.y) +
               ", z: " + PrintValue::fmt(v.z) +
               ", w: " + PrintValue::fmt(v.w) + "]";
    }

    static std::vector<rr::Value *> val(const sw::Vector4f &v)
    {
        return PrintValue::vals(v.x, v.y, v.z, v.w);
    }
};
template<>
struct PrintValue::Ty<sw::Vector4s>
{
    static std::string fmt(const sw::Vector4s &v)
    {
        return "[x: " + PrintValue::fmt(v.x) +
               ", y: " + PrintValue::fmt(v.y) +
               ", z: " + PrintValue::fmt(v.z) +
               ", w: " + PrintValue::fmt(v.w) + "]";
    }

    static std::vector<rr::Value *> val(const sw::Vector4s &v)
    {
        return PrintValue::vals(v.x, v.y, v.z, v.w);
    }
};
template<>
struct PrintValue::Ty<sw::SIMD::Pointer>
{
    static std::string fmt(const sw::SIMD::Pointer &v)
    {
        return "{" + PrintValue::fmt(v.base) + " +" + PrintValue::fmt(v.offsets()) + "}";
    }

    static std::vector<rr::Value *> val(const sw::SIMD::Pointer &v)
    {
        return PrintValue::vals(v.base, v.offsets());
    }
};
}  // namespace rr
#endif  // ENABLE_RR_PRINT

#endif  // sw_ShaderCore_hpp