1 /*
2 * Copyright 2018 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #ifndef SkRasterPipeline_opts_DEFINED
9 #define SkRasterPipeline_opts_DEFINED
10
11 #include "include/core/SkData.h"
12 #include "include/core/SkTypes.h"
13 #include "include/private/base/SkMalloc.h"
14 #include "modules/skcms/skcms.h"
15 #include "src/base/SkUtils.h" // unaligned_{load,store}
16 #include "src/core/SkRasterPipeline.h"
17 #include <cstdint>
18
19 // Every function in this file should be marked static and inline using SI.
20 #if defined(__clang__)
21 #define SI __attribute__((always_inline)) static inline
22 #else
23 #define SI static inline
24 #endif
25
26 template <typename Dst, typename Src>
27 SI Dst widen_cast(const Src& src) {
28 static_assert(sizeof(Dst) > sizeof(Src));
29 static_assert(std::is_trivially_copyable<Dst>::value);
30 static_assert(std::is_trivially_copyable<Src>::value);
31 Dst dst;
32 memcpy(&dst, &src, sizeof(Src));
33 return dst;
34 }
35
36 struct Ctx {
37 SkRasterPipelineStage* fStage;
38
39 template <typename T>
40 operator T*() {
41 return (T*)fStage->ctx;
42 }
43 };
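// Usage sketch: a stage pulls its typed context straight out of the current program slot by
// initializing a pointer from Ctx and letting the templated conversion operator do the cast,
// e.g. `SkRasterPipeline_RewindCtx* ctx = Ctx{program};` as stack_checkpoint() does below,
// so stage bodies never spell out the cast from program->ctx themselves.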
44
45 using NoCtx = const void*;
46
47 #if !defined(__clang__)
48 #define JUMPER_IS_SCALAR
49 #elif defined(SK_ARM_HAS_NEON)
50 #define JUMPER_IS_NEON
51 #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SKX
52 #define JUMPER_IS_SKX
53 #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
54 #define JUMPER_IS_HSW
55 #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
56 #define JUMPER_IS_AVX
57 #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
58 #define JUMPER_IS_SSE41
59 #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
60 #define JUMPER_IS_SSE2
61 #else
62 #define JUMPER_IS_SCALAR
63 #endif
64
65 // Older Clangs seem to crash when generating non-optimized NEON code for ARMv7.
66 #if defined(__clang__) && !defined(__OPTIMIZE__) && defined(SK_CPU_ARM32)
67 // Apple Clang 9 and vanilla Clang 5 are fine, and may even be conservative.
68 #if defined(__apple_build_version__) && __clang_major__ < 9
69 #define JUMPER_IS_SCALAR
70 #elif __clang_major__ < 5
71 #define JUMPER_IS_SCALAR
72 #endif
73
74 #if defined(JUMPER_IS_NEON) && defined(JUMPER_IS_SCALAR)
75 #undef JUMPER_IS_NEON
76 #endif
77 #endif
78
79 #if defined(JUMPER_IS_SCALAR)
80 #include <math.h>
81 #elif defined(JUMPER_IS_NEON)
82 #include <arm_neon.h>
83 #else
84 #include <immintrin.h>
85 #endif
86
87 // Notes:
88 // * rcp_fast and rcp_precise both produce a reciprocal, but rcp_fast is an estimate with at least
89 // 12 bits of precision while rcp_precise should be accurate for float size. For ARM rcp_precise
90 // requires 2 Newton-Raphson refinement steps because its estimate has 8 bit precision, and for
91 // Intel this requires one additional step because its estimate has 12 bit precision.
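// * A concrete sketch of one Newton-Raphson refinement for a reciprocal estimate e ~= 1/v is
//   e' = e * (2 - v*e). For example, with v = 3 and a rough estimate e = 0.333, one step gives
//   e' = 0.333 * (2 - 0.999) = 0.333333, roughly doubling the number of correct bits; the ARM
//   path below applies this step twice and the Intel path once.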
92
93 namespace SK_OPTS_NS {
94 #if defined(JUMPER_IS_SCALAR)
95 // This path should lead to portable scalar code.
96 using F = float ;
97 using I32 = int32_t;
98 using U64 = uint64_t;
99 using U32 = uint32_t;
100 using U16 = uint16_t;
101 using U8 = uint8_t ;
102
103 SI F min(F a, F b) { return fminf(a,b); }
104 SI I32 min(I32 a, I32 b) { return a < b ? a : b; }
105 SI U32 min(U32 a, U32 b) { return a < b ? a : b; }
106 SI F max(F a, F b) { return fmaxf(a,b); }
107 SI I32 max(I32 a, I32 b) { return a > b ? a : b; }
108 SI U32 max(U32 a, U32 b) { return a > b ? a : b; }
109
110 SI F mad(F f, F m, F a) { return f*m+a; }
111 SI F abs_ (F v) { return fabsf(v); }
112 SI I32 abs_ (I32 v) { return v < 0 ? -v : v; }
113 SI F floor_(F v) { return floorf(v); }
114 SI F ceil_(F v) { return ceilf(v); }
115 SI F rcp_fast(F v) { return 1.0f / v; }
116 SI F rsqrt (F v) { return 1.0f / sqrtf(v); }
117 SI F sqrt_ (F v) { return sqrtf(v); }
118 SI F rcp_precise (F v) { return 1.0f / v; }
119
120 SI U32 round (F v, F scale) { return (uint32_t)(v*scale + 0.5f); }
121 SI U16 pack(U32 v) { return (U16)v; }
122 SI U8 pack(U16 v) { return (U8)v; }
123
124 SI F if_then_else(I32 c, F t, F e) { return c ? t : e; }
125 SI bool any(I32 c) { return c != 0; }
126 SI bool all(I32 c) { return c != 0; }
127
128 template <typename T>
129 SI T gather(const T* p, U32 ix) { return p[ix]; }
130
131 SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
132 *r = ptr[0];
133 *g = ptr[1];
134 }
135 SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
136 ptr[0] = r;
137 ptr[1] = g;
138 }
139 SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
140 *r = ptr[0];
141 *g = ptr[1];
142 *b = ptr[2];
143 }
144 SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
145 *r = ptr[0];
146 *g = ptr[1];
147 *b = ptr[2];
148 *a = ptr[3];
149 }
150 SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
151 ptr[0] = r;
152 ptr[1] = g;
153 ptr[2] = b;
154 ptr[3] = a;
155 }
156
157 SI void load2(const float* ptr, size_t tail, F* r, F* g) {
158 *r = ptr[0];
159 *g = ptr[1];
160 }
161 SI void store2(float* ptr, size_t tail, F r, F g) {
162 ptr[0] = r;
163 ptr[1] = g;
164 }
165 SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
166 *r = ptr[0];
167 *g = ptr[1];
168 *b = ptr[2];
169 *a = ptr[3];
170 }
171 SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
172 ptr[0] = r;
173 ptr[1] = g;
174 ptr[2] = b;
175 ptr[3] = a;
176 }
177
178 #elif defined(JUMPER_IS_NEON)
179 // Since we know we're using Clang, we can use its vector extensions.
180 template <typename T> using V = T __attribute__((ext_vector_type(4)));
181 using F = V<float >;
182 using I32 = V< int32_t>;
183 using U64 = V<uint64_t>;
184 using U32 = V<uint32_t>;
185 using U16 = V<uint16_t>;
186 using U8 = V<uint8_t >;
187
188 // We polyfill a few routines that Clang doesn't build into ext_vector_types.
189 SI F min(F a, F b) { return vminq_f32(a,b); }
190 SI I32 min(I32 a, I32 b) { return vminq_s32(a,b); }
191 SI U32 min(U32 a, U32 b) { return vminq_u32(a,b); }
192 SI F max(F a, F b) { return vmaxq_f32(a,b); }
193 SI I32 max(I32 a, I32 b) { return vmaxq_s32(a,b); }
194 SI U32 max(U32 a, U32 b) { return vmaxq_u32(a,b); }
195
196 SI F abs_ (F v) { return vabsq_f32(v); }
197 SI I32 abs_ (I32 v) { return vabsq_s32(v); }
198 SI F rcp_fast(F v) { auto e = vrecpeq_f32 (v); return vrecpsq_f32 (v,e ) * e; }
199 SI F rcp_precise (F v) { auto e = rcp_fast(v); return vrecpsq_f32 (v,e ) * e; }
200 SI F rsqrt (F v) { auto e = vrsqrteq_f32(v); return vrsqrtsq_f32(v,e*e) * e; }
201
202 SI U16 pack(U32 v) { return __builtin_convertvector(v, U16); }
203 SI U8 pack(U16 v) { return __builtin_convertvector(v, U8); }
204
205 SI F if_then_else(I32 c, F t, F e) { return vbslq_f32((U32)c,t,e); }
206
207 #if defined(SK_CPU_ARM64)
208 SI bool any(I32 c) { return vmaxvq_u32((U32)c) != 0; }
209 SI bool all(I32 c) { return vminvq_u32((U32)c) != 0; }
210
211 SI F mad(F f, F m, F a) { return vfmaq_f32(a,f,m); }
212 SI F floor_(F v) { return vrndmq_f32(v); }
213 SI F ceil_(F v) { return vrndpq_f32(v); }
214 SI F sqrt_(F v) { return vsqrtq_f32(v); }
215 SI U32 round(F v, F scale) { return vcvtnq_u32_f32(v*scale); }
216 #else
217 SI bool any(I32 c) { return c[0] | c[1] | c[2] | c[3]; }
218 SI bool all(I32 c) { return c[0] & c[1] & c[2] & c[3]; }
219
220 SI F mad(F f, F m, F a) { return vmlaq_f32(a,f,m); }
221 SI F floor_(F v) {
222 F roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
223 return roundtrip - if_then_else(roundtrip > v, 1, 0);
224 }
225
226 SI F ceil_(F v) {
227 F roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
228 return roundtrip + if_then_else(roundtrip < v, 1, 0);
229 }
230
231 SI F sqrt_(F v) {
232 auto e = vrsqrteq_f32(v); // Estimate and two refinement steps for e = rsqrt(v).
233 e *= vrsqrtsq_f32(v,e*e);
234 e *= vrsqrtsq_f32(v,e*e);
235 return v*e; // sqrt(v) == v*rsqrt(v).
236 }
237
238 SI U32 round(F v, F scale) {
239 return vcvtq_u32_f32(mad(v,scale,0.5f));
240 }
241 #endif
242
243 template <typename T>
244 SI V<T> gather(const T* p, U32 ix) {
245 return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]};
246 }
247 SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
248 uint16x4x2_t rg;
249 if (__builtin_expect(tail,0)) {
250 if ( true ) { rg = vld2_lane_u16(ptr + 0, rg, 0); }
251 if (tail > 1) { rg = vld2_lane_u16(ptr + 2, rg, 1); }
252 if (tail > 2) { rg = vld2_lane_u16(ptr + 4, rg, 2); }
253 } else {
254 rg = vld2_u16(ptr);
255 }
256 *r = rg.val[0];
257 *g = rg.val[1];
258 }
259 SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
260 if (__builtin_expect(tail,0)) {
261 if ( true ) { vst2_lane_u16(ptr + 0, (uint16x4x2_t{{r,g}}), 0); }
262 if (tail > 1) { vst2_lane_u16(ptr + 2, (uint16x4x2_t{{r,g}}), 1); }
263 if (tail > 2) { vst2_lane_u16(ptr + 4, (uint16x4x2_t{{r,g}}), 2); }
264 } else {
265 vst2_u16(ptr, (uint16x4x2_t{{r,g}}));
266 }
267 }
268 SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
269 uint16x4x3_t rgb;
270 if (__builtin_expect(tail,0)) {
271 if ( true ) { rgb = vld3_lane_u16(ptr + 0, rgb, 0); }
272 if (tail > 1) { rgb = vld3_lane_u16(ptr + 3, rgb, 1); }
273 if (tail > 2) { rgb = vld3_lane_u16(ptr + 6, rgb, 2); }
274 } else {
275 rgb = vld3_u16(ptr);
276 }
277 *r = rgb.val[0];
278 *g = rgb.val[1];
279 *b = rgb.val[2];
280 }
281 SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
282 uint16x4x4_t rgba;
283 if (__builtin_expect(tail,0)) {
284 if ( true ) { rgba = vld4_lane_u16(ptr + 0, rgba, 0); }
285 if (tail > 1) { rgba = vld4_lane_u16(ptr + 4, rgba, 1); }
286 if (tail > 2) { rgba = vld4_lane_u16(ptr + 8, rgba, 2); }
287 } else {
288 rgba = vld4_u16(ptr);
289 }
290 *r = rgba.val[0];
291 *g = rgba.val[1];
292 *b = rgba.val[2];
293 *a = rgba.val[3];
294 }
295
296 SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
297 if (__builtin_expect(tail,0)) {
298 if ( true ) { vst4_lane_u16(ptr + 0, (uint16x4x4_t{{r,g,b,a}}), 0); }
299 if (tail > 1) { vst4_lane_u16(ptr + 4, (uint16x4x4_t{{r,g,b,a}}), 1); }
300 if (tail > 2) { vst4_lane_u16(ptr + 8, (uint16x4x4_t{{r,g,b,a}}), 2); }
301 } else {
302 vst4_u16(ptr, (uint16x4x4_t{{r,g,b,a}}));
303 }
304 }
305 SI void load2(const float* ptr, size_t tail, F* r, F* g) {
306 float32x4x2_t rg;
307 if (__builtin_expect(tail,0)) {
308 if ( true ) { rg = vld2q_lane_f32(ptr + 0, rg, 0); }
309 if (tail > 1) { rg = vld2q_lane_f32(ptr + 2, rg, 1); }
310 if (tail > 2) { rg = vld2q_lane_f32(ptr + 4, rg, 2); }
311 } else {
312 rg = vld2q_f32(ptr);
313 }
314 *r = rg.val[0];
315 *g = rg.val[1];
316 }
317 SI void store2(float* ptr, size_t tail, F r, F g) {
318 if (__builtin_expect(tail,0)) {
319 if ( true ) { vst2q_lane_f32(ptr + 0, (float32x4x2_t{{r,g}}), 0); }
320 if (tail > 1) { vst2q_lane_f32(ptr + 2, (float32x4x2_t{{r,g}}), 1); }
321 if (tail > 2) { vst2q_lane_f32(ptr + 4, (float32x4x2_t{{r,g}}), 2); }
322 } else {
323 vst2q_f32(ptr, (float32x4x2_t{{r,g}}));
324 }
325 }
326 SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
327 float32x4x4_t rgba;
328 if (__builtin_expect(tail,0)) {
329 if ( true ) { rgba = vld4q_lane_f32(ptr + 0, rgba, 0); }
330 if (tail > 1) { rgba = vld4q_lane_f32(ptr + 4, rgba, 1); }
331 if (tail > 2) { rgba = vld4q_lane_f32(ptr + 8, rgba, 2); }
332 } else {
333 rgba = vld4q_f32(ptr);
334 }
335 *r = rgba.val[0];
336 *g = rgba.val[1];
337 *b = rgba.val[2];
338 *a = rgba.val[3];
339 }
340 SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
341 if (__builtin_expect(tail,0)) {
342 if ( true ) { vst4q_lane_f32(ptr + 0, (float32x4x4_t{{r,g,b,a}}), 0); }
343 if (tail > 1) { vst4q_lane_f32(ptr + 4, (float32x4x4_t{{r,g,b,a}}), 1); }
344 if (tail > 2) { vst4q_lane_f32(ptr + 8, (float32x4x4_t{{r,g,b,a}}), 2); }
345 } else {
346 vst4q_f32(ptr, (float32x4x4_t{{r,g,b,a}}));
347 }
348 }
349
350 #elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
351 // These are __m256 and __m256i, but friendlier and strongly-typed.
352 template <typename T> using V = T __attribute__((ext_vector_type(8)));
353 using F = V<float >;
354 using I32 = V< int32_t>;
355 using U64 = V<uint64_t>;
356 using U32 = V<uint32_t>;
357 using U16 = V<uint16_t>;
358 using U8 = V<uint8_t >;
359
360 SI F mad(F f, F m, F a) {
361 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
362 return _mm256_fmadd_ps(f,m,a);
363 #else
364 return f*m+a;
365 #endif
366 }
367
368 SI F min(F a, F b) { return _mm256_min_ps(a,b); }
369 SI I32 min(I32 a, I32 b) { return _mm256_min_epi32(a,b); }
370 SI U32 min(U32 a, U32 b) { return _mm256_min_epu32(a,b); }
371 SI F max(F a, F b) { return _mm256_max_ps(a,b); }
372 SI I32 max(I32 a, I32 b) { return _mm256_max_epi32(a,b); }
373 SI U32 max(U32 a, U32 b) { return _mm256_max_epu32(a,b); }
374
375 SI F abs_ (F v) { return _mm256_and_ps(v, 0-v); }
376 SI I32 abs_ (I32 v) { return _mm256_abs_epi32(v); }
377 SI F floor_(F v) { return _mm256_floor_ps(v); }
378 SI F ceil_(F v) { return _mm256_ceil_ps(v); }
379 SI F rcp_fast(F v) { return _mm256_rcp_ps (v); }
380 SI F rsqrt (F v) { return _mm256_rsqrt_ps(v); }
381 SI F sqrt_ (F v) { return _mm256_sqrt_ps (v); }
382 SI F rcp_precise (F v) {
383 F e = rcp_fast(v);
384 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
385 return _mm256_fnmadd_ps(v, e, _mm256_set1_ps(2.0f)) * e;
386 #else
387 return e * (2.0f - v * e);
388 #endif
389 }
390
391 SI U32 round (F v, F scale) { return _mm256_cvtps_epi32(v*scale); }
392 SI U16 pack(U32 v) {
393 return _mm_packus_epi32(_mm256_extractf128_si256(v, 0),
394 _mm256_extractf128_si256(v, 1));
395 }
396 SI U8 pack(U16 v) {
397 auto r = _mm_packus_epi16(v,v);
398 return sk_unaligned_load<U8>(&r);
399 }
400
401 SI F if_then_else(I32 c, F t, F e) { return _mm256_blendv_ps(e,t,c); }
402 // NOTE: This version of 'all' only works with mask values (true == all bits set)
403 SI bool any(I32 c) { return !_mm256_testz_si256(c, _mm256_set1_epi32(-1)); }
404 SI bool all(I32 c) { return _mm256_testc_si256(c, _mm256_set1_epi32(-1)); }
405
406 template <typename T>
407 SI V<T> gather(const T* p, U32 ix) {
408 return { p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]],
409 p[ix[4]], p[ix[5]], p[ix[6]], p[ix[7]], };
410 }
411 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
412 SI F gather(const float* p, U32 ix) { return _mm256_i32gather_ps (p, ix, 4); }
413 SI U32 gather(const uint32_t* p, U32 ix) { return _mm256_i32gather_epi32(p, ix, 4); }
414 SI U64 gather(const uint64_t* p, U32 ix) {
415 __m256i parts[] = {
416 _mm256_i32gather_epi64(p, _mm256_extracti128_si256(ix,0), 8),
417 _mm256_i32gather_epi64(p, _mm256_extracti128_si256(ix,1), 8),
418 };
419 return sk_bit_cast<U64>(parts);
420 }
421 #endif
422
423 SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
424 U16 _0123, _4567;
425 if (__builtin_expect(tail,0)) {
426 _0123 = _4567 = _mm_setzero_si128();
427 auto* d = &_0123;
428 if (tail > 3) {
429 *d = _mm_loadu_si128(((__m128i*)ptr) + 0);
430 tail -= 4;
431 ptr += 8;
432 d = &_4567;
433 }
434 bool high = false;
435 if (tail > 1) {
436 *d = _mm_loadu_si64(ptr);
437 tail -= 2;
438 ptr += 4;
439 high = true;
440 }
441 if (tail > 0) {
442 (*d)[high ? 4 : 0] = *(ptr + 0);
443 (*d)[high ? 5 : 1] = *(ptr + 1);
444 }
445 } else {
446 _0123 = _mm_loadu_si128(((__m128i*)ptr) + 0);
447 _4567 = _mm_loadu_si128(((__m128i*)ptr) + 1);
448 }
449 *r = _mm_packs_epi32(_mm_srai_epi32(_mm_slli_epi32(_0123, 16), 16),
450 _mm_srai_epi32(_mm_slli_epi32(_4567, 16), 16));
451 *g = _mm_packs_epi32(_mm_srai_epi32(_0123, 16),
452 _mm_srai_epi32(_4567, 16));
453 }
454 SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
455 auto _0123 = _mm_unpacklo_epi16(r, g),
456 _4567 = _mm_unpackhi_epi16(r, g);
457 if (__builtin_expect(tail,0)) {
458 const auto* s = &_0123;
459 if (tail > 3) {
460 _mm_storeu_si128((__m128i*)ptr, *s);
461 s = &_4567;
462 tail -= 4;
463 ptr += 8;
464 }
465 bool high = false;
466 if (tail > 1) {
467 _mm_storel_epi64((__m128i*)ptr, *s);
468 ptr += 4;
469 tail -= 2;
470 high = true;
471 }
472 if (tail > 0) {
473 if (high) {
474 *(int32_t*)ptr = _mm_extract_epi32(*s, 2);
475 } else {
476 *(int32_t*)ptr = _mm_cvtsi128_si32(*s);
477 }
478 }
479 } else {
480 _mm_storeu_si128((__m128i*)ptr + 0, _0123);
481 _mm_storeu_si128((__m128i*)ptr + 1, _4567);
482 }
483 }
484
485 SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
486 __m128i _0,_1,_2,_3,_4,_5,_6,_7;
487 if (__builtin_expect(tail,0)) {
488 auto load_rgb = [](const uint16_t* src) {
489 auto v = _mm_cvtsi32_si128(*(const uint32_t*)src);
490 return _mm_insert_epi16(v, src[2], 2);
491 };
492 _1 = _2 = _3 = _4 = _5 = _6 = _7 = _mm_setzero_si128();
493 if ( true ) { _0 = load_rgb(ptr + 0); }
494 if (tail > 1) { _1 = load_rgb(ptr + 3); }
495 if (tail > 2) { _2 = load_rgb(ptr + 6); }
496 if (tail > 3) { _3 = load_rgb(ptr + 9); }
497 if (tail > 4) { _4 = load_rgb(ptr + 12); }
498 if (tail > 5) { _5 = load_rgb(ptr + 15); }
499 if (tail > 6) { _6 = load_rgb(ptr + 18); }
500 } else {
501 // Load 0+1, 2+3, 4+5 normally, and 6+7 backed up 4 bytes so we don't run over.
502 auto _01 = _mm_loadu_si128((const __m128i*)(ptr + 0)) ;
503 auto _23 = _mm_loadu_si128((const __m128i*)(ptr + 6)) ;
504 auto _45 = _mm_loadu_si128((const __m128i*)(ptr + 12)) ;
505 auto _67 = _mm_srli_si128(_mm_loadu_si128((const __m128i*)(ptr + 16)), 4);
506 _0 = _01; _1 = _mm_srli_si128(_01, 6);
507 _2 = _23; _3 = _mm_srli_si128(_23, 6);
508 _4 = _45; _5 = _mm_srli_si128(_45, 6);
509 _6 = _67; _7 = _mm_srli_si128(_67, 6);
510 }
511
512 auto _02 = _mm_unpacklo_epi16(_0, _2), // r0 r2 g0 g2 b0 b2 xx xx
513 _13 = _mm_unpacklo_epi16(_1, _3),
514 _46 = _mm_unpacklo_epi16(_4, _6),
515 _57 = _mm_unpacklo_epi16(_5, _7);
516
517 auto rg0123 = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3
518 bx0123 = _mm_unpackhi_epi16(_02, _13), // b0 b1 b2 b3 xx xx xx xx
519 rg4567 = _mm_unpacklo_epi16(_46, _57),
520 bx4567 = _mm_unpackhi_epi16(_46, _57);
521
522 *r = _mm_unpacklo_epi64(rg0123, rg4567);
523 *g = _mm_unpackhi_epi64(rg0123, rg4567);
524 *b = _mm_unpacklo_epi64(bx0123, bx4567);
525 }
526 SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
527 __m128i _01, _23, _45, _67;
528 if (__builtin_expect(tail,0)) {
529 auto src = (const double*)ptr;
530 _01 = _23 = _45 = _67 = _mm_setzero_si128();
531 if (tail > 0) { _01 = _mm_loadl_pd(_01, src+0); }
532 if (tail > 1) { _01 = _mm_loadh_pd(_01, src+1); }
533 if (tail > 2) { _23 = _mm_loadl_pd(_23, src+2); }
534 if (tail > 3) { _23 = _mm_loadh_pd(_23, src+3); }
535 if (tail > 4) { _45 = _mm_loadl_pd(_45, src+4); }
536 if (tail > 5) { _45 = _mm_loadh_pd(_45, src+5); }
537 if (tail > 6) { _67 = _mm_loadl_pd(_67, src+6); }
538 } else {
539 _01 = _mm_loadu_si128(((__m128i*)ptr) + 0);
540 _23 = _mm_loadu_si128(((__m128i*)ptr) + 1);
541 _45 = _mm_loadu_si128(((__m128i*)ptr) + 2);
542 _67 = _mm_loadu_si128(((__m128i*)ptr) + 3);
543 }
544
545 auto _02 = _mm_unpacklo_epi16(_01, _23), // r0 r2 g0 g2 b0 b2 a0 a2
546 _13 = _mm_unpackhi_epi16(_01, _23), // r1 r3 g1 g3 b1 b3 a1 a3
547 _46 = _mm_unpacklo_epi16(_45, _67),
548 _57 = _mm_unpackhi_epi16(_45, _67);
549
550 auto rg0123 = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3
551 ba0123 = _mm_unpackhi_epi16(_02, _13), // b0 b1 b2 b3 a0 a1 a2 a3
552 rg4567 = _mm_unpacklo_epi16(_46, _57),
553 ba4567 = _mm_unpackhi_epi16(_46, _57);
554
555 *r = _mm_unpacklo_epi64(rg0123, rg4567);
556 *g = _mm_unpackhi_epi64(rg0123, rg4567);
557 *b = _mm_unpacklo_epi64(ba0123, ba4567);
558 *a = _mm_unpackhi_epi64(ba0123, ba4567);
559 }
560 SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
561 auto rg0123 = _mm_unpacklo_epi16(r, g), // r0 g0 r1 g1 r2 g2 r3 g3
562 rg4567 = _mm_unpackhi_epi16(r, g), // r4 g4 r5 g5 r6 g6 r7 g7
563 ba0123 = _mm_unpacklo_epi16(b, a),
564 ba4567 = _mm_unpackhi_epi16(b, a);
565
566 auto _01 = _mm_unpacklo_epi32(rg0123, ba0123),
567 _23 = _mm_unpackhi_epi32(rg0123, ba0123),
568 _45 = _mm_unpacklo_epi32(rg4567, ba4567),
569 _67 = _mm_unpackhi_epi32(rg4567, ba4567);
570
571 if (__builtin_expect(tail,0)) {
572 auto dst = (double*)ptr;
573 if (tail > 0) { _mm_storel_pd(dst+0, _01); }
574 if (tail > 1) { _mm_storeh_pd(dst+1, _01); }
575 if (tail > 2) { _mm_storel_pd(dst+2, _23); }
576 if (tail > 3) { _mm_storeh_pd(dst+3, _23); }
577 if (tail > 4) { _mm_storel_pd(dst+4, _45); }
578 if (tail > 5) { _mm_storeh_pd(dst+5, _45); }
579 if (tail > 6) { _mm_storel_pd(dst+6, _67); }
580 } else {
581 _mm_storeu_si128((__m128i*)ptr + 0, _01);
582 _mm_storeu_si128((__m128i*)ptr + 1, _23);
583 _mm_storeu_si128((__m128i*)ptr + 2, _45);
584 _mm_storeu_si128((__m128i*)ptr + 3, _67);
585 }
586 }
587
588 SI void load2(const float* ptr, size_t tail, F* r, F* g) {
589 F _0123, _4567;
590 if (__builtin_expect(tail, 0)) {
591 _0123 = _4567 = _mm256_setzero_ps();
592 F* d = &_0123;
593 if (tail > 3) {
594 *d = _mm256_loadu_ps(ptr);
595 ptr += 8;
596 tail -= 4;
597 d = &_4567;
598 }
599 bool high = false;
600 if (tail > 1) {
601 *d = _mm256_castps128_ps256(_mm_loadu_ps(ptr));
602 ptr += 4;
603 tail -= 2;
604 high = true;
605 }
606 if (tail > 0) {
607 *d = high ? _mm256_insertf128_ps(*d, _mm_loadu_si64(ptr), 1)
608 : _mm256_insertf128_ps(*d, _mm_loadu_si64(ptr), 0);
609 }
610 } else {
611 _0123 = _mm256_loadu_ps(ptr + 0);
612 _4567 = _mm256_loadu_ps(ptr + 8);
613 }
614
615 F _0145 = _mm256_permute2f128_pd(_0123, _4567, 0x20),
616 _2367 = _mm256_permute2f128_pd(_0123, _4567, 0x31);
617
618 *r = _mm256_shuffle_ps(_0145, _2367, 0x88);
619 *g = _mm256_shuffle_ps(_0145, _2367, 0xDD);
620 }
621 SI void store2(float* ptr, size_t tail, F r, F g) {
622 F _0145 = _mm256_unpacklo_ps(r, g),
623 _2367 = _mm256_unpackhi_ps(r, g);
624 F _0123 = _mm256_permute2f128_pd(_0145, _2367, 0x20),
625 _4567 = _mm256_permute2f128_pd(_0145, _2367, 0x31);
626
627 if (__builtin_expect(tail, 0)) {
628 const __m256* s = &_0123;
629 if (tail > 3) {
630 _mm256_storeu_ps(ptr, *s);
631 s = &_4567;
632 tail -= 4;
633 ptr += 8;
634 }
635 bool high = false;
636 if (tail > 1) {
637 _mm_storeu_ps(ptr, _mm256_extractf128_ps(*s, 0));
638 ptr += 4;
639 tail -= 2;
640 high = true;
641 }
642 if (tail > 0) {
643 *(ptr + 0) = (*s)[ high ? 4 : 0];
644 *(ptr + 1) = (*s)[ high ? 5 : 1];
645 }
646 } else {
647 _mm256_storeu_ps(ptr + 0, _0123);
648 _mm256_storeu_ps(ptr + 8, _4567);
649 }
650 }
651
652 SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
653 F _04, _15, _26, _37;
654 _04 = _15 = _26 = _37 = 0;
655 switch (tail) {
656 case 0: _37 = _mm256_insertf128_ps(_37, _mm_loadu_ps(ptr+28), 1); [[fallthrough]];
657 case 7: _26 = _mm256_insertf128_ps(_26, _mm_loadu_ps(ptr+24), 1); [[fallthrough]];
658 case 6: _15 = _mm256_insertf128_ps(_15, _mm_loadu_ps(ptr+20), 1); [[fallthrough]];
659 case 5: _04 = _mm256_insertf128_ps(_04, _mm_loadu_ps(ptr+16), 1); [[fallthrough]];
660 case 4: _37 = _mm256_insertf128_ps(_37, _mm_loadu_ps(ptr+12), 0); [[fallthrough]];
661 case 3: _26 = _mm256_insertf128_ps(_26, _mm_loadu_ps(ptr+ 8), 0); [[fallthrough]];
662 case 2: _15 = _mm256_insertf128_ps(_15, _mm_loadu_ps(ptr+ 4), 0); [[fallthrough]];
663 case 1: _04 = _mm256_insertf128_ps(_04, _mm_loadu_ps(ptr+ 0), 0);
664 }
665
666 F rg0145 = _mm256_unpacklo_ps(_04,_15), // r0 r1 g0 g1 | r4 r5 g4 g5
667 ba0145 = _mm256_unpackhi_ps(_04,_15),
668 rg2367 = _mm256_unpacklo_ps(_26,_37),
669 ba2367 = _mm256_unpackhi_ps(_26,_37);
670
671 *r = _mm256_unpacklo_pd(rg0145, rg2367);
672 *g = _mm256_unpackhi_pd(rg0145, rg2367);
673 *b = _mm256_unpacklo_pd(ba0145, ba2367);
674 *a = _mm256_unpackhi_pd(ba0145, ba2367);
675 }
676 SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
677 F rg0145 = _mm256_unpacklo_ps(r, g), // r0 g0 r1 g1 | r4 g4 r5 g5
678 rg2367 = _mm256_unpackhi_ps(r, g), // r2 ... | r6 ...
679 ba0145 = _mm256_unpacklo_ps(b, a), // b0 a0 b1 a1 | b4 a4 b5 a5
680 ba2367 = _mm256_unpackhi_ps(b, a); // b2 ... | b6 ...
681
682 F _04 = _mm256_unpacklo_pd(rg0145, ba0145), // r0 g0 b0 a0 | r4 g4 b4 a4
683 _15 = _mm256_unpackhi_pd(rg0145, ba0145), // r1 ... | r5 ...
684 _26 = _mm256_unpacklo_pd(rg2367, ba2367), // r2 ... | r6 ...
685 _37 = _mm256_unpackhi_pd(rg2367, ba2367); // r3 ... | r7 ...
686
687 if (__builtin_expect(tail, 0)) {
688 if (tail > 0) { _mm_storeu_ps(ptr+ 0, _mm256_extractf128_ps(_04, 0)); }
689 if (tail > 1) { _mm_storeu_ps(ptr+ 4, _mm256_extractf128_ps(_15, 0)); }
690 if (tail > 2) { _mm_storeu_ps(ptr+ 8, _mm256_extractf128_ps(_26, 0)); }
691 if (tail > 3) { _mm_storeu_ps(ptr+12, _mm256_extractf128_ps(_37, 0)); }
692 if (tail > 4) { _mm_storeu_ps(ptr+16, _mm256_extractf128_ps(_04, 1)); }
693 if (tail > 5) { _mm_storeu_ps(ptr+20, _mm256_extractf128_ps(_15, 1)); }
694 if (tail > 6) { _mm_storeu_ps(ptr+24, _mm256_extractf128_ps(_26, 1)); }
695 } else {
696 F _01 = _mm256_permute2f128_ps(_04, _15, 32), // 32 == 0010 0000 == lo, lo
697 _23 = _mm256_permute2f128_ps(_26, _37, 32),
698 _45 = _mm256_permute2f128_ps(_04, _15, 49), // 49 == 0011 0001 == hi, hi
699 _67 = _mm256_permute2f128_ps(_26, _37, 49);
700 _mm256_storeu_ps(ptr+ 0, _01);
701 _mm256_storeu_ps(ptr+ 8, _23);
702 _mm256_storeu_ps(ptr+16, _45);
703 _mm256_storeu_ps(ptr+24, _67);
704 }
705 }
706
707 #elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
708 template <typename T> using V = T __attribute__((ext_vector_type(4)));
709 using F = V<float >;
710 using I32 = V< int32_t>;
711 using U64 = V<uint64_t>;
712 using U32 = V<uint32_t>;
713 using U16 = V<uint16_t>;
714 using U8 = V<uint8_t >;
715
716 SI F if_then_else(I32 c, F t, F e) {
717 return _mm_or_ps(_mm_and_ps(c, t), _mm_andnot_ps(c, e));
718 }
719
720 SI F min(F a, F b) { return _mm_min_ps(a,b); }
721 SI F max(F a, F b) { return _mm_max_ps(a,b); }
722 #if defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
723 SI I32 min(I32 a, I32 b) { return _mm_min_epi32(a,b); }
724 SI U32 min(U32 a, U32 b) { return _mm_min_epu32(a,b); }
725 SI I32 max(I32 a, I32 b) { return _mm_max_epi32(a,b); }
726 SI U32 max(U32 a, U32 b) { return _mm_max_epu32(a,b); }
727 #else
728 SI I32 min(I32 a, I32 b) {
729 return sk_bit_cast<I32>(if_then_else(a < b, sk_bit_cast<F>(a), sk_bit_cast<F>(b)));
730 }
731 SI U32 min(U32 a, U32 b) {
732 return sk_bit_cast<U32>(if_then_else(a < b, sk_bit_cast<F>(a), sk_bit_cast<F>(b)));
733 }
734 SI I32 max(I32 a, I32 b) {
735 return sk_bit_cast<I32>(if_then_else(a > b, sk_bit_cast<F>(a), sk_bit_cast<F>(b)));
736 }
737 SI U32 max(U32 a, U32 b) {
738 return sk_bit_cast<U32>(if_then_else(a > b, sk_bit_cast<F>(a), sk_bit_cast<F>(b)));
739 }
740 #endif
741
742 SI F mad(F f, F m, F a) { return f*m+a; }
743 SI F abs_(F v) { return _mm_and_ps(v, 0-v); }
744 #if defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
745 SI I32 abs_(I32 v) { return _mm_abs_epi32(v); }
746 #else
747 SI I32 abs_(I32 v) { return max(v, -v); }
748 #endif
749 SI F rcp_fast(F v) { return _mm_rcp_ps (v); }
750 SI F rcp_precise (F v) { F e = rcp_fast(v); return e * (2.0f - v * e); }
751 SI F rsqrt (F v) { return _mm_rsqrt_ps(v); }
752 SI F sqrt_(F v) { return _mm_sqrt_ps (v); }
753
754 SI U32 round(F v, F scale) { return _mm_cvtps_epi32(v*scale); }
755
756 SI U16 pack(U32 v) {
757 #if defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
758 auto p = _mm_packus_epi32(v,v);
759 #else
760 // Sign extend so that _mm_packs_epi32() does the pack we want.
761 auto p = _mm_srai_epi32(_mm_slli_epi32(v, 16), 16);
762 p = _mm_packs_epi32(p,p);
763 #endif
764 return sk_unaligned_load<U16>(&p); // We have two copies. Return (the lower) one.
765 }
766 SI U8 pack(U16 v) {
767 auto r = widen_cast<__m128i>(v);
768 r = _mm_packus_epi16(r,r);
769 return sk_unaligned_load<U8>(&r);
770 }
771
772 // NOTE: This only checks the top bit of each lane, and is incorrect with non-mask values.
773 SI bool any(I32 c) { return _mm_movemask_ps(c) != 0b0000; }
774 SI bool all(I32 c) { return _mm_movemask_ps(c) == 0b1111; }
775
776 SI F floor_(F v) {
777 #if defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
778 return _mm_floor_ps(v);
779 #else
780 F roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
781 return roundtrip - if_then_else(roundtrip > v, 1, 0);
782 #endif
783 }
784
785 SI F ceil_(F v) {
786 #if defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
787 return _mm_ceil_ps(v);
788 #else
789 F roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
790 return roundtrip + if_then_else(roundtrip < v, 1, 0);
791 #endif
792 }
793
794 template <typename T>
795 SI V<T> gather(const T* p, U32 ix) {
796 return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]};
797 }
798
799 SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
800 __m128i _01;
801 if (__builtin_expect(tail,0)) {
802 _01 = _mm_setzero_si128();
803 if (tail > 1) {
804 _01 = _mm_loadl_pd(_01, (const double*)ptr); // r0 g0 r1 g1 00 00 00 00
805 if (tail > 2) {
806 _01 = _mm_insert_epi16(_01, *(ptr+4), 4); // r0 g0 r1 g1 r2 00 00 00
807 _01 = _mm_insert_epi16(_01, *(ptr+5), 5); // r0 g0 r1 g1 r2 g2 00 00
808 }
809 } else {
810 _01 = _mm_cvtsi32_si128(*(const uint32_t*)ptr); // r0 g0 00 00 00 00 00 00
811 }
812 } else {
813 _01 = _mm_loadu_si128(((__m128i*)ptr) + 0); // r0 g0 r1 g1 r2 g2 r3 g3
814 }
815 auto rg01_23 = _mm_shufflelo_epi16(_01, 0xD8); // r0 r1 g0 g1 r2 g2 r3 g3
816 auto rg = _mm_shufflehi_epi16(rg01_23, 0xD8); // r0 r1 g0 g1 r2 r3 g2 g3
817
818 auto R = _mm_shuffle_epi32(rg, 0x88); // r0 r1 r2 r3 r0 r1 r2 r3
819 auto G = _mm_shuffle_epi32(rg, 0xDD); // g0 g1 g2 g3 g0 g1 g2 g3
820 *r = sk_unaligned_load<U16>(&R);
821 *g = sk_unaligned_load<U16>(&G);
822 }
823 SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
824 U32 rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g));
825 if (__builtin_expect(tail, 0)) {
826 if (tail > 1) {
827 _mm_storel_epi64((__m128i*)ptr, rg);
828 if (tail > 2) {
829 int32_t rgpair = rg[2];
830 memcpy(ptr + 4, &rgpair, sizeof(rgpair));
831 }
832 } else {
833 int32_t rgpair = rg[0];
834 memcpy(ptr, &rgpair, sizeof(rgpair));
835 }
836 } else {
837 _mm_storeu_si128((__m128i*)ptr + 0, rg);
838 }
839 }
840
841 SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
842 __m128i _0, _1, _2, _3;
843 if (__builtin_expect(tail,0)) {
844 _1 = _2 = _3 = _mm_setzero_si128();
845 auto load_rgb = [](const uint16_t* src) {
846 auto v = _mm_cvtsi32_si128(*(const uint32_t*)src);
847 return _mm_insert_epi16(v, src[2], 2);
848 };
849 if ( true ) { _0 = load_rgb(ptr + 0); }
850 if (tail > 1) { _1 = load_rgb(ptr + 3); }
851 if (tail > 2) { _2 = load_rgb(ptr + 6); }
852 } else {
853 // Load slightly weirdly to make sure we don't load past the end of 4x48 bits.
854 auto _01 = _mm_loadu_si128((const __m128i*)(ptr + 0)) ,
855 _23 = _mm_srli_si128(_mm_loadu_si128((const __m128i*)(ptr + 4)), 4);
856
857 // Each _N holds R,G,B for pixel N in its lower 3 lanes (upper 5 are ignored).
858 _0 = _01;
859 _1 = _mm_srli_si128(_01, 6);
860 _2 = _23;
861 _3 = _mm_srli_si128(_23, 6);
862 }
863
864 // De-interlace to R,G,B.
865 auto _02 = _mm_unpacklo_epi16(_0, _2), // r0 r2 g0 g2 b0 b2 xx xx
866 _13 = _mm_unpacklo_epi16(_1, _3); // r1 r3 g1 g3 b1 b3 xx xx
867
868 auto R = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3
869 G = _mm_srli_si128(R, 8),
870 B = _mm_unpackhi_epi16(_02, _13); // b0 b1 b2 b3 xx xx xx xx
871
872 *r = sk_unaligned_load<U16>(&R);
873 *g = sk_unaligned_load<U16>(&G);
874 *b = sk_unaligned_load<U16>(&B);
875 }
876
877 SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
878 __m128i _01, _23;
879 if (__builtin_expect(tail,0)) {
880 _01 = _23 = _mm_setzero_si128();
881 auto src = (const double*)ptr;
882 if ( true ) { _01 = _mm_loadl_pd(_01, src + 0); } // r0 g0 b0 a0 00 00 00 00
883 if (tail > 1) { _01 = _mm_loadh_pd(_01, src + 1); } // r0 g0 b0 a0 r1 g1 b1 a1
884 if (tail > 2) { _23 = _mm_loadl_pd(_23, src + 2); } // r2 g2 b2 a2 00 00 00 00
885 } else {
886 _01 = _mm_loadu_si128(((__m128i*)ptr) + 0); // r0 g0 b0 a0 r1 g1 b1 a1
887 _23 = _mm_loadu_si128(((__m128i*)ptr) + 1); // r2 g2 b2 a2 r3 g3 b3 a3
888 }
889
890 auto _02 = _mm_unpacklo_epi16(_01, _23), // r0 r2 g0 g2 b0 b2 a0 a2
891 _13 = _mm_unpackhi_epi16(_01, _23); // r1 r3 g1 g3 b1 b3 a1 a3
892
893 auto rg = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3
894 ba = _mm_unpackhi_epi16(_02, _13); // b0 b1 b2 b3 a0 a1 a2 a3
895
896 *r = sk_unaligned_load<U16>((uint16_t*)&rg + 0);
897 *g = sk_unaligned_load<U16>((uint16_t*)&rg + 4);
898 *b = sk_unaligned_load<U16>((uint16_t*)&ba + 0);
899 *a = sk_unaligned_load<U16>((uint16_t*)&ba + 4);
900 }
901
902 SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
903 auto rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g)),
904 ba = _mm_unpacklo_epi16(widen_cast<__m128i>(b), widen_cast<__m128i>(a));
905
906 if (__builtin_expect(tail, 0)) {
907 auto dst = (double*)ptr;
908 if ( true ) { _mm_storel_pd(dst + 0, _mm_unpacklo_epi32(rg, ba)); }
909 if (tail > 1) { _mm_storeh_pd(dst + 1, _mm_unpacklo_epi32(rg, ba)); }
910 if (tail > 2) { _mm_storel_pd(dst + 2, _mm_unpackhi_epi32(rg, ba)); }
911 } else {
912 _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg, ba));
913 _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg, ba));
914 }
915 }
916
917 SI void load2(const float* ptr, size_t tail, F* r, F* g) {
918 F _01, _23;
919 if (__builtin_expect(tail, 0)) {
920 _01 = _23 = _mm_setzero_si128();
921 if ( true ) { _01 = _mm_loadl_pi(_01, (__m64 const*)(ptr + 0)); }
922 if (tail > 1) { _01 = _mm_loadh_pi(_01, (__m64 const*)(ptr + 2)); }
923 if (tail > 2) { _23 = _mm_loadl_pi(_23, (__m64 const*)(ptr + 4)); }
924 } else {
925 _01 = _mm_loadu_ps(ptr + 0);
926 _23 = _mm_loadu_ps(ptr + 4);
927 }
928 *r = _mm_shuffle_ps(_01, _23, 0x88);
929 *g = _mm_shuffle_ps(_01, _23, 0xDD);
930 }
931 SI void store2(float* ptr, size_t tail, F r, F g) {
932 F _01 = _mm_unpacklo_ps(r, g),
933 _23 = _mm_unpackhi_ps(r, g);
934 if (__builtin_expect(tail, 0)) {
935 if ( true ) { _mm_storel_pi((__m64*)(ptr + 0), _01); }
936 if (tail > 1) { _mm_storeh_pi((__m64*)(ptr + 2), _01); }
937 if (tail > 2) { _mm_storel_pi((__m64*)(ptr + 4), _23); }
938 } else {
939 _mm_storeu_ps(ptr + 0, _01);
940 _mm_storeu_ps(ptr + 4, _23);
941 }
942 }
943
944 SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
945 F _0, _1, _2, _3;
946 if (__builtin_expect(tail, 0)) {
947 _1 = _2 = _3 = _mm_setzero_si128();
948 if ( true ) { _0 = _mm_loadu_ps(ptr + 0); }
949 if (tail > 1) { _1 = _mm_loadu_ps(ptr + 4); }
950 if (tail > 2) { _2 = _mm_loadu_ps(ptr + 8); }
951 } else {
952 _0 = _mm_loadu_ps(ptr + 0);
953 _1 = _mm_loadu_ps(ptr + 4);
954 _2 = _mm_loadu_ps(ptr + 8);
955 _3 = _mm_loadu_ps(ptr +12);
956 }
957 _MM_TRANSPOSE4_PS(_0,_1,_2,_3);
958 *r = _0;
959 *g = _1;
960 *b = _2;
961 *a = _3;
962 }
963
964 SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
965 _MM_TRANSPOSE4_PS(r,g,b,a);
966 if (__builtin_expect(tail, 0)) {
967 if ( true ) { _mm_storeu_ps(ptr + 0, r); }
968 if (tail > 1) { _mm_storeu_ps(ptr + 4, g); }
969 if (tail > 2) { _mm_storeu_ps(ptr + 8, b); }
970 } else {
971 _mm_storeu_ps(ptr + 0, r);
972 _mm_storeu_ps(ptr + 4, g);
973 _mm_storeu_ps(ptr + 8, b);
974 _mm_storeu_ps(ptr +12, a);
975 }
976 }
977 #endif
978
979 // We need to be careful with casts.
980 // (F)x means cast x to float in the portable path, but bit_cast x to float in the others.
981 // These named casts and bit_cast() are always what they seem to be.
982 #if defined(JUMPER_IS_SCALAR)
983 SI F cast (U32 v) { return (F)v; }
984 SI F cast64(U64 v) { return (F)v; }
985 SI U32 trunc_(F v) { return (U32)v; }
986 SI U32 expand(U16 v) { return (U32)v; }
987 SI U32 expand(U8 v) { return (U32)v; }
988 #else
989 SI F cast (U32 v) { return __builtin_convertvector((I32)v, F); }
990 SI F cast64(U64 v) { return __builtin_convertvector( v, F); }
991 SI U32 trunc_(F v) { return (U32)__builtin_convertvector( v, I32); }
992 SI U32 expand(U16 v) { return __builtin_convertvector( v, U32); }
993 SI U32 expand(U8 v) { return __builtin_convertvector( v, U32); }
994 #endif
995
996 template <typename V>
997 SI V if_then_else(I32 c, V t, V e) {
998 return sk_bit_cast<V>(if_then_else(c, sk_bit_cast<F>(t), sk_bit_cast<F>(e)));
999 }
1000
1001 SI U16 bswap(U16 x) {
1002 #if defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41)
1003 // Somewhat inexplicably Clang decides to do (x<<8) | (x>>8) in 32-bit lanes
1004 // when generating code for SSE2 and SSE4.1. We'll do it manually...
1005 auto v = widen_cast<__m128i>(x);
1006 v = _mm_slli_epi16(v,8) | _mm_srli_epi16(v,8);
1007 return sk_unaligned_load<U16>(&v);
1008 #else
1009 return (x<<8) | (x>>8);
1010 #endif
1011 }
1012
1013 SI F fract(F v) { return v - floor_(v); }
1014
1015 // See http://www.machinedlearnings.com/2011/06/fast-approximate-logarithm-exponential.html.
1016 SI F approx_log2(F x) {
1017 // e - 127 is a fair approximation of log2(x) in its own right...
1018 F e = cast(sk_bit_cast<U32>(x)) * (1.0f / (1<<23));
1019
1020 // ... but using the mantissa to refine its error is _much_ better.
1021 F m = sk_bit_cast<F>((sk_bit_cast<U32>(x) & 0x007fffff) | 0x3f000000);
1022 return e
1023 - 124.225514990f
1024 - 1.498030302f * m
1025 - 1.725879990f / (0.3520887068f + m);
1026 }
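// Worked example of the bit trick above, for x = 8.0f: bits(8.0f) = 0x41000000, so
// e = 0x41000000 / 2^23 = 130.0, and m = bit_cast<F>((bits & 0x007fffff) | 0x3f000000) = 0.5
// (the mantissa remapped onto [0.5,1)). Then
// 130 - 124.225515 - 1.498030*0.5 - 1.725880/(0.352089 + 0.5) ~= 3.0000,
// matching the exact log2(8) = 3.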
1027
1028 SI F approx_log(F x) {
1029 const float ln2 = 0.69314718f;
1030 return ln2 * approx_log2(x);
1031 }
1032
1033 SI F approx_pow2(F x) {
1034 F f = fract(x);
1035 return sk_bit_cast<F>(round(1.0f * (1<<23),
1036 x + 121.274057500f
1037 - 1.490129070f * f
1038 + 27.728023300f / (4.84252568f - f)));
1039 }
1040
1041 SI F approx_exp(F x) {
1042 const float log2_e = 1.4426950408889634074f;
1043 return approx_pow2(log2_e * x);
1044 }
1045
1046 SI F approx_powf(F x, F y) {
1047 return if_then_else((x == 0)|(x == 1), x
1048 , approx_pow2(approx_log2(x) * y));
1049 }
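// approx_powf() composes the helpers above as x^y = 2^(y * log2(x)). The (x == 0)|(x == 1)
// guard keeps those inputs exact: approx_log2(0) would read the bit pattern of 0.0f and
// evaluate to roughly -127 rather than -inf, so without the guard pow(0, y) and pow(1, y)
// would pick up approximation error instead of staying at 0 and 1.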
1050
1051 SI F from_half(U16 h) {
1052 #if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64) \
1053 && !defined(SK_BUILD_FOR_GOOGLE3) // Temporary workaround for some Google3 builds.
1054 return vcvt_f32_f16(h);
1055
1056 #elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
1057 return _mm256_cvtph_ps(h);
1058
1059 #else
1060 // Remember, a half is 1-5-10 (sign-exponent-mantissa) with 15 exponent bias.
1061 U32 sem = expand(h),
1062 s = sem & 0x8000,
1063 em = sem ^ s;
1064
1065 // Convert to 1-8-23 float with 127 bias, flushing denorm halfs (including zero) to zero.
1066 auto denorm = (I32)em < 0x0400; // I32 comparison is often quicker, and always safe here.
1067 return if_then_else(denorm, F(0)
1068 , sk_bit_cast<F>( (s<<16) + (em<<13) + ((127-15)<<23) ));
1069 #endif
1070 }
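// Bit-level example of the fallback path: h = 0x3C00 (half-precision 1.0) gives sem = 0x3C00,
// s = 0, em = 0x3C00. It is not denormal (em >= 0x0400), so the result is
// bit_cast<F>((0<<16) + (0x3C00<<13) + ((127-15)<<23)) = bit_cast<F>(0x3F800000) = 1.0f.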
1071
1072 SI U16 to_half(F f) {
1073 #if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64) \
1074 && !defined(SK_BUILD_FOR_GOOGLE3) // Temporary workaround for some Google3 builds.
1075 return vcvt_f16_f32(f);
1076
1077 #elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
1078 return _mm256_cvtps_ph(f, _MM_FROUND_CUR_DIRECTION);
1079
1080 #else
1081 // Remember, a float is 1-8-23 (sign-exponent-mantissa) with 127 exponent bias.
1082 U32 sem = sk_bit_cast<U32>(f),
1083 s = sem & 0x80000000,
1084 em = sem ^ s;
1085
1086 // Convert to 1-5-10 half with 15 bias, flushing denorm halfs (including zero) to zero.
1087 auto denorm = (I32)em < 0x38800000; // I32 comparison is often quicker, and always safe here.
1088 return pack(if_then_else(denorm, U32(0)
1089 , (s>>16) + (em>>13) - ((127-15)<<10)));
1090 #endif
1091 }
1092
1093 // Our fundamental vector depth is our pixel stride.
1094 static constexpr size_t N = sizeof(F) / sizeof(float);
1095
1096 // We're finally going to get to what a Stage function looks like!
1097 // tail == 0 ~~> work on a full N pixels
1098 // tail != 0 ~~> work on only the first tail pixels
1099 // tail is always < N.
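// For example, with 8-wide vectors (N == 8) an 11 pixel wide run is handled as one call with
// tail == 0 covering pixels 0..7, followed by one call with tail == 3 covering pixels 8..10,
// where the tail-aware loads and stores only touch the first three lanes.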
1100
1101 // Any custom ABI to use for all (non-externally-facing) stage functions?
1102 // Also decide here whether to use narrow (compromise) or wide (ideal) stages.
1103 #if defined(SK_CPU_ARM32) && defined(JUMPER_IS_NEON)
1104 // This lets us pass vectors more efficiently on 32-bit ARM.
1105 // We can still only pass 16 floats, so best as 4x {r,g,b,a}.
1106 #define ABI __attribute__((pcs("aapcs-vfp")))
1107 #define JUMPER_NARROW_STAGES 1
1108 #elif defined(_MSC_VER)
1109 // Even if not vectorized, this lets us pass {r,g,b,a} as registers,
1110 // instead of {b,a} on the stack. Narrow stages work best for __vectorcall.
1111 #define ABI __vectorcall
1112 #define JUMPER_NARROW_STAGES 1
1113 #elif defined(__x86_64__) || defined(SK_CPU_ARM64)
1114 // These platforms are ideal for wider stages, and their default ABI is ideal.
1115 #define ABI
1116 #define JUMPER_NARROW_STAGES 0
1117 #else
1118 // 32-bit or unknown... shunt them down the narrow path.
1119 // Odds are these have few registers and are better off there.
1120 #define ABI
1121 #define JUMPER_NARROW_STAGES 1
1122 #endif
1123
1124 #if JUMPER_NARROW_STAGES
1125 struct Params {
1126 size_t dx, dy, tail;
1127 F dr,dg,db,da;
1128 };
1129 using Stage = void(ABI*)(Params*, SkRasterPipelineStage* program, F r, F g, F b, F a);
1130 #else
1131 using Stage = void(ABI*)(size_t tail, SkRasterPipelineStage* program, size_t dx, size_t dy,
1132 F,F,F,F, F,F,F,F);
1133 #endif
1134
1135 static void start_pipeline(size_t dx, size_t dy,
1136 size_t xlimit, size_t ylimit,
1137 SkRasterPipelineStage* program) {
1138 auto start = (Stage)program->fn;
1139 const size_t x0 = dx;
1140 for (; dy < ylimit; dy++) {
1141 #if JUMPER_NARROW_STAGES
1142 Params params = { x0,dy,0, 0,0,0,0 };
1143 while (params.dx + N <= xlimit) {
1144 start(¶ms,program, 0,0,0,0);
1145 params.dx += N;
1146 }
1147 if (size_t tail = xlimit - params.dx) {
1148 params.tail = tail;
1149 start(¶ms,program, 0,0,0,0);
1150 }
1151 #else
1152 dx = x0;
1153 while (dx + N <= xlimit) {
1154 start(0,program,dx,dy, 0,0,0,0, 0,0,0,0);
1155 dx += N;
1156 }
1157 if (size_t tail = xlimit - dx) {
1158 start(tail,program,dx,dy, 0,0,0,0, 0,0,0,0);
1159 }
1160 #endif
1161 }
1162 }
1163
1164 #if SK_HAS_MUSTTAIL
1165 #define JUMPER_MUSTTAIL [[clang::musttail]]
1166 #else
1167 #define JUMPER_MUSTTAIL
1168 #endif
1169
1170 #if JUMPER_NARROW_STAGES
1171 #define DECLARE_STAGE(name, ARG, STAGE_RET, INC, OFFSET, MUSTTAIL) \
1172 SI STAGE_RET name##_k(ARG, size_t dx, size_t dy, size_t tail, \
1173 F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da); \
1174 static void ABI name(Params* params, SkRasterPipelineStage* program, \
1175 F r, F g, F b, F a) { \
1176 OFFSET name##_k(Ctx{program},params->dx,params->dy,params->tail, r,g,b,a,\
1177 params->dr, params->dg, params->db, params->da); \
1178 INC; \
1179 auto fn = (Stage)program->fn; \
1180 MUSTTAIL return fn(params, program, r,g,b,a); \
1181 } \
1182 SI STAGE_RET name##_k(ARG, size_t dx, size_t dy, size_t tail, \
1183 F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da)
1184 #else
1185 #define DECLARE_STAGE(name, ARG, STAGE_RET, INC, OFFSET, MUSTTAIL) \
1186 SI STAGE_RET name##_k(ARG, size_t dx, size_t dy, size_t tail, \
1187 F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da); \
1188 static void ABI name(size_t tail, SkRasterPipelineStage* program, size_t dx, size_t dy, \
1189 F r, F g, F b, F a, F dr, F dg, F db, F da) { \
1190 OFFSET name##_k(Ctx{program},dx,dy,tail, r,g,b,a, dr,dg,db,da); \
1191 INC; \
1192 auto fn = (Stage)program->fn; \
1193 MUSTTAIL return fn(tail, program, dx,dy, r,g,b,a, dr,dg,db,da); \
1194 } \
1195 SI STAGE_RET name##_k(ARG, size_t dx, size_t dy, size_t tail, \
1196 F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da)
1197 #endif
1198
1199 // A typical stage returns void, always increments the program counter by 1, and lets the optimizer
1200 // decide whether or not tail-calling is appropriate.
1201 #define STAGE(name, arg) \
1202 DECLARE_STAGE(name, arg, void, ++program, /*no offset*/, /*no musttail*/)
1203
1204 // A tail stage returns void, always increments the program counter by 1, and uses tail-calling.
1205 // Tail-calling is necessary in SkSL-generated programs, which can be thousands of ops long, and
1206 // could overflow the stack (particularly in debug).
1207 #define STAGE_TAIL(name, arg) \
1208 DECLARE_STAGE(name, arg, void, ++program, /*no offset*/, JUMPER_MUSTTAIL)
1209
1210 // A branch stage returns an integer, which is added directly to the program counter, and tailcalls.
1211 #define STAGE_BRANCH(name, arg) \
1212 DECLARE_STAGE(name, arg, int, /*no increment*/, program +=, JUMPER_MUSTTAIL)
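// As an illustrative sketch (the stage name and its context below are hypothetical, not a real
// Skia stage), a stage definition supplies just the macro name plus a body that updates the
// register arguments:
//
//     STAGE(scale_by_example, const float* c) {
//         r = r * *c;
//         g = g * *c;
//         b = b * *c;
//         a = a * *c;
//     }
//
// DECLARE_STAGE expands this into the _k body plus the ABI wrapper that unpacks its arguments,
// advances `program`, and calls the next stage's function pointer.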
1213
1214 // just_return() is a simple no-op stage that only exists to end the chain,
1215 // returning back up to start_pipeline(), and from there to the caller.
1216 #if JUMPER_NARROW_STAGES
1217 static void ABI just_return(Params*, SkRasterPipelineStage*, F,F,F,F) {}
1218 #else
1219 static void ABI just_return(size_t, SkRasterPipelineStage*, size_t,size_t, F,F,F,F, F,F,F,F) {}
1220 #endif
1221
1222 // Note that in release builds, most stages consume no stack (thanks to tail call optimization).
1223 // However: certain builds (especially with non-clang compilers) may fail to optimize tail
1224 // calls, resulting in actual stack frames being generated.
1225 //
1226 // stack_checkpoint() and stack_rewind() are special stages that can be used to manage stack growth.
1227 // If a pipeline contains a stack_checkpoint, followed by any number of stack_rewind (at any point),
1228 // the C++ stack will be reset to the state it was at when the stack_checkpoint was initially hit.
1229 //
1230 // All instances of stack_rewind (as well as the one instance of stack_checkpoint near the start of
1231 // a pipeline) share a single context (of type SkRasterPipeline_RewindCtx). That context holds the
1232 // full state of the mutable registers that are normally passed to the next stage in the program.
1233 //
1234 // stack_rewind is the only stage other than just_return that actually returns (rather than jumping
1235 // to the next stage in the program). Before it does so, it stashes all of the registers in the
1236 // context. This includes the updated `program` pointer. Unlike stages that tail call exactly once,
1237 // stack_checkpoint calls the next stage in the program repeatedly, as long as the `program` in the
1238 // context is overwritten (i.e., as long as a stack_rewind was the reason the pipeline returned,
1239 // rather than a just_return).
1240 //
1241 // Normally, just_return is the only stage that returns, and no other stage does anything after a
1242 // subsequent (called) stage returns, so the stack just unwinds all the way to start_pipeline.
1243 // With stack_checkpoint on the stack, any stack_rewind stages will return all the way up to the
1244 // stack_checkpoint. That grabs the values that would have been passed to the next stage (from the
1245 // context), and continues the linear execution of stages, but has reclaimed all of the stack frames
1246 // pushed before the stack_rewind before doing so.
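// Schematically, a pipeline laid out as
//     stack_checkpoint -> A -> B -> stack_rewind -> C -> ... -> just_return
// runs A and B on fresh frames; when stack_rewind returns, the unwind pops those frames all the
// way back to stack_checkpoint's loop, which reloads r,g,b,a (and dr..da) from the rewind
// context and calls C directly, so execution continues with the stack reclaimed.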
1247 #if JUMPER_NARROW_STAGES
1248 static void ABI stack_checkpoint(Params* params, SkRasterPipelineStage* program,
1249 F r, F g, F b, F a) {
1250 SkRasterPipeline_RewindCtx* ctx = Ctx{program};
1251 while (program) {
1252 auto next = (Stage)(++program)->fn;
1253
1254 ctx->stage = nullptr;
1255 next(params, program, r, g, b, a);
1256 program = ctx->stage;
1257
1258 if (program) {
1259 r = sk_unaligned_load<F>(ctx->r );
1260 g = sk_unaligned_load<F>(ctx->g );
1261 b = sk_unaligned_load<F>(ctx->b );
1262 a = sk_unaligned_load<F>(ctx->a );
1263 params->dr = sk_unaligned_load<F>(ctx->dr);
1264 params->dg = sk_unaligned_load<F>(ctx->dg);
1265 params->db = sk_unaligned_load<F>(ctx->db);
1266 params->da = sk_unaligned_load<F>(ctx->da);
1267 }
1268 }
1269 }
1270 static void ABI stack_rewind(Params* params, SkRasterPipelineStage* program,
1271 F r, F g, F b, F a) {
1272 SkRasterPipeline_RewindCtx* ctx = Ctx{program};
1273 sk_unaligned_store(ctx->r , r );
1274 sk_unaligned_store(ctx->g , g );
1275 sk_unaligned_store(ctx->b , b );
1276 sk_unaligned_store(ctx->a , a );
1277 sk_unaligned_store(ctx->dr, params->dr);
1278 sk_unaligned_store(ctx->dg, params->dg);
1279 sk_unaligned_store(ctx->db, params->db);
1280 sk_unaligned_store(ctx->da, params->da);
1281 ctx->stage = program;
1282 }
1283 #else
1284 static void ABI stack_checkpoint(size_t tail, SkRasterPipelineStage* program,
1285 size_t dx, size_t dy,
1286 F r, F g, F b, F a, F dr, F dg, F db, F da) {
1287 SkRasterPipeline_RewindCtx* ctx = Ctx{program};
1288 while (program) {
1289 auto next = (Stage)(++program)->fn;
1290
1291 ctx->stage = nullptr;
1292 next(tail, program, dx, dy, r, g, b, a, dr, dg, db, da);
1293 program = ctx->stage;
1294
1295 if (program) {
1296 r = sk_unaligned_load<F>(ctx->r );
1297 g = sk_unaligned_load<F>(ctx->g );
1298 b = sk_unaligned_load<F>(ctx->b );
1299 a = sk_unaligned_load<F>(ctx->a );
1300 dr = sk_unaligned_load<F>(ctx->dr);
1301 dg = sk_unaligned_load<F>(ctx->dg);
1302 db = sk_unaligned_load<F>(ctx->db);
1303 da = sk_unaligned_load<F>(ctx->da);
1304 }
1305 }
1306 }
1307 static void ABI stack_rewind(size_t tail, SkRasterPipelineStage* program,
1308 size_t dx, size_t dy,
1309 F r, F g, F b, F a, F dr, F dg, F db, F da) {
1310 SkRasterPipeline_RewindCtx* ctx = Ctx{program};
1311 sk_unaligned_store(ctx->r , r );
1312 sk_unaligned_store(ctx->g , g );
1313 sk_unaligned_store(ctx->b , b );
1314 sk_unaligned_store(ctx->a , a );
1315 sk_unaligned_store(ctx->dr, dr);
1316 sk_unaligned_store(ctx->dg, dg);
1317 sk_unaligned_store(ctx->db, db);
1318 sk_unaligned_store(ctx->da, da);
1319 ctx->stage = program;
1320 }
1321 #endif
1322
1323
1324 // We could start defining normal Stages now. But first, some helper functions.
1325
1326 // These load() and store() methods are tail-aware,
1327 // but focus mainly on keeping the at-stride tail==0 case fast.
1328
1329 template <typename V, typename T>
1330 SI V load(const T* src, size_t tail) {
1331 #if !defined(JUMPER_IS_SCALAR)
1332 __builtin_assume(tail < N);
1333 if (__builtin_expect(tail, 0)) {
1334 V v{}; // Any inactive lanes are zeroed.
1335 switch (tail) {
1336 case 7: v[6] = src[6]; [[fallthrough]];
1337 case 6: v[5] = src[5]; [[fallthrough]];
1338 case 5: v[4] = src[4]; [[fallthrough]];
1339 case 4: memcpy(&v, src, 4*sizeof(T)); break;
1340 case 3: v[2] = src[2]; [[fallthrough]];
1341 case 2: memcpy(&v, src, 2*sizeof(T)); break;
1342 case 1: memcpy(&v, src, 1*sizeof(T)); break;
1343 }
1344 return v;
1345 }
1346 #endif
1347 return sk_unaligned_load<V>(src);
1348 }
1349
1350 template <typename V, typename T>
1351 SI void store(T* dst, V v, size_t tail) {
1352 #if !defined(JUMPER_IS_SCALAR)
1353 __builtin_assume(tail < N);
1354 if (__builtin_expect(tail, 0)) {
1355 switch (tail) {
1356 case 7: dst[6] = v[6]; [[fallthrough]];
1357 case 6: dst[5] = v[5]; [[fallthrough]];
1358 case 5: dst[4] = v[4]; [[fallthrough]];
1359 case 4: memcpy(dst, &v, 4*sizeof(T)); break;
1360 case 3: dst[2] = v[2]; [[fallthrough]];
1361 case 2: memcpy(dst, &v, 2*sizeof(T)); break;
1362 case 1: memcpy(dst, &v, 1*sizeof(T)); break;
1363 }
1364 return;
1365 }
1366 #endif
1367 sk_unaligned_store(dst, v);
1368 }
1369
1370 SI F from_byte(U8 b) {
1371 return cast(expand(b)) * (1/255.0f);
1372 }
1373 SI F from_short(U16 s) {
1374 return cast(expand(s)) * (1/65535.0f);
1375 }
1376 SI void from_565(U16 _565, F* r, F* g, F* b) {
1377 U32 wide = expand(_565);
1378 *r = cast(wide & (31<<11)) * (1.0f / (31<<11));
1379 *g = cast(wide & (63<< 5)) * (1.0f / (63<< 5));
1380 *b = cast(wide & (31<< 0)) * (1.0f / (31<< 0));
1381 }
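// E.g. the 565 pixel 0xF800 (pure red): wide & (31<<11) keeps 0xF800, so r = 0xF800/0xF800 = 1.0f,
// while the green and blue masks keep 0, giving (r,g,b) = (1,0,0).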
1382 SI void from_4444(U16 _4444, F* r, F* g, F* b, F* a) {
1383 U32 wide = expand(_4444);
1384 *r = cast(wide & (15<<12)) * (1.0f / (15<<12));
1385 *g = cast(wide & (15<< 8)) * (1.0f / (15<< 8));
1386 *b = cast(wide & (15<< 4)) * (1.0f / (15<< 4));
1387 *a = cast(wide & (15<< 0)) * (1.0f / (15<< 0));
1388 }
1389 SI void from_8888(U32 _8888, F* r, F* g, F* b, F* a) {
1390 *r = cast((_8888 ) & 0xff) * (1/255.0f);
1391 *g = cast((_8888 >> 8) & 0xff) * (1/255.0f);
1392 *b = cast((_8888 >> 16) & 0xff) * (1/255.0f);
1393 *a = cast((_8888 >> 24) ) * (1/255.0f);
1394 }
1395 SI void from_88(U16 _88, F* r, F* g) {
1396 U32 wide = expand(_88);
1397 *r = cast((wide ) & 0xff) * (1/255.0f);
1398 *g = cast((wide >> 8) & 0xff) * (1/255.0f);
1399 }
1400 SI void from_1010102(U32 rgba, F* r, F* g, F* b, F* a) {
1401 *r = cast((rgba ) & 0x3ff) * (1/1023.0f);
1402 *g = cast((rgba >> 10) & 0x3ff) * (1/1023.0f);
1403 *b = cast((rgba >> 20) & 0x3ff) * (1/1023.0f);
1404 *a = cast((rgba >> 30) ) * (1/ 3.0f);
1405 }
1406 SI void from_1010102_xr(U32 rgba, F* r, F* g, F* b, F* a) {
1407 static constexpr float min = -0.752941f;
1408 static constexpr float max = 1.25098f;
1409 static constexpr float range = max - min;
1410 *r = cast((rgba ) & 0x3ff) * (1/1023.0f) * range + min;
1411 *g = cast((rgba >> 10) & 0x3ff) * (1/1023.0f) * range + min;
1412 *b = cast((rgba >> 20) & 0x3ff) * (1/1023.0f) * range + min;
1413 *a = cast((rgba >> 30) ) * (1/ 3.0f);
1414 }
1415 SI void from_1616(U32 _1616, F* r, F* g) {
1416 *r = cast((_1616 ) & 0xffff) * (1/65535.0f);
1417 *g = cast((_1616 >> 16) & 0xffff) * (1/65535.0f);
1418 }
1419 SI void from_16161616(U64 _16161616, F* r, F* g, F* b, F* a) {
1420 *r = cast64((_16161616 ) & 0xffff) * (1/65535.0f);
1421 *g = cast64((_16161616 >> 16) & 0xffff) * (1/65535.0f);
1422 *b = cast64((_16161616 >> 32) & 0xffff) * (1/65535.0f);
1423 *a = cast64((_16161616 >> 48) & 0xffff) * (1/65535.0f);
1424 }
1425
1426 // Used by load_ and store_ stages to get to the right (dx,dy) starting point of contiguous memory.
1427 template <typename T>
1428 SI T* ptr_at_xy(const SkRasterPipeline_MemoryCtx* ctx, size_t dx, size_t dy) {
1429 return (T*)ctx->pixels + dy*ctx->stride + dx;
1430 }
1431
1432 // clamp v to [0,limit).
1433 SI F clamp(F v, F limit) {
1434 F inclusive = sk_bit_cast<F>( sk_bit_cast<U32>(limit) - 1 ); // Exclusive -> inclusive.
1435 return min(max(0.0f, v), inclusive);
1436 }
1437
1438 // clamp to (0,limit).
1439 SI F clamp_ex(F v, F limit) {
1440 const F inclusiveZ = std::numeric_limits<float>::min(),
1441 inclusiveL = sk_bit_cast<F>( sk_bit_cast<U32>(limit) - 1 );
1442 return min(max(inclusiveZ, v), inclusiveL);
1443 }
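// The "subtract one from the bit pattern" trick used by both clamps above: e.g. limit = 4.0f has
// bits 0x40800000, and bit_cast<F>(0x407FFFFF) = 3.9999998f, the largest float strictly below 4,
// so an ordinary min() against that value keeps v inside [lo, limit) without a separate compare.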
1444
1445 // Bhaskara I's sine approximation
1446 // 16x(pi - x) / (5*pi^2 - 4x(pi - x))
1447 // ... divide by 4
1448 // 4x(pi - x) / (5*pi^2/4 - x(pi - x))
1449 //
1450 // This is a good approximation only for 0 <= x <= pi, so we use symmetries to get
1451 // radians into that range first.
1452 SI F sin_(F v) {
1453 constexpr float Pi = SK_ScalarPI;
1454 F x = fract(v * (0.5f/Pi)) * (2*Pi);
1455 I32 neg = x > Pi;
1456 x = if_then_else(neg, x - Pi, x);
1457
1458 F pair = x * (Pi - x);
1459 x = 4.0f * pair / ((5*Pi*Pi/4) - pair);
1460 x = if_then_else(neg, -x, x);
1461 return x;
1462 }
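// Sanity check of the rearranged Bhaskara form at x = pi/2: pair = (pi/2)*(pi/2) = pi^2/4, so
// 4*pair / (5*pi^2/4 - pair) = pi^2 / pi^2 = 1, matching sin(pi/2) exactly; over [0, pi] the
// worst-case error of this approximation is roughly 0.002.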
1463
1464 SI F cos_(F v) {
1465 return sin_(v + (SK_ScalarPI/2));
1466 }
1467
1468 /* "GENERATING ACCURATE VALUES FOR THE TANGENT FUNCTION"
1469 https://mae.ufl.edu/~uhk/ACCURATE-TANGENT.pdf
1470
1471 approx = x + (1/3)x^3 + (2/15)x^5 + (17/315)x^7 + (62/2835)x^9
1472
1473 Some simplifications:
1474 1. tan(x) is periodic, -PI/2 < x < PI/2
1475 2. tan(x) is odd, so tan(-x) = -tan(x)
1476 3. Our polynomial approximation is best near zero, so we use the following identity
1477 tan(x) + tan(y)
1478 tan(x + y) = -----------------
1479 1 - tan(x)*tan(y)
1480 tan(PI/4) = 1
1481
1482 So for x > PI/8, we do the following refactor:
1483 x' = x - PI/4
1484
1485 1 + tan(x')
1486 tan(x) = ------------
1487 1 - tan(x')
1488 */
tan_(F x)1489 SI F tan_(F x) {
1490 constexpr float Pi = SK_ScalarPI;
1491 // tan is periodic with period Pi, so reduce x into -Pi/2 ... Pi/2:
1492 // scale by 1/Pi, shift by 0.5, take fract() to land in 0...1, then scale back up by Pi and shift down by Pi/2.
1493 x = fract((1/Pi)*x + 0.5f) * Pi - (Pi/2);
1494
1495 I32 neg = (x < 0.0f);
1496 x = if_then_else(neg, -x, x);
1497
1498 // minimize total error by shifting if x > pi/8
1499 I32 use_quotient = (x > (Pi/8));
1500 x = if_then_else(use_quotient, x - (Pi/4), x);
1501
1502 // 9th order poly = 4th order(x^2) * x
1503 F x2 = x * x;
1504 x *= 1 + x2 * (1/3.0f +
1505 x2 * (2/15.0f +
1506 x2 * (17/315.0f +
1507 x2 * (62/2835.0f))));
1508 x = if_then_else(use_quotient, (1+x)/(1-x), x);
1509 x = if_then_else(neg, -x, x);
1510 return x;
1511 }
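// Sanity check: for x = Pi/4 the range reduction leaves x = Pi/4, use_quotient kicks in,
// x' = x - Pi/4 = 0 so the polynomial returns 0, and (1+0)/(1-0) = 1 = tan(Pi/4) exactly.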
1512
1513 /* Use 4th order polynomial approximation from https://arachnoid.com/polysolve/
1514 with 129 values of x,atan(x) for x:[0...1]
1515 This only works for 0 <= x <= 1
1516 */
approx_atan_unit(F x)1517 SI F approx_atan_unit(F x) {
1518 // y = 0.14130025741326729 x⁴
1519 // - 0.34312835980675116 x³
1520 // - 0.016172900528248768 x²
1521 // + 1.00376969762003850 x
1522 // - 0.00014758242182738969
1523 return x * (x * (x * (x * 0.14130025741326729f - 0.34312835980675116f)
1524 - 0.016172900528248768f)
1525 + 1.0037696976200385f)
1526 - 0.00014758242182738969f;
1527 }
1528
1529 // Use identity atan(x) = pi/2 - atan(1/x) for x > 1
atan_(F x)1530 SI F atan_(F x) {
1531 I32 neg = (x < 0.0f);
1532 x = if_then_else(neg, -x, x);
1533 I32 flip = (x > 1.0f);
1534 x = if_then_else(flip, 1/x, x);
1535 x = approx_atan_unit(x);
1536 x = if_then_else(flip, SK_ScalarPI/2 - x, x);
1537 x = if_then_else(neg, -x, x);
1538 return x;
1539 }
1540
1541 /* Use identity atan(x) = pi/2 - atan(1/x) for x > 1
1542    By swapping y,x to ensure the ratio is <= 1, we can safely call approx_atan_unit(),
1543    which saves the second divide instruction that calling atan_() directly would require.
1544 */
atan2_(F y0,F x0)1545 SI F atan2_(F y0, F x0) {
1546 I32 flip = (abs_(y0) > abs_(x0));
1547 F y = if_then_else(flip, x0, y0);
1548 F x = if_then_else(flip, y0, x0);
1549 F arg = y/x;
1550
1551 I32 neg = (arg < 0.0f);
1552 arg = if_then_else(neg, -arg, arg);
1553
1554 F r = approx_atan_unit(arg);
1555 r = if_then_else(flip, SK_ScalarPI/2 - r, r);
1556 r = if_then_else(neg, -r, r);
1557
1558 // handle quadrant distinctions
1559 r = if_then_else((y0 >= 0) & (x0 < 0), r + SK_ScalarPI, r);
1560 r = if_then_else((y0 < 0) & (x0 <= 0), r - SK_ScalarPI, r);
1561 // Note: we don't try to handle 0,0 or infinities
1562 return r;
1563 }
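// Worked example: atan2_(1, -1). flip is false (|1| is not > |-1|), arg = -1, so the unit
// approximation gives ~Pi/4 and neg flips it to ~-Pi/4; the quadrant fix-up for
// (y0 >= 0, x0 < 0) then adds Pi, giving ~3*Pi/4, the expected angle.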
1564
1565 // Used by gather_ stages to calculate the base pointer and a vector of indices to load.
1566 template <typename T>
ix_and_ptr(T ** ptr,const SkRasterPipeline_GatherCtx * ctx,F x,F y)1567 SI U32 ix_and_ptr(T** ptr, const SkRasterPipeline_GatherCtx* ctx, F x, F y) {
1568 // We use exclusive clamp so that our min value is > 0 because ULP subtraction using U32 would
1569 // produce a NaN if applied to +0.f.
1570 x = clamp_ex(x, ctx->width );
1571 y = clamp_ex(y, ctx->height);
1572 x = sk_bit_cast<F>(sk_bit_cast<U32>(x) - (uint32_t)ctx->roundDownAtInteger);
1573 y = sk_bit_cast<F>(sk_bit_cast<U32>(y) - (uint32_t)ctx->roundDownAtInteger);
1574 *ptr = (const T*)ctx->pixels;
1575 return trunc_(y)*ctx->stride + trunc_(x);
1576 }
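// After clamp_ex, x lies in [FLT_MIN, width - 1 ulp] (and y likewise), so trunc_() above
// always produces indices in [0, width-1] x [0, height-1]. The optional ULP subtraction
// (roundDownAtInteger) lowers every coordinate by one ULP, which only changes the truncated
// index for exactly-integer coordinates, nudging them into the preceding texel.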
1577
1578 // We often have a nominally [0,1] float value we need to scale and convert to an integer,
1579 // whether for a table lookup or to pack back down into bytes for storage.
1580 //
1581 // In practice, especially when dealing with interesting color spaces, that notionally
1582 // [0,1] float may be out of [0,1] range. Unorms cannot represent that, so we must clamp.
1583 //
1584 // You can adjust the expected input to [0,bias] by tweaking that parameter.
1585 SI U32 to_unorm(F v, F scale, F bias = 1.0f) {
1586 // Any time we use round() we probably want to use to_unorm().
1587 return round(min(max(0.0f, v), bias), scale);
1588 }
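// e.g. to_unorm(1.2f, 255) clamps to the default bias (1.0f) first and returns 255, and
// to_unorm(-0.25f, 255) clamps to 0; in-range values simply round, e.g. to_unorm(0.25f, 255) == 64.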
1589
cond_to_mask(I32 cond)1590 SI I32 cond_to_mask(I32 cond) {
1591 #if defined(JUMPER_IS_SCALAR)
1592 // In scalar mode, conditions are bools (0 or 1), but we want to store and operate on masks
1593 // (eg, using bitwise operations to select values).
1594 return if_then_else(cond, I32(~0), I32(0));
1595 #else
1596 // In SIMD mode, our various instruction sets already represent conditions as masks.
1597 return cond;
1598 #endif
1599 }
1600
1601 // Now finally, normal Stages!
1602
STAGE(seed_shader,NoCtx)1603 STAGE(seed_shader, NoCtx) {
1604 static constexpr float iota[] = {
1605 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
1606 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
1607 };
1608 // It's important for speed to explicitly cast(dx) and cast(dy),
1609 // which has the effect of splatting them to vectors before converting to floats.
1610 // On Intel this breaks a data dependency on previous loop iterations' registers.
1611 r = cast(dx) + sk_unaligned_load<F>(iota);
1612 g = cast(dy) + 0.5f;
1613 b = 1.0f; // This is w=1 for matrix multiplies by the device coords.
1614 a = 0;
1615 }
1616
STAGE(store_device_xy01,F * dst)1617 STAGE(store_device_xy01, F* dst) {
1618 // This is very similar to `seed_shader + store_src`, but b/a are backwards.
1619 // (sk_FragCoord actually puts w=1 in the w slot.)
1620 static constexpr float iota[] = {
1621 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
1622 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
1623 };
1624 dst[0] = cast(dx) + sk_unaligned_load<F>(iota);
1625 dst[1] = cast(dy) + 0.5f;
1626 dst[2] = 0.0f;
1627 dst[3] = 1.0f;
1628 }
1629
STAGE(dither,const float * rate)1630 STAGE(dither, const float* rate) {
1631 // Get [(dx,dy), (dx+1,dy), (dx+2,dy), ...] loaded up in integer vectors.
1632 uint32_t iota[] = {0,1,2,3,4,5,6,7};
1633 U32 X = dx + sk_unaligned_load<U32>(iota),
1634 Y = dy;
1635
1636 // We're doing 8x8 ordered dithering, see https://en.wikipedia.org/wiki/Ordered_dithering.
1637 // In this case n=8 and we're using the matrix that looks like 1/64 x [ 0 48 12 60 ... ].
1638
1639 // We only need X and X^Y from here on, so it's easier to just think of that as "Y".
1640 Y ^= X;
1641
1642 // We'll mix the bottom 3 bits of each of X and Y to make 6 bits,
1643 // for 2^6 == 64 == 8x8 matrix values. If X=abc and Y=def, we make fcebda.
1644 U32 M = (Y & 1) << 5 | (X & 1) << 4
1645 | (Y & 2) << 2 | (X & 2) << 1
1646 | (Y & 4) >> 1 | (X & 4) >> 2;
1647
1648 // Scale that dither to [0,1), then (-0.5,+0.5), here using 63/128 = 0.4921875 as 0.5-epsilon.
1649 // We want to make sure our dither is less than 0.5 in either direction to keep exact values
1650 // like 0 and 1 unchanged after rounding.
1651 F dither = cast(M) * (2/128.0f) - (63/128.0f);
1652
1653 r += *rate*dither;
1654 g += *rate*dither;
1655 b += *rate*dither;
1656
1657 r = max(0.0f, min(r, a));
1658 g = max(0.0f, min(g, a));
1659 b = max(0.0f, min(b, a));
1660 }
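// Worked example of the bit-mix: with the low 3 bits X = 0b001 and, after Y ^= X, Y = 0b100,
// M = (1<<4) | (4>>1) = 0b010010 = 18 (the "fcebda" layout with c and d set); the dither is
// then 18*(2/128) - 63/128 = -27/128 ~= -0.21, safely inside (-0.5, +0.5).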
1661
1662 // load 4 floats from memory, and splat them into r,g,b,a
STAGE(uniform_color,const SkRasterPipeline_UniformColorCtx * c)1663 STAGE(uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
1664 r = c->r;
1665 g = c->g;
1666 b = c->b;
1667 a = c->a;
1668 }
STAGE(unbounded_uniform_color,const SkRasterPipeline_UniformColorCtx * c)1669 STAGE(unbounded_uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
1670 r = c->r;
1671 g = c->g;
1672 b = c->b;
1673 a = c->a;
1674 }
1675 // load 4 floats from memory, and splat them into dr,dg,db,da
STAGE(uniform_color_dst,const SkRasterPipeline_UniformColorCtx * c)1676 STAGE(uniform_color_dst, const SkRasterPipeline_UniformColorCtx* c) {
1677 dr = c->r;
1678 dg = c->g;
1679 db = c->b;
1680 da = c->a;
1681 }
1682
1683 // splats opaque-black into r,g,b,a
STAGE(black_color,NoCtx)1684 STAGE(black_color, NoCtx) {
1685 r = g = b = 0.0f;
1686 a = 1.0f;
1687 }
1688
STAGE(white_color,NoCtx)1689 STAGE(white_color, NoCtx) {
1690 r = g = b = a = 1.0f;
1691 }
1692
1693 // load registers r,g,b,a from context (mirrors store_src)
STAGE(load_src,const float * ptr)1694 STAGE(load_src, const float* ptr) {
1695 r = sk_unaligned_load<F>(ptr + 0*N);
1696 g = sk_unaligned_load<F>(ptr + 1*N);
1697 b = sk_unaligned_load<F>(ptr + 2*N);
1698 a = sk_unaligned_load<F>(ptr + 3*N);
1699 }
1700
1701 // store registers r,g,b,a into context (mirrors load_src)
STAGE(store_src,float * ptr)1702 STAGE(store_src, float* ptr) {
1703 sk_unaligned_store(ptr + 0*N, r);
1704 sk_unaligned_store(ptr + 1*N, g);
1705 sk_unaligned_store(ptr + 2*N, b);
1706 sk_unaligned_store(ptr + 3*N, a);
1707 }
1708 // store registers r,g into context
STAGE(store_src_rg,float * ptr)1709 STAGE(store_src_rg, float* ptr) {
1710 sk_unaligned_store(ptr + 0*N, r);
1711 sk_unaligned_store(ptr + 1*N, g);
1712 }
1713 // load registers r,g from context
STAGE(load_src_rg,float * ptr)1714 STAGE(load_src_rg, float* ptr) {
1715 r = sk_unaligned_load<F>(ptr + 0*N);
1716 g = sk_unaligned_load<F>(ptr + 1*N);
1717 }
1718 // store register a into context
STAGE(store_src_a,float * ptr)1719 STAGE(store_src_a, float* ptr) {
1720 sk_unaligned_store(ptr, a);
1721 }
1722
1723 // load registers dr,dg,db,da from context (mirrors store_dst)
STAGE(load_dst,const float * ptr)1724 STAGE(load_dst, const float* ptr) {
1725 dr = sk_unaligned_load<F>(ptr + 0*N);
1726 dg = sk_unaligned_load<F>(ptr + 1*N);
1727 db = sk_unaligned_load<F>(ptr + 2*N);
1728 da = sk_unaligned_load<F>(ptr + 3*N);
1729 }
1730
1731 // store registers dr,dg,db,da into context (mirrors load_dst)
STAGE(store_dst,float * ptr)1732 STAGE(store_dst, float* ptr) {
1733 sk_unaligned_store(ptr + 0*N, dr);
1734 sk_unaligned_store(ptr + 1*N, dg);
1735 sk_unaligned_store(ptr + 2*N, db);
1736 sk_unaligned_store(ptr + 3*N, da);
1737 }
1738
1739 // Most blend modes apply the same logic to each channel.
1740 #define BLEND_MODE(name) \
1741 SI F name##_channel(F s, F d, F sa, F da); \
1742 STAGE(name, NoCtx) { \
1743 r = name##_channel(r,dr,a,da); \
1744 g = name##_channel(g,dg,a,da); \
1745 b = name##_channel(b,db,a,da); \
1746 a = name##_channel(a,da,a,da); \
1747 } \
1748 SI F name##_channel(F s, F d, F sa, F da)
1749
inv(F x)1750 SI F inv(F x) { return 1.0f - x; }
two(F x)1751 SI F two(F x) { return x + x; }
1752
1753
BLEND_MODE(clear)1754 BLEND_MODE(clear) { return 0; }
BLEND_MODE(srcatop)1755 BLEND_MODE(srcatop) { return s*da + d*inv(sa); }
BLEND_MODE(dstatop)1756 BLEND_MODE(dstatop) { return d*sa + s*inv(da); }
BLEND_MODE(srcin)1757 BLEND_MODE(srcin) { return s * da; }
BLEND_MODE(dstin)1758 BLEND_MODE(dstin) { return d * sa; }
BLEND_MODE(srcout)1759 BLEND_MODE(srcout) { return s * inv(da); }
BLEND_MODE(dstout)1760 BLEND_MODE(dstout) { return d * inv(sa); }
BLEND_MODE(srcover)1761 BLEND_MODE(srcover) { return mad(d, inv(sa), s); }
BLEND_MODE(dstover)1762 BLEND_MODE(dstover) { return mad(s, inv(da), d); }
1763
BLEND_MODE(modulate)1764 BLEND_MODE(modulate) { return s*d; }
BLEND_MODE(multiply)1765 BLEND_MODE(multiply) { return s*inv(da) + d*inv(sa) + s*d; }
BLEND_MODE(plus_)1766 BLEND_MODE(plus_) { return min(s + d, 1.0f); } // We can clamp to either 1 or sa.
BLEND_MODE(screen)1767 BLEND_MODE(screen) { return s + d - s*d; }
BLEND_MODE(xor_)1768 BLEND_MODE(xor_) { return s*inv(da) + d*inv(sa); }
1769 #undef BLEND_MODE
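// For reference, BLEND_MODE(srcover) above expands to roughly:
//
//     SI F srcover_channel(F s, F d, F sa, F da);
//     STAGE(srcover, NoCtx) {
//         r = srcover_channel(r,dr,a,da);
//         g = srcover_channel(g,dg,a,da);
//         b = srcover_channel(b,db,a,da);
//         a = srcover_channel(a,da,a,da);
//     }
//     SI F srcover_channel(F s, F d, F sa, F da) { return mad(d, inv(sa), s); }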
1770
1771 // Most other blend modes apply the same logic to colors, and srcover to alpha.
1772 #define BLEND_MODE(name) \
1773 SI F name##_channel(F s, F d, F sa, F da); \
1774 STAGE(name, NoCtx) { \
1775 r = name##_channel(r,dr,a,da); \
1776 g = name##_channel(g,dg,a,da); \
1777 b = name##_channel(b,db,a,da); \
1778 a = mad(da, inv(a), a); \
1779 } \
1780 SI F name##_channel(F s, F d, F sa, F da)
1781
BLEND_MODE(darken)1782 BLEND_MODE(darken) { return s + d - max(s*da, d*sa) ; }
BLEND_MODE(lighten)1783 BLEND_MODE(lighten) { return s + d - min(s*da, d*sa) ; }
BLEND_MODE(difference)1784 BLEND_MODE(difference) { return s + d - two(min(s*da, d*sa)); }
BLEND_MODE(exclusion)1785 BLEND_MODE(exclusion) { return s + d - two(s*d); }
1786
BLEND_MODE(colorburn)1787 BLEND_MODE(colorburn) {
1788 return if_then_else(d == da, d + s*inv(da),
1789 if_then_else(s == 0, /* s + */ d*inv(sa),
1790 sa*(da - min(da, (da-d)*sa*rcp_fast(s))) + s*inv(da) + d*inv(sa)));
1791 }
BLEND_MODE(colordodge)1792 BLEND_MODE(colordodge) {
1793 return if_then_else(d == 0, /* d + */ s*inv(da),
1794 if_then_else(s == sa, s + d*inv(sa),
1795 sa*min(da, (d*sa)*rcp_fast(sa - s)) + s*inv(da) + d*inv(sa)));
1796 }
BLEND_MODE(hardlight)1797 BLEND_MODE(hardlight) {
1798 return s*inv(da) + d*inv(sa)
1799 + if_then_else(two(s) <= sa, two(s*d), sa*da - two((da-d)*(sa-s)));
1800 }
BLEND_MODE(overlay)1801 BLEND_MODE(overlay) {
1802 return s*inv(da) + d*inv(sa)
1803 + if_then_else(two(d) <= da, two(s*d), sa*da - two((da-d)*(sa-s)));
1804 }
1805
BLEND_MODE(softlight)1806 BLEND_MODE(softlight) {
1807 F m = if_then_else(da > 0, d / da, 0),
1808 s2 = two(s),
1809 m4 = two(two(m));
1810
1811 // The logic forks three ways:
1812 // 1. dark src?
1813 // 2. light src, dark dst?
1814 // 3. light src, light dst?
1815 F darkSrc = d*(sa + (s2 - sa)*(1.0f - m)), // Used in case 1.
1816 darkDst = (m4*m4 + m4)*(m - 1.0f) + 7.0f*m, // Used in case 2.
1817 liteDst = sqrt_(m) - m,
1818 liteSrc = d*sa + da*(s2 - sa) * if_then_else(two(two(d)) <= da, darkDst, liteDst); // 2 or 3?
1819 return s*inv(da) + d*inv(sa) + if_then_else(s2 <= sa, darkSrc, liteSrc); // 1 or (2 or 3)?
1820 }
1821 #undef BLEND_MODE
1822
1823 // We're basing our implementation of non-separable blend modes on
1824 // https://www.w3.org/TR/compositing-1/#blendingnonseparable.
1825 // and
1826 // https://www.khronos.org/registry/OpenGL/specs/es/3.2/es_spec_3.2.pdf
1827 // They're equivalent, but ES' math has been better simplified.
1828 //
1829 // Anything extra we add beyond that is to make the math work with premul inputs.
1830
sat(F r,F g,F b)1831 SI F sat(F r, F g, F b) { return max(r, max(g,b)) - min(r, min(g,b)); }
lum(F r,F g,F b)1832 SI F lum(F r, F g, F b) { return r*0.30f + g*0.59f + b*0.11f; }
1833
set_sat(F * r,F * g,F * b,F s)1834 SI void set_sat(F* r, F* g, F* b, F s) {
1835 F mn = min(*r, min(*g,*b)),
1836 mx = max(*r, max(*g,*b)),
1837 sat = mx - mn;
1838
1839 // Map min channel to 0, max channel to s, and scale the middle proportionally.
1840 auto scale = [=](F c) {
1841 return if_then_else(sat == 0, 0, (c - mn) * s / sat);
1842 };
1843 *r = scale(*r);
1844 *g = scale(*g);
1845 *b = scale(*b);
1846 }
set_lum(F * r,F * g,F * b,F l)1847 SI void set_lum(F* r, F* g, F* b, F l) {
1848 F diff = l - lum(*r, *g, *b);
1849 *r += diff;
1850 *g += diff;
1851 *b += diff;
1852 }
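// e.g. set_lum on pure red (1,0,0) with target l = 0.5: lum(1,0,0) = 0.30, diff = 0.20,
// giving (1.20, 0.20, 0.20); clip_color() below is what pulls such out-of-range channels
// back into [0, alpha].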
clip_color(F * r,F * g,F * b,F a)1853 SI void clip_color(F* r, F* g, F* b, F a) {
1854 F mn = min(*r, min(*g, *b)),
1855 mx = max(*r, max(*g, *b)),
1856 l = lum(*r, *g, *b);
1857
1858 auto clip = [=](F c) {
1859 c = if_then_else(mn < 0 && l != mn, l + (c - l) * ( l) / (l - mn), c);
1860 c = if_then_else(mx > a && l != mx, l + (c - l) * (a - l) / (mx - l), c);
1861 c = max(c, 0.0f); // Sometimes without this we may dip just a little negative.
1862 return c;
1863 };
1864 *r = clip(*r);
1865 *g = clip(*g);
1866 *b = clip(*b);
1867 }
1868
STAGE(hue,NoCtx)1869 STAGE(hue, NoCtx) {
1870 F R = r*a,
1871 G = g*a,
1872 B = b*a;
1873
1874 set_sat(&R, &G, &B, sat(dr,dg,db)*a);
1875 set_lum(&R, &G, &B, lum(dr,dg,db)*a);
1876 clip_color(&R,&G,&B, a*da);
1877
1878 r = r*inv(da) + dr*inv(a) + R;
1879 g = g*inv(da) + dg*inv(a) + G;
1880 b = b*inv(da) + db*inv(a) + B;
1881 a = a + da - a*da;
1882 }
STAGE(saturation,NoCtx)1883 STAGE(saturation, NoCtx) {
1884 F R = dr*a,
1885 G = dg*a,
1886 B = db*a;
1887
1888 set_sat(&R, &G, &B, sat( r, g, b)*da);
1889 set_lum(&R, &G, &B, lum(dr,dg,db)* a); // (This is not redundant.)
1890 clip_color(&R,&G,&B, a*da);
1891
1892 r = r*inv(da) + dr*inv(a) + R;
1893 g = g*inv(da) + dg*inv(a) + G;
1894 b = b*inv(da) + db*inv(a) + B;
1895 a = a + da - a*da;
1896 }
STAGE(color,NoCtx)1897 STAGE(color, NoCtx) {
1898 F R = r*da,
1899 G = g*da,
1900 B = b*da;
1901
1902 set_lum(&R, &G, &B, lum(dr,dg,db)*a);
1903 clip_color(&R,&G,&B, a*da);
1904
1905 r = r*inv(da) + dr*inv(a) + R;
1906 g = g*inv(da) + dg*inv(a) + G;
1907 b = b*inv(da) + db*inv(a) + B;
1908 a = a + da - a*da;
1909 }
STAGE(luminosity,NoCtx)1910 STAGE(luminosity, NoCtx) {
1911 F R = dr*a,
1912 G = dg*a,
1913 B = db*a;
1914
1915 set_lum(&R, &G, &B, lum(r,g,b)*da);
1916 clip_color(&R,&G,&B, a*da);
1917
1918 r = r*inv(da) + dr*inv(a) + R;
1919 g = g*inv(da) + dg*inv(a) + G;
1920 b = b*inv(da) + db*inv(a) + B;
1921 a = a + da - a*da;
1922 }
1923
STAGE(srcover_rgba_8888,const SkRasterPipeline_MemoryCtx * ctx)1924 STAGE(srcover_rgba_8888, const SkRasterPipeline_MemoryCtx* ctx) {
1925 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
1926
1927 U32 dst = load<U32>(ptr, tail);
1928 dr = cast((dst ) & 0xff);
1929 dg = cast((dst >> 8) & 0xff);
1930 db = cast((dst >> 16) & 0xff);
1931 da = cast((dst >> 24) );
1932 // {dr,dg,db,da} are in [0,255]
1933 // { r, g, b, a} are in [0, 1] (but may be out of gamut)
1934
1935 r = mad(dr, inv(a), r*255.0f);
1936 g = mad(dg, inv(a), g*255.0f);
1937 b = mad(db, inv(a), b*255.0f);
1938 a = mad(da, inv(a), a*255.0f);
1939 // { r, g, b, a} are now in [0,255] (but may be out of gamut)
1940
1941 // to_unorm() clamps back to gamut. Scaling by 1 since we're already 255-biased.
1942 dst = to_unorm(r, 1, 255)
1943 | to_unorm(g, 1, 255) << 8
1944 | to_unorm(b, 1, 255) << 16
1945 | to_unorm(a, 1, 255) << 24;
1946 store(ptr, dst, tail);
1947 }
1948
clamp_01_(F v)1949 SI F clamp_01_(F v) { return min(max(0.0f, v), 1.0f); }
1950
STAGE(clamp_01,NoCtx)1951 STAGE(clamp_01, NoCtx) {
1952 r = clamp_01_(r);
1953 g = clamp_01_(g);
1954 b = clamp_01_(b);
1955 a = clamp_01_(a);
1956 }
1957
STAGE(clamp_gamut,NoCtx)1958 STAGE(clamp_gamut, NoCtx) {
1959 a = min(max(a, 0.0f), 1.0f);
1960 r = min(max(r, 0.0f), a);
1961 g = min(max(g, 0.0f), a);
1962 b = min(max(b, 0.0f), a);
1963 }
1964
STAGE(set_rgb,const float * rgb)1965 STAGE(set_rgb, const float* rgb) {
1966 r = rgb[0];
1967 g = rgb[1];
1968 b = rgb[2];
1969 }
1970
STAGE(unbounded_set_rgb,const float * rgb)1971 STAGE(unbounded_set_rgb, const float* rgb) {
1972 r = rgb[0];
1973 g = rgb[1];
1974 b = rgb[2];
1975 }
1976
STAGE(swap_rb,NoCtx)1977 STAGE(swap_rb, NoCtx) {
1978 auto tmp = r;
1979 r = b;
1980 b = tmp;
1981 }
STAGE(swap_rb_dst,NoCtx)1982 STAGE(swap_rb_dst, NoCtx) {
1983 auto tmp = dr;
1984 dr = db;
1985 db = tmp;
1986 }
1987
STAGE(move_src_dst,NoCtx)1988 STAGE(move_src_dst, NoCtx) {
1989 dr = r;
1990 dg = g;
1991 db = b;
1992 da = a;
1993 }
STAGE(move_dst_src,NoCtx)1994 STAGE(move_dst_src, NoCtx) {
1995 r = dr;
1996 g = dg;
1997 b = db;
1998 a = da;
1999 }
STAGE(swap_src_dst,NoCtx)2000 STAGE(swap_src_dst, NoCtx) {
2001 std::swap(r, dr);
2002 std::swap(g, dg);
2003 std::swap(b, db);
2004 std::swap(a, da);
2005 }
2006
STAGE(premul,NoCtx)2007 STAGE(premul, NoCtx) {
2008 r = r * a;
2009 g = g * a;
2010 b = b * a;
2011 }
STAGE(premul_dst,NoCtx)2012 STAGE(premul_dst, NoCtx) {
2013 dr = dr * da;
2014 dg = dg * da;
2015 db = db * da;
2016 }
STAGE(unpremul,NoCtx)2017 STAGE(unpremul, NoCtx) {
2018 float inf = sk_bit_cast<float>(0x7f800000);
2019 auto scale = if_then_else(1.0f/a < inf, 1.0f/a, 0);
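    // When a == 0 (or is tiny enough that 1/a overflows), 1.0f/a is not a finite float,
    // so scale falls back to 0 instead of propagating infinities/NaNs into r,g,b.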
2020 r *= scale;
2021 g *= scale;
2022 b *= scale;
2023 }
STAGE(unpremul_polar,NoCtx)2024 STAGE(unpremul_polar, NoCtx) {
2025 float inf = sk_bit_cast<float>(0x7f800000);
2026 auto scale = if_then_else(1.0f/a < inf, 1.0f/a, 0);
2027 g *= scale;
2028 b *= scale;
2029 }
2030
STAGE(force_opaque,NoCtx)2031 STAGE(force_opaque , NoCtx) { a = 1; }
STAGE(force_opaque_dst,NoCtx)2032 STAGE(force_opaque_dst, NoCtx) { da = 1; }
2033
STAGE(rgb_to_hsl,NoCtx)2034 STAGE(rgb_to_hsl, NoCtx) {
2035 F mx = max(r, max(g,b)),
2036 mn = min(r, min(g,b)),
2037 d = mx - mn,
2038 d_rcp = 1.0f / d;
2039
2040 F h = (1/6.0f) *
2041 if_then_else(mx == mn, 0,
2042 if_then_else(mx == r, (g-b)*d_rcp + if_then_else(g < b, 6.0f, 0),
2043 if_then_else(mx == g, (b-r)*d_rcp + 2.0f,
2044 (r-g)*d_rcp + 4.0f)));
2045
2046 F l = (mx + mn) * 0.5f;
2047 F s = if_then_else(mx == mn, 0,
2048 d / if_then_else(l > 0.5f, 2.0f-mx-mn, mx+mn));
2049
2050 r = h;
2051 g = s;
2052 b = l;
2053 }
STAGE(hsl_to_rgb,NoCtx)2054 STAGE(hsl_to_rgb, NoCtx) {
2055 // See GrRGBToHSLFilterEffect.fp
2056
2057 F h = r,
2058 s = g,
2059 l = b,
2060 c = (1.0f - abs_(2.0f * l - 1)) * s;
2061
2062 auto hue_to_rgb = [&](F hue) {
2063 F q = clamp_01_(abs_(fract(hue) * 6.0f - 3.0f) - 1.0f);
2064 return (q - 0.5f) * c + l;
2065 };
2066
2067 r = hue_to_rgb(h + 0.0f/3.0f);
2068 g = hue_to_rgb(h + 2.0f/3.0f);
2069 b = hue_to_rgb(h + 1.0f/3.0f);
2070 }
2071
2072 // Color conversion functions used in gradient interpolation, based on
2073 // https://www.w3.org/TR/css-color-4/#color-conversion-code
STAGE(css_lab_to_xyz,NoCtx)2074 STAGE(css_lab_to_xyz, NoCtx) {
2075 constexpr float k = 24389 / 27.0f;
2076 constexpr float e = 216 / 24389.0f;
2077
2078 F f[3];
2079 f[1] = (r + 16) * (1 / 116.0f);
2080 f[0] = (g * (1 / 500.0f)) + f[1];
2081 f[2] = f[1] - (b * (1 / 200.0f));
2082
2083 F f_cubed[3] = { f[0]*f[0]*f[0], f[1]*f[1]*f[1], f[2]*f[2]*f[2] };
2084
2085 F xyz[3] = {
2086 if_then_else(f_cubed[0] > e, f_cubed[0], (116 * f[0] - 16) * (1 / k)),
2087 if_then_else(r > k * e, f_cubed[1], r * (1 / k)),
2088 if_then_else(f_cubed[2] > e, f_cubed[2], (116 * f[2] - 16) * (1 / k))
2089 };
2090
2091 constexpr float D50[3] = { 0.3457f / 0.3585f, 1.0f, (1.0f - 0.3457f - 0.3585f) / 0.3585f };
2092 r = xyz[0]*D50[0];
2093 g = xyz[1]*D50[1];
2094 b = xyz[2]*D50[2];
2095 }
2096
STAGE(css_oklab_to_linear_srgb,NoCtx)2097 STAGE(css_oklab_to_linear_srgb, NoCtx) {
2098 F l_ = r + 0.3963377774f * g + 0.2158037573f * b,
2099 m_ = r - 0.1055613458f * g - 0.0638541728f * b,
2100 s_ = r - 0.0894841775f * g - 1.2914855480f * b;
2101
2102 F l = l_*l_*l_,
2103 m = m_*m_*m_,
2104 s = s_*s_*s_;
2105
2106 r = +4.0767416621f * l - 3.3077115913f * m + 0.2309699292f * s;
2107 g = -1.2684380046f * l + 2.6097574011f * m - 0.3413193965f * s;
2108 b = -0.0041960863f * l - 0.7034186147f * m + 1.7076147010f * s;
2109 }
2110
2111 // Skia stores all polar colors with hue in the first component, so this "LCH -> Lab" transform
2112 // actually takes "HCL". This is also used to do the same polar transform for OkHCL to OkLAB.
2113 // See similar comments & logic in SkGradientShaderBase.cpp.
STAGE(css_hcl_to_lab,NoCtx)2114 STAGE(css_hcl_to_lab, NoCtx) {
2115 F H = r,
2116 C = g,
2117 L = b;
2118
2119 F hueRadians = H * (SK_FloatPI / 180);
2120
2121 r = L;
2122 g = C * cos_(hueRadians);
2123 b = C * sin_(hueRadians);
2124 }
2125
mod_(F x,float y)2126 SI F mod_(F x, float y) {
2127 return x - y * floor_(x * (1 / y));
2128 }
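// e.g. mod_(-30.0f, 360.0f) = -30 - 360*floor(-30/360) = -30 - 360*(-1) = 330, so results
// land in [0, y) even for negative inputs (handy for normalizing hues into [0, 360)).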
2129
2130 struct RGB { F r, g, b; };
2131
css_hsl_to_srgb_(F h,F s,F l)2132 SI RGB css_hsl_to_srgb_(F h, F s, F l) {
2133 h = mod_(h, 360);
2134
2135 s *= 0.01f;
2136 l *= 0.01f;
2137
2138 F k[3] = {
2139 mod_(0 + h * (1 / 30.0f), 12),
2140 mod_(8 + h * (1 / 30.0f), 12),
2141 mod_(4 + h * (1 / 30.0f), 12)
2142 };
2143 F a = s * min(l, 1 - l);
2144 return {
2145 l - a * max(-1.0f, min(min(k[0] - 3.0f, 9.0f - k[0]), 1.0f)),
2146 l - a * max(-1.0f, min(min(k[1] - 3.0f, 9.0f - k[1]), 1.0f)),
2147 l - a * max(-1.0f, min(min(k[2] - 3.0f, 9.0f - k[2]), 1.0f))
2148 };
2149 }
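// Worked example: css_hsl_to_srgb_(0, 100, 50), i.e. hsl(0, 100%, 50%). After scaling,
// s = 1 and l = 0.5, k = {0, 8, 4} and a = 0.5; the three channel expressions evaluate to
// 1.0, 0.0, and 0.0, which is sRGB red, as expected.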
2150
STAGE(css_hsl_to_srgb,NoCtx)2151 STAGE(css_hsl_to_srgb, NoCtx) {
2152 RGB rgb = css_hsl_to_srgb_(r, g, b);
2153 r = rgb.r;
2154 g = rgb.g;
2155 b = rgb.b;
2156 }
2157
STAGE(css_hwb_to_srgb,NoCtx)2158 STAGE(css_hwb_to_srgb, NoCtx) {
2159 g *= 0.01f;
2160 b *= 0.01f;
2161
2162 F gray = g / (g + b);
2163
2164 RGB rgb = css_hsl_to_srgb_(r, 100.0f, 50.0f);
2165 rgb.r = rgb.r * (1 - g - b) + g;
2166 rgb.g = rgb.g * (1 - g - b) + g;
2167 rgb.b = rgb.b * (1 - g - b) + g;
2168
2169 auto isGray = (g + b) >= 1;
2170
2171 r = if_then_else(isGray, gray, rgb.r);
2172 g = if_then_else(isGray, gray, rgb.g);
2173 b = if_then_else(isGray, gray, rgb.b);
2174 }
2175
2176 // Derive alpha's coverage from rgb coverage and the values of src and dst alpha.
alpha_coverage_from_rgb_coverage(F a,F da,F cr,F cg,F cb)2177 SI F alpha_coverage_from_rgb_coverage(F a, F da, F cr, F cg, F cb) {
2178 return if_then_else(a < da, min(cr, min(cg,cb))
2179 , max(cr, max(cg,cb)));
2180 }
2181
STAGE(scale_1_float,const float * c)2182 STAGE(scale_1_float, const float* c) {
2183 r = r * *c;
2184 g = g * *c;
2185 b = b * *c;
2186 a = a * *c;
2187 }
STAGE(scale_u8,const SkRasterPipeline_MemoryCtx * ctx)2188 STAGE(scale_u8, const SkRasterPipeline_MemoryCtx* ctx) {
2189 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
2190
2191 auto scales = load<U8>(ptr, tail);
2192 auto c = from_byte(scales);
2193
2194 r = r * c;
2195 g = g * c;
2196 b = b * c;
2197 a = a * c;
2198 }
STAGE(scale_565,const SkRasterPipeline_MemoryCtx * ctx)2199 STAGE(scale_565, const SkRasterPipeline_MemoryCtx* ctx) {
2200 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2201
2202 F cr,cg,cb;
2203 from_565(load<U16>(ptr, tail), &cr, &cg, &cb);
2204
2205 F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
2206
2207 r = r * cr;
2208 g = g * cg;
2209 b = b * cb;
2210 a = a * ca;
2211 }
2212
lerp(F from,F to,F t)2213 SI F lerp(F from, F to, F t) {
2214 return mad(to-from, t, from);
2215 }
2216
STAGE(lerp_1_float,const float * c)2217 STAGE(lerp_1_float, const float* c) {
2218 r = lerp(dr, r, *c);
2219 g = lerp(dg, g, *c);
2220 b = lerp(db, b, *c);
2221 a = lerp(da, a, *c);
2222 }
STAGE(scale_native,const float scales[])2223 STAGE(scale_native, const float scales[]) {
2224 auto c = sk_unaligned_load<F>(scales);
2225 r = r * c;
2226 g = g * c;
2227 b = b * c;
2228 a = a * c;
2229 }
STAGE(lerp_native,const float scales[])2230 STAGE(lerp_native, const float scales[]) {
2231 auto c = sk_unaligned_load<F>(scales);
2232 r = lerp(dr, r, c);
2233 g = lerp(dg, g, c);
2234 b = lerp(db, b, c);
2235 a = lerp(da, a, c);
2236 }
STAGE(lerp_u8,const SkRasterPipeline_MemoryCtx * ctx)2237 STAGE(lerp_u8, const SkRasterPipeline_MemoryCtx* ctx) {
2238 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
2239
2240 auto scales = load<U8>(ptr, tail);
2241 auto c = from_byte(scales);
2242
2243 r = lerp(dr, r, c);
2244 g = lerp(dg, g, c);
2245 b = lerp(db, b, c);
2246 a = lerp(da, a, c);
2247 }
STAGE(lerp_565,const SkRasterPipeline_MemoryCtx * ctx)2248 STAGE(lerp_565, const SkRasterPipeline_MemoryCtx* ctx) {
2249 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2250
2251 F cr,cg,cb;
2252 from_565(load<U16>(ptr, tail), &cr, &cg, &cb);
2253
2254 F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
2255
2256 r = lerp(dr, r, cr);
2257 g = lerp(dg, g, cg);
2258 b = lerp(db, b, cb);
2259 a = lerp(da, a, ca);
2260 }
2261
STAGE(emboss,const SkRasterPipeline_EmbossCtx * ctx)2262 STAGE(emboss, const SkRasterPipeline_EmbossCtx* ctx) {
2263 auto mptr = ptr_at_xy<const uint8_t>(&ctx->mul, dx,dy),
2264 aptr = ptr_at_xy<const uint8_t>(&ctx->add, dx,dy);
2265
2266 F mul = from_byte(load<U8>(mptr, tail)),
2267 add = from_byte(load<U8>(aptr, tail));
2268
2269 r = mad(r, mul, add);
2270 g = mad(g, mul, add);
2271 b = mad(b, mul, add);
2272 }
2273
STAGE(byte_tables,const SkRasterPipeline_TablesCtx * tables)2274 STAGE(byte_tables, const SkRasterPipeline_TablesCtx* tables) {
2275 r = from_byte(gather(tables->r, to_unorm(r, 255)));
2276 g = from_byte(gather(tables->g, to_unorm(g, 255)));
2277 b = from_byte(gather(tables->b, to_unorm(b, 255)));
2278 a = from_byte(gather(tables->a, to_unorm(a, 255)));
2279 }
2280
strip_sign(F x,U32 * sign)2281 SI F strip_sign(F x, U32* sign) {
2282 U32 bits = sk_bit_cast<U32>(x);
2283 *sign = bits & 0x80000000;
2284 return sk_bit_cast<F>(bits ^ *sign);
2285 }
2286
apply_sign(F x,U32 sign)2287 SI F apply_sign(F x, U32 sign) {
2288 return sk_bit_cast<F>(sign | sk_bit_cast<U32>(x));
2289 }
2290
STAGE(parametric,const skcms_TransferFunction * ctx)2291 STAGE(parametric, const skcms_TransferFunction* ctx) {
2292 auto fn = [&](F v) {
2293 U32 sign;
2294 v = strip_sign(v, &sign);
2295
2296 F r = if_then_else(v <= ctx->d, mad(ctx->c, v, ctx->f)
2297 , approx_powf(mad(ctx->a, v, ctx->b), ctx->g) + ctx->e);
2298 return apply_sign(r, sign);
2299 };
2300 r = fn(r);
2301 g = fn(g);
2302 b = fn(b);
2303 }
2304
STAGE(gamma_,const float * G)2305 STAGE(gamma_, const float* G) {
2306 auto fn = [&](F v) {
2307 U32 sign;
2308 v = strip_sign(v, &sign);
2309 return apply_sign(approx_powf(v, *G), sign);
2310 };
2311 r = fn(r);
2312 g = fn(g);
2313 b = fn(b);
2314 }
2315
STAGE(PQish,const skcms_TransferFunction * ctx)2316 STAGE(PQish, const skcms_TransferFunction* ctx) {
2317 auto fn = [&](F v) {
2318 U32 sign;
2319 v = strip_sign(v, &sign);
2320
2321 F r = approx_powf(max(mad(ctx->b, approx_powf(v, ctx->c), ctx->a), 0.0f)
2322 / (mad(ctx->e, approx_powf(v, ctx->c), ctx->d)),
2323 ctx->f);
2324
2325 return apply_sign(r, sign);
2326 };
2327 r = fn(r);
2328 g = fn(g);
2329 b = fn(b);
2330 }
2331
STAGE(HLGish,const skcms_TransferFunction * ctx)2332 STAGE(HLGish, const skcms_TransferFunction* ctx) {
2333 auto fn = [&](F v) {
2334 U32 sign;
2335 v = strip_sign(v, &sign);
2336
2337 const float R = ctx->a, G = ctx->b,
2338 a = ctx->c, b = ctx->d, c = ctx->e,
2339 K = ctx->f + 1.0f;
2340
2341 F r = if_then_else(v*R <= 1, approx_powf(v*R, G)
2342 , approx_exp((v-c)*a) + b);
2343
2344 return K * apply_sign(r, sign);
2345 };
2346 r = fn(r);
2347 g = fn(g);
2348 b = fn(b);
2349 }
2350
STAGE(HLGinvish,const skcms_TransferFunction * ctx)2351 STAGE(HLGinvish, const skcms_TransferFunction* ctx) {
2352 auto fn = [&](F v) {
2353 U32 sign;
2354 v = strip_sign(v, &sign);
2355
2356 const float R = ctx->a, G = ctx->b,
2357 a = ctx->c, b = ctx->d, c = ctx->e,
2358 K = ctx->f + 1.0f;
2359
2360 v /= K;
2361 F r = if_then_else(v <= 1, R * approx_powf(v, G)
2362 , a * approx_log(v - b) + c);
2363
2364 return apply_sign(r, sign);
2365 };
2366 r = fn(r);
2367 g = fn(g);
2368 b = fn(b);
2369 }
2370
STAGE(load_a8,const SkRasterPipeline_MemoryCtx * ctx)2371 STAGE(load_a8, const SkRasterPipeline_MemoryCtx* ctx) {
2372 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
2373
2374 r = g = b = 0.0f;
2375 a = from_byte(load<U8>(ptr, tail));
2376 }
STAGE(load_a8_dst,const SkRasterPipeline_MemoryCtx * ctx)2377 STAGE(load_a8_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2378 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
2379
2380 dr = dg = db = 0.0f;
2381 da = from_byte(load<U8>(ptr, tail));
2382 }
STAGE(gather_a8,const SkRasterPipeline_GatherCtx * ctx)2383 STAGE(gather_a8, const SkRasterPipeline_GatherCtx* ctx) {
2384 const uint8_t* ptr;
2385 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2386 r = g = b = 0.0f;
2387 a = from_byte(gather(ptr, ix));
2388 }
STAGE(store_a8,const SkRasterPipeline_MemoryCtx * ctx)2389 STAGE(store_a8, const SkRasterPipeline_MemoryCtx* ctx) {
2390 auto ptr = ptr_at_xy<uint8_t>(ctx, dx,dy);
2391
2392 U8 packed = pack(pack(to_unorm(a, 255)));
2393 store(ptr, packed, tail);
2394 }
STAGE(store_r8,const SkRasterPipeline_MemoryCtx * ctx)2395 STAGE(store_r8, const SkRasterPipeline_MemoryCtx* ctx) {
2396 auto ptr = ptr_at_xy<uint8_t>(ctx, dx,dy);
2397
2398 U8 packed = pack(pack(to_unorm(r, 255)));
2399 store(ptr, packed, tail);
2400 }
2401
STAGE(load_565,const SkRasterPipeline_MemoryCtx * ctx)2402 STAGE(load_565, const SkRasterPipeline_MemoryCtx* ctx) {
2403 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2404
2405 from_565(load<U16>(ptr, tail), &r,&g,&b);
2406 a = 1.0f;
2407 }
STAGE(load_565_dst,const SkRasterPipeline_MemoryCtx * ctx)2408 STAGE(load_565_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2409 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2410
2411 from_565(load<U16>(ptr, tail), &dr,&dg,&db);
2412 da = 1.0f;
2413 }
STAGE(gather_565,const SkRasterPipeline_GatherCtx * ctx)2414 STAGE(gather_565, const SkRasterPipeline_GatherCtx* ctx) {
2415 const uint16_t* ptr;
2416 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2417 from_565(gather(ptr, ix), &r,&g,&b);
2418 a = 1.0f;
2419 }
STAGE(store_565,const SkRasterPipeline_MemoryCtx * ctx)2420 STAGE(store_565, const SkRasterPipeline_MemoryCtx* ctx) {
2421 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
2422
2423 U16 px = pack( to_unorm(r, 31) << 11
2424 | to_unorm(g, 63) << 5
2425 | to_unorm(b, 31) );
2426 store(ptr, px, tail);
2427 }
2428
STAGE(load_4444,const SkRasterPipeline_MemoryCtx * ctx)2429 STAGE(load_4444, const SkRasterPipeline_MemoryCtx* ctx) {
2430 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2431 from_4444(load<U16>(ptr, tail), &r,&g,&b,&a);
2432 }
STAGE(load_4444_dst,const SkRasterPipeline_MemoryCtx * ctx)2433 STAGE(load_4444_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2434 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2435 from_4444(load<U16>(ptr, tail), &dr,&dg,&db,&da);
2436 }
STAGE(gather_4444,const SkRasterPipeline_GatherCtx * ctx)2437 STAGE(gather_4444, const SkRasterPipeline_GatherCtx* ctx) {
2438 const uint16_t* ptr;
2439 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2440 from_4444(gather(ptr, ix), &r,&g,&b,&a);
2441 }
STAGE(store_4444,const SkRasterPipeline_MemoryCtx * ctx)2442 STAGE(store_4444, const SkRasterPipeline_MemoryCtx* ctx) {
2443 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
2444 U16 px = pack( to_unorm(r, 15) << 12
2445 | to_unorm(g, 15) << 8
2446 | to_unorm(b, 15) << 4
2447 | to_unorm(a, 15) );
2448 store(ptr, px, tail);
2449 }
2450
STAGE(load_8888,const SkRasterPipeline_MemoryCtx * ctx)2451 STAGE(load_8888, const SkRasterPipeline_MemoryCtx* ctx) {
2452 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
2453 from_8888(load<U32>(ptr, tail), &r,&g,&b,&a);
2454 }
STAGE(load_8888_dst,const SkRasterPipeline_MemoryCtx * ctx)2455 STAGE(load_8888_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2456 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
2457 from_8888(load<U32>(ptr, tail), &dr,&dg,&db,&da);
2458 }
STAGE(gather_8888,const SkRasterPipeline_GatherCtx * ctx)2459 STAGE(gather_8888, const SkRasterPipeline_GatherCtx* ctx) {
2460 const uint32_t* ptr;
2461 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2462 from_8888(gather(ptr, ix), &r,&g,&b,&a);
2463 }
STAGE(store_8888,const SkRasterPipeline_MemoryCtx * ctx)2464 STAGE(store_8888, const SkRasterPipeline_MemoryCtx* ctx) {
2465 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
2466
2467 U32 px = to_unorm(r, 255)
2468 | to_unorm(g, 255) << 8
2469 | to_unorm(b, 255) << 16
2470 | to_unorm(a, 255) << 24;
2471 store(ptr, px, tail);
2472 }
2473
STAGE(load_rg88,const SkRasterPipeline_MemoryCtx * ctx)2474 STAGE(load_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
2475 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2476 from_88(load<U16>(ptr, tail), &r, &g);
2477 b = 0;
2478 a = 1;
2479 }
STAGE(load_rg88_dst,const SkRasterPipeline_MemoryCtx * ctx)2480 STAGE(load_rg88_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2481 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2482 from_88(load<U16>(ptr, tail), &dr, &dg);
2483 db = 0;
2484 da = 1;
2485 }
STAGE(gather_rg88,const SkRasterPipeline_GatherCtx * ctx)2486 STAGE(gather_rg88, const SkRasterPipeline_GatherCtx* ctx) {
2487 const uint16_t* ptr;
2488 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2489 from_88(gather(ptr, ix), &r, &g);
2490 b = 0;
2491 a = 1;
2492 }
STAGE(store_rg88,const SkRasterPipeline_MemoryCtx * ctx)2493 STAGE(store_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
2494 auto ptr = ptr_at_xy<uint16_t>(ctx, dx, dy);
2495 U16 px = pack( to_unorm(r, 255) | to_unorm(g, 255) << 8 );
2496 store(ptr, px, tail);
2497 }
2498
STAGE(load_a16,const SkRasterPipeline_MemoryCtx * ctx)2499 STAGE(load_a16, const SkRasterPipeline_MemoryCtx* ctx) {
2500 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2501 r = g = b = 0;
2502 a = from_short(load<U16>(ptr, tail));
2503 }
STAGE(load_a16_dst,const SkRasterPipeline_MemoryCtx * ctx)2504 STAGE(load_a16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2505 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2506 dr = dg = db = 0.0f;
2507 da = from_short(load<U16>(ptr, tail));
2508 }
STAGE(gather_a16,const SkRasterPipeline_GatherCtx * ctx)2509 STAGE(gather_a16, const SkRasterPipeline_GatherCtx* ctx) {
2510 const uint16_t* ptr;
2511 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2512 r = g = b = 0.0f;
2513 a = from_short(gather(ptr, ix));
2514 }
STAGE(store_a16,const SkRasterPipeline_MemoryCtx * ctx)2515 STAGE(store_a16, const SkRasterPipeline_MemoryCtx* ctx) {
2516 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
2517
2518 U16 px = pack(to_unorm(a, 65535));
2519 store(ptr, px, tail);
2520 }
2521
STAGE(load_rg1616,const SkRasterPipeline_MemoryCtx * ctx)2522 STAGE(load_rg1616, const SkRasterPipeline_MemoryCtx* ctx) {
2523 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
2524 b = 0; a = 1;
2525 from_1616(load<U32>(ptr, tail), &r,&g);
2526 }
STAGE(load_rg1616_dst,const SkRasterPipeline_MemoryCtx * ctx)2527 STAGE(load_rg1616_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2528 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
2529 from_1616(load<U32>(ptr, tail), &dr, &dg);
2530 db = 0;
2531 da = 1;
2532 }
STAGE(gather_rg1616,const SkRasterPipeline_GatherCtx * ctx)2533 STAGE(gather_rg1616, const SkRasterPipeline_GatherCtx* ctx) {
2534 const uint32_t* ptr;
2535 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2536 from_1616(gather(ptr, ix), &r, &g);
2537 b = 0;
2538 a = 1;
2539 }
STAGE(store_rg1616,const SkRasterPipeline_MemoryCtx * ctx)2540 STAGE(store_rg1616, const SkRasterPipeline_MemoryCtx* ctx) {
2541 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
2542
2543 U32 px = to_unorm(r, 65535)
2544 | to_unorm(g, 65535) << 16;
2545 store(ptr, px, tail);
2546 }
2547
STAGE(load_16161616,const SkRasterPipeline_MemoryCtx * ctx)2548 STAGE(load_16161616, const SkRasterPipeline_MemoryCtx* ctx) {
2549 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
2550 from_16161616(load<U64>(ptr, tail), &r,&g, &b, &a);
2551 }
STAGE(load_16161616_dst,const SkRasterPipeline_MemoryCtx * ctx)2552 STAGE(load_16161616_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2553 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
2554 from_16161616(load<U64>(ptr, tail), &dr, &dg, &db, &da);
2555 }
STAGE(gather_16161616,const SkRasterPipeline_GatherCtx * ctx)2556 STAGE(gather_16161616, const SkRasterPipeline_GatherCtx* ctx) {
2557 const uint64_t* ptr;
2558 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2559 from_16161616(gather(ptr, ix), &r, &g, &b, &a);
2560 }
STAGE(store_16161616,const SkRasterPipeline_MemoryCtx * ctx)2561 STAGE(store_16161616, const SkRasterPipeline_MemoryCtx* ctx) {
2562 auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,4*dy);
2563
2564 U16 R = pack(to_unorm(r, 65535)),
2565 G = pack(to_unorm(g, 65535)),
2566 B = pack(to_unorm(b, 65535)),
2567 A = pack(to_unorm(a, 65535));
2568
2569 store4(ptr,tail, R,G,B,A);
2570 }
2571
2572
STAGE(load_1010102,const SkRasterPipeline_MemoryCtx * ctx)2573 STAGE(load_1010102, const SkRasterPipeline_MemoryCtx* ctx) {
2574 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
2575 from_1010102(load<U32>(ptr, tail), &r,&g,&b,&a);
2576 }
STAGE(load_1010102_dst,const SkRasterPipeline_MemoryCtx * ctx)2577 STAGE(load_1010102_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2578 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
2579 from_1010102(load<U32>(ptr, tail), &dr,&dg,&db,&da);
2580 }
STAGE(load_1010102_xr,const SkRasterPipeline_MemoryCtx * ctx)2581 STAGE(load_1010102_xr, const SkRasterPipeline_MemoryCtx* ctx) {
2582 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
2583 from_1010102_xr(load<U32>(ptr, tail), &r,&g,&b,&a);
2584 }
STAGE(load_1010102_xr_dst,const SkRasterPipeline_MemoryCtx * ctx)2585 STAGE(load_1010102_xr_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2586 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
2587 from_1010102_xr(load<U32>(ptr, tail), &dr,&dg,&db,&da);
2588 }
STAGE(gather_1010102,const SkRasterPipeline_GatherCtx * ctx)2589 STAGE(gather_1010102, const SkRasterPipeline_GatherCtx* ctx) {
2590 const uint32_t* ptr;
2591 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2592 from_1010102(gather(ptr, ix), &r,&g,&b,&a);
2593 }
STAGE(store_1010102,const SkRasterPipeline_MemoryCtx * ctx)2594 STAGE(store_1010102, const SkRasterPipeline_MemoryCtx* ctx) {
2595 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
2596
2597 U32 px = to_unorm(r, 1023)
2598 | to_unorm(g, 1023) << 10
2599 | to_unorm(b, 1023) << 20
2600 | to_unorm(a, 3) << 30;
2601 store(ptr, px, tail);
2602 }
STAGE(store_1010102_xr,const SkRasterPipeline_MemoryCtx * ctx)2603 STAGE(store_1010102_xr, const SkRasterPipeline_MemoryCtx* ctx) {
2604 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
2605 static constexpr float min = -0.752941f;
2606 static constexpr float max = 1.25098f;
2607 static constexpr float range = max - min;
2608 U32 px = to_unorm((r - min) / range, 1023)
2609 | to_unorm((g - min) / range, 1023) << 10
2610 | to_unorm((b - min) / range, 1023) << 20
2611 | to_unorm(a, 3) << 30;
2612 store(ptr, px, tail);
2613 }
2614
STAGE(load_f16,const SkRasterPipeline_MemoryCtx * ctx)2615 STAGE(load_f16, const SkRasterPipeline_MemoryCtx* ctx) {
2616 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
2617
2618 U16 R,G,B,A;
2619 load4((const uint16_t*)ptr,tail, &R,&G,&B,&A);
2620 r = from_half(R);
2621 g = from_half(G);
2622 b = from_half(B);
2623 a = from_half(A);
2624 }
STAGE(load_f16_dst,const SkRasterPipeline_MemoryCtx * ctx)2625 STAGE(load_f16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2626 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
2627
2628 U16 R,G,B,A;
2629 load4((const uint16_t*)ptr,tail, &R,&G,&B,&A);
2630 dr = from_half(R);
2631 dg = from_half(G);
2632 db = from_half(B);
2633 da = from_half(A);
2634 }
STAGE(gather_f16,const SkRasterPipeline_GatherCtx * ctx)2635 STAGE(gather_f16, const SkRasterPipeline_GatherCtx* ctx) {
2636 const uint64_t* ptr;
2637 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2638 auto px = gather(ptr, ix);
2639
2640 U16 R,G,B,A;
2641 load4((const uint16_t*)&px,0, &R,&G,&B,&A);
2642 r = from_half(R);
2643 g = from_half(G);
2644 b = from_half(B);
2645 a = from_half(A);
2646 }
STAGE(store_f16,const SkRasterPipeline_MemoryCtx * ctx)2647 STAGE(store_f16, const SkRasterPipeline_MemoryCtx* ctx) {
2648 auto ptr = ptr_at_xy<uint64_t>(ctx, dx,dy);
2649 store4((uint16_t*)ptr,tail, to_half(r)
2650 , to_half(g)
2651 , to_half(b)
2652 , to_half(a));
2653 }
2654
STAGE(store_u16_be,const SkRasterPipeline_MemoryCtx * ctx)2655 STAGE(store_u16_be, const SkRasterPipeline_MemoryCtx* ctx) {
2656 auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,dy);
2657
2658 U16 R = bswap(pack(to_unorm(r, 65535))),
2659 G = bswap(pack(to_unorm(g, 65535))),
2660 B = bswap(pack(to_unorm(b, 65535))),
2661 A = bswap(pack(to_unorm(a, 65535)));
2662
2663 store4(ptr,tail, R,G,B,A);
2664 }
2665
STAGE(load_af16,const SkRasterPipeline_MemoryCtx * ctx)2666 STAGE(load_af16, const SkRasterPipeline_MemoryCtx* ctx) {
2667 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2668
2669 U16 A = load<U16>((const uint16_t*)ptr, tail);
2670 r = 0;
2671 g = 0;
2672 b = 0;
2673 a = from_half(A);
2674 }
STAGE(load_af16_dst,const SkRasterPipeline_MemoryCtx * ctx)2675 STAGE(load_af16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2676 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2677
2678 U16 A = load<U16>((const uint16_t*)ptr, tail);
2679 dr = dg = db = 0.0f;
2680 da = from_half(A);
2681 }
STAGE(gather_af16,const SkRasterPipeline_GatherCtx * ctx)2682 STAGE(gather_af16, const SkRasterPipeline_GatherCtx* ctx) {
2683 const uint16_t* ptr;
2684 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2685 r = g = b = 0.0f;
2686 a = from_half(gather(ptr, ix));
2687 }
STAGE(store_af16,const SkRasterPipeline_MemoryCtx * ctx)2688 STAGE(store_af16, const SkRasterPipeline_MemoryCtx* ctx) {
2689 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
2690 store(ptr, to_half(a), tail);
2691 }
2692
STAGE(load_rgf16,const SkRasterPipeline_MemoryCtx * ctx)2693 STAGE(load_rgf16, const SkRasterPipeline_MemoryCtx* ctx) {
2694 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
2695
2696 U16 R,G;
2697 load2((const uint16_t*)ptr, tail, &R, &G);
2698 r = from_half(R);
2699 g = from_half(G);
2700 b = 0;
2701 a = 1;
2702 }
STAGE(load_rgf16_dst,const SkRasterPipeline_MemoryCtx * ctx)2703 STAGE(load_rgf16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2704 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
2705
2706 U16 R,G;
2707 load2((const uint16_t*)ptr, tail, &R, &G);
2708 dr = from_half(R);
2709 dg = from_half(G);
2710 db = 0;
2711 da = 1;
2712 }
STAGE(gather_rgf16,const SkRasterPipeline_GatherCtx * ctx)2713 STAGE(gather_rgf16, const SkRasterPipeline_GatherCtx* ctx) {
2714 const uint32_t* ptr;
2715 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2716 auto px = gather(ptr, ix);
2717
2718 U16 R,G;
2719 load2((const uint16_t*)&px, 0, &R, &G);
2720 r = from_half(R);
2721 g = from_half(G);
2722 b = 0;
2723 a = 1;
2724 }
STAGE(store_rgf16,const SkRasterPipeline_MemoryCtx * ctx)2725 STAGE(store_rgf16, const SkRasterPipeline_MemoryCtx* ctx) {
2726 auto ptr = ptr_at_xy<uint32_t>(ctx, dx, dy);
2727 store2((uint16_t*)ptr, tail, to_half(r)
2728 , to_half(g));
2729 }
2730
STAGE(load_f32,const SkRasterPipeline_MemoryCtx * ctx)2731 STAGE(load_f32, const SkRasterPipeline_MemoryCtx* ctx) {
2732 auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
2733 load4(ptr,tail, &r,&g,&b,&a);
2734 }
STAGE(load_f32_dst,const SkRasterPipeline_MemoryCtx * ctx)2735 STAGE(load_f32_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2736 auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
2737 load4(ptr,tail, &dr,&dg,&db,&da);
2738 }
STAGE(gather_f32,const SkRasterPipeline_GatherCtx * ctx)2739 STAGE(gather_f32, const SkRasterPipeline_GatherCtx* ctx) {
2740 const float* ptr;
2741 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2742 r = gather(ptr, 4*ix + 0);
2743 g = gather(ptr, 4*ix + 1);
2744 b = gather(ptr, 4*ix + 2);
2745 a = gather(ptr, 4*ix + 3);
2746 }
STAGE(store_f32,const SkRasterPipeline_MemoryCtx * ctx)2747 STAGE(store_f32, const SkRasterPipeline_MemoryCtx* ctx) {
2748 auto ptr = ptr_at_xy<float>(ctx, 4*dx,4*dy);
2749 store4(ptr,tail, r,g,b,a);
2750 }
2751
STAGE(load_rgf32,const SkRasterPipeline_MemoryCtx * ctx)2752 STAGE(load_rgf32, const SkRasterPipeline_MemoryCtx* ctx) {
2753 auto ptr = ptr_at_xy<const float>(ctx, 2*dx,2*dy);
2754 load2(ptr, tail, &r, &g);
2755 b = 0;
2756 a = 1;
2757 }
STAGE(store_rgf32,const SkRasterPipeline_MemoryCtx * ctx)2758 STAGE(store_rgf32, const SkRasterPipeline_MemoryCtx* ctx) {
2759 auto ptr = ptr_at_xy<float>(ctx, 2*dx,2*dy);
2760 store2(ptr, tail, r, g);
2761 }
2762
exclusive_repeat(F v,const SkRasterPipeline_TileCtx * ctx)2763 SI F exclusive_repeat(F v, const SkRasterPipeline_TileCtx* ctx) {
2764 return v - floor_(v*ctx->invScale)*ctx->scale;
2765 }
exclusive_mirror(F v,const SkRasterPipeline_TileCtx * ctx)2766 SI F exclusive_mirror(F v, const SkRasterPipeline_TileCtx* ctx) {
2767 auto limit = ctx->scale;
2768 auto invLimit = ctx->invScale;
2769
2770 // This is "repeat" over the range 0..2*limit
2771 auto u = v - floor_(v*invLimit*0.5f)*2*limit;
2772 // s will be 0 when moving forward (e.g. [0, limit)) and 1 when moving backward (e.g.
2773 // [limit, 2*limit)).
2774 auto s = floor_(u*invLimit);
2775 // This is the mirror result.
2776 auto m = u - 2*s*(u - limit);
2777 // Apply a bias to m if moving backwards so that we snap consistently at exact integer coords in
2778 // the logical infinite image. This is tested by mirror_tile GM. Note that all values
2779 // that have a non-zero bias applied are > 0.
2780 auto biasInUlps = trunc_(s);
2781 return sk_bit_cast<F>(sk_bit_cast<U32>(m) + ctx->mirrorBiasDir*biasInUlps);
2782 }
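// Worked example with limit = 4: v = 5 repeats to u = 5 over [0,8), s = floor(5/4) = 1
// (moving backward), and m = 5 - 2*1*(5-4) = 3, so 5 mirrors to 3. The trailing ULP bias
// only nudges these backward-moving results, so exact integer coordinates tile consistently.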
2783 // Tile x or y to [0,limit) == [0,limit - 1 ulp] (think, sampling from images).
2784 // The gather stages will hard clamp the output of these stages to [0,limit)...
2785 // we just need to do the basic repeat or mirroring.
STAGE(repeat_x,const SkRasterPipeline_TileCtx * ctx)2786 STAGE(repeat_x, const SkRasterPipeline_TileCtx* ctx) { r = exclusive_repeat(r, ctx); }
STAGE(repeat_y,const SkRasterPipeline_TileCtx * ctx)2787 STAGE(repeat_y, const SkRasterPipeline_TileCtx* ctx) { g = exclusive_repeat(g, ctx); }
STAGE(mirror_x,const SkRasterPipeline_TileCtx * ctx)2788 STAGE(mirror_x, const SkRasterPipeline_TileCtx* ctx) { r = exclusive_mirror(r, ctx); }
STAGE(mirror_y,const SkRasterPipeline_TileCtx * ctx)2789 STAGE(mirror_y, const SkRasterPipeline_TileCtx* ctx) { g = exclusive_mirror(g, ctx); }
2790
STAGE(clamp_x_1,NoCtx)2791 STAGE( clamp_x_1, NoCtx) { r = clamp_01_(r); }
STAGE(repeat_x_1,NoCtx)2792 STAGE(repeat_x_1, NoCtx) { r = clamp_01_(r - floor_(r)); }
STAGE(mirror_x_1,NoCtx)2793 STAGE(mirror_x_1, NoCtx) { r = clamp_01_(abs_( (r-1.0f) - two(floor_((r-1.0f)*0.5f)) - 1.0f )); }
2794
STAGE(clamp_x_and_y,const SkRasterPipeline_CoordClampCtx * ctx)2795 STAGE(clamp_x_and_y, const SkRasterPipeline_CoordClampCtx* ctx) {
2796 r = min(ctx->max_x, max(ctx->min_x, r));
2797 g = min(ctx->max_y, max(ctx->min_y, g));
2798 }
2799
2800 // Decal stores a 32bit mask after checking the coordinate (x and/or y) against its domain:
2801 // mask == 0x00000000 if the coordinate(s) are out of bounds
2802 // mask == 0xFFFFFFFF if the coordinate(s) are in bounds
2803 // After the gather stage, the r,g,b,a values are AND'd with this mask, setting them to 0
2804 // if either of the coordinates were out of bounds.
2805
STAGE(decal_x,SkRasterPipeline_DecalTileCtx * ctx)2806 STAGE(decal_x, SkRasterPipeline_DecalTileCtx* ctx) {
2807 auto w = ctx->limit_x;
2808 auto e = ctx->inclusiveEdge_x;
2809 auto cond = ((0 < r) & (r < w)) | (r == e);
2810 sk_unaligned_store(ctx->mask, cond_to_mask(cond));
2811 }
STAGE(decal_y,SkRasterPipeline_DecalTileCtx * ctx)2812 STAGE(decal_y, SkRasterPipeline_DecalTileCtx* ctx) {
2813 auto h = ctx->limit_y;
2814 auto e = ctx->inclusiveEdge_y;
2815 auto cond = ((0 < g) & (g < h)) | (g == e);
2816 sk_unaligned_store(ctx->mask, cond_to_mask(cond));
2817 }
STAGE(decal_x_and_y,SkRasterPipeline_DecalTileCtx * ctx)2818 STAGE(decal_x_and_y, SkRasterPipeline_DecalTileCtx* ctx) {
2819 auto w = ctx->limit_x;
2820 auto h = ctx->limit_y;
2821 auto ex = ctx->inclusiveEdge_x;
2822 auto ey = ctx->inclusiveEdge_y;
2823 auto cond = (((0 < r) & (r < w)) | (r == ex))
2824 & (((0 < g) & (g < h)) | (g == ey));
2825 sk_unaligned_store(ctx->mask, cond_to_mask(cond));
2826 }
STAGE(check_decal_mask,SkRasterPipeline_DecalTileCtx * ctx)2827 STAGE(check_decal_mask, SkRasterPipeline_DecalTileCtx* ctx) {
2828 auto mask = sk_unaligned_load<U32>(ctx->mask);
2829 r = sk_bit_cast<F>(sk_bit_cast<U32>(r) & mask);
2830 g = sk_bit_cast<F>(sk_bit_cast<U32>(g) & mask);
2831 b = sk_bit_cast<F>(sk_bit_cast<U32>(b) & mask);
2832 a = sk_bit_cast<F>(sk_bit_cast<U32>(a) & mask);
2833 }
2834
STAGE(alpha_to_gray,NoCtx)2835 STAGE(alpha_to_gray, NoCtx) {
2836 r = g = b = a;
2837 a = 1;
2838 }
STAGE(alpha_to_gray_dst,NoCtx)2839 STAGE(alpha_to_gray_dst, NoCtx) {
2840 dr = dg = db = da;
2841 da = 1;
2842 }
STAGE(alpha_to_red,NoCtx)2843 STAGE(alpha_to_red, NoCtx) {
2844 r = a;
2845 a = 1;
2846 }
STAGE(alpha_to_red_dst,NoCtx)2847 STAGE(alpha_to_red_dst, NoCtx) {
2848 dr = da;
2849 da = 1;
2850 }
2851
STAGE(bt709_luminance_or_luma_to_alpha,NoCtx)2852 STAGE(bt709_luminance_or_luma_to_alpha, NoCtx) {
2853 a = r*0.2126f + g*0.7152f + b*0.0722f;
2854 r = g = b = 0;
2855 }
STAGE(bt709_luminance_or_luma_to_rgb,NoCtx)2856 STAGE(bt709_luminance_or_luma_to_rgb, NoCtx) {
2857 r = g = b = r*0.2126f + g*0.7152f + b*0.0722f;
2858 }
2859
STAGE(matrix_translate,const float * m)2860 STAGE(matrix_translate, const float* m) {
2861 r += m[0];
2862 g += m[1];
2863 }
STAGE(matrix_scale_translate,const float * m)2864 STAGE(matrix_scale_translate, const float* m) {
2865 r = mad(r,m[0], m[2]);
2866 g = mad(g,m[1], m[3]);
2867 }
STAGE(matrix_2x3,const float * m)2868 STAGE(matrix_2x3, const float* m) {
2869 auto R = mad(r,m[0], mad(g,m[1], m[2])),
2870 G = mad(r,m[3], mad(g,m[4], m[5]));
2871 r = R;
2872 g = G;
2873 }
STAGE(matrix_3x3,const float * m)2874 STAGE(matrix_3x3, const float* m) {
2875 auto R = mad(r,m[0], mad(g,m[3], b*m[6])),
2876 G = mad(r,m[1], mad(g,m[4], b*m[7])),
2877 B = mad(r,m[2], mad(g,m[5], b*m[8]));
2878 r = R;
2879 g = G;
2880 b = B;
2881 }
STAGE(matrix_3x4,const float * m)2882 STAGE(matrix_3x4, const float* m) {
2883 auto R = mad(r,m[0], mad(g,m[3], mad(b,m[6], m[ 9]))),
2884 G = mad(r,m[1], mad(g,m[4], mad(b,m[7], m[10]))),
2885 B = mad(r,m[2], mad(g,m[5], mad(b,m[8], m[11])));
2886 r = R;
2887 g = G;
2888 b = B;
2889 }
STAGE(matrix_4x5,const float * m)2890 STAGE(matrix_4x5, const float* m) {
2891 auto R = mad(r,m[ 0], mad(g,m[ 1], mad(b,m[ 2], mad(a,m[ 3], m[ 4])))),
2892 G = mad(r,m[ 5], mad(g,m[ 6], mad(b,m[ 7], mad(a,m[ 8], m[ 9])))),
2893 B = mad(r,m[10], mad(g,m[11], mad(b,m[12], mad(a,m[13], m[14])))),
2894 A = mad(r,m[15], mad(g,m[16], mad(b,m[17], mad(a,m[18], m[19]))));
2895 r = R;
2896 g = G;
2897 b = B;
2898 a = A;
2899 }
STAGE(matrix_4x3,const float * m)2900 STAGE(matrix_4x3, const float* m) {
2901 auto X = r,
2902 Y = g;
2903
2904 r = mad(X, m[0], mad(Y, m[4], m[ 8]));
2905 g = mad(X, m[1], mad(Y, m[5], m[ 9]));
2906 b = mad(X, m[2], mad(Y, m[6], m[10]));
2907 a = mad(X, m[3], mad(Y, m[7], m[11]));
2908 }
STAGE(matrix_perspective,const float * m)2909 STAGE(matrix_perspective, const float* m) {
2910 // N.B. Unlike the other matrix_ stages, this matrix is row-major.
2911 auto R = mad(r,m[0], mad(g,m[1], m[2])),
2912 G = mad(r,m[3], mad(g,m[4], m[5])),
2913 Z = mad(r,m[6], mad(g,m[7], m[8]));
2914 r = R * rcp_precise(Z);
2915 g = G * rcp_precise(Z);
2916 }
2917
gradient_lookup(const SkRasterPipeline_GradientCtx * c,U32 idx,F t,F * r,F * g,F * b,F * a)2918 SI void gradient_lookup(const SkRasterPipeline_GradientCtx* c, U32 idx, F t,
2919 F* r, F* g, F* b, F* a) {
2920 F fr, br, fg, bg, fb, bb, fa, ba;
2921 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
2922 if (c->stopCount <=8) {
2923 fr = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), idx);
2924 br = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), idx);
2925 fg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), idx);
2926 bg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), idx);
2927 fb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), idx);
2928 bb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), idx);
2929 fa = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), idx);
2930 ba = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), idx);
2931 } else
2932 #endif
2933 {
2934 fr = gather(c->fs[0], idx);
2935 br = gather(c->bs[0], idx);
2936 fg = gather(c->fs[1], idx);
2937 bg = gather(c->bs[1], idx);
2938 fb = gather(c->fs[2], idx);
2939 bb = gather(c->bs[2], idx);
2940 fa = gather(c->fs[3], idx);
2941 ba = gather(c->bs[3], idx);
2942 }
2943
2944 *r = mad(t, fr, br);
2945 *g = mad(t, fg, bg);
2946 *b = mad(t, fb, bb);
2947 *a = mad(t, fa, ba);
2948 }
2949
STAGE(evenly_spaced_gradient,const SkRasterPipeline_GradientCtx * c)2950 STAGE(evenly_spaced_gradient, const SkRasterPipeline_GradientCtx* c) {
2951 auto t = r;
2952 auto idx = trunc_(t * (c->stopCount-1));
2953 gradient_lookup(c, idx, t, &r, &g, &b, &a);
2954 }
2955
STAGE(gradient,const SkRasterPipeline_GradientCtx * c)2956 STAGE(gradient, const SkRasterPipeline_GradientCtx* c) {
2957 auto t = r;
2958 U32 idx = 0;
2959
2960 // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop.
2961 for (size_t i = 1; i < c->stopCount; i++) {
2962 idx += if_then_else(t >= c->ts[i], U32(1), U32(0));
2963 }
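    // idx now counts how many stop positions ts[1..stopCount-1] are <= t, selecting the
    // (f,b) pair for the segment of the gradient that contains t.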
2964
2965 gradient_lookup(c, idx, t, &r, &g, &b, &a);
2966 }
2967
STAGE(evenly_spaced_2_stop_gradient,const SkRasterPipeline_EvenlySpaced2StopGradientCtx * c)2968 STAGE(evenly_spaced_2_stop_gradient, const SkRasterPipeline_EvenlySpaced2StopGradientCtx* c) {
2969 auto t = r;
2970 r = mad(t, c->f[0], c->b[0]);
2971 g = mad(t, c->f[1], c->b[1]);
2972 b = mad(t, c->f[2], c->b[2]);
2973 a = mad(t, c->f[3], c->b[3]);
2974 }
2975
STAGE(xy_to_unit_angle,NoCtx)2976 STAGE(xy_to_unit_angle, NoCtx) {
2977 F X = r,
2978 Y = g;
2979 F xabs = abs_(X),
2980 yabs = abs_(Y);
2981
2982 F slope = min(xabs, yabs)/max(xabs, yabs);
2983 F s = slope * slope;
2984
2985 // Use a 7th degree polynomial to approximate atan.
2986 // This was generated using sollya.gforge.inria.fr.
2987 // A float optimized polynomial was generated using the following command.
2988 // P1 = fpminimax((1/(2*Pi))*atan(x),[|1,3,5,7|],[|24...|],[2^(-40),1],relative);
2989 F phi = slope
2990 * (0.15912117063999176025390625f + s
2991 * (-5.185396969318389892578125e-2f + s
2992 * (2.476101927459239959716796875e-2f + s
2993 * (-7.0547382347285747528076171875e-3f))));
2994
2995 phi = if_then_else(xabs < yabs, 1.0f/4.0f - phi, phi);
2996 phi = if_then_else(X < 0.0f , 1.0f/2.0f - phi, phi);
2997 phi = if_then_else(Y < 0.0f , 1.0f - phi , phi);
2998 phi = if_then_else(phi != phi , 0 , phi); // Check for NaN.
2999 r = phi;
3000 }
3001
STAGE(xy_to_radius,NoCtx)3002 STAGE(xy_to_radius, NoCtx) {
3003 F X2 = r * r,
3004 Y2 = g * g;
3005 r = sqrt_(X2 + Y2);
3006 }
3007
3008 // Please see https://skia.org/dev/design/conical for how our 2pt conical shader works.
3009
STAGE(negate_x, NoCtx) { r = -r; }
3011
STAGE(xy_to_2pt_conical_strip, const SkRasterPipeline_2PtConicalCtx* ctx) {
3013 F x = r, y = g, &t = r;
3014 t = x + sqrt_(ctx->fP0 - y*y); // ctx->fP0 = r0 * r0
3015 }
3016
STAGE(xy_to_2pt_conical_focal_on_circle, NoCtx) {
3018 F x = r, y = g, &t = r;
3019 t = x + y*y / x; // (x^2 + y^2) / x
3020 }
3021
STAGE(xy_to_2pt_conical_well_behaved, const SkRasterPipeline_2PtConicalCtx* ctx) {
3023 F x = r, y = g, &t = r;
3024 t = sqrt_(x*x + y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
3025 }
3026
STAGE(xy_to_2pt_conical_greater, const SkRasterPipeline_2PtConicalCtx* ctx) {
3028 F x = r, y = g, &t = r;
3029 t = sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
3030 }
3031
STAGE(xy_to_2pt_conical_smaller, const SkRasterPipeline_2PtConicalCtx* ctx) {
3033 F x = r, y = g, &t = r;
3034 t = -sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
3035 }
3036
STAGE(alter_2pt_conical_compensate_focal, const SkRasterPipeline_2PtConicalCtx* ctx) {
3038 F& t = r;
3039 t = t + ctx->fP1; // ctx->fP1 = f
3040 }
3041
STAGE(alter_2pt_conical_unswap, NoCtx) {
3043 F& t = r;
3044 t = 1 - t;
3045 }
3046
STAGE(mask_2pt_conical_nan, SkRasterPipeline_2PtConicalCtx* c) {
3048 F& t = r;
3049 auto is_degenerate = (t != t); // NaN
3050 t = if_then_else(is_degenerate, F(0), t);
3051 sk_unaligned_store(&c->fMask, cond_to_mask(!is_degenerate));
3052 }
3053
STAGE(mask_2pt_conical_degenerates, SkRasterPipeline_2PtConicalCtx* c) {
3055 F& t = r;
3056 auto is_degenerate = (t <= 0) | (t != t);
3057 t = if_then_else(is_degenerate, F(0), t);
3058 sk_unaligned_store(&c->fMask, cond_to_mask(!is_degenerate));
3059 }
3060
STAGE(apply_vector_mask, const uint32_t* ctx) {
3062 const U32 mask = sk_unaligned_load<U32>(ctx);
3063 r = sk_bit_cast<F>(sk_bit_cast<U32>(r) & mask);
3064 g = sk_bit_cast<F>(sk_bit_cast<U32>(g) & mask);
3065 b = sk_bit_cast<F>(sk_bit_cast<U32>(b) & mask);
3066 a = sk_bit_cast<F>(sk_bit_cast<U32>(a) & mask);
3067 }
3068
SI void save_xy(F* r, F* g, SkRasterPipeline_SamplerCtx* c) {
3070 // Whether bilinear or bicubic, all sample points are at the same fractional offset (fx,fy).
3071 // They're either the 4 corners of a logical 1x1 pixel or the 16 corners of a 3x3 grid
3072 // surrounding (x,y) at (0.5,0.5) off-center.
3073 F fx = fract(*r + 0.5f),
3074 fy = fract(*g + 0.5f);
3075
3076 // Samplers will need to load x and fx, or y and fy.
3077 sk_unaligned_store(c->x, *r);
3078 sk_unaligned_store(c->y, *g);
3079 sk_unaligned_store(c->fx, fx);
3080 sk_unaligned_store(c->fy, fy);
3081 }
3082
STAGE(accumulate, const SkRasterPipeline_SamplerCtx* c) {
3084 // Bilinear and bicubic filters are both separable, so we produce independent contributions
3085 // from x and y, multiplying them together here to get each pixel's total scale factor.
3086 auto scale = sk_unaligned_load<F>(c->scalex)
3087 * sk_unaligned_load<F>(c->scaley);
3088 dr = mad(scale, r, dr);
3089 dg = mad(scale, g, dg);
3090 db = mad(scale, b, db);
3091 da = mad(scale, a, da);
3092 }
3093
3094 // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
3095 // are combined in direct proportion to their area overlapping that logical query pixel.
3096 // At positive offsets, the x-axis contribution to that rectangle is fx, or (1-fx) at negative x.
3097 // The y-axis is symmetric.
3098
3099 template <int kScale>
SI void bilinear_x(SkRasterPipeline_SamplerCtx* ctx, F* x) {
3101 *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
3102 F fx = sk_unaligned_load<F>(ctx->fx);
3103
3104 F scalex;
3105 if (kScale == -1) { scalex = 1.0f - fx; }
3106 if (kScale == +1) { scalex = fx; }
3107 sk_unaligned_store(ctx->scalex, scalex);
3108 }
3109 template <int kScale>
SI void bilinear_y(SkRasterPipeline_SamplerCtx* ctx, F* y) {
3111 *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
3112 F fy = sk_unaligned_load<F>(ctx->fy);
3113
3114 F scaley;
3115 if (kScale == -1) { scaley = 1.0f - fy; }
3116 if (kScale == +1) { scaley = fy; }
3117 sk_unaligned_store(ctx->scaley, scaley);
3118 }
3119
STAGE(bilinear_setup, SkRasterPipeline_SamplerCtx* ctx) {
3121 save_xy(&r, &g, ctx);
3122 // Init for accumulate
3123 dr = dg = db = da = 0;
3124 }
3125
STAGE(bilinear_nx, SkRasterPipeline_SamplerCtx* ctx) { bilinear_x<-1>(ctx, &r); }
STAGE(bilinear_px, SkRasterPipeline_SamplerCtx* ctx) { bilinear_x<+1>(ctx, &r); }
STAGE(bilinear_ny, SkRasterPipeline_SamplerCtx* ctx) { bilinear_y<-1>(ctx, &g); }
STAGE(bilinear_py, SkRasterPipeline_SamplerCtx* ctx) { bilinear_y<+1>(ctx, &g); }
3130
3131
3132 // In bicubic interpolation, the 16 pixels and +/- 0.5 and +/- 1.5 offsets from the sample
3133 // pixel center are combined with a non-uniform cubic filter, with higher values near the center.
3134 //
3135 // This helper computes the total weight along one axis (our bicubic filter is separable), given one
3136 // column of the sampling matrix, and a fractional pixel offset. See SkCubicResampler for details.
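// Equivalently, bicubic_wts() below evaluates the cubic A + B*t + C*t^2 + D*t^3 using Horner's rule.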
3137
SI F bicubic_wts(F t, float A, float B, float C, float D) {
3139 return mad(t, mad(t, mad(t, D, C), B), A);
3140 }
3141
3142 template <int kScale>
SI void bicubic_x(SkRasterPipeline_SamplerCtx* ctx, F* x) {
3144 *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
3145
3146 F scalex;
3147 if (kScale == -3) { scalex = sk_unaligned_load<F>(ctx->wx[0]); }
3148 if (kScale == -1) { scalex = sk_unaligned_load<F>(ctx->wx[1]); }
3149 if (kScale == +1) { scalex = sk_unaligned_load<F>(ctx->wx[2]); }
3150 if (kScale == +3) { scalex = sk_unaligned_load<F>(ctx->wx[3]); }
3151 sk_unaligned_store(ctx->scalex, scalex);
3152 }
3153 template <int kScale>
SI void bicubic_y(SkRasterPipeline_SamplerCtx* ctx, F* y) {
3155 *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
3156
3157 F scaley;
3158 if (kScale == -3) { scaley = sk_unaligned_load<F>(ctx->wy[0]); }
3159 if (kScale == -1) { scaley = sk_unaligned_load<F>(ctx->wy[1]); }
3160 if (kScale == +1) { scaley = sk_unaligned_load<F>(ctx->wy[2]); }
3161 if (kScale == +3) { scaley = sk_unaligned_load<F>(ctx->wy[3]); }
3162 sk_unaligned_store(ctx->scaley, scaley);
3163 }
3164
STAGE(bicubic_setup, SkRasterPipeline_SamplerCtx* ctx) {
3166 save_xy(&r, &g, ctx);
3167
3168 const float* w = ctx->weights;
3169
3170 F fx = sk_unaligned_load<F>(ctx->fx);
3171 sk_unaligned_store(ctx->wx[0], bicubic_wts(fx, w[0], w[4], w[ 8], w[12]));
3172 sk_unaligned_store(ctx->wx[1], bicubic_wts(fx, w[1], w[5], w[ 9], w[13]));
3173 sk_unaligned_store(ctx->wx[2], bicubic_wts(fx, w[2], w[6], w[10], w[14]));
3174 sk_unaligned_store(ctx->wx[3], bicubic_wts(fx, w[3], w[7], w[11], w[15]));
3175
3176 F fy = sk_unaligned_load<F>(ctx->fy);
3177 sk_unaligned_store(ctx->wy[0], bicubic_wts(fy, w[0], w[4], w[ 8], w[12]));
3178 sk_unaligned_store(ctx->wy[1], bicubic_wts(fy, w[1], w[5], w[ 9], w[13]));
3179 sk_unaligned_store(ctx->wy[2], bicubic_wts(fy, w[2], w[6], w[10], w[14]));
3180 sk_unaligned_store(ctx->wy[3], bicubic_wts(fy, w[3], w[7], w[11], w[15]));
3181
3182 // Init for accumulate
3183 dr = dg = db = da = 0;
3184 }
3185
STAGE(bicubic_n3x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<-3>(ctx, &r); }
STAGE(bicubic_n1x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<-1>(ctx, &r); }
STAGE(bicubic_p1x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<+1>(ctx, &r); }
STAGE(bicubic_p3x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<+3>(ctx, &r); }

STAGE(bicubic_n3y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<-3>(ctx, &g); }
STAGE(bicubic_n1y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<-1>(ctx, &g); }
STAGE(bicubic_p1y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<+1>(ctx, &g); }
STAGE(bicubic_p3y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<+3>(ctx, &g); }
3195
STAGE(mipmap_linear_init, SkRasterPipeline_MipmapCtx* ctx) {
3197 sk_unaligned_store(ctx->x, r);
3198 sk_unaligned_store(ctx->y, g);
3199 }
3200
STAGE(mipmap_linear_update, SkRasterPipeline_MipmapCtx* ctx) {
3202 sk_unaligned_store(ctx->r, r);
3203 sk_unaligned_store(ctx->g, g);
3204 sk_unaligned_store(ctx->b, b);
3205 sk_unaligned_store(ctx->a, a);
3206
3207 r = sk_unaligned_load<F>(ctx->x) * ctx->scaleX;
3208 g = sk_unaligned_load<F>(ctx->y) * ctx->scaleY;
3209 }
3210
STAGE(mipmap_linear_finish, SkRasterPipeline_MipmapCtx* ctx) {
3212 r = lerp(sk_unaligned_load<F>(ctx->r), r, ctx->lowerWeight);
3213 g = lerp(sk_unaligned_load<F>(ctx->g), g, ctx->lowerWeight);
3214 b = lerp(sk_unaligned_load<F>(ctx->b), b, ctx->lowerWeight);
3215 a = lerp(sk_unaligned_load<F>(ctx->a), a, ctx->lowerWeight);
3216 }
3217
STAGE(callback, SkRasterPipeline_CallbackCtx* c) {
3219 store4(c->rgba,0, r,g,b,a);
3220 c->fn(c, tail ? tail : N);
3221 load4(c->read_from,0, &r,&g,&b,&a);
3222 }
3223
3224 // All control flow stages used by SkSL maintain some state in the common registers:
3225 // dr: condition mask
3226 // dg: loop mask
3227 // db: return mask
3228 // da: execution mask (intersection of all three masks)
3229 // After updating dr/dg/db, you must invoke update_execution_mask().
3230 #define execution_mask() sk_bit_cast<I32>(da)
3231 #define update_execution_mask() da = sk_bit_cast<F>(sk_bit_cast<I32>(dr) & \
3232 sk_bit_cast<I32>(dg) & \
3233 sk_bit_cast<I32>(db))
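// For example (illustrative 8-lane masks): dr = 1111'1111, dg = 1111'0000, db = 1111'1100
// intersect to da = 1111'0000, so only lanes whose bits survive all three masks stay active.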
3234
STAGE_TAIL(init_lane_masks, NoCtx) {
3236 uint32_t iota[] = {0,1,2,3,4,5,6,7};
3237 I32 mask = tail ? cond_to_mask(sk_unaligned_load<U32>(iota) < tail) : I32(~0);
3238 dr = dg = db = da = sk_bit_cast<F>(mask);
3239 }
3240
STAGE_TAIL(load_condition_mask, F* ctx) {
3242 dr = sk_unaligned_load<F>(ctx);
3243 update_execution_mask();
3244 }
3245
STAGE_TAIL(store_condition_mask, F* ctx) {
3247 sk_unaligned_store(ctx, dr);
3248 }
3249
STAGE_TAIL(merge_condition_mask, I32* ptr) {
3251 // Set the condition-mask to the intersection of two adjacent masks at the pointer.
3252 dr = sk_bit_cast<F>(ptr[0] & ptr[1]);
3253 update_execution_mask();
3254 }
3255
STAGE_TAIL(load_loop_mask, F* ctx) {
3257 dg = sk_unaligned_load<F>(ctx);
3258 update_execution_mask();
3259 }
3260
STAGE_TAIL(store_loop_mask, F* ctx) {
3262 sk_unaligned_store(ctx, dg);
3263 }
3264
STAGE_TAIL(mask_off_loop_mask, NoCtx) {
3266 // We encountered a break statement. If a lane was active, it should be masked off now, and stay
3267 // masked-off until the termination of the loop.
3268 dg = sk_bit_cast<F>(sk_bit_cast<I32>(dg) & ~execution_mask());
3269 update_execution_mask();
3270 }
3271
STAGE_TAIL(reenable_loop_mask, I32* ptr) {
3273 // Set the loop-mask to the union of the current loop-mask with the mask at the pointer.
3274 dg = sk_bit_cast<F>(sk_bit_cast<I32>(dg) | ptr[0]);
3275 update_execution_mask();
3276 }
3277
STAGE_TAIL(merge_loop_mask, I32* ptr) {
3279 // Set the loop-mask to the intersection of the current loop-mask with the mask at the pointer.
3280 // (Note: this behavior subtly differs from merge_condition_mask!)
3281 dg = sk_bit_cast<F>(sk_bit_cast<I32>(dg) & ptr[0]);
3282 update_execution_mask();
3283 }
3284
STAGE_TAIL(case_op, SkRasterPipeline_CaseOpCtx* ctx) {
3286 // Check each lane to see if the case value matches the expectation.
3287 I32* actualValue = (I32*)ctx->ptr;
3288 I32 caseMatches = cond_to_mask(*actualValue == ctx->expectedValue);
3289
3290 // In lanes where we found a match, enable the loop mask...
3291 dg = sk_bit_cast<F>(sk_bit_cast<I32>(dg) | caseMatches);
3292 update_execution_mask();
3293
3294 // ... and clear the default-case mask.
3295 I32* defaultMask = actualValue + 1;
3296 *defaultMask &= ~caseMatches;
3297 }
3298
STAGE_TAIL(load_return_mask, F* ctx) {
3300 db = sk_unaligned_load<F>(ctx);
3301 update_execution_mask();
3302 }
3303
STAGE_TAIL(store_return_mask, F* ctx) {
3305 sk_unaligned_store(ctx, db);
3306 }
3307
STAGE_TAIL(mask_off_return_mask, NoCtx) {
3309 // We encountered a return statement. If a lane was active, it should be masked off now, and
3310 // stay masked-off until the end of the function.
3311 db = sk_bit_cast<F>(sk_bit_cast<I32>(db) & ~execution_mask());
3312 update_execution_mask();
3313 }
3314
STAGE_BRANCH(branch_if_any_active_lanes, SkRasterPipeline_BranchCtx* ctx) {
3316 return any(execution_mask()) ? ctx->offset : 1;
3317 }
3318
STAGE_BRANCH(branch_if_no_active_lanes, SkRasterPipeline_BranchCtx* ctx) {
3320 return any(execution_mask()) ? 1 : ctx->offset;
3321 }
3322
STAGE_BRANCH(jump, SkRasterPipeline_BranchCtx* ctx) {
3324 return ctx->offset;
3325 }
3326
STAGE_BRANCH(branch_if_no_active_lanes_eq, SkRasterPipeline_BranchIfEqualCtx* ctx) {
3328 // Compare each lane against the expected value...
3329 I32 match = cond_to_mask(*(I32*)ctx->ptr == ctx->value);
3330 // ... but mask off lanes that aren't executing.
3331 match &= execution_mask();
3332 // If any lanes matched, don't take the branch.
3333 return any(match) ? 1 : ctx->offset;
3334 }
3335
STAGE_TAIL(zero_slot_unmasked, F* dst) {
    // We don't even bother masking off the tail; we're filling slots, not the destination surface.
    sk_bzero(dst, sizeof(F) * 1);
}
STAGE_TAIL(zero_2_slots_unmasked, F* dst) {
    sk_bzero(dst, sizeof(F) * 2);
}
STAGE_TAIL(zero_3_slots_unmasked, F* dst) {
    sk_bzero(dst, sizeof(F) * 3);
}
STAGE_TAIL(zero_4_slots_unmasked, F* dst) {
    sk_bzero(dst, sizeof(F) * 4);
}
3349
STAGE_TAIL(copy_constant, SkRasterPipeline_BinaryOpCtx* ctx) {
3351 const float* src = ctx->src;
3352 F* dst = (F*)ctx->dst;
3353 dst[0] = src[0];
3354 }
STAGE_TAIL(copy_2_constants, SkRasterPipeline_BinaryOpCtx* ctx) {
3356 const float* src = ctx->src;
3357 F* dst = (F*)ctx->dst;
3358 dst[0] = src[0];
3359 dst[1] = src[1];
3360 }
STAGE_TAIL(copy_3_constants, SkRasterPipeline_BinaryOpCtx* ctx) {
3362 const float* src = ctx->src;
3363 F* dst = (F*)ctx->dst;
3364 dst[0] = src[0];
3365 dst[1] = src[1];
3366 dst[2] = src[2];
3367 }
STAGE_TAIL(copy_4_constants, SkRasterPipeline_BinaryOpCtx* ctx) {
3369 const float* src = ctx->src;
3370 F* dst = (F*)ctx->dst;
3371 dst[0] = src[0];
3372 dst[1] = src[1];
3373 dst[2] = src[2];
3374 dst[3] = src[3];
3375 }
3376
STAGE_TAIL(copy_slot_unmasked, SkRasterPipeline_BinaryOpCtx* ctx) {
    // We don't even bother masking off the tail; we're filling slots, not the destination surface.
    memcpy(ctx->dst, ctx->src, sizeof(F) * 1);
}
STAGE_TAIL(copy_2_slots_unmasked, SkRasterPipeline_BinaryOpCtx* ctx) {
    memcpy(ctx->dst, ctx->src, sizeof(F) * 2);
}
STAGE_TAIL(copy_3_slots_unmasked, SkRasterPipeline_BinaryOpCtx* ctx) {
    memcpy(ctx->dst, ctx->src, sizeof(F) * 3);
}
STAGE_TAIL(copy_4_slots_unmasked, SkRasterPipeline_BinaryOpCtx* ctx) {
    memcpy(ctx->dst, ctx->src, sizeof(F) * 4);
}
3390
3391 template <int NumSlots>
SI void copy_n_slots_masked_fn(SkRasterPipeline_BinaryOpCtx* ctx, I32 mask) {
3393 if (any(mask)) {
3394 // Get pointers to our slots.
3395 F* dst = (F*)ctx->dst;
3396 F* src = (F*)ctx->src;
3397
3398 // Mask off and copy slots.
3399 for (int count = 0; count < NumSlots; ++count) {
3400 *dst = if_then_else(mask, *src, *dst);
3401 dst += 1;
3402 src += 1;
3403 }
3404 }
3405 }
3406
STAGE_TAIL(copy_slot_masked, SkRasterPipeline_BinaryOpCtx* ctx) {
    copy_n_slots_masked_fn<1>(ctx, execution_mask());
}
STAGE_TAIL(copy_2_slots_masked, SkRasterPipeline_BinaryOpCtx* ctx) {
    copy_n_slots_masked_fn<2>(ctx, execution_mask());
}
STAGE_TAIL(copy_3_slots_masked, SkRasterPipeline_BinaryOpCtx* ctx) {
    copy_n_slots_masked_fn<3>(ctx, execution_mask());
}
STAGE_TAIL(copy_4_slots_masked, SkRasterPipeline_BinaryOpCtx* ctx) {
    copy_n_slots_masked_fn<4>(ctx, execution_mask());
}
3419
3420 template <int LoopCount>
SI void shuffle_fn(F* dst, uint16_t* offsets, int numSlots) {
3422 F scratch[16];
3423 std::byte* src = (std::byte*)dst;
3424 for (int count = 0; count < LoopCount; ++count) {
3425 scratch[count] = *(F*)(src + offsets[count]);
3426 }
3427 // Surprisingly, this switch generates significantly better code than a memcpy (on x86-64) when
3428 // the number of slots is unknown at compile time, and generates roughly identical code when the
3429 // number of slots is hardcoded. Using a switch allows `scratch` to live in ymm0-ymm15 instead
3430 // of being written out to the stack and then read back in. Also, the intrinsic memcpy assumes
3431 // that `numSlots` could be arbitrarily large, and so it emits more code than we need.
3432 switch (numSlots) {
3433 case 16: dst[15] = scratch[15]; [[fallthrough]];
3434 case 15: dst[14] = scratch[14]; [[fallthrough]];
3435 case 14: dst[13] = scratch[13]; [[fallthrough]];
3436 case 13: dst[12] = scratch[12]; [[fallthrough]];
3437 case 12: dst[11] = scratch[11]; [[fallthrough]];
3438 case 11: dst[10] = scratch[10]; [[fallthrough]];
3439 case 10: dst[ 9] = scratch[ 9]; [[fallthrough]];
3440 case 9: dst[ 8] = scratch[ 8]; [[fallthrough]];
3441 case 8: dst[ 7] = scratch[ 7]; [[fallthrough]];
3442 case 7: dst[ 6] = scratch[ 6]; [[fallthrough]];
3443 case 6: dst[ 5] = scratch[ 5]; [[fallthrough]];
3444 case 5: dst[ 4] = scratch[ 4]; [[fallthrough]];
3445 case 4: dst[ 3] = scratch[ 3]; [[fallthrough]];
3446 case 3: dst[ 2] = scratch[ 2]; [[fallthrough]];
3447 case 2: dst[ 1] = scratch[ 1]; [[fallthrough]];
3448 case 1: dst[ 0] = scratch[ 0];
3449 }
3450 }
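// For example (hypothetical encoding): a three-slot swizzle like ".yxz" could be passed as
// offsets = { sizeof(F), 0, 2*sizeof(F) }, since each offset is a byte offset into the slot array.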
3451
STAGE_TAIL(swizzle_1, SkRasterPipeline_SwizzleCtx* ctx) {
    shuffle_fn<1>((F*)ctx->ptr, ctx->offsets, 1);
}
STAGE_TAIL(swizzle_2, SkRasterPipeline_SwizzleCtx* ctx) {
    shuffle_fn<2>((F*)ctx->ptr, ctx->offsets, 2);
}
STAGE_TAIL(swizzle_3, SkRasterPipeline_SwizzleCtx* ctx) {
    shuffle_fn<3>((F*)ctx->ptr, ctx->offsets, 3);
}
STAGE_TAIL(swizzle_4, SkRasterPipeline_SwizzleCtx* ctx) {
    shuffle_fn<4>((F*)ctx->ptr, ctx->offsets, 4);
}
STAGE_TAIL(shuffle, SkRasterPipeline_ShuffleCtx* ctx) {
    shuffle_fn<16>((F*)ctx->ptr, ctx->offsets, ctx->count);
}
3467
3468 template <int NumSlots>
SI void swizzle_copy_masked_fn(F* dst, const F* src, uint16_t* offsets, I32 mask) {
3470 std::byte* dstB = (std::byte*)dst;
3471 for (int count = 0; count < NumSlots; ++count) {
3472 F* dstS = (F*)(dstB + *offsets);
3473 *dstS = if_then_else(mask, *src, *dstS);
3474 offsets += 1;
3475 src += 1;
3476 }
3477 }
3478
STAGE_TAIL(swizzle_copy_slot_masked, SkRasterPipeline_SwizzleCopyCtx* ctx) {
    swizzle_copy_masked_fn<1>((F*)ctx->dst, (F*)ctx->src, ctx->offsets, execution_mask());
}
STAGE_TAIL(swizzle_copy_2_slots_masked, SkRasterPipeline_SwizzleCopyCtx* ctx) {
    swizzle_copy_masked_fn<2>((F*)ctx->dst, (F*)ctx->src, ctx->offsets, execution_mask());
}
STAGE_TAIL(swizzle_copy_3_slots_masked, SkRasterPipeline_SwizzleCopyCtx* ctx) {
    swizzle_copy_masked_fn<3>((F*)ctx->dst, (F*)ctx->src, ctx->offsets, execution_mask());
}
STAGE_TAIL(swizzle_copy_4_slots_masked, SkRasterPipeline_SwizzleCopyCtx* ctx) {
    swizzle_copy_masked_fn<4>((F*)ctx->dst, (F*)ctx->src, ctx->offsets, execution_mask());
}
3491
STAGE_TAIL(copy_from_indirect_masked, SkRasterPipeline_CopyIndirectCtx* ctx) {
3493 // Clamp the indirect offsets to stay within the limit.
3494 U32 offsets = *(U32*)ctx->indirectOffset;
3495 offsets = min(offsets, ctx->indirectLimit);
3496
3497 // Scale up the offsets to account for the N lanes per value.
3498 offsets *= N;
3499
3500 // Adjust the offsets forward so that they fetch from the correct lane.
3501 static constexpr uint32_t iota[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
3502 offsets += sk_unaligned_load<I32>(iota);
3503
3504 // Use gather to perform indirect lookups; write the results into `dst`.
3505 const float* src = ctx->src;
3506 F* dst = (F*)ctx->dst;
3507 F* end = dst + ctx->slots;
3508 I32 mask = execution_mask();
3509 do {
3510 *dst = if_then_else(mask, gather(src, offsets), *dst);
3511 dst += 1;
3512 src += N;
3513 } while (dst != end);
3514 }
3515
3516 // Unary operations take a single input, and overwrite it with their output.
3517 // Unlike binary or ternary operations, we provide variations of 1-4 slots, but don't provide
3518 // an arbitrary-width "n-slot" variation; the Builder can chain together longer sequences manually.
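// For example, the DECLARE_UNARY_* macros below expand `abs_2_floats` into a stage that applies
// abs_fn to dst[0] and dst[1] in place.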
3519 template <typename T, void (*ApplyFn)(T*)>
SI void apply_adjacent_unary(T* dst, T* end) {
3521 do {
3522 ApplyFn(dst);
3523 dst += 1;
3524 } while (dst != end);
3525 }
3526
SI void bitwise_not_fn(I32* dst) {
3528 *dst = ~*dst;
3529 }
3530
3531 #if defined(JUMPER_IS_SCALAR)
3532 template <typename T>
SI void cast_to_float_from_fn(T* dst) {
    *dst = sk_bit_cast<T>((F)*dst);
}
SI void cast_to_int_from_fn(F* dst) {
    *dst = sk_bit_cast<F>((I32)*dst);
}
SI void cast_to_uint_from_fn(F* dst) {
    *dst = sk_bit_cast<F>((U32)*dst);
}
3542 #else
3543 template <typename T>
SI void cast_to_float_from_fn(T* dst) {
    *dst = sk_bit_cast<T>(__builtin_convertvector(*dst, F));
}
SI void cast_to_int_from_fn(F* dst) {
    *dst = sk_bit_cast<F>(__builtin_convertvector(*dst, I32));
}
SI void cast_to_uint_from_fn(F* dst) {
    *dst = sk_bit_cast<F>(__builtin_convertvector(*dst, U32));
}
3553 #endif
3554
3555 template <typename T>
SI void abs_fn(T* dst) {
    *dst = abs_(*dst);
}

SI void floor_fn(F* dst) {
    *dst = floor_(*dst);
}

SI void ceil_fn(F* dst) {
    *dst = ceil_(*dst);
}
3567
3568 #define DECLARE_UNARY_FLOAT(name) \
3569 STAGE_TAIL(name##_float, F* dst) { apply_adjacent_unary<F, &name##_fn>(dst, dst + 1); } \
3570 STAGE_TAIL(name##_2_floats, F* dst) { apply_adjacent_unary<F, &name##_fn>(dst, dst + 2); } \
3571 STAGE_TAIL(name##_3_floats, F* dst) { apply_adjacent_unary<F, &name##_fn>(dst, dst + 3); } \
3572 STAGE_TAIL(name##_4_floats, F* dst) { apply_adjacent_unary<F, &name##_fn>(dst, dst + 4); }
3573
3574 #define DECLARE_UNARY_INT(name) \
3575 STAGE_TAIL(name##_int, I32* dst) { apply_adjacent_unary<I32, &name##_fn>(dst, dst + 1); } \
3576 STAGE_TAIL(name##_2_ints, I32* dst) { apply_adjacent_unary<I32, &name##_fn>(dst, dst + 2); } \
3577 STAGE_TAIL(name##_3_ints, I32* dst) { apply_adjacent_unary<I32, &name##_fn>(dst, dst + 3); } \
3578 STAGE_TAIL(name##_4_ints, I32* dst) { apply_adjacent_unary<I32, &name##_fn>(dst, dst + 4); }
3579
3580 #define DECLARE_UNARY_UINT(name) \
3581 STAGE_TAIL(name##_uint, U32* dst) { apply_adjacent_unary<U32, &name##_fn>(dst, dst + 1); } \
3582 STAGE_TAIL(name##_2_uints, U32* dst) { apply_adjacent_unary<U32, &name##_fn>(dst, dst + 2); } \
3583 STAGE_TAIL(name##_3_uints, U32* dst) { apply_adjacent_unary<U32, &name##_fn>(dst, dst + 3); } \
3584 STAGE_TAIL(name##_4_uints, U32* dst) { apply_adjacent_unary<U32, &name##_fn>(dst, dst + 4); }
3585
3586 DECLARE_UNARY_INT(bitwise_not)
DECLARE_UNARY_INT(cast_to_float_from) DECLARE_UNARY_UINT(cast_to_float_from)
3588 DECLARE_UNARY_FLOAT(cast_to_int_from)
3589 DECLARE_UNARY_FLOAT(cast_to_uint_from)
3590 DECLARE_UNARY_FLOAT(abs) DECLARE_UNARY_INT(abs)
3591 DECLARE_UNARY_FLOAT(floor)
3592 DECLARE_UNARY_FLOAT(ceil)
3593
3594 #undef DECLARE_UNARY_FLOAT
3595 #undef DECLARE_UNARY_INT
3596 #undef DECLARE_UNARY_UINT
3597
3598 // For complex unary ops, we only provide a 1-slot version to reduce code bloat.
3599 STAGE_TAIL(sin_float, F* dst) { *dst = sin_(*dst); }
STAGE_TAIL(cos_float, F* dst) { *dst = cos_(*dst); }
STAGE_TAIL(tan_float, F* dst) { *dst = tan_(*dst); }
STAGE_TAIL(atan_float, F* dst) { *dst = atan_(*dst); }
STAGE_TAIL(sqrt_float, F* dst) { *dst = sqrt_(*dst); }
STAGE_TAIL(exp_float, F* dst) { *dst = approx_exp(*dst); }
3605
3606 // Binary operations take two adjacent inputs, and write their output in the first position.
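// For example, `add_2_floats` (declared below) sees slots laid out as {a0, a1, b0, b1} and leaves
// {a0+b0, a1+b1, b0, b1}: the source slots immediately follow the destination slots.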
3607 template <typename T, void (*ApplyFn)(T*, T*)>
SI void apply_adjacent_binary(T* dst, T* src) {
3609 T* end = src;
3610 do {
3611 ApplyFn(dst, src);
3612 dst += 1;
3613 src += 1;
3614 } while (dst != end);
3615 }
3616
3617 template <typename T>
SI void add_fn(T* dst, T* src) {
3619 *dst += *src;
3620 }
3621
3622 template <typename T>
SI void sub_fn(T* dst, T* src) {
3624 *dst -= *src;
3625 }
3626
3627 template <typename T>
SI void mul_fn(T* dst, T* src) {
3629 *dst *= *src;
3630 }
3631
3632 template <typename T>
SI void div_fn(T* dst, T* src) {
3634 *dst /= *src;
3635 }
3636
SI void bitwise_and_fn(I32* dst, I32* src) {
    *dst &= *src;
}

SI void bitwise_or_fn(I32* dst, I32* src) {
    *dst |= *src;
}

SI void bitwise_xor_fn(I32* dst, I32* src) {
    *dst ^= *src;
}
3648
3649 template <typename T>
SI void max_fn(T* dst, T* src) {
    *dst = max(*dst, *src);
}

template <typename T>
SI void min_fn(T* dst, T* src) {
    *dst = min(*dst, *src);
}
3658
3659 template <typename T>
SI void cmplt_fn(T* dst, T* src) {
3661 static_assert(sizeof(T) == sizeof(I32));
3662 I32 result = cond_to_mask(*dst < *src);
3663 memcpy(dst, &result, sizeof(I32));
3664 }
3665
3666 template <typename T>
SI void cmple_fn(T* dst, T* src) {
3668 static_assert(sizeof(T) == sizeof(I32));
3669 I32 result = cond_to_mask(*dst <= *src);
3670 memcpy(dst, &result, sizeof(I32));
3671 }
3672
3673 template <typename T>
SI void cmpeq_fn(T* dst, T* src) {
3675 static_assert(sizeof(T) == sizeof(I32));
3676 I32 result = cond_to_mask(*dst == *src);
3677 memcpy(dst, &result, sizeof(I32));
3678 }
3679
3680 template <typename T>
SI void cmpne_fn(T* dst, T* src) {
3682 static_assert(sizeof(T) == sizeof(I32));
3683 I32 result = cond_to_mask(*dst != *src);
3684 memcpy(dst, &result, sizeof(I32));
3685 }
3686
SI void atan2_fn(F* dst, F* src) {
3688 *dst = atan2_(*dst, *src);
3689 }
3690
SI void pow_fn(F* dst, F* src) {
3692 *dst = approx_powf(*dst, *src);
3693 }
3694
3695 #define DECLARE_N_WAY_BINARY_FLOAT(name) \
3696 STAGE_TAIL(name##_n_floats, SkRasterPipeline_BinaryOpCtx* ctx) { \
3697 apply_adjacent_binary<F, &name##_fn>((F*)ctx->dst, (F*)ctx->src); \
3698 }
3699
3700 #define DECLARE_BINARY_FLOAT(name) \
3701 STAGE_TAIL(name##_float, F* dst) { apply_adjacent_binary<F, &name##_fn>(dst, dst + 1); } \
3702 STAGE_TAIL(name##_2_floats, F* dst) { apply_adjacent_binary<F, &name##_fn>(dst, dst + 2); } \
3703 STAGE_TAIL(name##_3_floats, F* dst) { apply_adjacent_binary<F, &name##_fn>(dst, dst + 3); } \
3704 STAGE_TAIL(name##_4_floats, F* dst) { apply_adjacent_binary<F, &name##_fn>(dst, dst + 4); } \
3705 DECLARE_N_WAY_BINARY_FLOAT(name)
3706
3707 #define DECLARE_N_WAY_BINARY_INT(name) \
3708 STAGE_TAIL(name##_n_ints, SkRasterPipeline_BinaryOpCtx* ctx) { \
3709 apply_adjacent_binary<I32, &name##_fn>((I32*)ctx->dst, (I32*)ctx->src); \
3710 }
3711
3712 #define DECLARE_BINARY_INT(name) \
3713 STAGE_TAIL(name##_int, I32* dst) { apply_adjacent_binary<I32, &name##_fn>(dst, dst + 1); } \
3714 STAGE_TAIL(name##_2_ints, I32* dst) { apply_adjacent_binary<I32, &name##_fn>(dst, dst + 2); } \
3715 STAGE_TAIL(name##_3_ints, I32* dst) { apply_adjacent_binary<I32, &name##_fn>(dst, dst + 3); } \
3716 STAGE_TAIL(name##_4_ints, I32* dst) { apply_adjacent_binary<I32, &name##_fn>(dst, dst + 4); } \
3717 DECLARE_N_WAY_BINARY_INT(name)
3718
3719 #define DECLARE_N_WAY_BINARY_UINT(name) \
3720 STAGE_TAIL(name##_n_uints, SkRasterPipeline_BinaryOpCtx* ctx) { \
3721 apply_adjacent_binary<U32, &name##_fn>((U32*)ctx->dst, (U32*)ctx->src); \
3722 }
3723
3724 #define DECLARE_BINARY_UINT(name) \
3725 STAGE_TAIL(name##_uint, U32* dst) { apply_adjacent_binary<U32, &name##_fn>(dst, dst + 1); } \
3726 STAGE_TAIL(name##_2_uints, U32* dst) { apply_adjacent_binary<U32, &name##_fn>(dst, dst + 2); } \
3727 STAGE_TAIL(name##_3_uints, U32* dst) { apply_adjacent_binary<U32, &name##_fn>(dst, dst + 3); } \
3728 STAGE_TAIL(name##_4_uints, U32* dst) { apply_adjacent_binary<U32, &name##_fn>(dst, dst + 4); } \
3729 DECLARE_N_WAY_BINARY_UINT(name)
3730
3731 // Many ops reuse the int stages when performing uint arithmetic, since they're equivalent on a
3732 // two's-complement machine. (Even multiplication is equivalent in the lower 32 bits.)
DECLARE_BINARY_FLOAT(add) DECLARE_BINARY_INT(add)
3734 DECLARE_BINARY_FLOAT(sub) DECLARE_BINARY_INT(sub)
3735 DECLARE_BINARY_FLOAT(mul) DECLARE_BINARY_INT(mul)
3736 DECLARE_BINARY_FLOAT(div) DECLARE_BINARY_INT(div) DECLARE_BINARY_UINT(div)
3737 DECLARE_BINARY_INT(bitwise_and)
3738 DECLARE_BINARY_INT(bitwise_or)
3739 DECLARE_BINARY_INT(bitwise_xor)
3740 DECLARE_BINARY_FLOAT(min) DECLARE_BINARY_INT(min) DECLARE_BINARY_UINT(min)
3741 DECLARE_BINARY_FLOAT(max) DECLARE_BINARY_INT(max) DECLARE_BINARY_UINT(max)
3742 DECLARE_BINARY_FLOAT(cmplt) DECLARE_BINARY_INT(cmplt) DECLARE_BINARY_UINT(cmplt)
3743 DECLARE_BINARY_FLOAT(cmple) DECLARE_BINARY_INT(cmple) DECLARE_BINARY_UINT(cmple)
3744 DECLARE_BINARY_FLOAT(cmpeq) DECLARE_BINARY_INT(cmpeq)
3745 DECLARE_BINARY_FLOAT(cmpne) DECLARE_BINARY_INT(cmpne)
3746
3747 // Sufficiently complex ops only provide an N-way version, to avoid code bloat from the dedicated
3748 // 1-4 slot versions.
3749 DECLARE_N_WAY_BINARY_FLOAT(atan2)
3750 DECLARE_N_WAY_BINARY_FLOAT(pow)
3751
3752 #undef DECLARE_BINARY_FLOAT
3753 #undef DECLARE_BINARY_INT
3754 #undef DECLARE_BINARY_UINT
3755 #undef DECLARE_N_WAY_BINARY_FLOAT
3756 #undef DECLARE_N_WAY_BINARY_INT
3757 #undef DECLARE_N_WAY_BINARY_UINT
3758
3759 // Dots can be represented with multiply and add ops, but they are so foundational that it's worth
3760 // having dedicated ops.
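// For example, dot_2_floats treats its four slots as {ax, ay, bx, by} and writes the dot product
// ax*bx + ay*by into the first slot.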
3761 STAGE_TAIL(dot_2_floats, F* dst) {
3762 dst[0] = mad(dst[0], dst[2],
3763 dst[1] * dst[3]);
3764 }
3765
STAGE_TAIL(dot_3_floats, F* dst) {
3767 dst[0] = mad(dst[0], dst[3],
3768 mad(dst[1], dst[4],
3769 dst[2] * dst[5]));
3770 }
3771
STAGE_TAIL(dot_4_floats, F* dst) {
3773 dst[0] = mad(dst[0], dst[4],
3774 mad(dst[1], dst[5],
3775 mad(dst[2], dst[6],
3776 dst[3] * dst[7])));
3777 }
3778
3779 // Ternary operations work like binary ops (see immediately above) but take two source inputs.
3780 template <typename T, void (*ApplyFn)(T*, T*, T*)>
SI void apply_adjacent_ternary(T* dst, T* src0, T* src1) {
3782 T* end = src0;
3783 do {
3784 ApplyFn(dst, src0, src1);
3785 dst += 1;
3786 src0 += 1;
3787 src1 += 1;
3788 } while (dst != end);
3789 }
3790
SI void mix_fn(F* a, F* x, F* y) {
3792 // We reorder the arguments here to match lerp's GLSL-style order (interpolation point last).
3793 *a = lerp(*x, *y, *a);
3794 }
3795
SI void mix_fn(I32* a, I32* x, I32* y) {
3797 // We reorder the arguments here to match if_then_else's expected order (y before x).
3798 *a = if_then_else(*a, *y, *x);
3799 }
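// Note on layout (illustrative): in the ternary stages declared below the two sources trail the
// destination, e.g. mix_2_floats reads {a0,a1, x0,x1, y0,y1} and writes lerp(x, y, a) into the
// first two slots.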
3800
3801 #define DECLARE_TERNARY_FLOAT(name) \
3802 STAGE_TAIL(name##_float, F* p) { apply_adjacent_ternary<F, &name##_fn>(p, p+1, p+2); } \
3803 STAGE_TAIL(name##_2_floats, F* p) { apply_adjacent_ternary<F, &name##_fn>(p, p+2, p+4); } \
3804 STAGE_TAIL(name##_3_floats, F* p) { apply_adjacent_ternary<F, &name##_fn>(p, p+3, p+6); } \
3805 STAGE_TAIL(name##_4_floats, F* p) { apply_adjacent_ternary<F, &name##_fn>(p, p+4, p+8); } \
3806 STAGE_TAIL(name##_n_floats, SkRasterPipeline_TernaryOpCtx* ctx) { \
3807 apply_adjacent_ternary<F, &name##_fn>((F*)ctx->dst, (F*)ctx->src0, (F*)ctx->src1); \
3808 }
3809
3810 #define DECLARE_TERNARY_INT(name) \
3811 STAGE_TAIL(name##_int, I32* p) { apply_adjacent_ternary<I32, &name##_fn>(p, p+1, p+2); } \
3812 STAGE_TAIL(name##_2_ints, I32* p) { apply_adjacent_ternary<I32, &name##_fn>(p, p+2, p+4); } \
3813 STAGE_TAIL(name##_3_ints, I32* p) { apply_adjacent_ternary<I32, &name##_fn>(p, p+3, p+6); } \
3814 STAGE_TAIL(name##_4_ints, I32* p) { apply_adjacent_ternary<I32, &name##_fn>(p, p+4, p+8); } \
3815 STAGE_TAIL(name##_n_ints, SkRasterPipeline_TernaryOpCtx* ctx) { \
3816 apply_adjacent_ternary<I32, &name##_fn>((I32*)ctx->dst, (I32*)ctx->src0, (I32*)ctx->src1); \
3817 }
3818
3819 DECLARE_TERNARY_FLOAT(mix)
DECLARE_TERNARY_INT(mix)
3821
3822 #undef DECLARE_TERNARY_FLOAT
3823 #undef DECLARE_TERNARY_INT
3824
3825 STAGE(gauss_a_to_rgba, NoCtx) {
3826 // x = 1 - x;
3827 // exp(-x * x * 4) - 0.018f;
3828 // ... now approximate with quartic
3829 //
3830 const float c4 = -2.26661229133605957031f;
3831 const float c3 = 2.89795351028442382812f;
3832 const float c2 = 0.21345567703247070312f;
3833 const float c1 = 0.15489584207534790039f;
3834 const float c0 = 0.00030726194381713867f;
3835 a = mad(a, mad(a, mad(a, mad(a, c4, c3), c2), c1), c0);
3836 r = a;
3837 g = a;
3838 b = a;
3839 }
3840
3841 // A specialized fused image shader for clamp-x, clamp-y, non-sRGB sampling.
STAGE(bilerp_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
3843 // (cx,cy) are the center of our sample.
3844 F cx = r,
3845 cy = g;
3846
3847 // All sample points are at the same fractional offset (fx,fy).
3848 // They're the 4 corners of a logical 1x1 pixel surrounding (x,y) at (0.5,0.5) offsets.
3849 F fx = fract(cx + 0.5f),
3850 fy = fract(cy + 0.5f);
3851
3852 // We'll accumulate the color of all four samples into {r,g,b,a} directly.
3853 r = g = b = a = 0;
3854
3855 for (float py = -0.5f; py <= +0.5f; py += 1.0f)
3856 for (float px = -0.5f; px <= +0.5f; px += 1.0f) {
3857 // (x,y) are the coordinates of this sample point.
3858 F x = cx + px,
3859 y = cy + py;
3860
3861 // ix_and_ptr() will clamp to the image's bounds for us.
3862 const uint32_t* ptr;
3863 U32 ix = ix_and_ptr(&ptr, ctx, x,y);
3864
3865 F sr,sg,sb,sa;
3866 from_8888(gather(ptr, ix), &sr,&sg,&sb,&sa);
3867
3868 // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
3869 // are combined in direct proportion to their area overlapping that logical query pixel.
3870 // At positive offsets, the x-axis contribution to that rectangle is fx,
3871 // or (1-fx) at negative x. Same deal for y.
3872 F sx = (px > 0) ? fx : 1.0f - fx,
3873 sy = (py > 0) ? fy : 1.0f - fy,
3874 area = sx * sy;
3875
3876 r += sr * area;
3877 g += sg * area;
3878 b += sb * area;
3879 a += sa * area;
3880 }
3881 }
3882
3883 // A specialized fused image shader for clamp-x, clamp-y, non-sRGB sampling.
STAGE(bicubic_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
3885 // (cx,cy) are the center of our sample.
3886 F cx = r,
3887 cy = g;
3888
3889 // All sample points are at the same fractional offset (fx,fy).
3890 // They're the 4 corners of a logical 1x1 pixel surrounding (x,y) at (0.5,0.5) offsets.
3891 F fx = fract(cx + 0.5f),
3892 fy = fract(cy + 0.5f);
3893
3894 // We'll accumulate the color of all four samples into {r,g,b,a} directly.
3895 r = g = b = a = 0;
3896
3897 const float* w = ctx->weights;
3898 const F scaley[4] = {bicubic_wts(fy, w[0], w[4], w[ 8], w[12]),
3899 bicubic_wts(fy, w[1], w[5], w[ 9], w[13]),
3900 bicubic_wts(fy, w[2], w[6], w[10], w[14]),
3901 bicubic_wts(fy, w[3], w[7], w[11], w[15])};
3902 const F scalex[4] = {bicubic_wts(fx, w[0], w[4], w[ 8], w[12]),
3903 bicubic_wts(fx, w[1], w[5], w[ 9], w[13]),
3904 bicubic_wts(fx, w[2], w[6], w[10], w[14]),
3905 bicubic_wts(fx, w[3], w[7], w[11], w[15])};
3906
3907 F sample_y = cy - 1.5f;
3908 for (int yy = 0; yy <= 3; ++yy) {
3909 F sample_x = cx - 1.5f;
3910 for (int xx = 0; xx <= 3; ++xx) {
3911 F scale = scalex[xx] * scaley[yy];
3912
3913 // ix_and_ptr() will clamp to the image's bounds for us.
3914 const uint32_t* ptr;
3915 U32 ix = ix_and_ptr(&ptr, ctx, sample_x, sample_y);
3916
3917 F sr,sg,sb,sa;
3918 from_8888(gather(ptr, ix), &sr,&sg,&sb,&sa);
3919
3920 r = mad(scale, sr, r);
3921 g = mad(scale, sg, g);
3922 b = mad(scale, sb, b);
3923 a = mad(scale, sa, a);
3924
3925 sample_x += 1;
3926 }
3927 sample_y += 1;
3928 }
3929 }
3930
3931 // ~~~~~~ skgpu::Swizzle stage ~~~~~~ //
3932
STAGE(swizzle, void* ctx) {
3934 auto ir = r, ig = g, ib = b, ia = a;
3935 F* o[] = {&r, &g, &b, &a};
3936 char swiz[4];
3937 memcpy(swiz, &ctx, sizeof(swiz));
3938
3939 for (int i = 0; i < 4; ++i) {
3940 switch (swiz[i]) {
3941 case 'r': *o[i] = ir; break;
3942 case 'g': *o[i] = ig; break;
3943 case 'b': *o[i] = ib; break;
3944 case 'a': *o[i] = ia; break;
3945 case '0': *o[i] = F(0); break;
3946 case '1': *o[i] = F(1); break;
3947 default: break;
3948 }
3949 }
3950 }
3951
3952 namespace lowp {
3953 #if defined(JUMPER_IS_SCALAR) || defined(SK_DISABLE_LOWP_RASTER_PIPELINE)
3954 // If we're not compiled by Clang, or otherwise switched into scalar mode (old Clang, manually),
3955 // we don't generate lowp stages. All these nullptrs will tell SkJumper.cpp to always use the
3956 // highp float pipeline.
3957 #define M(st) static void (*st)(void) = nullptr;
3958 SK_RASTER_PIPELINE_OPS_LOWP(M)
3959 #undef M
3960 static void (*just_return)(void) = nullptr;
3961
static void start_pipeline(size_t,size_t,size_t,size_t, SkRasterPipelineStage*) {}
3963
3964 #else // We are compiling vector code with Clang... let's make some lowp stages!
3965
3966 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
3967 using U8 = uint8_t __attribute__((ext_vector_type(16)));
3968 using U16 = uint16_t __attribute__((ext_vector_type(16)));
3969 using I16 = int16_t __attribute__((ext_vector_type(16)));
3970 using I32 = int32_t __attribute__((ext_vector_type(16)));
3971 using U32 = uint32_t __attribute__((ext_vector_type(16)));
3972 using I64 = int64_t __attribute__((ext_vector_type(16)));
3973 using U64 = uint64_t __attribute__((ext_vector_type(16)));
3974 using F = float __attribute__((ext_vector_type(16)));
3975 #else
3976 using U8 = uint8_t __attribute__((ext_vector_type(8)));
3977 using U16 = uint16_t __attribute__((ext_vector_type(8)));
3978 using I16 = int16_t __attribute__((ext_vector_type(8)));
3979 using I32 = int32_t __attribute__((ext_vector_type(8)));
3980 using U32 = uint32_t __attribute__((ext_vector_type(8)));
3981 using I64 = int64_t __attribute__((ext_vector_type(8)));
3982 using U64 = uint64_t __attribute__((ext_vector_type(8)));
3983 using F = float __attribute__((ext_vector_type(8)));
3984 #endif
3985
3986 static constexpr size_t N = sizeof(U16) / sizeof(uint16_t);
3987
3988 // Once again, some platforms benefit from a restricted Stage calling convention,
3989 // but others can pass tons and tons of registers and we're happy to exploit that.
3990 // It's exactly the same decision and implementation strategy as the F stages above.
3991 #if JUMPER_NARROW_STAGES
3992 struct Params {
3993 size_t dx, dy, tail;
3994 U16 dr,dg,db,da;
3995 };
3996 using Stage = void (ABI*)(Params*, SkRasterPipelineStage* program, U16 r, U16 g, U16 b, U16 a);
3997 #else
3998 using Stage = void (ABI*)(size_t tail, SkRasterPipelineStage* program,
3999 size_t dx, size_t dy,
4000 U16 r, U16 g, U16 b, U16 a,
4001 U16 dr, U16 dg, U16 db, U16 da);
4002 #endif
4003
4004 static void start_pipeline(const size_t x0, const size_t y0,
4005 const size_t xlimit, const size_t ylimit,
4006 SkRasterPipelineStage* program) {
4007 auto start = (Stage)program->fn;
4008 for (size_t dy = y0; dy < ylimit; dy++) {
4009 #if JUMPER_NARROW_STAGES
4010 Params params = { x0,dy,0, 0,0,0,0 };
4011 for (; params.dx + N <= xlimit; params.dx += N) {
            start(&params, program, 0,0,0,0);
4013 }
4014 if (size_t tail = xlimit - params.dx) {
4015 params.tail = tail;
            start(&params, program, 0,0,0,0);
4017 }
4018 #else
4019 size_t dx = x0;
4020 for (; dx + N <= xlimit; dx += N) {
4021 start( 0, program, dx,dy, 0,0,0,0, 0,0,0,0);
4022 }
4023 if (size_t tail = xlimit - dx) {
4024 start(tail, program, dx,dy, 0,0,0,0, 0,0,0,0);
4025 }
4026 #endif
4027 }
4028 }
4029
4030 #if JUMPER_NARROW_STAGES
4031 static void ABI just_return(Params*, SkRasterPipelineStage*, U16,U16,U16,U16) {}
4032 #else
4033 static void ABI just_return(size_t, SkRasterPipelineStage*,size_t,size_t,
4034 U16,U16,U16,U16, U16,U16,U16,U16) {}
4035 #endif
4036
4037 // All stages use the same function call ABI to chain into each other, but there are three types:
4038 // GG: geometry in, geometry out -- think, a matrix
4039 // GP: geometry in, pixels out. -- think, a memory gather
4040 // PP: pixels in, pixels out. -- think, a blend mode
4041 //
4042 // (Some stages ignore their inputs or produce no logical output. That's perfectly fine.)
4043 //
4044 // These three STAGE_ macros let you define each type of stage,
4045 // and will have (x,y) geometry and/or (r,g,b,a, dr,dg,db,da) pixel arguments as appropriate.
4046
4047 #if JUMPER_NARROW_STAGES
4048 #define STAGE_GG(name, ARG) \
4049 SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F& x, F& y); \
4050 static void ABI name(Params* params, SkRasterPipelineStage* program, \
4051 U16 r, U16 g, U16 b, U16 a) { \
4052 auto x = join<F>(r,g), \
4053 y = join<F>(b,a); \
4054 name##_k(Ctx{program}, params->dx,params->dy,params->tail, x,y); \
4055 split(x, &r,&g); \
4056 split(y, &b,&a); \
4057 auto fn = (Stage)(++program)->fn; \
4058 fn(params, program, r,g,b,a); \
4059 } \
4060 SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F& x, F& y)
4061
4062 #define STAGE_GP(name, ARG) \
4063 SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F x, F y, \
4064 U16& r, U16& g, U16& b, U16& a, \
4065 U16& dr, U16& dg, U16& db, U16& da); \
4066 static void ABI name(Params* params, SkRasterPipelineStage* program, \
4067 U16 r, U16 g, U16 b, U16 a) { \
4068 auto x = join<F>(r,g), \
4069 y = join<F>(b,a); \
4070 name##_k(Ctx{program}, params->dx,params->dy,params->tail, x,y, r,g,b,a, \
4071 params->dr,params->dg,params->db,params->da); \
4072 auto fn = (Stage)(++program)->fn; \
4073 fn(params, program, r,g,b,a); \
4074 } \
4075 SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F x, F y, \
4076 U16& r, U16& g, U16& b, U16& a, \
4077 U16& dr, U16& dg, U16& db, U16& da)
4078
4079 #define STAGE_PP(name, ARG) \
4080 SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, \
4081 U16& r, U16& g, U16& b, U16& a, \
4082 U16& dr, U16& dg, U16& db, U16& da); \
4083 static void ABI name(Params* params, SkRasterPipelineStage* program, \
4084 U16 r, U16 g, U16 b, U16 a) { \
4085 name##_k(Ctx{program}, params->dx,params->dy,params->tail, r,g,b,a, \
4086 params->dr,params->dg,params->db,params->da); \
4087 auto fn = (Stage)(++program)->fn; \
4088 fn(params, program, r,g,b,a); \
4089 } \
4090 SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, \
4091 U16& r, U16& g, U16& b, U16& a, \
4092 U16& dr, U16& dg, U16& db, U16& da)
4093 #else
4094 #define STAGE_GG(name, ARG) \
4095 SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F& x, F& y); \
4096 static void ABI name(size_t tail, SkRasterPipelineStage* program, \
4097 size_t dx, size_t dy, \
4098 U16 r, U16 g, U16 b, U16 a, \
4099 U16 dr, U16 dg, U16 db, U16 da) { \
4100 auto x = join<F>(r,g), \
4101 y = join<F>(b,a); \
4102 name##_k(Ctx{program}, dx,dy,tail, x,y); \
4103 split(x, &r,&g); \
4104 split(y, &b,&a); \
4105 auto fn = (Stage)(++program)->fn; \
4106 fn(tail, program, dx,dy, r,g,b,a, dr,dg,db,da); \
4107 } \
4108 SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F& x, F& y)
4109
4110 #define STAGE_GP(name, ARG) \
4111 SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F x, F y, \
4112 U16& r, U16& g, U16& b, U16& a, \
4113 U16& dr, U16& dg, U16& db, U16& da); \
4114 static void ABI name(size_t tail, SkRasterPipelineStage* program, \
4115 size_t dx, size_t dy, \
4116 U16 r, U16 g, U16 b, U16 a, \
4117 U16 dr, U16 dg, U16 db, U16 da) { \
4118 auto x = join<F>(r,g), \
4119 y = join<F>(b,a); \
4120 name##_k(Ctx{program}, dx,dy,tail, x,y, r,g,b,a, dr,dg,db,da); \
4121 auto fn = (Stage)(++program)->fn; \
4122 fn(tail, program, dx,dy, r,g,b,a, dr,dg,db,da); \
4123 } \
4124 SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F x, F y, \
4125 U16& r, U16& g, U16& b, U16& a, \
4126 U16& dr, U16& dg, U16& db, U16& da)
4127
4128 #define STAGE_PP(name, ARG) \
4129 SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, \
4130 U16& r, U16& g, U16& b, U16& a, \
4131 U16& dr, U16& dg, U16& db, U16& da); \
4132 static void ABI name(size_t tail, SkRasterPipelineStage* program, \
4133 size_t dx, size_t dy, \
4134 U16 r, U16 g, U16 b, U16 a, \
4135 U16 dr, U16 dg, U16 db, U16 da) { \
4136 name##_k(Ctx{program}, dx,dy,tail, r,g,b,a, dr,dg,db,da); \
4137 auto fn = (Stage)(++program)->fn; \
4138 fn(tail, program, dx,dy, r,g,b,a, dr,dg,db,da); \
4139 } \
4140 SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, \
4141 U16& r, U16& g, U16& b, U16& a, \
4142 U16& dr, U16& dg, U16& db, U16& da)
4143 #endif
4144
4145 // ~~~~~~ Commonly used helper functions ~~~~~~ //
4146
4147 /**
 * Helpers to do properly rounded division (by 255). The ideal answer we want to compute is slow,
4149 * thanks to a division by a non-power of two:
4150 * [1] (v + 127) / 255
4151 *
4152 * There is a two-step process that computes the correct answer for all inputs:
4153 * [2] (v + 128 + ((v + 128) >> 8)) >> 8
4154 *
4155 * There is also a single iteration approximation, but it's wrong (+-1) ~25% of the time:
4156 * [3] (v + 255) >> 8;
4157 *
4158 * We offer two different implementations here, depending on the requirements of the calling stage.
4159 */
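// Worked example (illustrative): for v = 127, [1] gives (127 + 127) / 255 = 0 and [2] also gives 0,
// while the single-shift approximation [3] gives (127 + 255) >> 8 = 1, off by one.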
4160
4161 /**
4162 * div255 favors speed over accuracy. It uses formula [2] on NEON (where we can compute it as fast
4163 * as [3]), and uses [3] elsewhere.
4164 */
4165 SI U16 div255(U16 v) {
4166 #if defined(JUMPER_IS_NEON)
4167 // With NEON we can compute [2] just as fast as [3], so let's be correct.
4168 // First we compute v + ((v+128)>>8), then one more round of (...+128)>>8 to finish up:
4169 return vrshrq_n_u16(vrsraq_n_u16(v, v, 8), 8);
4170 #else
4171 // Otherwise, use [3], which is never wrong by more than 1:
4172 return (v+255)/256;
4173 #endif
4174 }
4175
4176 /**
4177 * div255_accurate guarantees the right answer on all platforms, at the expense of performance.
4178 */
4179 SI U16 div255_accurate(U16 v) {
4180 #if defined(JUMPER_IS_NEON)
4181 // Our NEON implementation of div255 is already correct for all inputs:
4182 return div255(v);
4183 #else
4184 // This is [2] (the same formulation as NEON), but written without the benefit of intrinsics:
4185 v += 128;
4186 return (v+(v/256))/256;
4187 #endif
4188 }
4189
4190 SI U16 inv(U16 v) { return 255-v; }
4191
4192 SI U16 if_then_else(I16 c, U16 t, U16 e) { return (t & c) | (e & ~c); }
4193 SI U32 if_then_else(I32 c, U32 t, U32 e) { return (t & c) | (e & ~c); }
4194
4195 SI U16 max(U16 x, U16 y) { return if_then_else(x < y, y, x); }
4196 SI U16 min(U16 x, U16 y) { return if_then_else(x < y, x, y); }
4197
4198 SI U16 from_float(float f) { return f * 255.0f + 0.5f; }
4199
4200 SI U16 lerp(U16 from, U16 to, U16 t) { return div255( from*inv(t) + to*t ); }
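// For example (illustrative): lerp(0, 255, 128) = div255(0*inv(128) + 255*128) = div255(32640) = 128,
// i.e. the halfway point.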
4201
4202 template <typename D, typename S>
4203 SI D cast(S src) {
4204 return __builtin_convertvector(src, D);
4205 }
4206
4207 template <typename D, typename S>
4208 SI void split(S v, D* lo, D* hi) {
4209 static_assert(2*sizeof(D) == sizeof(S), "");
4210 memcpy(lo, (const char*)&v + 0*sizeof(D), sizeof(D));
4211 memcpy(hi, (const char*)&v + 1*sizeof(D), sizeof(D));
4212 }
4213 template <typename D, typename S>
4214 SI D join(S lo, S hi) {
4215 static_assert(sizeof(D) == 2*sizeof(S), "");
4216 D v;
4217 memcpy((char*)&v + 0*sizeof(S), &lo, sizeof(S));
4218 memcpy((char*)&v + 1*sizeof(S), &hi, sizeof(S));
4219 return v;
4220 }
4221
4222 SI F if_then_else(I32 c, F t, F e) {
4223 return sk_bit_cast<F>( (sk_bit_cast<I32>(t) & c) | (sk_bit_cast<I32>(e) & ~c) );
4224 }
4225 SI F max(F x, F y) { return if_then_else(x < y, y, x); }
4226 SI F min(F x, F y) { return if_then_else(x < y, x, y); }
4227
4228 SI I32 if_then_else(I32 c, I32 t, I32 e) {
4229 return (t & c) | (e & ~c);
4230 }
4231 SI I32 max(I32 x, I32 y) { return if_then_else(x < y, y, x); }
4232 SI I32 min(I32 x, I32 y) { return if_then_else(x < y, x, y); }
4233
4234 SI F mad(F f, F m, F a) { return f*m+a; }
4235 SI U32 trunc_(F x) { return (U32)cast<I32>(x); }
4236
4237 // Use approximate instructions and one Newton-Raphson step to calculate 1/x.
4238 SI F rcp_precise(F x) {
4239 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
4240 __m256 lo,hi;
4241 split(x, &lo,&hi);
4242 return join<F>(SK_OPTS_NS::rcp_precise(lo), SK_OPTS_NS::rcp_precise(hi));
4243 #elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
4244 __m128 lo,hi;
4245 split(x, &lo,&hi);
4246 return join<F>(SK_OPTS_NS::rcp_precise(lo), SK_OPTS_NS::rcp_precise(hi));
4247 #elif defined(JUMPER_IS_NEON)
4248 float32x4_t lo,hi;
4249 split(x, &lo,&hi);
4250 return join<F>(SK_OPTS_NS::rcp_precise(lo), SK_OPTS_NS::rcp_precise(hi));
4251 #else
4252 return 1.0f / x;
4253 #endif
4254 }
4255 SI F sqrt_(F x) {
4256 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
4257 __m256 lo,hi;
4258 split(x, &lo,&hi);
4259 return join<F>(_mm256_sqrt_ps(lo), _mm256_sqrt_ps(hi));
4260 #elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
4261 __m128 lo,hi;
4262 split(x, &lo,&hi);
4263 return join<F>(_mm_sqrt_ps(lo), _mm_sqrt_ps(hi));
4264 #elif defined(SK_CPU_ARM64)
4265 float32x4_t lo,hi;
4266 split(x, &lo,&hi);
4267 return join<F>(vsqrtq_f32(lo), vsqrtq_f32(hi));
4268 #elif defined(JUMPER_IS_NEON)
4269 auto sqrt = [](float32x4_t v) {
4270 auto est = vrsqrteq_f32(v); // Estimate and two refinement steps for est = rsqrt(v).
4271 est *= vrsqrtsq_f32(v,est*est);
4272 est *= vrsqrtsq_f32(v,est*est);
4273 return v*est; // sqrt(v) == v*rsqrt(v).
4274 };
4275 float32x4_t lo,hi;
4276 split(x, &lo,&hi);
4277 return join<F>(sqrt(lo), sqrt(hi));
4278 #else
4279 return F{
4280 sqrtf(x[0]), sqrtf(x[1]), sqrtf(x[2]), sqrtf(x[3]),
4281 sqrtf(x[4]), sqrtf(x[5]), sqrtf(x[6]), sqrtf(x[7]),
4282 };
4283 #endif
4284 }
4285
4286 SI F floor_(F x) {
4287 #if defined(SK_CPU_ARM64)
4288 float32x4_t lo,hi;
4289 split(x, &lo,&hi);
4290 return join<F>(vrndmq_f32(lo), vrndmq_f32(hi));
4291 #elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
4292 __m256 lo,hi;
4293 split(x, &lo,&hi);
4294 return join<F>(_mm256_floor_ps(lo), _mm256_floor_ps(hi));
4295 #elif defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
4296 __m128 lo,hi;
4297 split(x, &lo,&hi);
4298 return join<F>(_mm_floor_ps(lo), _mm_floor_ps(hi));
4299 #else
4300 F roundtrip = cast<F>(cast<I32>(x));
4301 return roundtrip - if_then_else(roundtrip > x, F(1), F(0));
4302 #endif
4303 }
4304
// scaled_mult interprets a and b as numbers on [-1, 1) in Q15 format. Functionally
4306 // this multiply is:
4307 // (2 * a * b + (1 << 15)) >> 16
4308 // The result is a number on [-1, 1).
4309 // Note: on neon this is a saturating multiply while the others are not.
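// Worked example (illustrative): 0.5 * 0.5 in Q15 is a = b = 16384, and
// (2*16384*16384 + (1 << 15)) >> 16 = 8192, which is 0.25 in Q15.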
4310 SI I16 scaled_mult(I16 a, I16 b) {
4311 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
4312 return _mm256_mulhrs_epi16(a, b);
4313 #elif defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
4314 return _mm_mulhrs_epi16(a, b);
4315 #elif defined(SK_CPU_ARM64)
4316 return vqrdmulhq_s16(a, b);
4317 #elif defined(JUMPER_IS_NEON)
4318 return vqrdmulhq_s16(a, b);
4319 #else
4320 const I32 roundingTerm = 1 << 14;
4321 return cast<I16>((cast<I32>(a) * cast<I32>(b) + roundingTerm) >> 15);
4322 #endif
4323 }
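// For example, a = 0.25 (8192 in Q15) and b = 0.5 (16384 in Q15):
//     (2*8192*16384 + (1 << 15)) >> 16 = 268468224 >> 16 = 4096,
// and 4096 in Q15 is 0.125 = 0.25 * 0.5.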
4324
4325 // This sum supports lerp, where the result will always be a positive number. In general,
4326 // a sum like this would require an additional bit, but because we know the range of the result,
4327 // we know that the extra bit will always be zero.
4328 SI U16 constrained_add(I16 a, U16 b) {
4329 #if defined(SK_DEBUG)
4330 for (size_t i = 0; i < N; i++) {
4331 // Ensure that a + b is on the interval [0, UINT16_MAX]
4332 int ia = a[i],
4333 ib = b[i];
4334 // Use 65535 here because fuchsia's compiler evaluates UINT16_MAX - ib, which is
4335 // 65535U - ib, as a uint32_t instead of an int32_t. This was forcing ia to be
4336 // interpreted as a uint32_t.
4337 SkASSERT(-ib <= ia && ia <= 65535 - ib);
4338 }
4339 #endif
4340 return b + a;
4341 }
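// For example, a = -100 (I16) and b = 200 (U16): the true sum is 100, which fits in a U16, so
// the wrapping addition below (200 + 65436 mod 65536) still produces 100, even though a sum
// like this conceptually needs a 17th bit in the general case.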
4342
4343 SI F fract(F x) { return x - floor_(x); }
4344 SI F abs_(F x) { return sk_bit_cast<F>( sk_bit_cast<I32>(x) & 0x7fffffff ); }
4345
4346 // ~~~~~~ Basic / misc. stages ~~~~~~ //
4347
4348 STAGE_GG(seed_shader, NoCtx) {
4349 static constexpr float iota[] = {
4350 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
4351 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
4352 };
4353 x = cast<F>(I32(dx)) + sk_unaligned_load<F>(iota);
4354 y = cast<F>(I32(dy)) + 0.5f;
4355 }
4356
4357 STAGE_GG(matrix_translate, const float* m) {
4358 x += m[0];
4359 y += m[1];
4360 }
4361 STAGE_GG(matrix_scale_translate, const float* m) {
4362 x = mad(x,m[0], m[2]);
4363 y = mad(y,m[1], m[3]);
4364 }
4365 STAGE_GG(matrix_2x3, const float* m) {
4366 auto X = mad(x,m[0], mad(y,m[1], m[2])),
4367 Y = mad(x,m[3], mad(y,m[4], m[5]));
4368 x = X;
4369 y = Y;
4370 }
4371 STAGE_GG(matrix_perspective, const float* m) {
4372 // N.B. Unlike the other matrix_ stages, this matrix is row-major.
4373 auto X = mad(x,m[0], mad(y,m[1], m[2])),
4374 Y = mad(x,m[3], mad(y,m[4], m[5])),
4375 Z = mad(x,m[6], mad(y,m[7], m[8]));
4376 x = X * rcp_precise(Z);
4377 y = Y * rcp_precise(Z);
4378 }
4379
4380 STAGE_PP(uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
4381 r = c->rgba[0];
4382 g = c->rgba[1];
4383 b = c->rgba[2];
4384 a = c->rgba[3];
4385 }
4386 STAGE_PP(uniform_color_dst, const SkRasterPipeline_UniformColorCtx* c) {
4387 dr = c->rgba[0];
4388 dg = c->rgba[1];
4389 db = c->rgba[2];
4390 da = c->rgba[3];
4391 }
4392 STAGE_PP(black_color, NoCtx) { r = g = b = 0; a = 255; }
4393 STAGE_PP(white_color, NoCtx) { r = g = b = 255; a = 255; }
4394
4395 STAGE_PP(set_rgb, const float rgb[3]) {
4396 r = from_float(rgb[0]);
4397 g = from_float(rgb[1]);
4398 b = from_float(rgb[2]);
4399 }
4400
4401 // No need to clamp against 0 here (values are unsigned)
4402 STAGE_PP(clamp_01, NoCtx) {
4403 r = min(r, 255);
4404 g = min(g, 255);
4405 b = min(b, 255);
4406 a = min(a, 255);
4407 }
4408
4409 STAGE_PP(clamp_gamut, NoCtx) {
4410 a = min(a, 255);
4411 r = min(r, a);
4412 g = min(g, a);
4413 b = min(b, a);
4414 }
4415
4416 STAGE_PP(premul, NoCtx) {
4417 r = div255_accurate(r * a);
4418 g = div255_accurate(g * a);
4419 b = div255_accurate(b * a);
4420 }
4421 STAGE_PP(premul_dst, NoCtx) {
4422 dr = div255_accurate(dr * da);
4423 dg = div255_accurate(dg * da);
4424 db = div255_accurate(db * da);
4425 }
4426
4427 STAGE_PP(force_opaque , NoCtx) { a = 255; }
4428 STAGE_PP(force_opaque_dst, NoCtx) { da = 255; }
4429
4430 STAGE_PP(swap_rb, NoCtx) {
4431 auto tmp = r;
4432 r = b;
4433 b = tmp;
4434 }
4435 STAGE_PP(swap_rb_dst, NoCtx) {
4436 auto tmp = dr;
4437 dr = db;
4438 db = tmp;
4439 }
4440
4441 STAGE_PP(move_src_dst, NoCtx) {
4442 dr = r;
4443 dg = g;
4444 db = b;
4445 da = a;
4446 }
4447
4448 STAGE_PP(move_dst_src, NoCtx) {
4449 r = dr;
4450 g = dg;
4451 b = db;
4452 a = da;
4453 }
4454
4455 STAGE_PP(swap_src_dst, NoCtx) {
4456 std::swap(r, dr);
4457 std::swap(g, dg);
4458 std::swap(b, db);
4459 std::swap(a, da);
4460 }
4461
4462 // ~~~~~~ Blend modes ~~~~~~ //
4463
4464 // The same logic applied to all 4 channels.
4465 #define BLEND_MODE(name) \
4466 SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da); \
4467 STAGE_PP(name, NoCtx) { \
4468 r = name##_channel(r,dr,a,da); \
4469 g = name##_channel(g,dg,a,da); \
4470 b = name##_channel(b,db,a,da); \
4471 a = name##_channel(a,da,a,da); \
4472 } \
4473 SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da)
4474
4475 BLEND_MODE(clear) { return 0; }
4476 BLEND_MODE(srcatop) { return div255( s*da + d*inv(sa) ); }
4477 BLEND_MODE(dstatop) { return div255( d*sa + s*inv(da) ); }
4478 BLEND_MODE(srcin) { return div255( s*da ); }
4479 BLEND_MODE(dstin) { return div255( d*sa ); }
4480 BLEND_MODE(srcout) { return div255( s*inv(da) ); }
4481 BLEND_MODE(dstout) { return div255( d*inv(sa) ); }
4482 BLEND_MODE(srcover) { return s + div255( d*inv(sa) ); }
4483 BLEND_MODE(dstover) { return d + div255( s*inv(da) ); }
4484 BLEND_MODE(modulate) { return div255( s*d ); }
4485 BLEND_MODE(multiply) { return div255( s*inv(da) + d*inv(sa) + s*d ); }
4486 BLEND_MODE(plus_) { return min(s+d, 255); }
4487 BLEND_MODE(screen) { return s + d - div255( s*d ); }
4488 BLEND_MODE(xor_) { return div255( s*inv(da) + d*inv(sa) ); }
4489 #undef BLEND_MODE
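// For reference, BLEND_MODE(srcover) above expands to roughly:
//     SI U16 srcover_channel(U16 s, U16 d, U16 sa, U16 da);
//     STAGE_PP(srcover, NoCtx) {
//         r = srcover_channel(r,dr,a,da);
//         g = srcover_channel(g,dg,a,da);
//         b = srcover_channel(b,db,a,da);
//         a = srcover_channel(a,da,a,da);
//     }
//     SI U16 srcover_channel(U16 s, U16 d, U16 sa, U16 da) { return s + div255( d*inv(sa) ); }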
4490
4491 // The same logic applied to color, and srcover for alpha.
4492 #define BLEND_MODE(name) \
4493 SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da); \
4494 STAGE_PP(name, NoCtx) { \
4495 r = name##_channel(r,dr,a,da); \
4496 g = name##_channel(g,dg,a,da); \
4497 b = name##_channel(b,db,a,da); \
4498 a = a + div255( da*inv(a) ); \
4499 } \
4500 SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da)
4501
4502 BLEND_MODE(darken) { return s + d - div255( max(s*da, d*sa) ); }
4503 BLEND_MODE(lighten) { return s + d - div255( min(s*da, d*sa) ); }
4504 BLEND_MODE(difference) { return s + d - 2*div255( min(s*da, d*sa) ); }
4505 BLEND_MODE(exclusion) { return s + d - 2*div255( s*d ); }
4506
4507 BLEND_MODE(hardlight) {
4508 return div255( s*inv(da) + d*inv(sa) +
4509 if_then_else(2*s <= sa, 2*s*d, sa*da - 2*(sa-s)*(da-d)) );
4510 }
4511 BLEND_MODE(overlay) {
4512 return div255( s*inv(da) + d*inv(sa) +
4513 if_then_else(2*d <= da, 2*s*d, sa*da - 2*(sa-s)*(da-d)) );
4514 }
4515 #undef BLEND_MODE
4516
4517 // ~~~~~~ Helpers for interacting with memory ~~~~~~ //
4518
4519 template <typename T>
4520 SI T* ptr_at_xy(const SkRasterPipeline_MemoryCtx* ctx, size_t dx, size_t dy) {
4521 return (T*)ctx->pixels + dy*ctx->stride + dx;
4522 }
4523
4524 template <typename T>
4525 SI U32 ix_and_ptr(T** ptr, const SkRasterPipeline_GatherCtx* ctx, F x, F y) {
4526 // Exclusive -> inclusive.
4527 const F w = sk_bit_cast<float>( sk_bit_cast<uint32_t>(ctx->width ) - 1),
4528 h = sk_bit_cast<float>( sk_bit_cast<uint32_t>(ctx->height) - 1);
4529
4530 const F z = std::numeric_limits<float>::min();
4531
4532 x = min(max(z, x), w);
4533 y = min(max(z, y), h);
4534
4535 x = sk_bit_cast<F>(sk_bit_cast<U32>(x) - (uint32_t)ctx->roundDownAtInteger);
4536 y = sk_bit_cast<F>(sk_bit_cast<U32>(y) - (uint32_t)ctx->roundDownAtInteger);
4537
4538 *ptr = (const T*)ctx->pixels;
4539 return trunc_(y)*ctx->stride + trunc_(x);
4540 }
4541
4542 template <typename T>
4543 SI U32 ix_and_ptr(T** ptr, const SkRasterPipeline_GatherCtx* ctx, I32 x, I32 y) {
4544 // This flag doesn't make sense when the coords are integers.
4545 SkASSERT(ctx->roundDownAtInteger == 0);
4546 // Exclusive -> inclusive.
4547 const I32 w = ctx->width - 1,
4548 h = ctx->height - 1;
4549
4550 U32 ax = cast<U32>(min(max(0, x), w)),
4551 ay = cast<U32>(min(max(0, y), h));
4552
4553 *ptr = (const T*)ctx->pixels;
4554 return ay * ctx->stride + ax;
4555 }
4556
4557 template <typename V, typename T>
4558 SI V load(const T* ptr, size_t tail) {
4559 V v = 0;
4560 switch (tail & (N-1)) {
4561 case 0: memcpy(&v, ptr, sizeof(v)); break;
4562 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
4563 case 15: v[14] = ptr[14]; [[fallthrough]];
4564 case 14: v[13] = ptr[13]; [[fallthrough]];
4565 case 13: v[12] = ptr[12]; [[fallthrough]];
4566 case 12: memcpy(&v, ptr, 12*sizeof(T)); break;
4567 case 11: v[10] = ptr[10]; [[fallthrough]];
4568 case 10: v[ 9] = ptr[ 9]; [[fallthrough]];
4569 case 9: v[ 8] = ptr[ 8]; [[fallthrough]];
4570 case 8: memcpy(&v, ptr, 8*sizeof(T)); break;
4571 #endif
4572 case 7: v[ 6] = ptr[ 6]; [[fallthrough]];
4573 case 6: v[ 5] = ptr[ 5]; [[fallthrough]];
4574 case 5: v[ 4] = ptr[ 4]; [[fallthrough]];
4575 case 4: memcpy(&v, ptr, 4*sizeof(T)); break;
4576 case 3: v[ 2] = ptr[ 2]; [[fallthrough]];
4577 case 2: memcpy(&v, ptr, 2*sizeof(T)); break;
4578 case 1: v[ 0] = ptr[ 0];
4579 }
4580 return v;
4581 }
4582 template <typename V, typename T>
4583 SI void store(T* ptr, size_t tail, V v) {
4584 switch (tail & (N-1)) {
4585 case 0: memcpy(ptr, &v, sizeof(v)); break;
4586 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
4587 case 15: ptr[14] = v[14]; [[fallthrough]];
4588 case 14: ptr[13] = v[13]; [[fallthrough]];
4589 case 13: ptr[12] = v[12]; [[fallthrough]];
4590 case 12: memcpy(ptr, &v, 12*sizeof(T)); break;
4591 case 11: ptr[10] = v[10]; [[fallthrough]];
4592 case 10: ptr[ 9] = v[ 9]; [[fallthrough]];
4593 case 9: ptr[ 8] = v[ 8]; [[fallthrough]];
4594 case 8: memcpy(ptr, &v, 8*sizeof(T)); break;
4595 #endif
4596 case 7: ptr[ 6] = v[ 6]; [[fallthrough]];
4597 case 6: ptr[ 5] = v[ 5]; [[fallthrough]];
4598 case 5: ptr[ 4] = v[ 4]; [[fallthrough]];
4599 case 4: memcpy(ptr, &v, 4*sizeof(T)); break;
4600 case 3: ptr[ 2] = v[ 2]; [[fallthrough]];
4601 case 2: memcpy(ptr, &v, 2*sizeof(T)); break;
4602 case 1: ptr[ 0] = v[ 0];
4603 }
4604 }
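// For example, with N == 8 and tail == 3, load() hits `case 3` (v[2] = ptr[2]) and falls through
// to `case 2`, which memcpy's elements 0 and 1, so exactly the first three lanes are touched;
// store() mirrors the same pattern when writing. A tail of 0 means a full N-lane load/store.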
4605
4606 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
4607 template <typename V, typename T>
4608 SI V gather(const T* ptr, U32 ix) {
4609 return V{ ptr[ix[ 0]], ptr[ix[ 1]], ptr[ix[ 2]], ptr[ix[ 3]],
4610 ptr[ix[ 4]], ptr[ix[ 5]], ptr[ix[ 6]], ptr[ix[ 7]],
4611 ptr[ix[ 8]], ptr[ix[ 9]], ptr[ix[10]], ptr[ix[11]],
4612 ptr[ix[12]], ptr[ix[13]], ptr[ix[14]], ptr[ix[15]], };
4613 }
4614
4615 template<>
4616 F gather(const float* ptr, U32 ix) {
4617 __m256i lo, hi;
4618 split(ix, &lo, &hi);
4619
4620 return join<F>(_mm256_i32gather_ps(ptr, lo, 4),
4621 _mm256_i32gather_ps(ptr, hi, 4));
4622 }
4623
4624 template<>
4625 U32 gather(const uint32_t* ptr, U32 ix) {
4626 __m256i lo, hi;
4627 split(ix, &lo, &hi);
4628
4629 return join<U32>(_mm256_i32gather_epi32(ptr, lo, 4),
4630 _mm256_i32gather_epi32(ptr, hi, 4));
4631 }
4632 #else
4633 template <typename V, typename T>
4634 SI V gather(const T* ptr, U32 ix) {
4635 return V{ ptr[ix[ 0]], ptr[ix[ 1]], ptr[ix[ 2]], ptr[ix[ 3]],
4636 ptr[ix[ 4]], ptr[ix[ 5]], ptr[ix[ 6]], ptr[ix[ 7]], };
4637 }
4638 #endif
4639
4640
4641 // ~~~~~~ 32-bit memory loads and stores ~~~~~~ //
4642
4643 SI void from_8888(U32 rgba, U16* r, U16* g, U16* b, U16* a) {
4644 #if 1 && (defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX))
4645 // Swap the middle 128-bit lanes to make _mm256_packus_epi32() in cast_U16() work out nicely.
4646 __m256i _01,_23;
4647 split(rgba, &_01, &_23);
4648 __m256i _02 = _mm256_permute2x128_si256(_01,_23, 0x20),
4649 _13 = _mm256_permute2x128_si256(_01,_23, 0x31);
4650 rgba = join<U32>(_02, _13);
4651
4652 auto cast_U16 = [](U32 v) -> U16 {
4653 __m256i _02,_13;
4654 split(v, &_02,&_13);
4655 return _mm256_packus_epi32(_02,_13);
4656 };
4657 #else
4658 auto cast_U16 = [](U32 v) -> U16 {
4659 return cast<U16>(v);
4660 };
4661 #endif
4662 *r = cast_U16(rgba & 65535) & 255;
4663 *g = cast_U16(rgba & 65535) >> 8;
4664 *b = cast_U16(rgba >> 16) & 255;
4665 *a = cast_U16(rgba >> 16) >> 8;
4666 }
4667
4668 SI void load_8888_(const uint32_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
4669 #if 1 && defined(JUMPER_IS_NEON)
4670 uint8x8x4_t rgba;
4671 switch (tail & (N-1)) {
4672 case 0: rgba = vld4_u8 ((const uint8_t*)(ptr+0) ); break;
4673 case 7: rgba = vld4_lane_u8((const uint8_t*)(ptr+6), rgba, 6); [[fallthrough]];
4674 case 6: rgba = vld4_lane_u8((const uint8_t*)(ptr+5), rgba, 5); [[fallthrough]];
4675 case 5: rgba = vld4_lane_u8((const uint8_t*)(ptr+4), rgba, 4); [[fallthrough]];
4676 case 4: rgba = vld4_lane_u8((const uint8_t*)(ptr+3), rgba, 3); [[fallthrough]];
4677 case 3: rgba = vld4_lane_u8((const uint8_t*)(ptr+2), rgba, 2); [[fallthrough]];
4678 case 2: rgba = vld4_lane_u8((const uint8_t*)(ptr+1), rgba, 1); [[fallthrough]];
4679 case 1: rgba = vld4_lane_u8((const uint8_t*)(ptr+0), rgba, 0);
4680 }
4681 *r = cast<U16>(rgba.val[0]);
4682 *g = cast<U16>(rgba.val[1]);
4683 *b = cast<U16>(rgba.val[2]);
4684 *a = cast<U16>(rgba.val[3]);
4685 #else
4686 from_8888(load<U32>(ptr, tail), r,g,b,a);
4687 #endif
4688 }
4689 SI void store_8888_(uint32_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
4690 r = min(r, 255);
4691 g = min(g, 255);
4692 b = min(b, 255);
4693 a = min(a, 255);
4694
4695 #if 1 && defined(JUMPER_IS_NEON)
4696 uint8x8x4_t rgba = {{
4697 cast<U8>(r),
4698 cast<U8>(g),
4699 cast<U8>(b),
4700 cast<U8>(a),
4701 }};
4702 switch (tail & (N-1)) {
4703 case 0: vst4_u8 ((uint8_t*)(ptr+0), rgba ); break;
4704 case 7: vst4_lane_u8((uint8_t*)(ptr+6), rgba, 6); [[fallthrough]];
4705 case 6: vst4_lane_u8((uint8_t*)(ptr+5), rgba, 5); [[fallthrough]];
4706 case 5: vst4_lane_u8((uint8_t*)(ptr+4), rgba, 4); [[fallthrough]];
4707 case 4: vst4_lane_u8((uint8_t*)(ptr+3), rgba, 3); [[fallthrough]];
4708 case 3: vst4_lane_u8((uint8_t*)(ptr+2), rgba, 2); [[fallthrough]];
4709 case 2: vst4_lane_u8((uint8_t*)(ptr+1), rgba, 1); [[fallthrough]];
4710 case 1: vst4_lane_u8((uint8_t*)(ptr+0), rgba, 0);
4711 }
4712 #else
4713 store(ptr, tail, cast<U32>(r | (g<<8)) << 0
4714 | cast<U32>(b | (a<<8)) << 16);
4715 #endif
4716 }
4717
4718 STAGE_PP(load_8888, const SkRasterPipeline_MemoryCtx* ctx) {
4719 load_8888_(ptr_at_xy<const uint32_t>(ctx, dx,dy), tail, &r,&g,&b,&a);
4720 }
4721 STAGE_PP(load_8888_dst, const SkRasterPipeline_MemoryCtx* ctx) {
4722 load_8888_(ptr_at_xy<const uint32_t>(ctx, dx,dy), tail, &dr,&dg,&db,&da);
4723 }
4724 STAGE_PP(store_8888, const SkRasterPipeline_MemoryCtx* ctx) {
4725 store_8888_(ptr_at_xy<uint32_t>(ctx, dx,dy), tail, r,g,b,a);
4726 }
4727 STAGE_GP(gather_8888, const SkRasterPipeline_GatherCtx* ctx) {
4728 const uint32_t* ptr;
4729 U32 ix = ix_and_ptr(&ptr, ctx, x,y);
4730 from_8888(gather<U32>(ptr, ix), &r, &g, &b, &a);
4731 }
4732
4733 // ~~~~~~ 16-bit memory loads and stores ~~~~~~ //
4734
4735 SI void from_565(U16 rgb, U16* r, U16* g, U16* b) {
4736 // Format for 565 buffers: 15|rrrrr gggggg bbbbb|0
4737 U16 R = (rgb >> 11) & 31,
4738 G = (rgb >> 5) & 63,
4739 B = (rgb >> 0) & 31;
4740
4741 // These bit replications are the same as multiplying by 255/31 or 255/63 to scale to 8-bit.
4742 *r = (R << 3) | (R >> 2);
4743 *g = (G << 2) | (G >> 4);
4744 *b = (B << 3) | (B >> 2);
4745 }
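// For example, R = 16 (0b10000) expands to (16 << 3) | (16 >> 2) = 128 | 4 = 132, matching
// round(16 * 255/31.0f) = 132.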
4746 SI void load_565_(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
4747 from_565(load<U16>(ptr, tail), r,g,b);
4748 }
4749 SI void store_565_(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b) {
4750 r = min(r, 255);
4751 g = min(g, 255);
4752 b = min(b, 255);
4753
4754 // Round from [0,255] to [0,31] or [0,63], as if x * (31/255.0f) + 0.5f.
4755 // (Don't feel like you need to find some fundamental truth in these...
4756 // they were brute-force searched.)
4757 U16 R = (r * 9 + 36) / 74, // 9/74 ≈ 31/255, plus 36/74, about half.
4758 G = (g * 21 + 42) / 85, // 21/85 = 63/255 exactly.
4759 B = (b * 9 + 36) / 74;
4760 // Pack them back into 15|rrrrr gggggg bbbbb|0.
4761 store(ptr, tail, R << 11
4762 | G << 5
4763 | B << 0);
4764 }
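// Quick check of those constants: r = 255 gives (255*9 + 36)/74 = 2331/74 = 31 (the 5-bit max),
// and g = 255 gives (255*21 + 42)/85 = 5397/85 = 63 (the 6-bit max).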
4765
4766 STAGE_PP(load_565, const SkRasterPipeline_MemoryCtx* ctx) {
4767 load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &r,&g,&b);
4768 a = 255;
4769 }
4770 STAGE_PP(load_565_dst, const SkRasterPipeline_MemoryCtx* ctx) {
4771 load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &dr,&dg,&db);
4772 da = 255;
4773 }
4774 STAGE_PP(store_565, const SkRasterPipeline_MemoryCtx* ctx) {
4775 store_565_(ptr_at_xy<uint16_t>(ctx, dx,dy), tail, r,g,b);
4776 }
4777 STAGE_GP(gather_565, const SkRasterPipeline_GatherCtx* ctx) {
4778 const uint16_t* ptr;
4779 U32 ix = ix_and_ptr(&ptr, ctx, x,y);
4780 from_565(gather<U16>(ptr, ix), &r, &g, &b);
4781 a = 255;
4782 }
4783
4784 SI void from_4444(U16 rgba, U16* r, U16* g, U16* b, U16* a) {
4785 // Format for 4444 buffers: 15|rrrr gggg bbbb aaaa|0.
4786 U16 R = (rgba >> 12) & 15,
4787 G = (rgba >> 8) & 15,
4788 B = (rgba >> 4) & 15,
4789 A = (rgba >> 0) & 15;
4790
4791 // Scale [0,15] to [0,255].
4792 *r = (R << 4) | R;
4793 *g = (G << 4) | G;
4794 *b = (B << 4) | B;
4795 *a = (A << 4) | A;
4796 }
4797 SI void load_4444_(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
4798 from_4444(load<U16>(ptr, tail), r,g,b,a);
4799 }
4800 SI void store_4444_(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
4801 r = min(r, 255);
4802 g = min(g, 255);
4803 b = min(b, 255);
4804 a = min(a, 255);
4805
4806 // Round from [0,255] to [0,15], producing the same value as (x*(15/255.0f) + 0.5f).
4807 U16 R = (r + 8) / 17,
4808 G = (g + 8) / 17,
4809 B = (b + 8) / 17,
4810 A = (a + 8) / 17;
4811 // Pack them back into 15|rrrr gggg bbbb aaaa|0.
4812 store(ptr, tail, R << 12
4813 | G << 8
4814 | B << 4
4815 | A << 0);
4816 }
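// For example, r = 255 gives (255 + 8)/17 = 15 (the 4-bit max), and r = 128 gives
// (128 + 8)/17 = 8, matching 128 * (15/255.0f) + 0.5f truncated to 8.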
4817
4818 STAGE_PP(load_4444, const SkRasterPipeline_MemoryCtx* ctx) {
4819 load_4444_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &r,&g,&b,&a);
4820 }
4821 STAGE_PP(load_4444_dst, const SkRasterPipeline_MemoryCtx* ctx) {
4822 load_4444_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &dr,&dg,&db,&da);
4823 }
4824 STAGE_PP(store_4444, const SkRasterPipeline_MemoryCtx* ctx) {
4825 store_4444_(ptr_at_xy<uint16_t>(ctx, dx,dy), tail, r,g,b,a);
4826 }
4827 STAGE_GP(gather_4444, const SkRasterPipeline_GatherCtx* ctx) {
4828 const uint16_t* ptr;
4829 U32 ix = ix_and_ptr(&ptr, ctx, x,y);
4830 from_4444(gather<U16>(ptr, ix), &r,&g,&b,&a);
4831 }
4832
4833 SI void from_88(U16 rg, U16* r, U16* g) {
4834 *r = (rg & 0xFF);
4835 *g = (rg >> 8);
4836 }
4837
4838 SI void load_88_(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
4839 #if 1 && defined(JUMPER_IS_NEON)
4840 uint8x8x2_t rg;
4841 switch (tail & (N-1)) {
4842 case 0: rg = vld2_u8 ((const uint8_t*)(ptr+0) ); break;
4843 case 7: rg = vld2_lane_u8((const uint8_t*)(ptr+6), rg, 6); [[fallthrough]];
4844 case 6: rg = vld2_lane_u8((const uint8_t*)(ptr+5), rg, 5); [[fallthrough]];
4845 case 5: rg = vld2_lane_u8((const uint8_t*)(ptr+4), rg, 4); [[fallthrough]];
4846 case 4: rg = vld2_lane_u8((const uint8_t*)(ptr+3), rg, 3); [[fallthrough]];
4847 case 3: rg = vld2_lane_u8((const uint8_t*)(ptr+2), rg, 2); [[fallthrough]];
4848 case 2: rg = vld2_lane_u8((const uint8_t*)(ptr+1), rg, 1); [[fallthrough]];
4849 case 1: rg = vld2_lane_u8((const uint8_t*)(ptr+0), rg, 0);
4850 }
4851 *r = cast<U16>(rg.val[0]);
4852 *g = cast<U16>(rg.val[1]);
4853 #else
4854 from_88(load<U16>(ptr, tail), r,g);
4855 #endif
4856 }
4857
4858 SI void store_88_(uint16_t* ptr, size_t tail, U16 r, U16 g) {
4859 r = min(r, 255);
4860 g = min(g, 255);
4861
4862 #if 1 && defined(JUMPER_IS_NEON)
4863 uint8x8x2_t rg = {{
4864 cast<U8>(r),
4865 cast<U8>(g),
4866 }};
4867 switch (tail & (N-1)) {
4868 case 0: vst2_u8 ((uint8_t*)(ptr+0), rg ); break;
4869 case 7: vst2_lane_u8((uint8_t*)(ptr+6), rg, 6); [[fallthrough]];
4870 case 6: vst2_lane_u8((uint8_t*)(ptr+5), rg, 5); [[fallthrough]];
4871 case 5: vst2_lane_u8((uint8_t*)(ptr+4), rg, 4); [[fallthrough]];
4872 case 4: vst2_lane_u8((uint8_t*)(ptr+3), rg, 3); [[fallthrough]];
4873 case 3: vst2_lane_u8((uint8_t*)(ptr+2), rg, 2); [[fallthrough]];
4874 case 2: vst2_lane_u8((uint8_t*)(ptr+1), rg, 1); [[fallthrough]];
4875 case 1: vst2_lane_u8((uint8_t*)(ptr+0), rg, 0);
4876 }
4877 #else
4878 store(ptr, tail, cast<U16>(r | (g<<8)) << 0);
4879 #endif
4880 }
4881
4882 STAGE_PP(load_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
4883 load_88_(ptr_at_xy<const uint16_t>(ctx, dx, dy), tail, &r, &g);
4884 b = 0;
4885 a = 255;
4886 }
4887 STAGE_PP(load_rg88_dst, const SkRasterPipeline_MemoryCtx* ctx) {
4888 load_88_(ptr_at_xy<const uint16_t>(ctx, dx, dy), tail, &dr, &dg);
4889 db = 0;
4890 da = 255;
4891 }
4892 STAGE_PP(store_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
4893 store_88_(ptr_at_xy<uint16_t>(ctx, dx, dy), tail, r, g);
4894 }
4895 STAGE_GP(gather_rg88, const SkRasterPipeline_GatherCtx* ctx) {
4896 const uint16_t* ptr;
4897 U32 ix = ix_and_ptr(&ptr, ctx, x, y);
4898 from_88(gather<U16>(ptr, ix), &r, &g);
4899 b = 0;
4900 a = 255;
4901 }
4902
4903 // ~~~~~~ 8-bit memory loads and stores ~~~~~~ //
4904
4905 SI U16 load_8(const uint8_t* ptr, size_t tail) {
4906 return cast<U16>(load<U8>(ptr, tail));
4907 }
4908 SI void store_8(uint8_t* ptr, size_t tail, U16 v) {
4909 v = min(v, 255);
4910 store(ptr, tail, cast<U8>(v));
4911 }
4912
4913 STAGE_PP(load_a8, const SkRasterPipeline_MemoryCtx* ctx) {
4914 r = g = b = 0;
4915 a = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
4916 }
4917 STAGE_PP(load_a8_dst, const SkRasterPipeline_MemoryCtx* ctx) {
4918 dr = dg = db = 0;
4919 da = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
4920 }
4921 STAGE_PP(store_a8, const SkRasterPipeline_MemoryCtx* ctx) {
4922 store_8(ptr_at_xy<uint8_t>(ctx, dx,dy), tail, a);
4923 }
4924 STAGE_GP(gather_a8, const SkRasterPipeline_GatherCtx* ctx) {
4925 const uint8_t* ptr;
4926 U32 ix = ix_and_ptr(&ptr, ctx, x,y);
4927 r = g = b = 0;
4928 a = cast<U16>(gather<U8>(ptr, ix));
4929 }
4930 STAGE_PP(store_r8, const SkRasterPipeline_MemoryCtx* ctx) {
4931 store_8(ptr_at_xy<uint8_t>(ctx, dx,dy), tail, r);
4932 }
4933
4934 STAGE_PP(alpha_to_gray, NoCtx) {
4935 r = g = b = a;
4936 a = 255;
4937 }
4938 STAGE_PP(alpha_to_gray_dst, NoCtx) {
4939 dr = dg = db = da;
4940 da = 255;
4941 }
4942 STAGE_PP(alpha_to_red, NoCtx) {
4943 r = a;
4944 a = 255;
4945 }
4946 STAGE_PP(alpha_to_red_dst, NoCtx) {
4947 dr = da;
4948 da = 255;
4949 }
4950
4951 STAGE_PP(bt709_luminance_or_luma_to_alpha, NoCtx) {
4952 a = (r*54 + g*183 + b*19)/256; // 0.2126, 0.7152, 0.0722 with 256 denominator.
4953 r = g = b = 0;
4954 }
4955 STAGE_PP(bt709_luminance_or_luma_to_rgb, NoCtx) {
4956 r = g = b =(r*54 + g*183 + b*19)/256; // 0.2126, 0.7152, 0.0722 with 256 denominator.
4957 }
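// The integer weights sum to 256 (54 + 183 + 19), so a fully white pixel (r = g = b = 255)
// maps to exactly 255.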
4958
4959 // ~~~~~~ Saving / restoring src and dst registers ~~~~~~ //
4960
4961 STAGE_PP(load_src, const uint16_t* ptr) {
4962 r = sk_unaligned_load<U16>(ptr + 0*N);
4963 g = sk_unaligned_load<U16>(ptr + 1*N);
4964 b = sk_unaligned_load<U16>(ptr + 2*N);
4965 a = sk_unaligned_load<U16>(ptr + 3*N);
4966 }
4967 STAGE_PP(store_src, uint16_t* ptr) {
4968 sk_unaligned_store(ptr + 0*N, r);
4969 sk_unaligned_store(ptr + 1*N, g);
4970 sk_unaligned_store(ptr + 2*N, b);
4971 sk_unaligned_store(ptr + 3*N, a);
4972 }
4973 STAGE_PP(store_src_a, uint16_t* ptr) {
4974 sk_unaligned_store(ptr, a);
4975 }
4976 STAGE_PP(load_dst, const uint16_t* ptr) {
4977 dr = sk_unaligned_load<U16>(ptr + 0*N);
4978 dg = sk_unaligned_load<U16>(ptr + 1*N);
4979 db = sk_unaligned_load<U16>(ptr + 2*N);
4980 da = sk_unaligned_load<U16>(ptr + 3*N);
4981 }
4982 STAGE_PP(store_dst, uint16_t* ptr) {
4983 sk_unaligned_store(ptr + 0*N, dr);
4984 sk_unaligned_store(ptr + 1*N, dg);
4985 sk_unaligned_store(ptr + 2*N, db);
4986 sk_unaligned_store(ptr + 3*N, da);
4987 }
4988
4989 // ~~~~~~ Coverage scales / lerps ~~~~~~ //
4990
4991 STAGE_PP(scale_1_float, const float* f) {
4992 U16 c = from_float(*f);
4993 r = div255( r * c );
4994 g = div255( g * c );
4995 b = div255( b * c );
4996 a = div255( a * c );
4997 }
4998 STAGE_PP(lerp_1_float, const float* f) {
4999 U16 c = from_float(*f);
5000 r = lerp(dr, r, c);
5001 g = lerp(dg, g, c);
5002 b = lerp(db, b, c);
5003 a = lerp(da, a, c);
5004 }
5005 STAGE_PP(scale_native, const uint16_t scales[]) {
5006 auto c = sk_unaligned_load<U16>(scales);
5007 r = div255( r * c );
5008 g = div255( g * c );
5009 b = div255( b * c );
5010 a = div255( a * c );
5011 }
5012
5013 STAGE_PP(lerp_native, const uint16_t scales[]) {
5014 auto c = sk_unaligned_load<U16>(scales);
5015 r = lerp(dr, r, c);
5016 g = lerp(dg, g, c);
5017 b = lerp(db, b, c);
5018 a = lerp(da, a, c);
5019 }
5020
5021 STAGE_PP(scale_u8, const SkRasterPipeline_MemoryCtx* ctx) {
5022 U16 c = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
5023 r = div255( r * c );
5024 g = div255( g * c );
5025 b = div255( b * c );
5026 a = div255( a * c );
5027 }
5028 STAGE_PP(lerp_u8, const SkRasterPipeline_MemoryCtx* ctx) {
5029 U16 c = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
5030 r = lerp(dr, r, c);
5031 g = lerp(dg, g, c);
5032 b = lerp(db, b, c);
5033 a = lerp(da, a, c);
5034 }
5035
5036 // Derive alpha's coverage from rgb coverage and the values of src and dst alpha.
5037 SI U16 alpha_coverage_from_rgb_coverage(U16 a, U16 da, U16 cr, U16 cg, U16 cb) {
5038 return if_then_else(a < da, min(cr, min(cg,cb))
5039 , max(cr, max(cg,cb)));
5040 }
5041 STAGE_PP(scale_565, const SkRasterPipeline_MemoryCtx* ctx) {
5042 U16 cr,cg,cb;
5043 load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &cr,&cg,&cb);
5044 U16 ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
5045
5046 r = div255( r * cr );
5047 g = div255( g * cg );
5048 b = div255( b * cb );
5049 a = div255( a * ca );
5050 }
5051 STAGE_PP(lerp_565, const SkRasterPipeline_MemoryCtx* ctx) {
5052 U16 cr,cg,cb;
5053 load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &cr,&cg,&cb);
5054 U16 ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
5055
5056 r = lerp(dr, r, cr);
5057 g = lerp(dg, g, cg);
5058 b = lerp(db, b, cb);
5059 a = lerp(da, a, ca);
5060 }
5061
5062 STAGE_PP(emboss, const SkRasterPipeline_EmbossCtx* ctx) {
5063 U16 mul = load_8(ptr_at_xy<const uint8_t>(&ctx->mul, dx,dy), tail),
5064 add = load_8(ptr_at_xy<const uint8_t>(&ctx->add, dx,dy), tail);
5065
5066 r = min(div255(r*mul) + add, a);
5067 g = min(div255(g*mul) + add, a);
5068 b = min(div255(b*mul) + add, a);
5069 }
5070
5071
5072 // ~~~~~~ Gradient stages ~~~~~~ //
5073
5074 // Clamp x to [0,1], both sides inclusive (think, gradients).
5075 // Even repeat and mirror funnel through a clamp to handle bad inputs like +Inf, NaN.
5076 SI F clamp_01_(F v) { return min(max(0, v), 1); }
5077
5078 STAGE_GG(clamp_x_1 , NoCtx) { x = clamp_01_(x); }
5079 STAGE_GG(repeat_x_1, NoCtx) { x = clamp_01_(x - floor_(x)); }
5080 STAGE_GG(mirror_x_1, NoCtx) {
5081 auto two = [](F x){ return x+x; };
5082 x = clamp_01_(abs_( (x-1.0f) - two(floor_((x-1.0f)*0.5f)) - 1.0f ));
5083 }
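// For example, x = 2.5: (x-1) = 1.5, floor(1.5 * 0.5) = 0, so |1.5 - 0 - 1| = 0.5, the expected
// mirrored coordinate for 2.5 under a period-2 mirror.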
5084
5085 SI I16 cond_to_mask_16(I32 cond) { return cast<I16>(cond); }
5086
5087 STAGE_GG(decal_x, SkRasterPipeline_DecalTileCtx* ctx) {
5088 auto w = ctx->limit_x;
5089 sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= x) & (x < w)));
5090 }
5091 STAGE_GG(decal_y, SkRasterPipeline_DecalTileCtx* ctx) {
5092 auto h = ctx->limit_y;
5093 sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= y) & (y < h)));
5094 }
5095 STAGE_GG(decal_x_and_y, SkRasterPipeline_DecalTileCtx* ctx) {
5096 auto w = ctx->limit_x;
5097 auto h = ctx->limit_y;
5098 sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= x) & (x < w) & (0 <= y) & (y < h)));
5099 }
5100 STAGE_GG(clamp_x_and_y, SkRasterPipeline_CoordClampCtx* ctx) {
5101 x = min(ctx->max_x, max(ctx->min_x, x));
5102 y = min(ctx->max_y, max(ctx->min_y, y));
5103 }
5104 STAGE_PP(check_decal_mask, SkRasterPipeline_DecalTileCtx* ctx) {
5105 auto mask = sk_unaligned_load<U16>(ctx->mask);
5106 r = r & mask;
5107 g = g & mask;
5108 b = b & mask;
5109 a = a & mask;
5110 }
5111
5112 SI void round_F_to_U16(F R, F G, F B, F A, U16* r, U16* g, U16* b, U16* a) {
5113 auto round = [](F x) { return cast<U16>(x * 255.0f + 0.5f); };
5114
5115 *r = round(min(max(0, R), 1));
5116 *g = round(min(max(0, G), 1));
5117 *b = round(min(max(0, B), 1));
5118 *a = round(A); // we assume alpha is already in [0,1].
5119 }
5120
5121 SI void gradient_lookup(const SkRasterPipeline_GradientCtx* c, U32 idx, F t,
5122 U16* r, U16* g, U16* b, U16* a) {
5123
5124 F fr, fg, fb, fa, br, bg, bb, ba;
5125 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
5126 if (c->stopCount <=8) {
5127 __m256i lo, hi;
5128 split(idx, &lo, &hi);
5129
5130 fr = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), lo),
5131 _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), hi));
5132 br = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), lo),
5133 _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), hi));
5134 fg = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), lo),
5135 _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), hi));
5136 bg = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), lo),
5137 _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), hi));
5138 fb = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), lo),
5139 _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), hi));
5140 bb = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), lo),
5141 _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), hi));
5142 fa = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), lo),
5143 _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), hi));
5144 ba = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), lo),
5145 _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), hi));
5146 } else
5147 #endif
5148 {
5149 fr = gather<F>(c->fs[0], idx);
5150 fg = gather<F>(c->fs[1], idx);
5151 fb = gather<F>(c->fs[2], idx);
5152 fa = gather<F>(c->fs[3], idx);
5153 br = gather<F>(c->bs[0], idx);
5154 bg = gather<F>(c->bs[1], idx);
5155 bb = gather<F>(c->bs[2], idx);
5156 ba = gather<F>(c->bs[3], idx);
5157 }
5158 round_F_to_U16(mad(t, fr, br),
5159 mad(t, fg, bg),
5160 mad(t, fb, bb),
5161 mad(t, fa, ba),
5162 r,g,b,a);
5163 }
5164
5165 STAGE_GP(gradient, const SkRasterPipeline_GradientCtx* c) {
5166 auto t = x;
5167 U32 idx = 0;
5168
5169 // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop.
5170 for (size_t i = 1; i < c->stopCount; i++) {
5171 idx += if_then_else(t >= c->ts[i], U32(1), U32(0));
5172 }
5173
5174 gradient_lookup(c, idx, t, &r, &g, &b, &a);
5175 }
5176
5177 STAGE_GP(evenly_spaced_gradient, const SkRasterPipeline_GradientCtx* c) {
5178 auto t = x;
5179 auto idx = trunc_(t * (c->stopCount-1));
5180 gradient_lookup(c, idx, t, &r, &g, &b, &a);
5181 }
5182
5183 STAGE_GP(evenly_spaced_2_stop_gradient, const SkRasterPipeline_EvenlySpaced2StopGradientCtx* c) {
5184 auto t = x;
5185 round_F_to_U16(mad(t, c->f[0], c->b[0]),
5186 mad(t, c->f[1], c->b[1]),
5187 mad(t, c->f[2], c->b[2]),
5188 mad(t, c->f[3], c->b[3]),
5189 &r,&g,&b,&a);
5190 }
5191
5192 STAGE_GP(bilerp_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
5193 // Quantize the sample point and transform it into lerp coordinates, converting them to 16.16
5194 // fixed-point numbers.
5195 I32 qx = cast<I32>(floor_(65536.0f * x + 0.5f)) - 32768,
5196 qy = cast<I32>(floor_(65536.0f * y + 0.5f)) - 32768;
5197
5198 // Calculate screen coordinates sx & sy by flooring qx and qy.
5199 I32 sx = qx >> 16,
5200 sy = qy >> 16;
5201
5202 // We are going to perform a change of parameters for qx on [0, 1) to tx on [-1, 1).
5203 // This will put tx in Q15 format for use with scaled_mult.
5204 // Calculate tx and ty on the interval [-1, 1). Given that {qx} and {qy} are on the interval
5205 // [0, 1), where {v} is fract(v), we can transform to tx in the following manner (ty follows
5206 // the same math):
5207 //     tx = 2 * {qx} - 1, so
5208 //     {qx} = (tx + 1) / 2.
5209 // Calculate {qx} - 1 and {qy} - 1, where the {} operation is handled by the cast, and the - 1
5210 // is handled by the ^ 0x8000; dividing by 2 is deferred and handled in lerpX and lerpY in
5211 // order to use the full 16-bit resolution.
5212 I16 tx = cast<I16>(qx ^ 0x8000),
5213 ty = cast<I16>(qy ^ 0x8000);
5214
5215 // Substituting the {qx} by the equation for tx from above into the lerp equation where v is
5216 // the lerped value:
5217 // v = {qx}*(R - L) + L,
5218 // v = 1/2*(tx + 1)*(R - L) + L
5219 // 2 * v = (tx + 1)*(R - L) + 2*L
5220 // = tx*R - tx*L + R - L + 2*L
5221 // = tx*(R - L) + (R + L).
5222 // Since R and L are on [0, 255] we need them on the interval [0, 1/2] to get them into form
5223 // for scaled_mult. If L and R were in 16.16 format, this would be done by dividing by 2^9. In
5224 // code, we can multiply by 2^7 to get the value directly.
5225 // 2 * v = tx*(R - L) + (R + L)
5226 // 2^-9 * 2 * v = tx*(R - L)*2^-9 + (R + L)*2^-9
5227 // 2^-8 * v = 2^-9 * (tx*(R - L) + (R + L))
5228 // v = 1/2 * (tx*(R - L) + (R + L))
5229 auto lerpX = [&](U16 left, U16 right) -> U16 {
5230 I16 width = (I16)(right - left) << 7;
5231 U16 middle = (right + left) << 7;
5232 // The constrained_add is the most subtle part of lerp. The first term is on the interval
5233 // [-1, 1), and the second term is on the interval [0, 1); both terms are too high by a
5234 // factor of 2, which will be handled below. (Both R and L are on [0, 1/2), but the sum
5235 // R + L is on the interval [0, 1).) In general, a sum like this could overflow, but
5236 // because we know the sum produces an output on the interval [0, 1) we know that the
5237 // extra bit that would be needed will always be 0. So we need to be careful to treat
5238 // this sum as an unsigned positive number in the divide by 2 below. Add +1 for rounding.
5240 U16 v2 = constrained_add(scaled_mult(tx, width), middle) + 1;
5241 // Divide by 2 to calculate v and at the same time bring the intermediate value onto the
5242 // interval [0, 1/2] to set up for the lerpY.
5243 return v2 >> 1;
5244 };
5245
5246 const uint32_t* ptr;
5247 U32 ix = ix_and_ptr(&ptr, ctx, sx, sy);
5248 U16 leftR, leftG, leftB, leftA;
5249 from_8888(gather<U32>(ptr, ix), &leftR,&leftG,&leftB,&leftA);
5250
5251 ix = ix_and_ptr(&ptr, ctx, sx+1, sy);
5252 U16 rightR, rightG, rightB, rightA;
5253 from_8888(gather<U32>(ptr, ix), &rightR,&rightG,&rightB,&rightA);
5254
5255 U16 topR = lerpX(leftR, rightR),
5256 topG = lerpX(leftG, rightG),
5257 topB = lerpX(leftB, rightB),
5258 topA = lerpX(leftA, rightA);
5259
5260 ix = ix_and_ptr(&ptr, ctx, sx, sy+1);
5261 from_8888(gather<U32>(ptr, ix), &leftR,&leftG,&leftB,&leftA);
5262
5263 ix = ix_and_ptr(&ptr, ctx, sx+1, sy+1);
5264 from_8888(gather<U32>(ptr, ix), &rightR,&rightG,&rightB,&rightA);
5265
5266 U16 bottomR = lerpX(leftR, rightR),
5267 bottomG = lerpX(leftG, rightG),
5268 bottomB = lerpX(leftB, rightB),
5269 bottomA = lerpX(leftA, rightA);
5270
5271 // lerpY plays the same mathematical tricks as lerpX, but the final divide is by 256 resulting
5272 // in a value on [0, 255].
5273 auto lerpY = [&](U16 top, U16 bottom) -> U16 {
5274 I16 width = (I16)bottom - top;
5275 U16 middle = bottom + top;
5276 // Add + 0x80 for rounding.
5277 U16 blend = constrained_add(scaled_mult(ty, width), middle) + 0x80;
5278
5279 return blend >> 8;
5280 };
5281
5282 r = lerpY(topR, bottomR);
5283 g = lerpY(topG, bottomG);
5284 b = lerpY(topB, bottomB);
5285 a = lerpY(topA, bottomA);
5286 }
5287
5288 STAGE_GG(xy_to_unit_angle, NoCtx) {
5289 F xabs = abs_(x),
5290 yabs = abs_(y);
5291
5292 F slope = min(xabs, yabs)/max(xabs, yabs);
5293 F s = slope * slope;
5294
5295 // Use a 7th degree polynomial to approximate atan.
5296 // This was generated using sollya.gforge.inria.fr.
5297 // A float optimized polynomial was generated using the following command.
5298 // P1 = fpminimax((1/(2*Pi))*atan(x),[|1,3,5,7|],[|24...|],[2^(-40),1],relative);
5299 F phi = slope
5300 * (0.15912117063999176025390625f + s
5301 * (-5.185396969318389892578125e-2f + s
5302 * (2.476101927459239959716796875e-2f + s
5303 * (-7.0547382347285747528076171875e-3f))));
5304
5305 phi = if_then_else(xabs < yabs, 1.0f/4.0f - phi, phi);
5306 phi = if_then_else(x < 0.0f , 1.0f/2.0f - phi, phi);
5307 phi = if_then_else(y < 0.0f , 1.0f - phi , phi);
5308 phi = if_then_else(phi != phi , 0 , phi); // Check for NaN.
5309 x = phi;
5310 }
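// As a quick check, x = y = 1 gives slope = 1 and the polynomial evaluates to ~0.12497, i.e.
// atan(1)/(2*pi) = 1/8 of a turn, as expected for a 45 degree angle in the first octant.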
5311 STAGE_GG(xy_to_radius, NoCtx) {
5312 x = sqrt_(x*x + y*y);
5313 }
5314
5315 // ~~~~~~ Compound stages ~~~~~~ //
5316
5317 STAGE_PP(srcover_rgba_8888, const SkRasterPipeline_MemoryCtx* ctx) {
5318 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
5319
5320 load_8888_(ptr, tail, &dr,&dg,&db,&da);
5321 r = r + div255( dr*inv(a) );
5322 g = g + div255( dg*inv(a) );
5323 b = b + div255( db*inv(a) );
5324 a = a + div255( da*inv(a) );
5325 store_8888_(ptr, tail, r,g,b,a);
5326 }
5327
5328 // ~~~~~~ skgpu::Swizzle stage ~~~~~~ //
5329
5330 STAGE_PP(swizzle, void* ctx) {
5331 auto ir = r, ig = g, ib = b, ia = a;
5332 U16* o[] = {&r, &g, &b, &a};
5333 char swiz[4];
5334 memcpy(swiz, &ctx, sizeof(swiz));
5335
5336 for (int i = 0; i < 4; ++i) {
5337 switch (swiz[i]) {
5338 case 'r': *o[i] = ir; break;
5339 case 'g': *o[i] = ig; break;
5340 case 'b': *o[i] = ib; break;
5341 case 'a': *o[i] = ia; break;
5342 case '0': *o[i] = U16(0); break;
5343 case '1': *o[i] = U16(255); break;
5344 default: break;
5345 }
5346 }
5347 }
5348
5349 #endif//defined(JUMPER_IS_SCALAR) controlling whether we build lowp stages
5350 } // namespace lowp
5351
5352 /* This gives us SK_OPTS::lowp::N if lowp::N has been set, or SK_OPTS::N if it hasn't. */
5353 namespace lowp { static constexpr size_t lowp_N = N; }
5354
5355 /** Allow outside code to access the Raster Pipeline pixel stride. */
5356 constexpr size_t raster_pipeline_lowp_stride() { return lowp::lowp_N; }
5357 constexpr size_t raster_pipeline_highp_stride() { return N; }
5358
5359 } // namespace SK_OPTS_NS
5360
5361 #undef SI
5362
5363 #endif//SkRasterPipeline_opts_DEFINED
5364