/*
 * Copyright 2023 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkRasterPipelineOpContexts_DEFINED
#define SkRasterPipelineOpContexts_DEFINED

#include <algorithm>
#include <cstddef>
#include <cstdint>

namespace SkSL { class TraceHook; }

// The largest number of pixels we handle at a time. We have a separate value for the largest number
// of pixels we handle in the highp pipeline. Many of the context structs in this file are only used
// by stages that have no lowp implementation. They can therefore use the (smaller) highp value to
// save memory in the arena.
inline static constexpr int SkRasterPipeline_kMaxStride = 16;
inline static constexpr int SkRasterPipeline_kMaxStride_highp = 16;

// How much space to allocate for each MemoryCtx scratch buffer, as part of tail-pixel handling.
inline static constexpr size_t SkRasterPipeline_MaxScratchPerPatch =
        std::max(SkRasterPipeline_kMaxStride_highp * 16,  // 16 == largest highp bpp (RGBA_F32)
                 SkRasterPipeline_kMaxStride * 4);        // 4 == largest lowp bpp (RGBA_8888)
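
// Sanity check on the arithmetic above: with 16-lane strides, the highp term is
// 16 * 16 = 256 bytes and the lowp term is 16 * 4 = 64 bytes per patch.
static_assert(SkRasterPipeline_MaxScratchPerPatch == 256,
              "scratch sizing expects max(16*16, 16*4) == 256");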

// These structs hold the context data for many of the Raster Pipeline ops.
struct SkRasterPipeline_MemoryCtx {
    void* pixels;
    int   stride;
};

// Raster Pipeline typically processes N (4, 8, 16) pixels at a time, in SIMT fashion. If the
// number of pixels in a row isn't evenly divisible by N, there will be leftover pixels; this is
// called the "tail". To avoid reading or writing past the end of any source or destination buffers
// when we reach the tail:
//
//   1) Source buffers have their tail contents copied to a scratch buffer that is at least N wide.
//      In practice, each scratch buffer uses SkRasterPipeline_MaxScratchPerPatch bytes.
//   2) Each MemoryCtx in the pipeline is patched, such that accesses to it (at the current scanline
//      and x-offset) will land in the scratch buffer.
//   3) The pipeline is run as normal (with all memory accesses happening safely in the scratch
//      buffers).
//   4) Destination buffers have their tail contents copied back from the scratch buffer.
//   5) Each MemoryCtx is "un-patched".
//
// To do all of this, the pipeline creates a MemoryCtxPatch for each unique MemoryCtx referenced by
// the pipeline. (A simplified sketch of this patching process follows the struct definitions
// below.)
struct SkRasterPipeline_MemoryCtxInfo {
    SkRasterPipeline_MemoryCtx* context;

    int bytesPerPixel;
    bool load;
    bool store;
};

struct SkRasterPipeline_MemoryCtxPatch {
    SkRasterPipeline_MemoryCtxInfo info;

    void* backup;  // Remembers context->pixels so we can restore it
    std::byte scratch[SkRasterPipeline_MaxScratchPerPatch];
};
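
// For illustration only: a simplified, scalar sketch of how steps 1-5 above might look
// for one scanline. This is not Skia's actual implementation; the helper names
// `patch_memory_ctx` and `unpatch_memory_ctx` are hypothetical, `dx`/`dy` locate the
// tail within the buffer, and `tailPixels` counts the leftover pixels.
static inline void patch_memory_ctx(SkRasterPipeline_MemoryCtxPatch& p,
                                    int dx, int dy, int tailPixels) {
    SkRasterPipeline_MemoryCtx* ctx = p.info.context;
    int bpp = p.info.bytesPerPixel;
    std::byte* tail = (std::byte*)ctx->pixels + (dy * ctx->stride + dx) * bpp;
    if (p.info.load) {
        std::copy(tail, tail + tailPixels * bpp, p.scratch);  // step 1
    }
    p.backup = ctx->pixels;  // step 5 will need the original pointer
    // Step 2: bias the base pointer so that indexing with (dx, dy) lands at scratch[0].
    ctx->pixels = p.scratch - (dy * ctx->stride + dx) * bpp;
    // Step 3: the pipeline now runs as normal, touching only the scratch buffer.
}

static inline void unpatch_memory_ctx(SkRasterPipeline_MemoryCtxPatch& p,
                                      int dx, int dy, int tailPixels) {
    SkRasterPipeline_MemoryCtx* ctx = p.info.context;
    int bpp = p.info.bytesPerPixel;
    ctx->pixels = p.backup;  // step 5
    std::byte* tail = (std::byte*)ctx->pixels + (dy * ctx->stride + dx) * bpp;
    if (p.info.store) {
        std::copy(p.scratch, p.scratch + tailPixels * bpp, tail);  // step 4
    }
}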

struct SkRasterPipeline_GatherCtx {
    const void* pixels;
    int         stride;
    float       width;
    float       height;
    float       weights[16];  // for bicubic and bicubic_clamp_8888
    // Controls whether pixel i-1 or i is selected when the floating-point sample position is
    // exactly i.
    bool        roundDownAtInteger = false;
};
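
// A scalar sketch of the rounding rule described above (hypothetical helper, not part
// of Skia), assuming a non-negative sample position so truncation behaves like floor:
static inline int select_pixel(float pos, bool roundDownAtInteger) {
    int i = (int)pos;
    if (roundDownAtInteger && pos == (float)i) {
        i -= 1;  // an exact integer position i selects pixel i-1 instead of pixel i
    }
    return i;
}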

// State shared by save_xy, accumulate, and bilinear_* / bicubic_*.
struct SkRasterPipeline_SamplerCtx {
    float      x[SkRasterPipeline_kMaxStride_highp];
    float      y[SkRasterPipeline_kMaxStride_highp];
    float     fx[SkRasterPipeline_kMaxStride_highp];
    float     fy[SkRasterPipeline_kMaxStride_highp];
    float scalex[SkRasterPipeline_kMaxStride_highp];
    float scaley[SkRasterPipeline_kMaxStride_highp];

    // for bicubic_[np][13][xy]
    float weights[16];
    float wx[4][SkRasterPipeline_kMaxStride_highp];
    float wy[4][SkRasterPipeline_kMaxStride_highp];
};

struct SkRasterPipeline_TileCtx {
    float scale;
    float invScale; // cache of 1/scale
    // When in the reflection portion of mirror tiling, we need to snap in the opposite direction
    // at integer sample points from the direction used in the forward portion. This controls
    // which way we bias in the reflection. It should be 1 if
    // SkRasterPipeline_GatherCtx::roundDownAtInteger is true and otherwise -1.
    int   mirrorBiasDir = -1;
};
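
// A scalar sketch of mirror tiling (hypothetical helper; Skia's real version is
// vectorized and applies the mirrorBiasDir nudge at exact integer sample points):
static inline float mirror_tile(float x, const SkRasterPipeline_TileCtx& ctx) {
    float period = 2 * ctx.scale;
    // "Repeat" x over [0, period), using the cached reciprocal instead of dividing.
    float u = x - period * (int)(x * ctx.invScale * 0.5f);  // assumes x >= 0
    // Reflect the second half of the period back down onto [0, scale].
    return u <= ctx.scale ? u : period - u;
}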

struct SkRasterPipeline_DecalTileCtx {
    uint32_t mask[SkRasterPipeline_kMaxStride];
    float    limit_x;
    float    limit_y;
    // These control which edge of the interval is included (i.e. closed interval at 0 or at limit).
    // They should be set to limit_x and limit_y if SkRasterPipeline_GatherCtx::roundDownAtInteger
    // is true and otherwise zero.
    float    inclusiveEdge_x = 0;
    float    inclusiveEdge_y = 0;
};
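
// A per-lane sketch of the decal coverage test (hypothetical helper): a sample is kept
// when it falls strictly inside (0, limit), plus exactly one closed edge chosen by
// inclusiveEdge, matching the rounding convention described above.
static inline uint32_t decal_mask(float x, float limit, float inclusiveEdge) {
    bool inside = (0 < x && x < limit) || (x == inclusiveEdge);
    return inside ? ~0u : 0u;  // all-ones keeps the lane; zero discards it
}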

enum class SkPerlinNoiseShaderType;

struct SkRasterPipeline_PerlinNoiseCtx {
    SkPerlinNoiseShaderType noiseType;
    float baseFrequencyX, baseFrequencyY;
    float stitchDataInX, stitchDataInY;
    bool stitching;
    int numOctaves;
    const uint8_t* latticeSelector;  // [256 values]
    const uint16_t* noiseData;       // [4 channels][256 elements][vector of 2]
};
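
// For illustration: indexing noiseData per the layout comment above (hypothetical
// helper): channel in [0, 4), element in [0, 256), component in [0, 2).
static inline uint16_t noise_value(const SkRasterPipeline_PerlinNoiseCtx& ctx,
                                   int channel, int element, int component) {
    return ctx.noiseData[(channel * 256 + element) * 2 + component];
}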

// State used by mipmap_linear_*
struct SkRasterPipeline_MipmapCtx {
    // Original coords, saved before the base level logic
    float x[SkRasterPipeline_kMaxStride_highp];
    float y[SkRasterPipeline_kMaxStride_highp];

    // Base level color
    float r[SkRasterPipeline_kMaxStride_highp];
    float g[SkRasterPipeline_kMaxStride_highp];
    float b[SkRasterPipeline_kMaxStride_highp];
    float a[SkRasterPipeline_kMaxStride_highp];

    // Scale factors to transform base level coords to lower level coords
    float scaleX;
    float scaleY;

    float lowerWeight;
};
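
// A per-lane sketch of how the saved state is combined (hypothetical helper; the real
// work is split across several mipmap_linear_* stages): sample the lower level at
// (x * scaleX, y * scaleY), then blend it with the base level color by lowerWeight.
static inline float mix_mip_levels(float base, float lower, float lowerWeight) {
    return base + (lower - base) * lowerWeight;  // lerp(base, lower, lowerWeight)
}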

struct SkRasterPipeline_CoordClampCtx {
    float min_x, min_y;
    float max_x, max_y;
};

struct SkRasterPipeline_CallbackCtx {
    void (*fn)(SkRasterPipeline_CallbackCtx* self,
               int active_pixels /*<= SkRasterPipeline_kMaxStride_highp*/);

    // When called, fn() will have our active pixels available in rgba.
    // When fn() returns, the pipeline will read back those active pixels from read_from.
    float rgba[4*SkRasterPipeline_kMaxStride_highp];
    float* read_from = rgba;
};
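
// A usage sketch (hypothetical callback, not part of Skia), assuming the pixels are
// stored as interleaved RGBA quads: edit the values in place and leave read_from
// pointing at rgba so the pipeline picks up the edits.
static void force_opaque(SkRasterPipeline_CallbackCtx* self, int active_pixels) {
    for (int i = 0; i < active_pixels; i++) {
        self->rgba[4*i + 3] = 1.0f;  // overwrite each pixel's alpha
    }
}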

// State shared by stack_checkpoint and stack_rewind.
struct SkRasterPipelineStage;

struct SkRasterPipeline_RewindCtx {
    float  r[SkRasterPipeline_kMaxStride_highp];
    float  g[SkRasterPipeline_kMaxStride_highp];
    float  b[SkRasterPipeline_kMaxStride_highp];
    float  a[SkRasterPipeline_kMaxStride_highp];
    float dr[SkRasterPipeline_kMaxStride_highp];
    float dg[SkRasterPipeline_kMaxStride_highp];
    float db[SkRasterPipeline_kMaxStride_highp];
    float da[SkRasterPipeline_kMaxStride_highp];
    std::byte* base;
    SkRasterPipelineStage* stage;
};

constexpr size_t kRGBAChannels = 4;

struct SkRasterPipeline_GradientCtx {
    size_t stopCount;
    float* factors[kRGBAChannels];
    float* biases[kRGBAChannels];
    float* ts;
};

struct SkRasterPipeline_EvenlySpaced2StopGradientCtx {
    float factor[kRGBAChannels];
    float bias[kRGBAChannels];
};
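
// A sketch of how both gradient contexts evaluate a color (hypothetical helper): each
// channel is an affine function of the interpolant t, color = factor * t + bias. For
// the multi-stop context, `stop` is the interval that t falls into (located via `ts`);
// the evenly-spaced 2-stop context needs no search and uses its single factor/bias.
static inline void eval_gradient(const SkRasterPipeline_GradientCtx& ctx, size_t stop,
                                 float t, float rgba[kRGBAChannels]) {
    for (size_t c = 0; c < kRGBAChannels; c++) {
        rgba[c] = ctx.factors[c][stop] * t + ctx.biases[c][stop];
    }
}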

struct SkRasterPipeline_2PtConicalCtx {
    uint32_t fMask[SkRasterPipeline_kMaxStride_highp];
    float    fP0,
             fP1;
};

struct SkRasterPipeline_UniformColorCtx {
    float r,g,b,a;
    uint16_t rgba[4];  // [0,255] in a 16-bit lane.
};
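
// A sketch of how the 16-bit lanes relate to the float color (hypothetical helper,
// assuming round-to-nearest): each channel is scaled from [0,1] floats to [0,255].
static inline uint16_t to_lowp_lane(float channel) {
    return (uint16_t)(channel * 255.0f + 0.5f);  // assumes channel is in [0,1]
}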

struct SkRasterPipeline_EmbossCtx {
    SkRasterPipeline_MemoryCtx mul,
                               add;
};

struct SkRasterPipeline_TablesCtx {
    const uint8_t *r, *g, *b, *a;
};

using SkRPOffset = uint32_t;

struct SkRasterPipeline_InitLaneMasksCtx {
    uint8_t* tail;
};

struct SkRasterPipeline_ConstantCtx {
    int32_t value;
    SkRPOffset dst;
};

struct SkRasterPipeline_UniformCtx {
    int32_t* dst;
    const int32_t* src;
};

struct SkRasterPipeline_BinaryOpCtx {
    SkRPOffset dst;
    SkRPOffset src;
};

struct SkRasterPipeline_TernaryOpCtx {
    SkRPOffset dst;
    SkRPOffset delta;
};

struct SkRasterPipeline_MatrixMultiplyCtx {
    SkRPOffset dst;
    uint8_t leftColumns, leftRows, rightColumns, rightRows;
};

struct SkRasterPipeline_SwizzleCtx {
    // If we are processing more than 16 pixels at a time, an 8-bit offset won't be sufficient and
    // `offsets` will need to use uint16_t (or dial down the premultiplication).
    static_assert(SkRasterPipeline_kMaxStride_highp <= 16);

    SkRPOffset dst;
    uint8_t offsets[4];  // values must be byte offsets (4 * highp-stride * component-index)
};
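
// For illustration: packing a component swizzle into SwizzleCtx::offsets (hypothetical
// helper). Slots are planar, so one slot spans 4-byte floats * the highp stride, and
// each offset is component-index * slot size, e.g. {2,1,0,3} for a .bgra swizzle.
static inline void pack_swizzle_offsets(const uint8_t components[4], uint8_t offsets[4]) {
    constexpr int kSlotBytes = 4 * SkRasterPipeline_kMaxStride_highp;  // 64 bytes per slot
    for (int i = 0; i < 4; i++) {
        offsets[i] = (uint8_t)(components[i] * kSlotBytes);  // fits: 3 * 64 = 192 < 256
    }
}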

struct SkRasterPipeline_ShuffleCtx {
    int32_t* ptr;
    int count;
    uint16_t offsets[16];  // values must be byte offsets (4 * highp-stride * component-index)
};

struct SkRasterPipeline_SwizzleCopyCtx {
    int32_t* dst;
    const int32_t* src;   // src values must _not_ overlap dst values
    uint16_t offsets[4];  // values must be byte offsets (4 * highp-stride * component-index)
};

struct SkRasterPipeline_CopyIndirectCtx {
    int32_t* dst;
    const int32_t* src;
    const uint32_t *indirectOffset;  // this applies to `src` or `dst` based on the op
    uint32_t indirectLimit;          // the indirect offset is clamped to this upper bound
    uint32_t slots;                  // the number of slots to copy
};
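
// A sketch of the indirect addressing described above (hypothetical helper): the
// dynamic offset counts whole slots, is clamped to indirectLimit, and is then scaled
// by the lane count, since each slot holds one value per lane.
static inline const int32_t* apply_indirect(const int32_t* base,
                                            const SkRasterPipeline_CopyIndirectCtx& ctx) {
    uint32_t offset = std::min(*ctx.indirectOffset, ctx.indirectLimit);
    return base + offset * SkRasterPipeline_kMaxStride_highp;
}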

struct SkRasterPipeline_SwizzleCopyIndirectCtx : public SkRasterPipeline_CopyIndirectCtx {
    uint16_t offsets[4];  // values must be byte offsets (4 * highp-stride * component-index)
};

struct SkRasterPipeline_BranchCtx {
    int offset;  // contains the label ID during compilation, and the program offset when compiled
};

struct SkRasterPipeline_BranchIfAllLanesActiveCtx : public SkRasterPipeline_BranchCtx {
    uint8_t* tail = nullptr;  // lanes past the tail are _never_ active, so we need to exclude them
};

struct SkRasterPipeline_BranchIfEqualCtx : public SkRasterPipeline_BranchCtx {
    int value;
    const int* ptr;
};

struct SkRasterPipeline_CaseOpCtx {
    int expectedValue;
    SkRPOffset offset;  // points to a pair of adjacent I32s: {I32 actualValue, I32 defaultMask}
};
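
// A sketch of the case op's test (hypothetical helper), treating the two adjacent I32
// slots as scalars for clarity: a matching lane takes the case and clears defaultMask
// so the switch's default case won't also run for it.
static inline bool case_matches(int expectedValue, int32_t* actualValueAndDefaultMask) {
    bool match = actualValueAndDefaultMask[0] == expectedValue;
    if (match) {
        actualValueAndDefaultMask[1] = 0;  // clear defaultMask
    }
    return match;
}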

struct SkRasterPipeline_TraceFuncCtx {
    const int* traceMask;
    SkSL::TraceHook* traceHook;
    int funcIdx;
};

struct SkRasterPipeline_TraceScopeCtx {
    const int* traceMask;
    SkSL::TraceHook* traceHook;
    int delta;
};

struct SkRasterPipeline_TraceLineCtx {
    const int* traceMask;
    SkSL::TraceHook* traceHook;
    int lineNumber;
};

struct SkRasterPipeline_TraceVarCtx {
    const int* traceMask;
    SkSL::TraceHook* traceHook;
    int slotIdx, numSlots;
    const int* data;
    const uint32_t *indirectOffset;  // can be null; if set, an offset applied to `data`
    uint32_t indirectLimit;          // the indirect offset is clamped to this upper bound
};

#endif  // SkRasterPipelineOpContexts_DEFINED