/*
 * Copyright 2023 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkRasterPipelineOpContexts_DEFINED
#define SkRasterPipelineOpContexts_DEFINED

#include <cstddef>
#include <cstdint>

// The largest number of pixels we handle at a time. We have a separate value for the largest number
// of pixels we handle in the highp pipeline. Many of the context structs in this file are only used
// by stages that have no lowp implementation. They can therefore use the (smaller) highp value to
// save memory in the arena.
inline static constexpr int SkRasterPipeline_kMaxStride = 16;
inline static constexpr int SkRasterPipeline_kMaxStride_highp = 8;

// These structs hold the context data for many of the Raster Pipeline ops.
struct SkRasterPipeline_MemoryCtx {
    void* pixels;
    int   stride;
};

struct SkRasterPipeline_GatherCtx {
    const void* pixels;
    int         stride;
    float       width;
    float       height;
    float       weights[16];  // for bicubic and bicubic_clamp_8888
    // Controls whether pixel i-1 or i is selected when the floating-point sample position is
    // exactly i.
    bool        roundDownAtInteger = false;
};
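
// Illustrative example: with roundDownAtInteger == true, a sample position of exactly 3.0 selects
// pixel 2; with it false, pixel 3 is selected.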

// State shared by save_xy, accumulate, and bilinear_* / bicubic_*.
struct SkRasterPipeline_SamplerCtx {
    float      x[SkRasterPipeline_kMaxStride_highp];
    float      y[SkRasterPipeline_kMaxStride_highp];
    float     fx[SkRasterPipeline_kMaxStride_highp];
    float     fy[SkRasterPipeline_kMaxStride_highp];
    float scalex[SkRasterPipeline_kMaxStride_highp];
    float scaley[SkRasterPipeline_kMaxStride_highp];

    // for bicubic_[np][13][xy]
    float weights[16];
    float wx[4][SkRasterPipeline_kMaxStride_highp];
    float wy[4][SkRasterPipeline_kMaxStride_highp];
};

struct SkRasterPipeline_TileCtx {
    float scale;
    float invScale; // cache of 1/scale
    // When in the reflection portion of mirror tiling, we need to snap in the opposite direction
    // at integer sample points from the direction used in the forward portion. This controls
    // which way we bias in the reflection: it should be 1 if
    // SkRasterPipeline_GatherCtx::roundDownAtInteger is true, and -1 otherwise.
    int   mirrorBiasDir = -1;
};

struct SkRasterPipeline_DecalTileCtx {
    uint32_t mask[SkRasterPipeline_kMaxStride];
    float    limit_x;
    float    limit_y;
    // These control which edge of the interval is included (i.e. whether the interval is closed
    // at 0 or at the limit). They should be set to limit_x and limit_y if
    // SkRasterPipeline_GatherCtx::roundDownAtInteger is true, and to zero otherwise.
    float    inclusiveEdge_x = 0;
    float    inclusiveEdge_y = 0;
};
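
// A minimal illustrative sketch (not part of Skia's API; the helper name is hypothetical) of how
// a caller could keep the rounding-related fields above consistent with
// SkRasterPipeline_GatherCtx::roundDownAtInteger, per the comments on each field.
inline void SkRasterPipeline_example_sync_rounding(const SkRasterPipeline_GatherCtx& gather,
                                                   SkRasterPipeline_TileCtx* tile,
                                                   SkRasterPipeline_DecalTileCtx* decal) {
    tile->mirrorBiasDir    = gather.roundDownAtInteger ? 1 : -1;
    decal->inclusiveEdge_x = gather.roundDownAtInteger ? decal->limit_x : 0.0f;
    decal->inclusiveEdge_y = gather.roundDownAtInteger ? decal->limit_y : 0.0f;
}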

// State used by mipmap_linear_*
struct SkRasterPipeline_MipmapCtx {
    // Original coords, saved before the base level logic
    float x[SkRasterPipeline_kMaxStride_highp];
    float y[SkRasterPipeline_kMaxStride_highp];

    // Base level color
    float r[SkRasterPipeline_kMaxStride_highp];
    float g[SkRasterPipeline_kMaxStride_highp];
    float b[SkRasterPipeline_kMaxStride_highp];
    float a[SkRasterPipeline_kMaxStride_highp];

    // Scale factors to transform base level coords to lower level coords
    float scaleX;
    float scaleY;

    float lowerWeight;
};

struct SkRasterPipeline_CoordClampCtx {
    float min_x, min_y;
    float max_x, max_y;
};

struct SkRasterPipeline_CallbackCtx {
    void (*fn)(SkRasterPipeline_CallbackCtx* self,
               int active_pixels /*<= SkRasterPipeline_kMaxStride_highp*/);

    // When called, fn() will have our active pixels available in rgba.
    // When fn() returns, the pipeline will read back those active pixels from read_from.
    float rgba[4*SkRasterPipeline_kMaxStride_highp];
    float* read_from = rgba;
};
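
// A minimal illustrative sketch (not part of Skia's API; the function name is hypothetical) of a
// callback matching fn's signature. It halves every value in the staging buffer and leaves
// read_from pointing at rgba, so the pipeline reads the modified values back. The exact channel
// layout of rgba is determined by the stages that store to and load from it.
inline void SkRasterPipeline_example_darken_callback(SkRasterPipeline_CallbackCtx* self,
                                                     int /*active_pixels*/) {
    for (float& v : self->rgba) {
        v *= 0.5f;
    }
}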

// State shared by stack_checkpoint and stack_rewind.
struct SkRasterPipelineStage;

struct SkRasterPipeline_RewindCtx {
    float  r[SkRasterPipeline_kMaxStride_highp];
    float  g[SkRasterPipeline_kMaxStride_highp];
    float  b[SkRasterPipeline_kMaxStride_highp];
    float  a[SkRasterPipeline_kMaxStride_highp];
    float dr[SkRasterPipeline_kMaxStride_highp];
    float dg[SkRasterPipeline_kMaxStride_highp];
    float db[SkRasterPipeline_kMaxStride_highp];
    float da[SkRasterPipeline_kMaxStride_highp];
    SkRasterPipelineStage* stage;
};

// Each color channel of the gradient is evaluated as f*t + b for the interval containing t.
struct SkRasterPipeline_GradientCtx {
    size_t stopCount;
    float* fs[4];  // scale factors, one array per color channel
    float* bs[4];  // biases, one array per color channel
    float* ts;     // stop positions
};

// A single pair of scale factors and biases, evaluated as f*t + b per channel.
struct SkRasterPipeline_EvenlySpaced2StopGradientCtx {
    float f[4];
    float b[4];
};

struct SkRasterPipeline_2PtConicalCtx {
    uint32_t fMask[SkRasterPipeline_kMaxStride_highp];
    float    fP0,
             fP1;
};

struct SkRasterPipeline_UniformColorCtx {
    float r,g,b,a;
    uint16_t rgba[4];  // [0,255] in a 16-bit lane.
};

struct SkRasterPipeline_EmbossCtx {
    SkRasterPipeline_MemoryCtx mul,
                               add;
};

struct SkRasterPipeline_TablesCtx {
    const uint8_t *r, *g, *b, *a;
};

struct SkRasterPipeline_BinaryOpCtx {
    float* dst;
    const float* src;
};

struct SkRasterPipeline_TernaryOpCtx {
    float* dst;
    const float* src0;
    const float* src1;
};

struct SkRasterPipeline_SwizzleCtx {
    float* ptr;
    uint16_t offsets[4];  // values must be byte offsets (4 * highp-stride * component-index)
};
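
// Worked example (illustrative): with SkRasterPipeline_kMaxStride_highp == 8, each component's
// lanes span 4 * 8 == 32 bytes, so component index 2 corresponds to byte offset 4 * 8 * 2 == 64,
// and a swizzle reading components {2, 1, 0, 3} would store offsets {64, 32, 0, 96}.
static_assert(4 * SkRasterPipeline_kMaxStride_highp * 2 == 64,
              "byte offset = 4 bytes per float * highp stride * component index");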

struct SkRasterPipeline_ShuffleCtx {
    float* ptr;
    int count;
    uint16_t offsets[16];  // values must be byte offsets (4 * highp-stride * component-index)
};

struct SkRasterPipeline_SwizzleCopyCtx {
    float* dst;
    const float* src;     // src values must _not_ overlap dst values
    uint16_t offsets[4];  // values must be byte offsets (4 * highp-stride * component-index)
};

struct SkRasterPipeline_CopyIndirectCtx {
    float* dst;
    const float* src;
    const uint32_t* indirectOffset;  // this applies to `src` or `dst` based on the op
    uint32_t indirectLimit;          // the indirect offset is clamped to this upper bound
    uint32_t slots;                  // the number of slots to copy
};
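
// For example (illustrative): with indirectLimit == 4, an indirect offset of 7 is clamped, and the
// copy behaves as if the offset were 4.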

struct SkRasterPipeline_BranchCtx {
    int offset;  // contains the label ID during compilation, and the program offset when compiled
};

struct SkRasterPipeline_BranchIfEqualCtx : public SkRasterPipeline_BranchCtx {
    int value;
    const int* ptr;
};

struct SkRasterPipeline_CaseOpCtx {
    int expectedValue;
    int* ptr;  // points to a pair of adjacent I32s: {I32 actualValue, I32 defaultMask}
};

#endif  // SkRasterPipelineOpContexts_DEFINED