• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7 
8 #ifndef skgpu_graphite_ResourceTypes_DEFINED
9 #define skgpu_graphite_ResourceTypes_DEFINED
10 
11 #include "include/core/SkSamplingOptions.h"
12 #include "include/core/SkTileMode.h"
13 #include "include/gpu/graphite/GraphiteTypes.h"
14 #include "include/private/base/SkTo.h"
15 #include "src/base/SkEnumBitMask.h"
16 #include "src/base/SkMathPriv.h"
17 
18 namespace skgpu::graphite {
19 
20 class Buffer;
21 
22 enum class DepthStencilFlags : int {
23     kNone = 0b000,
24     kDepth = 0b001,
25     kStencil = 0b010,
26     kDepthStencil = kDepth | kStencil,
27 };
28 SK_MAKE_BITMASK_OPS(DepthStencilFlags)
29 
/**
 * This enum is used to specify the load operation to be used when a RenderPass begins execution
 */
enum class LoadOp : uint8_t {
    kLoad,
    kClear,
    kDiscard,

    kLast = kDiscard
};
// Number of distinct LoadOp values (for sizing tables keyed by LoadOp). Use static_cast rather
// than a C-style cast, per modern C++ convention.
inline static constexpr int kLoadOpCount = static_cast<int>(LoadOp::kLast) + 1;
41 
/**
 * This enum is used to specify the store operation to be used when a RenderPass ends execution.
 */
enum class StoreOp : uint8_t {
    kStore,
    kDiscard,

    kLast = kDiscard
};
// Number of distinct StoreOp values (for sizing tables keyed by StoreOp). Use static_cast rather
// than a C-style cast, per modern C++ convention.
inline static constexpr int kStoreOpCount = static_cast<int>(StoreOp::kLast) + 1;
52 
/**
 * What a GPU buffer will be used for
 */
enum class BufferType : int {
    kVertex,
    kIndex,
    kXferCpuToGpu,
    kXferGpuToCpu,
    kUniform,
    kStorage,

    // GPU-only buffer types
    kIndirect,
    kVertexStorage,
    kIndexStorage,

    kLast = kIndexStorage,
};
// Declared `inline static constexpr` (instead of `static const`) for consistency with
// kLoadOpCount/kStoreOpCount, and with static_cast instead of a C-style cast.
inline static constexpr int kBufferTypeCount = static_cast<int>(BufferType::kLast) + 1;
72 
/**
 * Data layout requirements on host-shareable buffer contents.
 */
enum class Layout {
    kInvalid = 0,
    kStd140,
    kStd430,
    kMetal,
};
82 
LayoutString(Layout layout)83 static constexpr const char* LayoutString(Layout layout) {
84     switch(layout) {
85         case Layout::kStd140:  return "std140";
86         case Layout::kStd430:  return "std430";
87         case Layout::kMetal:   return "metal";
88         case Layout::kInvalid: return "invalid";
89     }
90     SkUNREACHABLE;
91 }
92 
/**
 * Indicates the intended access pattern over resource memory. This is used to select the most
 * efficient memory type during resource creation based on the capabilities of the platform.
 *
 * This is only a hint; the actual memory type will be determined based on the resource type
 * and backend capabilities.
 */
enum class AccessPattern : int {
    // GPU-only memory does not need to support reads/writes from the CPU. GPU-private memory
    // will be preferred if the backend supports an efficient private memory type.
    kGpuOnly,

    // The resource needs to be CPU visible, e.g. for read-back or as a copy/upload source.
    kHostVisible,
};
108 
/**
 * Determines whether the contents of a GPU buffer sub-allocation gets cleared to 0 before being
 * used in a GPU command submission.
 */
enum class ClearBuffer : bool {
    kNo = false,
    kYes = true,
};
117 
/**
 * Must the contents of the Resource be preserved after a render pass, or can a more efficient
 * representation be chosen when supported by hardware.
 */
enum class Discardable : bool {
    kNo = false,
    kYes = true
};
126 
/** Ownership category of a resource (owned by Graphite vs. wrapping an external object). */
enum class Ownership {
    kOwned,
    kWrapped,
};
131 
/** Uniquely identifies the type of resource that is cached with a GraphiteResourceKey. */
using ResourceType = uint32_t;

/**
 * Can the resource be held by multiple users at the same time?
 * For example, stencil buffers, pipelines, etc.
 */
enum class Shareable : bool {
    kNo = false,
    kYes = true,
};
143 
/**
 * This enum is used to notify the ResourceCache which type of ref just dropped to zero on a
 * Resource.
 */
enum class LastRemovedRef {
    kUsage,
    kCommandBuffer,
    kCache,
};
153 
154 /*
155  * Struct that can be passed into bind buffer calls on the CommandBuffer. The ownership of the
156  * buffer and its usage in command submission must be tracked by the caller (e.g. as with
157  * buffers created by DrawBufferManager).
158  */
159 struct BindBufferInfo {
160     const Buffer* fBuffer = nullptr;
161     size_t fOffset = 0;
162 
163     operator bool() const { return SkToBool(fBuffer); }
164 
165     bool operator==(const BindBufferInfo& o) const {
166         return fBuffer == o.fBuffer && (!fBuffer || fOffset == o.fOffset);
167     }
168     bool operator!=(const BindBufferInfo& o) const { return !(*this == o); }
169 };
170 
171 /*
172  * Struct that can be passed into bind uniform buffer calls on the CommandBuffer.
173  * It is similar to BindBufferInfo with additional fBindingSize member.
174  */
175 struct BindUniformBufferInfo : public BindBufferInfo {
176     // TODO(b/308933713): Add size to BindBufferInfo instead
177     uint32_t fBindingSize = 0;
178 
179     bool operator==(const BindUniformBufferInfo& o) const {
180         return BindBufferInfo::operator==(o) && (!fBuffer || fBindingSize == o.fBindingSize);
181     }
182     bool operator!=(const BindUniformBufferInfo& o) const { return !(*this == o); }
183 };
184 
185 /**
186  * Represents a buffer region that should be cleared to 0. A ClearBuffersTask does not take an
187  * owning reference to the buffer it clears. A higher layer is responsible for managing the lifetime
188  * and usage refs of the buffer.
189  */
190 struct ClearBufferInfo {
191     const Buffer* fBuffer = nullptr;
192     size_t fOffset = 0;
193     size_t fSize = 0;
194 
195     operator bool() const { return SkToBool(fBuffer); }
196 };
197 
/** Backend-supplied description of an immutable sampler; consumed by SamplerDesc below. */
struct ImmutableSamplerInfo {
    // If the sampler requires YCbCr conversion, backends can place that information here.
    // In order to fit within SamplerDesc's uint32 desc field, backends can only utilize up to
    // kMaxNumConversionInfoBits bits.
    uint32_t fNonFormatYcbcrConversionInfo = 0;
    // fFormat represents known OR external format numerical representation.
    uint64_t fFormat = 0;
};
206 
207 
208 /**
209  * Struct used to describe how a Texture/TextureProxy/TextureProxyView is sampled.
210  */
211 struct SamplerDesc {
212     static_assert(kSkTileModeCount <= 4 && kSkFilterModeCount <= 2 && kSkMipmapModeCount <= 4);
213 
214     SamplerDesc(const SkSamplingOptions& samplingOptions,
215                 const SkTileMode tileModes[2],
216                 const ImmutableSamplerInfo info = {})
217             : fDesc((static_cast<int>(tileModes[0])               << kTileModeXShift           ) |
218                     (static_cast<int>(tileModes[1])               << kTileModeYShift           ) |
219                     (static_cast<int>(samplingOptions.filter)     << kFilterModeShift          ) |
220                     (static_cast<int>(samplingOptions.mipmap)     << kMipmapModeShift          ) |
221                     (info.fNonFormatYcbcrConversionInfo           << kImmutableSamplerInfoShift) )
222             , fFormat(info.fFormat)
223             , fExternalFormatMostSignificantBits(info.fFormat >> 32) {
224 
225         // Cubic sampling is handled in a shader, with the actual texture sampled by with NN,
226         // but that is what a cubic SkSamplingOptions is set to if you ignore 'cubic', which let's
227         // us simplify how we construct SamplerDec's from the options passed to high-level draws.
228         SkASSERT(!samplingOptions.useCubic || (samplingOptions.filter == SkFilterMode::kNearest &&
229                                                samplingOptions.mipmap == SkMipmapMode::kNone));
230 
231         // TODO: Add aniso value when used.
232 
233         // Assert that fYcbcrConversionInfo does not exceed kMaxNumConversionInfoBits such that
234         // the conversion information can fit within an uint32.
235         SkASSERT(info.fNonFormatYcbcrConversionInfo >> kMaxNumConversionInfoBits == 0);
236     }
237 
238     SamplerDesc(const SamplerDesc&) = default;
239 
240     bool operator==(const SamplerDesc& o) const {
241         return o.fDesc == fDesc && o.fFormat == fFormat &&
242                o.fExternalFormatMostSignificantBits == fExternalFormatMostSignificantBits;
243     }
244 
245     bool operator!=(const SamplerDesc& o) const { return !(*this == o); }
246 
tileModeXSamplerDesc247     SkTileMode tileModeX()          const { return static_cast<SkTileMode>((fDesc >> 0) & 0b11); }
tileModeYSamplerDesc248     SkTileMode tileModeY()          const { return static_cast<SkTileMode>((fDesc >> 2) & 0b11); }
descSamplerDesc249     uint32_t   desc()               const { return fDesc;                                        }
formatSamplerDesc250     uint32_t   format()             const { return fFormat;                                      }
externalFormatMSBsSamplerDesc251     uint32_t   externalFormatMSBs() const { return fExternalFormatMostSignificantBits;           }
252 
253     // NOTE: returns the HW sampling options to use, so a bicubic SkSamplingOptions will become
254     // nearest-neighbor sampling in HW.
samplingOptionsSamplerDesc255     SkSamplingOptions samplingOptions() const {
256         // TODO: Add support for anisotropic filtering
257         SkFilterMode filter = static_cast<SkFilterMode>((fDesc >> 4) & 0b01);
258         SkMipmapMode mipmap = static_cast<SkMipmapMode>((fDesc >> 5) & 0b11);
259         return SkSamplingOptions(filter, mipmap);
260     }
261 
262     // These are public such that backends can bitshift data in order to determine whatever
263     // sampler qualities they need from fDesc.
264     static constexpr int kNumTileModeBits   = SkNextLog2_portable(int(SkTileMode::kLastTileMode)+1);
265     static constexpr int kNumFilterModeBits = SkNextLog2_portable(int(SkFilterMode::kLast)+1);
266     static constexpr int kNumMipmapModeBits = SkNextLog2_portable(int(SkMipmapMode::kLast)+1);
267     static constexpr int kMaxNumConversionInfoBits =
268             32 - kNumFilterModeBits - kNumMipmapModeBits - kNumTileModeBits;
269 
270     static constexpr int kTileModeXShift            = 0;
271     static constexpr int kTileModeYShift            = kTileModeXShift  + kNumTileModeBits;
272     static constexpr int kFilterModeShift           = kTileModeYShift  + kNumTileModeBits;
273     static constexpr int kMipmapModeShift           = kFilterModeShift + kNumFilterModeBits;
274     static constexpr int kImmutableSamplerInfoShift = kMipmapModeShift + kNumMipmapModeBits;
275 
276 private:
277     // Note: The order of these member attributes matters to keep unique object representation
278     // such that SkGoodHash can be used to hash SamplerDesc objects.
279     uint32_t fDesc;
280 
281     // Data fields populated by backend Caps which store texture format information (needed for
282     // YCbCr sampling). Only relevant when using immutable samplers. Otherwise, can be ignored.
283     // Known formats only require a uint32, but external formats can be up to a uint64. We store
284     // this as two separate uint32s such that has_unique_object_representation can be true, allowing
285     // this structure to be easily hashed using SkGoodHash. So, external formats can be represented
286     // with (fExternalFormatMostSignificantBits << 32) | fFormat.
287     uint32_t fFormat = 0;
288     uint32_t fExternalFormatMostSignificantBits = 0;
289 };
290 
291 };  // namespace skgpu::graphite
292 
293 #endif // skgpu_graphite_ResourceTypes_DEFINED
294