/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrTypes_DEFINED
#define GrTypes_DEFINED

#include "include/core/SkMath.h"
#include "include/core/SkTypes.h"
#include "include/gpu/GrConfig.h"

class GrBackendSemaphore;
class SkImage;
class SkSurface;

////////////////////////////////////////////////////////////////////////////////

/**
 * Defines overloaded bitwise operators to make it easier to use an enum as a
 * bitfield.
 */
#define GR_MAKE_BITFIELD_OPS(X) \
    inline X operator |(X a, X b) { \
        return (X) (+a | +b); \
    } \
    inline X& operator |=(X& a, X b) { \
        return (a = a | b); \
    } \
    inline X operator &(X a, X b) { \
        return (X) (+a & +b); \
    } \
    inline X& operator &=(X& a, X b) { \
        return (a = a & b); \
    } \
    template <typename T> \
    inline X operator &(T a, X b) { \
        return (X) (+a & +b); \
    } \
    template <typename T> \
    inline X operator &(X a, T b) { \
        return (X) (+a & +b); \
    } \

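// Example: a minimal sketch of using GR_MAKE_BITFIELD_OPS; GrExampleFlags and
// its values are hypothetical, for illustration only. The unary + in the macro
// promotes the operands to int, and the templated operator& overloads let the
// flags be and-ed with the int produced by ~:
//
//     enum GrExampleFlags {
//         kNone_GrExampleFlag = 0,
//         kRed_GrExampleFlag  = 1 << 0,
//         kBlue_GrExampleFlag = 1 << 1,
//     };
//     GR_MAKE_BITFIELD_OPS(GrExampleFlags)
//
//     GrExampleFlags flags = kRed_GrExampleFlag | kBlue_GrExampleFlag;
//     flags = flags & ~kRed_GrExampleFlag;  // clears kRed, leaving kBlue
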
#define GR_DECL_BITFIELD_OPS_FRIENDS(X) \
    friend X operator |(X a, X b); \
    friend X& operator |=(X& a, X b); \
    \
    friend X operator &(X a, X b); \
    friend X& operator &=(X& a, X b); \
    \
    template <typename T> \
    friend X operator &(T a, X b); \
    \
    template <typename T> \
    friend X operator &(X a, T b); \

/**
 * Wraps a C++11 enum that we use as a bitfield, and enables a limited amount of
 * masking with type safety. A mask is created by applying the ~ operator to an
 * enum value (see GR_MAKE_BITFIELD_CLASS_OPS below).
 */
template<typename TFlags> class GrTFlagsMask {
public:
    constexpr explicit GrTFlagsMask(TFlags value) : GrTFlagsMask(static_cast<int>(value)) {}
    constexpr explicit GrTFlagsMask(int value) : fValue(value) {}
    constexpr int value() const { return fValue; }
private:
    const int fValue;
};

// Or-ing a mask always returns another mask.
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(GrTFlagsMask<TFlags> a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(a.value() | b.value());
}
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(GrTFlagsMask<TFlags> a,
                                                                   TFlags b) {
    return GrTFlagsMask<TFlags>(a.value() | static_cast<int>(b));
}
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(TFlags a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(static_cast<int>(a) | b.value());
}
template<typename TFlags> inline GrTFlagsMask<TFlags>& operator|=(GrTFlagsMask<TFlags>& a,
                                                                  GrTFlagsMask<TFlags> b) {
    return (a = a | b);
}

// And-ing two masks returns another mask; and-ing one with regular flags returns flags.
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator&(GrTFlagsMask<TFlags> a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(a.value() & b.value());
}
template<typename TFlags> constexpr TFlags operator&(GrTFlagsMask<TFlags> a, TFlags b) {
    return static_cast<TFlags>(a.value() & static_cast<int>(b));
}
template<typename TFlags> constexpr TFlags operator&(TFlags a, GrTFlagsMask<TFlags> b) {
    return static_cast<TFlags>(static_cast<int>(a) & b.value());
}
template<typename TFlags> inline TFlags& operator&=(TFlags& a, GrTFlagsMask<TFlags> b) {
    return (a = a & b);
}
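
// Example: a minimal sketch of the masking rules above, using a hypothetical
// enum class (GrExampleClassFlags is illustrative, not part of Skia). And-ing
// flags with a mask yields flags, so bits can be cleared without leaving the
// enum's type:
//
//     enum class GrExampleClassFlags { kNone = 0, kA = 1 << 0, kB = 1 << 1 };
//
//     constexpr GrTFlagsMask<GrExampleClassFlags> notA(
//             ~static_cast<int>(GrExampleClassFlags::kA));
//     constexpr auto masked = GrExampleClassFlags::kB & notA;  // still kB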

/**
 * Defines bitwise operators that make it possible to use an enum class as a
 * basic bitfield.
 */
#define GR_MAKE_BITFIELD_CLASS_OPS(X) \
    constexpr GrTFlagsMask<X> operator~(X a) { \
        return GrTFlagsMask<X>(~static_cast<int>(a)); \
    } \
    constexpr X operator|(X a, X b) { \
        return static_cast<X>(static_cast<int>(a) | static_cast<int>(b)); \
    } \
    inline X& operator|=(X& a, X b) { \
        return (a = a | b); \
    } \
    constexpr bool operator&(X a, X b) { \
        return SkToBool(static_cast<int>(a) & static_cast<int>(b)); \
    } \

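// Example: a minimal sketch of using GR_MAKE_BITFIELD_CLASS_OPS together with
// GrTFlagsMask; GrExampleClassFlags is again a hypothetical enum class:
//
//     enum class GrExampleClassFlags { kNone = 0, kA = 1 << 0, kB = 1 << 1 };
//     GR_MAKE_BITFIELD_CLASS_OPS(GrExampleClassFlags)
//
//     auto flags = GrExampleClassFlags::kA | GrExampleClassFlags::kB;
//     flags &= ~GrExampleClassFlags::kA;            // ~ yields a mask; flags is now kB
//     bool hasB = flags & GrExampleClassFlags::kB;  // operator& on two flags yields bool
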
#define GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(X) \
    friend constexpr GrTFlagsMask<X> operator ~(X); \
    friend constexpr X operator |(X, X); \
    friend X& operator |=(X&, X); \
    friend constexpr bool operator &(X, X)

////////////////////////////////////////////////////////////////////////////////

// compile-time versions of min/max
#define GR_CT_MAX(a, b) (((b) < (a)) ? (a) : (b))
#define GR_CT_MIN(a, b) (((b) < (a)) ? (b) : (a))

/**
 * divide, rounding up
 */
static inline constexpr int32_t GrIDivRoundUp(int x, int y) {
    SkASSERT(y > 0);
    return (x + (y-1)) / y;
}
static inline constexpr uint32_t GrUIDivRoundUp(uint32_t x, uint32_t y) {
    return (x + (y-1)) / y;
}
static inline constexpr size_t GrSizeDivRoundUp(size_t x, size_t y) { return (x + (y - 1)) / y; }
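
// For example, GrUIDivRoundUp(10, 4) == 3: ten bytes need three 4-byte blocks.
// A hypothetical use, computing a block count for a tightly packed buffer:
//
//     uint32_t blocks = GrUIDivRoundUp(10, 4);  // 3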

/**
 * align up
 */
static inline constexpr uint32_t GrUIAlignUp(uint32_t x, uint32_t alignment) {
    return GrUIDivRoundUp(x, alignment) * alignment;
}
static inline constexpr size_t GrSizeAlignUp(size_t x, size_t alignment) {
    return GrSizeDivRoundUp(x, alignment) * alignment;
}

/**
 * amount of pad needed to align up
 */
static inline constexpr uint32_t GrUIAlignUpPad(uint32_t x, uint32_t alignment) {
    return (alignment - x % alignment) % alignment;
}
static inline constexpr size_t GrSizeAlignUpPad(size_t x, size_t alignment) {
    return (alignment - x % alignment) % alignment;
}

/**
 * align down
 */
static inline constexpr uint32_t GrUIAlignDown(uint32_t x, uint32_t alignment) {
    return (x / alignment) * alignment;
}
static inline constexpr size_t GrSizeAlignDown(size_t x, uint32_t alignment) {
    return (x / alignment) * alignment;
}
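
// For example, with an alignment of 8 (values are illustrative):
//
//     GrUIAlignUp(13, 8)    == 16  // next multiple of 8 at or above 13
//     GrUIAlignUpPad(13, 8) == 3   // 13 + 3 == 16
//     GrUIAlignDown(13, 8)  == 8   // previous multiple of 8 at or below 13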

///////////////////////////////////////////////////////////////////////////////

/**
 * Possible 3D APIs that may be used by Ganesh.
 */
enum class GrBackendApi : unsigned {
    kMetal,
    kDawn,
    kOpenGL,
    kVulkan,
    /**
     * Mock is a backend that does not draw anything. It is used for unit tests
     * and to measure CPU overhead.
     */
    kMock,

    /**
     * Added here to support the legacy GrBackend enum value and clients who referenced it using
     * GrBackend::kOpenGL_GrBackend.
     */
    kOpenGL_GrBackend = kOpenGL,
};

/**
 * Previously the above enum was a regular enum rather than an enum class. To support legacy use
 * of the enum values, we define aliases below so that no clients break.
 */
typedef GrBackendApi GrBackend;

static constexpr GrBackendApi kMetal_GrBackend = GrBackendApi::kMetal;
static constexpr GrBackendApi kVulkan_GrBackend = GrBackendApi::kVulkan;
static constexpr GrBackendApi kMock_GrBackend = GrBackendApi::kMock;

///////////////////////////////////////////////////////////////////////////////

/**
 * Used to say whether a texture has mip levels allocated or not.
 */
enum class GrMipMapped : bool {
    kNo = false,
    kYes = true
};

/*
 * Can a GrBackendObject be rendered to?
 */
enum class GrRenderable : bool {
    kNo = false,
    kYes = true
};

/*
 * Used to say whether a texture is backed by protected memory.
 */
enum class GrProtected : bool {
    kNo = false,
    kYes = true
};

///////////////////////////////////////////////////////////////////////////////

/**
 * GPU SkImages and SkSurfaces can be stored such that (0, 0) in texture space may correspond to
 * either the top-left or bottom-left content pixel.
 */
enum GrSurfaceOrigin : int {
    kTopLeft_GrSurfaceOrigin,
    kBottomLeft_GrSurfaceOrigin,
};

/**
 * A GrContext's cache of backend context state can be partially invalidated.
 * These enums are specific to the GL backend, and we'd add a new set for an alternative backend.
 */
enum GrGLBackendState {
    kRenderTarget_GrGLBackendState     = 1 << 0,
    // Also includes samplers bound to texture units.
    kTextureBinding_GrGLBackendState   = 1 << 1,
    // View state includes the scissor and the viewport.
    kView_GrGLBackendState             = 1 << 2,
    kBlend_GrGLBackendState            = 1 << 3,
    kMSAAEnable_GrGLBackendState       = 1 << 4,
    kVertex_GrGLBackendState           = 1 << 5,
    kStencil_GrGLBackendState          = 1 << 6,
    kPixelStore_GrGLBackendState       = 1 << 7,
    kProgram_GrGLBackendState          = 1 << 8,
    kFixedFunction_GrGLBackendState    = 1 << 9,
    kMisc_GrGLBackendState             = 1 << 10,
    kPathRendering_GrGLBackendState    = 1 << 11,
    kALL_GrGLBackendState              = 0xffff
};
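
// Example: a sketch of partially invalidating Skia's cached GL state after the
// client has issued its own GL calls; assumes a GrContext* named "context" and
// that GrContext::resetContext() accepts a bitfield of the flags above:
//
//     context->resetContext(kTextureBinding_GrGLBackendState |
//                           kBlend_GrGLBackendState);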

/**
 * This value translates to resetting all the context state for any backend.
 */
static const uint32_t kAll_GrBackendState = 0xffffffff;

enum GrFlushFlags {
    kNone_GrFlushFlags = 0,
    // flush will wait until all submitted GPU work is finished before returning.
    kSyncCpu_GrFlushFlag = 0x1,
};

typedef void* GrGpuFinishedContext;
typedef void (*GrGpuFinishedProc)(GrGpuFinishedContext finishedContext);

/**
 * Struct to supply options to flush calls.
 *
 * After issuing all commands, fNumSemaphores semaphores will be signaled by the GPU. The client
 * passes in an array of fNumSemaphores GrBackendSemaphores. In general these GrBackendSemaphores
 * can be either initialized or not. If they are initialized, the backend uses the passed-in
 * semaphore. If it is not initialized, a new semaphore is created and the GrBackendSemaphore
 * object is initialized with that semaphore.
 *
 * The client will own and be responsible for deleting the underlying semaphores that are stored
 * and returned in initialized GrBackendSemaphore objects. The GrBackendSemaphore objects
 * themselves can be deleted as soon as this function returns.
 *
 * If a finishedProc is provided, the finishedProc will be called when all work submitted to the
 * GPU from this flush call and all previous flush calls has finished on the GPU. If the flush
 * call fails due to an error and nothing ends up getting sent to the GPU, the finished proc is
 * called immediately.
 */
struct GrFlushInfo {
    GrFlushFlags fFlags = kNone_GrFlushFlags;
    int fNumSemaphores = 0;
    GrBackendSemaphore* fSignalSemaphores = nullptr;
    GrGpuFinishedProc fFinishedProc = nullptr;
    GrGpuFinishedContext fFinishedContext = nullptr;
};
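
// Example: a minimal sketch of a flush that blocks the CPU until all GPU work
// is done and then runs a callback; assumes a GrContext* named "context" and
// a GrContext::flush() overload taking a GrFlushInfo:
//
//     static void onGpuFinished(GrGpuFinishedContext) { /* notify client */ }
//
//     GrFlushInfo info;
//     info.fFlags = kSyncCpu_GrFlushFlag;
//     info.fFinishedProc = onGpuFinished;
//     GrSemaphoresSubmitted submitted = context->flush(info);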

/**
 * Enum used as the return value when flushing with semaphores, so the client knows whether the
 * semaphores were actually submitted to the GPU or not.
 */
enum class GrSemaphoresSubmitted : bool {
    kNo = false,
    kYes = true
};

/**
 * Array of SkImages and SkSurfaces which Skia will prepare for external use when passed into a
 * flush call on GrContext. All the SkImages and SkSurfaces must be GPU backed.
 *
 * If fPrepareSurfaceForPresent is not nullptr, then it must be an array the size of fNumSurfaces.
 * Each entry in the array corresponds to the SkSurface at the same index in the fSurfaces array.
 * If an entry is true, then that surface will be prepared for both external use and present.
 *
 * Currently this only has an effect if the backend API is Vulkan. In this case, all the underlying
 * VkImages associated with the SkImages and SkSurfaces will be transitioned into the VkQueueFamily
 * with which they were originally wrapped or created. This allows a client to wrap a VkImage from
 * a queue which is different from the graphics queue and then have Skia transition it back to that
 * queue without needing to delete the SkImage or SkSurface. If an SkSurface is also flagged to be
 * prepared for present, then its VkImageLayout will be set to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR if
 * the VK_KHR_swapchain extension has been enabled for the GrContext and the original queue is not
 * VK_QUEUE_FAMILY_EXTERNAL or VK_QUEUE_FAMILY_FOREIGN_EXT.
 *
 * If an SkSurface or SkImage is used again, it will be transitioned back to the graphics queue and
 * whatever layout is needed for its use.
 */
struct GrPrepareForExternalIORequests {
    int fNumImages = 0;
    SkImage** fImages = nullptr;
    int fNumSurfaces = 0;
    SkSurface** fSurfaces = nullptr;
    bool* fPrepareSurfaceForPresent = nullptr;

    bool hasRequests() const { return fNumImages || fNumSurfaces; }
};
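
// Example: a minimal sketch of asking Skia to prepare one GPU-backed surface
// for present at the next flush; assumes a GrContext* "context", an SkSurface*
// "surface", and a GrContext::flush() overload accepting these requests:
//
//     SkSurface* surfaces[] = {surface};
//     bool preparePresent[] = {true};
//
//     GrPrepareForExternalIORequests io;
//     io.fNumSurfaces = 1;
//     io.fSurfaces = surfaces;
//     io.fPrepareSurfaceForPresent = preparePresent;
//
//     context->flush(GrFlushInfo(), io);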

#endif