/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrTypes_DEFINED
#define GrTypes_DEFINED

#include "SkMath.h"
#include "SkTypes.h"
#include "GrConfig.h"

////////////////////////////////////////////////////////////////////////////////

/**
 * Defines overloaded bitwise operators to make it easier to use an enum as a
 * bitfield.
 */
#define GR_MAKE_BITFIELD_OPS(X) \
    inline X operator |(X a, X b) { \
        return (X) (+a | +b); \
    } \
    inline X& operator |=(X& a, X b) { \
        return (a = a | b); \
    } \
    inline X operator &(X a, X b) { \
        return (X) (+a & +b); \
    } \
    inline X& operator &=(X& a, X b) { \
        return (a = a & b); \
    } \
    template <typename T> \
    inline X operator &(T a, X b) { \
        return (X) (+a & +b); \
    } \
    template <typename T> \
    inline X operator &(X a, T b) { \
        return (X) (+a & +b); \
    }

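// Illustrative sketch (not part of the Skia API): a hypothetical flag enum that
// uses GR_MAKE_BITFIELD_OPS to get type-safe |, |=, &, and &= operators.
//
//     enum GrExampleFlags {
//         kNone_GrExampleFlags  = 0x0,
//         kFirst_GrExampleFlag  = 0x1,
//         kSecond_GrExampleFlag = 0x2,
//     };
//     GR_MAKE_BITFIELD_OPS(GrExampleFlags)
//
//     GrExampleFlags flags = kFirst_GrExampleFlag | kSecond_GrExampleFlag;
//     if (flags & kFirst_GrExampleFlag) { /* ... */ }
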
#define GR_DECL_BITFIELD_OPS_FRIENDS(X) \
    friend X operator |(X a, X b); \
    friend X& operator |=(X& a, X b); \
    \
    friend X operator &(X a, X b); \
    friend X& operator &=(X& a, X b); \
    \
    template <typename T> \
    friend X operator &(T a, X b); \
    \
    template <typename T> \
    friend X operator &(X a, T b);

/**
 * Wraps a C++11 enum that we use as a bitfield, and enables a limited amount of
 * masking with type safety. Instantiated with the ~ operator.
 */
template<typename TFlags> class GrTFlagsMask {
public:
    constexpr explicit GrTFlagsMask(TFlags value) : GrTFlagsMask(static_cast<int>(value)) {}
    constexpr explicit GrTFlagsMask(int value) : fValue(value) {}
    constexpr int value() const { return fValue; }
private:
    const int fValue;
};

// Or-ing a mask always returns another mask.
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(GrTFlagsMask<TFlags> a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(a.value() | b.value());
}
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(GrTFlagsMask<TFlags> a,
                                                                   TFlags b) {
    return GrTFlagsMask<TFlags>(a.value() | static_cast<int>(b));
}
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(TFlags a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(static_cast<int>(a) | b.value());
}
template<typename TFlags> inline GrTFlagsMask<TFlags>& operator|=(GrTFlagsMask<TFlags>& a,
                                                                  GrTFlagsMask<TFlags> b) {
    return (a = a | b);
}

// And-ing two masks returns another mask; and-ing one with regular flags returns flags.
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator&(GrTFlagsMask<TFlags> a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(a.value() & b.value());
}
template<typename TFlags> constexpr TFlags operator&(GrTFlagsMask<TFlags> a, TFlags b) {
    return static_cast<TFlags>(a.value() & static_cast<int>(b));
}
template<typename TFlags> constexpr TFlags operator&(TFlags a, GrTFlagsMask<TFlags> b) {
    return static_cast<TFlags>(static_cast<int>(a) & b.value());
}
template<typename TFlags> inline TFlags& operator&=(TFlags& a, GrTFlagsMask<TFlags> b) {
    return (a = a & b);
}

/**
 * Defines bitwise operators that make it possible to use an enum class as a
 * basic bitfield.
 */
#define GR_MAKE_BITFIELD_CLASS_OPS(X) \
    constexpr GrTFlagsMask<X> operator~(X a) { \
        return GrTFlagsMask<X>(~static_cast<int>(a)); \
    } \
    constexpr X operator|(X a, X b) { \
        return static_cast<X>(static_cast<int>(a) | static_cast<int>(b)); \
    } \
    inline X& operator|=(X& a, X b) { \
        return (a = a | b); \
    } \
    constexpr bool operator&(X a, X b) { \
        return SkToBool(static_cast<int>(a) & static_cast<int>(b)); \
    }

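// Illustrative sketch (not part of the Skia API): a hypothetical enum class that
// uses GR_MAKE_BITFIELD_CLASS_OPS. Note that operator& yields a bool test, while
// operator~ produces a GrTFlagsMask that can be used to clear bits via &=.
//
//     enum class GrExampleClassFlags {
//         kNone   = 0,
//         kFirst  = 1 << 0,
//         kSecond = 1 << 1,
//     };
//     GR_MAKE_BITFIELD_CLASS_OPS(GrExampleClassFlags)
//
//     GrExampleClassFlags f = GrExampleClassFlags::kFirst | GrExampleClassFlags::kSecond;
//     bool hasFirst = (f & GrExampleClassFlags::kFirst);   // true
//     f &= ~GrExampleClassFlags::kSecond;                  // clears kSecond
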
#define GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(X) \
    friend constexpr GrTFlagsMask<X> operator ~(X); \
    friend constexpr X operator |(X, X); \
    friend X& operator |=(X&, X); \
    friend constexpr bool operator &(X, X);

////////////////////////////////////////////////////////////////////////////////

// compile time versions of min/max
#define GR_CT_MAX(a, b) (((b) < (a)) ? (a) : (b))
#define GR_CT_MIN(a, b) (((b) < (a)) ? (b) : (a))

/**
 * divide, rounding up
 */
static inline int32_t GrIDivRoundUp(int x, int y) {
    SkASSERT(y > 0);
    return (x + (y-1)) / y;
}
static inline uint32_t GrUIDivRoundUp(uint32_t x, uint32_t y) {
    return (x + (y-1)) / y;
}
static inline size_t GrSizeDivRoundUp(size_t x, size_t y) {
    return (x + (y-1)) / y;
}

// compile time, evaluates Y multiple times
#define GR_CT_DIV_ROUND_UP(X, Y) (((X) + ((Y)-1)) / (Y))

/**
 * align up
 */
static inline uint32_t GrUIAlignUp(uint32_t x, uint32_t alignment) {
    return GrUIDivRoundUp(x, alignment) * alignment;
}
static inline size_t GrSizeAlignUp(size_t x, size_t alignment) {
    return GrSizeDivRoundUp(x, alignment) * alignment;
}

// compile time, evaluates A multiple times
#define GR_CT_ALIGN_UP(X, A) (GR_CT_DIV_ROUND_UP((X),(A)) * (A))

/**
 * amount of pad needed to align up
 */
static inline uint32_t GrUIAlignUpPad(uint32_t x, uint32_t alignment) {
    return (alignment - x % alignment) % alignment;
}
static inline size_t GrSizeAlignUpPad(size_t x, size_t alignment) {
    return (alignment - x % alignment) % alignment;
}

/**
 * align down
 */
static inline uint32_t GrUIAlignDown(uint32_t x, uint32_t alignment) {
    return (x / alignment) * alignment;
}
static inline size_t GrSizeAlignDown(size_t x, uint32_t alignment) {
    return (x / alignment) * alignment;
}
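
// Worked example of the rounding/alignment helpers above (values follow directly
// from the definitions; shown only as illustration):
//
//     GrUIDivRoundUp(10, 4)  == 3    // 10/4 rounded up
//     GrUIAlignUp(10, 8)     == 16   // next multiple of 8 at or above 10
//     GrUIAlignUpPad(10, 8)  == 6    // bytes of padding needed to reach 16
//     GrUIAlignDown(10, 8)   == 8    // previous multiple of 8 at or below 10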

///////////////////////////////////////////////////////////////////////////////

/**
 * Possible 3D APIs that may be used by Ganesh.
 */
enum GrBackend {
    kMetal_GrBackend,
    kOpenGL_GrBackend,
    kVulkan_GrBackend,
    /**
     * Mock is a backend that does not draw anything. It is used for unit tests
     * and to measure CPU overhead.
     */
    kMock_GrBackend,
};

/**
 * Backend-specific 3D context handle
 * OpenGL: const GrGLInterface*. If null will use the result of GrGLMakeNativeInterface().
 * Vulkan: GrVkBackendContext*.
 * Mock: const GrMockOptions* or null for default constructed GrMockContextOptions.
 */
typedef intptr_t GrBackendContext;

///////////////////////////////////////////////////////////////////////////////

/**
 * Used to control antialiasing in draw calls.
 */
enum class GrAA : bool {
    kNo = false,
    kYes = true
};

///////////////////////////////////////////////////////////////////////////////

/**
 * Used to say whether a texture has mip levels allocated or not.
 */
enum class GrMipMapped : bool {
    kNo = false,
    kYes = true
};

///////////////////////////////////////////////////////////////////////////////

/**
 * Geometric primitives used for drawing.
 */
enum class GrPrimitiveType {
    kTriangles,
    kTriangleStrip,
    kTriangleFan,
    kPoints,
    kLines,          // 1 pix wide only
    kLineStrip,      // 1 pix wide only
    kLinesAdjacency  // requires geometry shader support.
};
static constexpr int kNumGrPrimitiveTypes = (int) GrPrimitiveType::kLinesAdjacency + 1;

static constexpr bool GrIsPrimTypeLines(GrPrimitiveType type) {
    return GrPrimitiveType::kLines == type ||
           GrPrimitiveType::kLineStrip == type ||
           GrPrimitiveType::kLinesAdjacency == type;
}

static constexpr bool GrIsPrimTypeTris(GrPrimitiveType type) {
    return GrPrimitiveType::kTriangles == type ||
           GrPrimitiveType::kTriangleStrip == type ||
           GrPrimitiveType::kTriangleFan == type;
}

static constexpr bool GrPrimTypeRequiresGeometryShaderSupport(GrPrimitiveType type) {
    return GrPrimitiveType::kLinesAdjacency == type;
}
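
// Because the classifiers above are constexpr, they can be evaluated at compile time.
// A minimal sketch of such a check (illustration only, not part of this header):
//
//     static_assert(GrIsPrimTypeLines(GrPrimitiveType::kLineStrip), "");
//     static_assert(!GrIsPrimTypeTris(GrPrimitiveType::kPoints), "");
//     static_assert(GrPrimTypeRequiresGeometryShaderSupport(GrPrimitiveType::kLinesAdjacency), "");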

/**
 * Formats for masks, used by the font cache.
 * Important that these are 0-based.
 */
enum GrMaskFormat {
    kA8_GrMaskFormat,    //!< 1-byte per pixel
    kA565_GrMaskFormat,  //!< 2-bytes per pixel, RGB represent 3-channel LCD coverage
    kARGB_GrMaskFormat,  //!< 4-bytes per pixel, color format

    kLast_GrMaskFormat = kARGB_GrMaskFormat
};
static const int kMaskFormatCount = kLast_GrMaskFormat + 1;

/**
 * Return the number of bytes-per-pixel for the specified mask format.
 */
static inline int GrMaskFormatBytesPerPixel(GrMaskFormat format) {
    SkASSERT(format < kMaskFormatCount);
    // kA8   (0) -> 1
    // kA565 (1) -> 2
    // kARGB (2) -> 4
    static const int sBytesPerPixel[] = { 1, 2, 4 };
    static_assert(SK_ARRAY_COUNT(sBytesPerPixel) == kMaskFormatCount, "array_size_mismatch");
    static_assert(kA8_GrMaskFormat == 0, "enum_order_dependency");
    static_assert(kA565_GrMaskFormat == 1, "enum_order_dependency");
    static_assert(kARGB_GrMaskFormat == 2, "enum_order_dependency");

    return sBytesPerPixel[(int) format];
}

/**
 * Pixel configurations.
 */
enum GrPixelConfig {
    kUnknown_GrPixelConfig,
    kAlpha_8_GrPixelConfig,
    kGray_8_GrPixelConfig,
    kRGB_565_GrPixelConfig,
    /**
     * Premultiplied
     */
    kRGBA_4444_GrPixelConfig,
    /**
     * Premultiplied. Byte order is r,g,b,a.
     */
    kRGBA_8888_GrPixelConfig,
    /**
     * Premultiplied. Byte order is b,g,r,a.
     */
    kBGRA_8888_GrPixelConfig,
    /**
     * Premultiplied and sRGB. Byte order is r,g,b,a.
     */
    kSRGBA_8888_GrPixelConfig,
    /**
     * Premultiplied and sRGB. Byte order is b,g,r,a.
     */
    kSBGRA_8888_GrPixelConfig,
    /**
     * 8 bit signed integers per-channel. Byte order is b,g,r,a.
     */
    kRGBA_8888_sint_GrPixelConfig,

    /**
     * Byte order is r, g, b, a. This color format is 32 bits per channel
     */
    kRGBA_float_GrPixelConfig,
    /**
     * Byte order is r, g. This color format is 32 bits per channel
     */
    kRG_float_GrPixelConfig,

    /**
     * This color format is a single 16 bit float channel
     */
    kAlpha_half_GrPixelConfig,

    /**
     * Byte order is r, g, b, a. This color format is 16 bits per channel
     */
    kRGBA_half_GrPixelConfig,

    kPrivateConfig1_GrPixelConfig,
    kPrivateConfig2_GrPixelConfig,
    kPrivateConfig3_GrPixelConfig,
    kPrivateConfig4_GrPixelConfig,
    kPrivateConfig5_GrPixelConfig,

    kLast_GrPixelConfig = kPrivateConfig5_GrPixelConfig
};
static const int kGrPixelConfigCnt = kLast_GrPixelConfig + 1;

// Aliases for pixel configs that match skia's byte order.
#ifndef SK_CPU_LENDIAN
    #error "Skia gpu currently assumes little endian"
#endif
#if SK_PMCOLOR_BYTE_ORDER(B,G,R,A)
    static const GrPixelConfig kSkia8888_GrPixelConfig = kBGRA_8888_GrPixelConfig;
#elif SK_PMCOLOR_BYTE_ORDER(R,G,B,A)
    static const GrPixelConfig kSkia8888_GrPixelConfig = kRGBA_8888_GrPixelConfig;
#else
    #error "SK_*32_SHIFT values must correspond to GL_BGRA or GL_RGBA format."
#endif

/**
 * Optional bitfield flags that can be set on GrSurfaceDesc (below).
 */
enum GrSurfaceFlags {
    kNone_GrSurfaceFlags = 0x0,
    /**
     * Creates a texture that can be rendered to as a GrRenderTarget. Use
     * GrTexture::asRenderTarget() to access.
     */
    kRenderTarget_GrSurfaceFlag = 0x1,
    /**
     * Clears to zero on creation. It will cause creation failure if initial data is supplied to the
     * texture. This only affects the base level if the texture is created with MIP levels.
     */
    kPerformInitialClear_GrSurfaceFlag = 0x2
};

GR_MAKE_BITFIELD_OPS(GrSurfaceFlags)
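
// With the operators generated above, GrSurfaceFlags values can be combined and
// tested directly; a minimal sketch:
//
//     GrSurfaceFlags flags = kRenderTarget_GrSurfaceFlag | kPerformInitialClear_GrSurfaceFlag;
//     if (flags & kRenderTarget_GrSurfaceFlag) { /* the surface is renderable */ }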

// opaque type for 3D API object handles
typedef intptr_t GrBackendObject;

/**
 * Some textures will be stored such that the upper and left edges of the content meet at
 * the origin (in texture coord space), and for other textures the lower and left edges
 * meet at the origin.
 */

enum GrSurfaceOrigin {
    kTopLeft_GrSurfaceOrigin,
    kBottomLeft_GrSurfaceOrigin,
};

struct GrMipLevel {
    const void* fPixels;
    size_t      fRowBytes;
};

/**
 * Describes a surface to be created.
 */
struct GrSurfaceDesc {
    GrSurfaceDesc()
            : fFlags(kNone_GrSurfaceFlags)
            , fOrigin(kTopLeft_GrSurfaceOrigin)
            , fWidth(0)
            , fHeight(0)
            , fConfig(kUnknown_GrPixelConfig)
            , fSampleCnt(1) {}

    GrSurfaceFlags  fFlags;  //!< bitfield of GrSurfaceFlags
    GrSurfaceOrigin fOrigin; //!< origin of the texture
    int             fWidth;  //!< Width of the texture
    int             fHeight; //!< Height of the texture

    /**
     * Format of source data of the texture. Not guaranteed to be the same as
     * internal format used by 3D API.
     */
    GrPixelConfig   fConfig;

    /**
     * The number of samples per pixel. Zero is treated equivalently to 1. This only
     * applies if the kRenderTarget_GrSurfaceFlag is set. The actual number
     * of samples may not exactly match the request. The request will be rounded
     * up to the next supported sample count. A value larger than the largest
     * supported sample count will fail.
     */
    int             fSampleCnt;
};
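
// A minimal sketch of filling out a GrSurfaceDesc for a 256x256 renderable RGBA
// surface (illustration only; the dimensions and config here are arbitrary):
//
//     GrSurfaceDesc desc;
//     desc.fFlags     = kRenderTarget_GrSurfaceFlag;
//     desc.fOrigin    = kBottomLeft_GrSurfaceOrigin;
//     desc.fWidth     = 256;
//     desc.fHeight    = 256;
//     desc.fConfig    = kRGBA_8888_GrPixelConfig;
//     desc.fSampleCnt = 1;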

/**
 * Clips are composed from these objects.
 */
enum GrClipType {
    kRect_ClipType,
    kPath_ClipType
};

///////////////////////////////////////////////////////////////////////////////

/** Ownership rules for external GPU resources imported into Skia. */
enum GrWrapOwnership {
    /** Skia will assume the client will keep the resource alive and Skia will not free it. */
    kBorrow_GrWrapOwnership,

    /** Skia will assume ownership of the resource and free it. */
    kAdopt_GrWrapOwnership,
};

///////////////////////////////////////////////////////////////////////////////

/**
 * The GrContext's cache of backend context state can be partially invalidated.
 * These enums are specific to the GL backend and we'd add a new set for an alternative backend.
 */
enum GrGLBackendState {
    kRenderTarget_GrGLBackendState   = 1 << 0,
    kTextureBinding_GrGLBackendState = 1 << 1,
    // View state stands for scissor and viewport
    kView_GrGLBackendState           = 1 << 2,
    kBlend_GrGLBackendState          = 1 << 3,
    kMSAAEnable_GrGLBackendState     = 1 << 4,
    kVertex_GrGLBackendState         = 1 << 5,
    kStencil_GrGLBackendState        = 1 << 6,
    kPixelStore_GrGLBackendState     = 1 << 7,
    kProgram_GrGLBackendState        = 1 << 8,
    kFixedFunction_GrGLBackendState  = 1 << 9,
    kMisc_GrGLBackendState           = 1 << 10,
    kPathRendering_GrGLBackendState  = 1 << 11,
    kALL_GrGLBackendState            = 0xffff
};
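
// These bits are typically OR'd together and passed to GrContext::resetContext()
// (declared in GrContext.h) when the client has touched GL state behind Skia's back.
// A minimal sketch, assuming a valid GrContext* named context:
//
//     context->resetContext(kTextureBinding_GrGLBackendState | kBlend_GrGLBackendState);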

/**
 * This value translates to resetting all the context state for any backend.
 */
static const uint32_t kAll_GrBackendState = 0xffffffff;

// Enum used as the return value of flush calls with semaphores, so the client knows whether
// the semaphores were submitted to the GPU or not.
enum class GrSemaphoresSubmitted : bool {
    kNo = false,
    kYes = true
};

#endif