/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkUtil_DEFINED
#define GrVkUtil_DEFINED

#include "include/gpu/GrTypes.h"
#include "include/gpu/vk/GrVkTypes.h"
#include "include/private/SkMacros.h"
#include "src/gpu/GrColor.h"
#include "src/gpu/GrDataUtils.h"
#include "src/gpu/vk/GrVkInterface.h"
#include "src/sksl/ir/SkSLProgram.h"

class GrVkGpu;
// Makes a Vulkan call through the GrVkInterface function table.
#define GR_VK_CALL(IFACE, X) (IFACE)->fFunctions.f##X

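// Example (an illustrative sketch; assumes a GrVkInterface* "iface", a
// VkDevice "device", and a VkShaderModule "shaderModule" are in scope):
//     GR_VK_CALL(iface, DestroyShaderModule(device, shaderModule, nullptr));
// which expands to:
//     (iface)->fFunctions.fDestroyShaderModule(device, shaderModule, nullptr);
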
#define GR_VK_CALL_RESULT(GPU, RESULT, X)                                 \
    do {                                                                  \
        (RESULT) = GR_VK_CALL(GPU->vkInterface(), X);                     \
        SkASSERT(VK_SUCCESS == RESULT || VK_ERROR_DEVICE_LOST == RESULT); \
        if (RESULT != VK_SUCCESS && !GPU->isDeviceLost()) {               \
            SkDebugf("Failed vulkan call. Error: %d," #X "\n", RESULT);   \
        }                                                                 \
        GPU->checkVkResult(RESULT);                                       \
    } while (false)

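// Example (an illustrative sketch; "gpu" is assumed to be a GrVkGpu*, and
// "device", "createInfo", and "sampler" are hypothetical locals):
//     VkResult result;
//     GR_VK_CALL_RESULT(gpu, result,
//                       CreateSampler(device, &createInfo, nullptr, &sampler));
//     if (result != VK_SUCCESS) {
//         // Creation failed; checkVkResult() has already been notified.
//     }
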
// Same as GR_VK_CALL_RESULT, but without the debug assert and failure logging.
#define GR_VK_CALL_RESULT_NOCHECK(GPU, RESULT, X)     \
    do {                                              \
        (RESULT) = GR_VK_CALL(GPU->vkInterface(), X); \
        GPU->checkVkResult(RESULT);                   \
    } while (false)

// Same as GR_VK_CALL, but declares a local VkResult and checks it for success
// (via GR_VK_CALL_RESULT).
#define GR_VK_CALL_ERRCHECK(GPU, X)                                  \
    VkResult SK_MACRO_APPEND_LINE(ret);                              \
    GR_VK_CALL_RESULT(GPU, SK_MACRO_APPEND_LINE(ret), X)

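// Example (an illustrative sketch; "gpu" is assumed to be a GrVkGpu* exposing
// a device() accessor, and "fence" a hypothetical VkFence):
//     GR_VK_CALL_ERRCHECK(gpu, ResetFences(gpu->device(), 1, &fence));
// Note: the macro declares a VkResult named after the current source line
// (SK_MACRO_APPEND_LINE), so two expansions on the same line would collide.
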
bool GrVkFormatIsSupported(VkFormat);

static constexpr uint32_t GrVkFormatChannels(VkFormat vkFormat) {
    switch (vkFormat) {
        case VK_FORMAT_R8G8B8A8_UNORM:           return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_R8_UNORM:                 return kRed_SkColorChannelFlag;
        case VK_FORMAT_B8G8R8A8_UNORM:           return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_R5G6B5_UNORM_PACK16:      return kRGB_SkColorChannelFlags;
        case VK_FORMAT_R16G16B16A16_SFLOAT:      return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_R16_SFLOAT:               return kRed_SkColorChannelFlag;
        case VK_FORMAT_R8G8B8_UNORM:             return kRGB_SkColorChannelFlags;
        case VK_FORMAT_R8G8_UNORM:               return kRG_SkColorChannelFlags;
        case VK_FORMAT_A2B10G10R10_UNORM_PACK32: return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_A2R10G10B10_UNORM_PACK32: return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_B4G4R4A4_UNORM_PACK16:    return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_R4G4B4A4_UNORM_PACK16:    return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_R8G8B8A8_SRGB:            return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:  return kRGB_SkColorChannelFlags;
        case VK_FORMAT_BC1_RGB_UNORM_BLOCK:      return kRGB_SkColorChannelFlags;
        case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:     return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_R16_UNORM:                return kRed_SkColorChannelFlag;
        case VK_FORMAT_R16G16_UNORM:             return kRG_SkColorChannelFlags;
        case VK_FORMAT_R16G16B16A16_UNORM:       return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_R16G16_SFLOAT:            return kRG_SkColorChannelFlags;
        case VK_FORMAT_S8_UINT:                  return 0;
        case VK_FORMAT_D24_UNORM_S8_UINT:        return 0;
        case VK_FORMAT_D32_SFLOAT_S8_UINT:       return 0;
        default:                                 return 0;
    }
}

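// Example (illustrative; the function is constexpr, so the mapping can be
// verified at compile time):
//     static_assert(GrVkFormatChannels(VK_FORMAT_R8G8_UNORM) == kRG_SkColorChannelFlags,
//                   "R8G8 carries red and green channels");
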
static constexpr size_t GrVkFormatBytesPerBlock(VkFormat vkFormat) {
    switch (vkFormat) {
        case VK_FORMAT_R8G8B8A8_UNORM:            return 4;
        case VK_FORMAT_R8_UNORM:                  return 1;
        case VK_FORMAT_B8G8R8A8_UNORM:            return 4;
        case VK_FORMAT_R5G6B5_UNORM_PACK16:       return 2;
        case VK_FORMAT_R16G16B16A16_SFLOAT:       return 8;
        case VK_FORMAT_R16_SFLOAT:                return 2;
        case VK_FORMAT_R8G8B8_UNORM:              return 3;
        case VK_FORMAT_R8G8_UNORM:                return 2;
        case VK_FORMAT_A2B10G10R10_UNORM_PACK32:  return 4;
        case VK_FORMAT_A2R10G10B10_UNORM_PACK32:  return 4;
        case VK_FORMAT_B4G4R4A4_UNORM_PACK16:     return 2;
        case VK_FORMAT_R4G4B4A4_UNORM_PACK16:     return 2;
        case VK_FORMAT_R8G8B8A8_SRGB:             return 4;
        case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:   return 8;
        case VK_FORMAT_BC1_RGB_UNORM_BLOCK:       return 8;
        case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:      return 8;
        case VK_FORMAT_R16_UNORM:                 return 2;
        case VK_FORMAT_R16G16_UNORM:              return 4;
        case VK_FORMAT_R16G16B16A16_UNORM:        return 8;
        case VK_FORMAT_R16G16_SFLOAT:             return 4;
        // Currently we are overestimating this value, which is used in gpu size
        // calculations, even though the actual size is probably less. We should
        // instead treat planar formats similarly to compressed textures, which go
        // through their own special query for calculating size.
        case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM: return 3;
        case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:  return 3;
        case VK_FORMAT_S8_UINT:                   return 1;
        case VK_FORMAT_D24_UNORM_S8_UINT:         return 4;
        case VK_FORMAT_D32_SFLOAT_S8_UINT:        return 8;

        default:                                  return 0;
    }
}

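// Example (an illustrative sketch; "width" and "height" are hypothetical
// dimensions of a non-compressed texture):
//     size_t rowBytes  = width * GrVkFormatBytesPerBlock(VK_FORMAT_R8G8B8A8_UNORM);
//     size_t totalSize = rowBytes * height;
// For the compressed formats above (ETC2, BC1), the returned value is bytes
// per 4x4 block rather than bytes per pixel.
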
static constexpr int GrVkFormatStencilBits(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return 8;
        case VK_FORMAT_D24_UNORM_S8_UINT:
            return 8;
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return 8;
        default:
            return 0;
    }
}

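// Example (illustrative; constexpr, so checkable at compile time):
//     static_assert(GrVkFormatStencilBits(VK_FORMAT_D24_UNORM_S8_UINT) == 8, "");
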
bool GrVkFormatNeedsYcbcrSampler(VkFormat format);

bool GrSampleCountToVkSampleCount(uint32_t samples, VkSampleCountFlagBits* vkSamples);

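// Example (an illustrative sketch; presumably the function returns false when
// the requested count has no VkSampleCountFlagBits equivalent):
//     VkSampleCountFlagBits vkSamples;
//     if (!GrSampleCountToVkSampleCount(4, &vkSamples)) {
//         vkSamples = VK_SAMPLE_COUNT_1_BIT;  // hypothetical fallback
//     }
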
// Compiles an SkSL shader to SPIR-V, creates a VkShaderModule from the result,
// and fills in the module and its pipeline stage info. The generated SPIR-V
// and the program inputs are returned through the out-parameters.
bool GrCompileVkShaderModule(GrVkGpu* gpu,
                             const SkSL::String& shaderString,
                             VkShaderStageFlagBits stage,
                             VkShaderModule* shaderModule,
                             VkPipelineShaderStageCreateInfo* stageInfo,
                             const SkSL::Program::Settings& settings,
                             SkSL::String* outSPIRV,
                             SkSL::Program::Inputs* outInputs);

// Creates a VkShaderModule from already-compiled SPIR-V, filling in the module
// and its pipeline stage info.
bool GrInstallVkShaderModule(GrVkGpu* gpu,
                             const SkSL::String& spirv,
                             VkShaderStageFlagBits stage,
                             VkShaderModule* shaderModule,
                             VkPipelineShaderStageCreateInfo* stageInfo);

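// Example flow (an illustrative sketch; error handling elided, and "gpu",
// "sksl", and "settings" are assumed to exist):
//     SkSL::String spirv;
//     SkSL::Program::Inputs inputs;
//     VkShaderModule module;
//     VkPipelineShaderStageCreateInfo stageInfo;
//     if (GrCompileVkShaderModule(gpu, sksl, VK_SHADER_STAGE_VERTEX_BIT,
//                                 &module, &stageInfo, settings, &spirv, &inputs)) {
//         // "spirv" can be cached; a later run may pass it to
//         // GrInstallVkShaderModule to skip SkSL compilation.
//     }
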
/**
 * Returns true if the format is compressed.
 */
bool GrVkFormatIsCompressed(VkFormat);

#if defined(SK_DEBUG) || GR_TEST_UTILS
static constexpr const char* GrVkFormatToStr(VkFormat vkFormat) {
    switch (vkFormat) {
        case VK_FORMAT_R8G8B8A8_UNORM:           return "R8G8B8A8_UNORM";
        case VK_FORMAT_R8_UNORM:                 return "R8_UNORM";
        case VK_FORMAT_B8G8R8A8_UNORM:           return "B8G8R8A8_UNORM";
        case VK_FORMAT_R5G6B5_UNORM_PACK16:      return "R5G6B5_UNORM_PACK16";
        case VK_FORMAT_R16G16B16A16_SFLOAT:      return "R16G16B16A16_SFLOAT";
        case VK_FORMAT_R16_SFLOAT:               return "R16_SFLOAT";
        case VK_FORMAT_R8G8B8_UNORM:             return "R8G8B8_UNORM";
        case VK_FORMAT_R8G8_UNORM:               return "R8G8_UNORM";
        case VK_FORMAT_A2B10G10R10_UNORM_PACK32: return "A2B10G10R10_UNORM_PACK32";
        case VK_FORMAT_A2R10G10B10_UNORM_PACK32: return "A2R10G10B10_UNORM_PACK32";
        case VK_FORMAT_B4G4R4A4_UNORM_PACK16:    return "B4G4R4A4_UNORM_PACK16";
        case VK_FORMAT_R4G4B4A4_UNORM_PACK16:    return "R4G4B4A4_UNORM_PACK16";
        case VK_FORMAT_R32G32B32A32_SFLOAT:      return "R32G32B32A32_SFLOAT";
        case VK_FORMAT_R8G8B8A8_SRGB:            return "R8G8B8A8_SRGB";
        case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:  return "ETC2_R8G8B8_UNORM_BLOCK";
        case VK_FORMAT_BC1_RGB_UNORM_BLOCK:      return "BC1_RGB_UNORM_BLOCK";
        case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:     return "BC1_RGBA_UNORM_BLOCK";
        case VK_FORMAT_R16_UNORM:                return "R16_UNORM";
        case VK_FORMAT_R16G16_UNORM:             return "R16G16_UNORM";
        case VK_FORMAT_R16G16B16A16_UNORM:       return "R16G16B16A16_UNORM";
        case VK_FORMAT_R16G16_SFLOAT:            return "R16G16_SFLOAT";
        case VK_FORMAT_S8_UINT:                  return "S8_UINT";
        case VK_FORMAT_D24_UNORM_S8_UINT:        return "D24_UNORM_S8_UINT";
        case VK_FORMAT_D32_SFLOAT_S8_UINT:       return "D32_SFLOAT_S8_UINT";

        default:                                 return "Unknown";
    }
}
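
// Example (an illustrative sketch; "format" is a hypothetical VkFormat value):
//     SkDebugf("Using VkFormat %s\n", GrVkFormatToStr(format));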

#endif
#endif