/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

8 #include "GrVkUtil.h"
9
10 #include "vk/GrVkGpu.h"
11 #include "SkSLCompiler.h"
12
GrPixelConfigToVkFormat(GrPixelConfig config,VkFormat * format)13 bool GrPixelConfigToVkFormat(GrPixelConfig config, VkFormat* format) {
14 VkFormat dontCare;
15 if (!format) {
16 format = &dontCare;
17 }
18
19 switch (config) {
20 case kUnknown_GrPixelConfig:
21 return false;
22 case kRGBA_8888_GrPixelConfig:
23 *format = VK_FORMAT_R8G8B8A8_UNORM;
24 return true;
25 case kBGRA_8888_GrPixelConfig:
26 *format = VK_FORMAT_B8G8R8A8_UNORM;
27 return true;
28 case kSRGBA_8888_GrPixelConfig:
29 *format = VK_FORMAT_R8G8B8A8_SRGB;
30 return true;
31 case kSBGRA_8888_GrPixelConfig:
32 *format = VK_FORMAT_B8G8R8A8_SRGB;
33 return true;
34 case kRGBA_8888_sint_GrPixelConfig:
35 *format = VK_FORMAT_R8G8B8A8_SINT;
36 return true;
37 case kRGB_565_GrPixelConfig:
38 *format = VK_FORMAT_R5G6B5_UNORM_PACK16;
39 return true;
40 case kRGBA_4444_GrPixelConfig:
41 // R4G4B4A4 is not required to be supported so we actually
42 // store the data is if it was B4G4R4A4 and swizzle in shaders
43 *format = VK_FORMAT_B4G4R4A4_UNORM_PACK16;
44 return true;
45 case kAlpha_8_GrPixelConfig:
46 *format = VK_FORMAT_R8_UNORM;
47 return true;
48 case kGray_8_GrPixelConfig:
49 *format = VK_FORMAT_R8_UNORM;
50 return true;
51 case kRGBA_float_GrPixelConfig:
52 *format = VK_FORMAT_R32G32B32A32_SFLOAT;
53 return true;
54 case kRG_float_GrPixelConfig:
55 *format = VK_FORMAT_R32G32_SFLOAT;
56 return true;
57 case kRGBA_half_GrPixelConfig:
58 *format = VK_FORMAT_R16G16B16A16_SFLOAT;
59 return true;
60 case kAlpha_half_GrPixelConfig:
61 *format = VK_FORMAT_R16_SFLOAT;
62 return true;
63 }
64 SkFAIL("Unexpected config");
65 return false;
66 }
67
GrVkFormatToPixelConfig(VkFormat format)68 GrPixelConfig GrVkFormatToPixelConfig(VkFormat format) {
69 switch (format) {
70 case VK_FORMAT_R8G8B8A8_UNORM:
71 return kRGBA_8888_GrPixelConfig;
72 case VK_FORMAT_B8G8R8A8_UNORM:
73 return kBGRA_8888_GrPixelConfig;
74 case VK_FORMAT_R8G8B8A8_SRGB:
75 return kSRGBA_8888_GrPixelConfig;
76 case VK_FORMAT_B8G8R8A8_SRGB:
77 return kSBGRA_8888_GrPixelConfig;
78 case VK_FORMAT_R8G8B8A8_SINT:
79 return kRGBA_8888_sint_GrPixelConfig;
80 case VK_FORMAT_R5G6B5_UNORM_PACK16:
81 return kRGB_565_GrPixelConfig;
82 break;
83 case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
84 // R4G4B4A4 is not required to be supported so we actually
85 // store RGBA_4444 data as B4G4R4A4.
86 return kRGBA_4444_GrPixelConfig;
87 case VK_FORMAT_R8_UNORM:
88 return kAlpha_8_GrPixelConfig;
89 case VK_FORMAT_R32G32B32A32_SFLOAT:
90 return kRGBA_float_GrPixelConfig;
91 case VK_FORMAT_R32G32_SFLOAT:
92 return kRG_float_GrPixelConfig;
93 case VK_FORMAT_R16G16B16A16_SFLOAT:
94 return kRGBA_half_GrPixelConfig;
95 case VK_FORMAT_R16_SFLOAT:
96 return kAlpha_half_GrPixelConfig;
97 default:
98 return kUnknown_GrPixelConfig;
99 }
100 }
101
GrVkFormatIsSRGB(VkFormat format,VkFormat * linearFormat)102 bool GrVkFormatIsSRGB(VkFormat format, VkFormat* linearFormat) {
103 VkFormat linearFmt = format;
104 switch (format) {
105 case VK_FORMAT_R8_SRGB:
106 linearFmt = VK_FORMAT_R8_UNORM;
107 break;
108 case VK_FORMAT_R8G8_SRGB:
109 linearFmt = VK_FORMAT_R8G8_UNORM;
110 break;
111 case VK_FORMAT_R8G8B8_SRGB:
112 linearFmt = VK_FORMAT_R8G8B8_UNORM;
113 break;
114 case VK_FORMAT_B8G8R8_SRGB:
115 linearFmt = VK_FORMAT_B8G8R8_UNORM;
116 break;
117 case VK_FORMAT_R8G8B8A8_SRGB:
118 linearFmt = VK_FORMAT_R8G8B8A8_UNORM;
119 break;
120 case VK_FORMAT_B8G8R8A8_SRGB:
121 linearFmt = VK_FORMAT_B8G8R8A8_UNORM;
122 break;
123 case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
124 linearFmt = VK_FORMAT_A8B8G8R8_UNORM_PACK32;
125 break;
126 case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
127 linearFmt = VK_FORMAT_BC1_RGB_UNORM_BLOCK;
128 break;
129 case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
130 linearFmt = VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
131 break;
132 case VK_FORMAT_BC2_SRGB_BLOCK:
133 linearFmt = VK_FORMAT_BC2_UNORM_BLOCK;
134 break;
135 case VK_FORMAT_BC3_SRGB_BLOCK:
136 linearFmt = VK_FORMAT_BC3_UNORM_BLOCK;
137 break;
138 case VK_FORMAT_BC7_SRGB_BLOCK:
139 linearFmt = VK_FORMAT_BC7_UNORM_BLOCK;
140 break;
141 case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
142 linearFmt = VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
143 break;
144 case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
145 linearFmt = VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
146 break;
147 case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
148 linearFmt = VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
149 break;
150 case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
151 linearFmt = VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
152 break;
153 case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
154 linearFmt = VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
155 break;
156 case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
157 linearFmt = VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
158 break;
159 case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
160 linearFmt = VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
161 break;
162 case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
163 linearFmt = VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
164 break;
165 case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
166 linearFmt = VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
167 break;
168 case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
169 linearFmt = VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
170 break;
171 case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
172 linearFmt = VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
173 break;
174 case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
175 linearFmt = VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
176 break;
177 case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
178 linearFmt = VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
179 break;
180 case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
181 linearFmt = VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
182 break;
183 case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
184 linearFmt = VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
185 break;
186 case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
187 linearFmt = VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
188 break;
189 case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
190 linearFmt = VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
191 break;
192 default:
193 break;
194 }
195 if (linearFormat) {
196 *linearFormat = linearFmt;
197 }
198 return (linearFmt != format);
199 }
200
GrSampleCountToVkSampleCount(uint32_t samples,VkSampleCountFlagBits * vkSamples)201 bool GrSampleCountToVkSampleCount(uint32_t samples, VkSampleCountFlagBits* vkSamples) {
202 switch (samples) {
203 case 0: // fall through
204 case 1:
205 *vkSamples = VK_SAMPLE_COUNT_1_BIT;
206 return true;
207 case 2:
208 *vkSamples = VK_SAMPLE_COUNT_2_BIT;
209 return true;
210 case 4:
211 *vkSamples = VK_SAMPLE_COUNT_4_BIT;
212 return true;
213 case 8:
214 *vkSamples = VK_SAMPLE_COUNT_8_BIT;
215 return true;
216 case 16:
217 *vkSamples = VK_SAMPLE_COUNT_16_BIT;
218 return true;
219 case 32:
220 *vkSamples = VK_SAMPLE_COUNT_32_BIT;
221 return true;
222 case 64:
223 *vkSamples = VK_SAMPLE_COUNT_64_BIT;
224 return true;
225 default:
226 return false;
227 }
228 }
229
vk_shader_stage_to_skiasl_kind(VkShaderStageFlagBits stage)230 SkSL::Program::Kind vk_shader_stage_to_skiasl_kind(VkShaderStageFlagBits stage) {
231 if (VK_SHADER_STAGE_VERTEX_BIT == stage) {
232 return SkSL::Program::kVertex_Kind;
233 }
234 if (VK_SHADER_STAGE_GEOMETRY_BIT == stage) {
235 return SkSL::Program::kGeometry_Kind;
236 }
237 SkASSERT(VK_SHADER_STAGE_FRAGMENT_BIT == stage);
238 return SkSL::Program::kFragment_Kind;
239 }
240
skiasl_kind_to_vk_shader_stage(SkSL::Program::Kind kind)241 VkShaderStageFlagBits skiasl_kind_to_vk_shader_stage(SkSL::Program::Kind kind) {
242 if (SkSL::Program::kVertex_Kind == kind) {
243 return VK_SHADER_STAGE_VERTEX_BIT;
244 }
245 if (SkSL::Program::kGeometry_Kind == kind) {
246 return VK_SHADER_STAGE_GEOMETRY_BIT;
247 }
248 SkASSERT(SkSL::Program::kFragment_Kind == kind);
249 return VK_SHADER_STAGE_FRAGMENT_BIT;
250 }
251
GrCompileVkShaderModule(const GrVkGpu * gpu,const char * shaderString,VkShaderStageFlagBits stage,VkShaderModule * shaderModule,VkPipelineShaderStageCreateInfo * stageInfo,const SkSL::Program::Settings & settings,SkSL::Program::Inputs * outInputs)252 bool GrCompileVkShaderModule(const GrVkGpu* gpu,
253 const char* shaderString,
254 VkShaderStageFlagBits stage,
255 VkShaderModule* shaderModule,
256 VkPipelineShaderStageCreateInfo* stageInfo,
257 const SkSL::Program::Settings& settings,
258 SkSL::Program::Inputs* outInputs) {
259 std::unique_ptr<SkSL::Program> program = gpu->shaderCompiler()->convertProgram(
260 vk_shader_stage_to_skiasl_kind(stage),
261 SkString(shaderString),
262 settings);
263 if (!program) {
264 SkDebugf("SkSL error:\n%s\n", gpu->shaderCompiler()->errorText().c_str());
265 SkASSERT(false);
266 }
267 *outInputs = program->fInputs;
268 SkSL::String code;
269 if (!gpu->shaderCompiler()->toSPIRV(*program, &code)) {
270 SkDebugf("%s\n", gpu->shaderCompiler()->errorText().c_str());
271 return false;
272 }
273
274 VkShaderModuleCreateInfo moduleCreateInfo;
275 memset(&moduleCreateInfo, 0, sizeof(VkShaderModuleCreateInfo));
276 moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
277 moduleCreateInfo.pNext = nullptr;
278 moduleCreateInfo.flags = 0;
279 moduleCreateInfo.codeSize = code.size();
280 moduleCreateInfo.pCode = (const uint32_t*)code.c_str();
281
282 VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateShaderModule(gpu->device(),
283 &moduleCreateInfo,
284 nullptr,
285 shaderModule));
286 if (err) {
287 return false;
288 }
289
290 memset(stageInfo, 0, sizeof(VkPipelineShaderStageCreateInfo));
291 stageInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
292 stageInfo->pNext = nullptr;
293 stageInfo->flags = 0;
294 stageInfo->stage = skiasl_kind_to_vk_shader_stage(program->fKind);
295 stageInfo->module = *shaderModule;
296 stageInfo->pName = "main";
297 stageInfo->pSpecializationInfo = nullptr;
298
299 return true;
300 }
301