/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkUtil.h"

#include "include/gpu/GrDirectContext.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrDataUtils.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/sksl/SkSLCompiler.h"

GrVkFormatIsSupported(VkFormat format)17 bool GrVkFormatIsSupported(VkFormat format) {
18 switch (format) {
19 case VK_FORMAT_R8G8B8A8_UNORM:
20 case VK_FORMAT_B8G8R8A8_UNORM:
21 case VK_FORMAT_R8G8B8A8_SRGB:
22 case VK_FORMAT_R8G8B8_UNORM:
23 case VK_FORMAT_R8G8_UNORM:
24 case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
25 case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
26 case VK_FORMAT_R5G6B5_UNORM_PACK16:
27 case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
28 case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
29 case VK_FORMAT_R8_UNORM:
30 case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
31 case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
32 case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
33 case VK_FORMAT_R16G16B16A16_SFLOAT:
34 case VK_FORMAT_R16_SFLOAT:
35 case VK_FORMAT_R16_UNORM:
36 case VK_FORMAT_R16G16_UNORM:
37 case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
38 case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
39 case VK_FORMAT_R16G16B16A16_UNORM:
40 case VK_FORMAT_R16G16_SFLOAT:
41 case VK_FORMAT_S8_UINT:
42 case VK_FORMAT_D24_UNORM_S8_UINT:
43 case VK_FORMAT_D32_SFLOAT_S8_UINT:
44 return true;
45 default:
46 return false;
47 }
48 }
49
GrVkFormatNeedsYcbcrSampler(VkFormat format)50 bool GrVkFormatNeedsYcbcrSampler(VkFormat format) {
51 return format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM ||
52 format == VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM;
53 }
54
GrSampleCountToVkSampleCount(uint32_t samples,VkSampleCountFlagBits * vkSamples)55 bool GrSampleCountToVkSampleCount(uint32_t samples, VkSampleCountFlagBits* vkSamples) {
56 SkASSERT(samples >= 1);
57 switch (samples) {
58 case 1:
59 *vkSamples = VK_SAMPLE_COUNT_1_BIT;
60 return true;
61 case 2:
62 *vkSamples = VK_SAMPLE_COUNT_2_BIT;
63 return true;
64 case 4:
65 *vkSamples = VK_SAMPLE_COUNT_4_BIT;
66 return true;
67 case 8:
68 *vkSamples = VK_SAMPLE_COUNT_8_BIT;
69 return true;
70 case 16:
71 *vkSamples = VK_SAMPLE_COUNT_16_BIT;
72 return true;
73 default:
74 return false;
75 }
76 }
77
vk_shader_stage_to_skiasl_kind(VkShaderStageFlagBits stage)78 SkSL::ProgramKind vk_shader_stage_to_skiasl_kind(VkShaderStageFlagBits stage) {
79 if (VK_SHADER_STAGE_VERTEX_BIT == stage) {
80 return SkSL::ProgramKind::kVertex;
81 }
82 if (VK_SHADER_STAGE_GEOMETRY_BIT == stage) {
83 return SkSL::ProgramKind::kGeometry;
84 }
85 SkASSERT(VK_SHADER_STAGE_FRAGMENT_BIT == stage);
86 return SkSL::ProgramKind::kFragment;
87 }
88
GrCompileVkShaderModule(GrVkGpu * gpu,const SkSL::String & shaderString,VkShaderStageFlagBits stage,VkShaderModule * shaderModule,VkPipelineShaderStageCreateInfo * stageInfo,const SkSL::Program::Settings & settings,SkSL::String * outSPIRV,SkSL::Program::Inputs * outInputs)89 bool GrCompileVkShaderModule(GrVkGpu* gpu,
90 const SkSL::String& shaderString,
91 VkShaderStageFlagBits stage,
92 VkShaderModule* shaderModule,
93 VkPipelineShaderStageCreateInfo* stageInfo,
94 const SkSL::Program::Settings& settings,
95 SkSL::String* outSPIRV,
96 SkSL::Program::Inputs* outInputs) {
97 TRACE_EVENT0("skia.shaders", "CompileVkShaderModule");
98 auto errorHandler = gpu->getContext()->priv().getShaderErrorHandler();
99 std::unique_ptr<SkSL::Program> program = gpu->shaderCompiler()->convertProgram(
100 vk_shader_stage_to_skiasl_kind(stage), shaderString, settings);
101 if (!program) {
102 errorHandler->compileError(shaderString.c_str(),
103 gpu->shaderCompiler()->errorText().c_str());
104 return false;
105 }
106 *outInputs = program->fInputs;
107 if (!gpu->shaderCompiler()->toSPIRV(*program, outSPIRV)) {
108 errorHandler->compileError(shaderString.c_str(),
109 gpu->shaderCompiler()->errorText().c_str());
110 return false;
111 }
112
113 return GrInstallVkShaderModule(gpu, *outSPIRV, stage, shaderModule, stageInfo);
114 }
115
GrInstallVkShaderModule(GrVkGpu * gpu,const SkSL::String & spirv,VkShaderStageFlagBits stage,VkShaderModule * shaderModule,VkPipelineShaderStageCreateInfo * stageInfo)116 bool GrInstallVkShaderModule(GrVkGpu* gpu,
117 const SkSL::String& spirv,
118 VkShaderStageFlagBits stage,
119 VkShaderModule* shaderModule,
120 VkPipelineShaderStageCreateInfo* stageInfo) {
121 TRACE_EVENT0("skia.shaders", "InstallVkShaderModule");
122 VkShaderModuleCreateInfo moduleCreateInfo;
123 memset(&moduleCreateInfo, 0, sizeof(VkShaderModuleCreateInfo));
124 moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
125 moduleCreateInfo.pNext = nullptr;
126 moduleCreateInfo.flags = 0;
127 moduleCreateInfo.codeSize = spirv.size();
128 moduleCreateInfo.pCode = (const uint32_t*)spirv.c_str();
129
130 VkResult err;
131 GR_VK_CALL_RESULT(gpu, err, CreateShaderModule(gpu->device(), &moduleCreateInfo, nullptr,
132 shaderModule));
133 if (err) {
134 return false;
135 }
136
137 memset(stageInfo, 0, sizeof(VkPipelineShaderStageCreateInfo));
138 stageInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
139 stageInfo->pNext = nullptr;
140 stageInfo->flags = 0;
141 stageInfo->stage = stage;
142 stageInfo->module = *shaderModule;
143 stageInfo->pName = "main";
144 stageInfo->pSpecializationInfo = nullptr;
145
146 return true;
147 }
148
GrVkFormatIsCompressed(VkFormat vkFormat)149 bool GrVkFormatIsCompressed(VkFormat vkFormat) {
150 switch (vkFormat) {
151 case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
152 case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
153 case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
154 return true;
155 default:
156 return false;
157 }
158 SkUNREACHABLE;
159 }
160