1 /*
2  * Copyright 2023 Google LLC
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "src/gpu/graphite/vk/VulkanGraphicsPipeline.h"
9 
10 #include "include/gpu/ShaderErrorHandler.h"
11 #include "include/gpu/graphite/TextureInfo.h"
12 #include "src/core/SkSLTypeShared.h"
13 #include "src/core/SkTraceEvent.h"
14 #include "src/gpu/SkSLToBackend.h"
15 #include "src/gpu/graphite/Attribute.h"
16 #include "src/gpu/graphite/ContextUtils.h"
17 #include "src/gpu/graphite/GraphicsPipelineDesc.h"
18 #include "src/gpu/graphite/Log.h"
19 #include "src/gpu/graphite/RenderPassDesc.h"
20 #include "src/gpu/graphite/RendererProvider.h"
21 #include "src/gpu/graphite/ResourceTypes.h"
22 #include "src/gpu/graphite/RuntimeEffectDictionary.h"
23 #include "src/gpu/graphite/ShaderInfo.h"
24 #include "src/gpu/graphite/vk/VulkanCaps.h"
25 #include "src/gpu/graphite/vk/VulkanGraphicsPipeline.h"
26 #include "src/gpu/graphite/vk/VulkanRenderPass.h"
27 #include "src/gpu/graphite/vk/VulkanResourceProvider.h"
28 #include "src/gpu/graphite/vk/VulkanSharedContext.h"
29 #include "src/gpu/vk/VulkanUtilsPriv.h"
30 #include "src/sksl/SkSLProgramKind.h"
31 #include "src/sksl/SkSLProgramSettings.h"
32 #include "src/sksl/ir/SkSLProgram.h"
33 
34 namespace skgpu::graphite {
35 
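// Maps a graphite VertexAttribType to the VkFormat used in the corresponding
// VkVertexInputAttributeDescription.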
36 static inline VkFormat attrib_type_to_vkformat(VertexAttribType type) {
37     switch (type) {
38         case VertexAttribType::kFloat:
39             return VK_FORMAT_R32_SFLOAT;
40         case VertexAttribType::kFloat2:
41             return VK_FORMAT_R32G32_SFLOAT;
42         case VertexAttribType::kFloat3:
43             return VK_FORMAT_R32G32B32_SFLOAT;
44         case VertexAttribType::kFloat4:
45             return VK_FORMAT_R32G32B32A32_SFLOAT;
46         case VertexAttribType::kHalf:
47             return VK_FORMAT_R16_SFLOAT;
48         case VertexAttribType::kHalf2:
49             return VK_FORMAT_R16G16_SFLOAT;
50         case VertexAttribType::kHalf4:
51             return VK_FORMAT_R16G16B16A16_SFLOAT;
52         case VertexAttribType::kInt2:
53             return VK_FORMAT_R32G32_SINT;
54         case VertexAttribType::kInt3:
55             return VK_FORMAT_R32G32B32_SINT;
56         case VertexAttribType::kInt4:
57             return VK_FORMAT_R32G32B32A32_SINT;
58         case VertexAttribType::kUInt2:
59             return VK_FORMAT_R32G32_UINT;
60         case VertexAttribType::kByte:
61             return VK_FORMAT_R8_SINT;
62         case VertexAttribType::kByte2:
63             return VK_FORMAT_R8G8_SINT;
64         case VertexAttribType::kByte4:
65             return VK_FORMAT_R8G8B8A8_SINT;
66         case VertexAttribType::kUByte:
67             return VK_FORMAT_R8_UINT;
68         case VertexAttribType::kUByte2:
69             return VK_FORMAT_R8G8_UINT;
70         case VertexAttribType::kUByte4:
71             return VK_FORMAT_R8G8B8A8_UINT;
72         case VertexAttribType::kUByte_norm:
73             return VK_FORMAT_R8_UNORM;
74         case VertexAttribType::kUByte4_norm:
75             return VK_FORMAT_R8G8B8A8_UNORM;
76         case VertexAttribType::kShort2:
77             return VK_FORMAT_R16G16_SINT;
78         case VertexAttribType::kShort4:
79             return VK_FORMAT_R16G16B16A16_SINT;
80         case VertexAttribType::kUShort2:
81             return VK_FORMAT_R16G16_UINT;
82         case VertexAttribType::kUShort2_norm:
83             return VK_FORMAT_R16G16_UNORM;
84         case VertexAttribType::kInt:
85             return VK_FORMAT_R32_SINT;
86         case VertexAttribType::kUInt:
87             return VK_FORMAT_R32_UINT;
88         case VertexAttribType::kUShort_norm:
89             return VK_FORMAT_R16_UNORM;
90         case VertexAttribType::kUShort4_norm:
91             return VK_FORMAT_R16G16B16A16_UNORM;
92     }
93     SK_ABORT("Unknown vertex attrib type");
94 }
95 
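// Fills out VkPipelineVertexInputStateCreateInfo from the RenderStep's vertex and instance
// attributes: one attribute description per attribute, and one binding description for each of
// the vertex/instance buffers that actually has attributes.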
96 static void setup_vertex_input_state(
97         const SkSpan<const Attribute>& vertexAttrs,
98         const SkSpan<const Attribute>& instanceAttrs,
99         VkPipelineVertexInputStateCreateInfo* vertexInputInfo,
100         skia_private::STArray<2, VkVertexInputBindingDescription, true>* bindingDescs,
101         skia_private::STArray<16, VkVertexInputAttributeDescription>* attributeDescs) {
102     // Setup attribute & binding descriptions
103     int attribIndex = 0;
104     size_t vertexAttributeOffset = 0;
105     for (auto attrib : vertexAttrs) {
106         VkVertexInputAttributeDescription vkAttrib;
107         vkAttrib.location = attribIndex++;
108         vkAttrib.binding = VulkanGraphicsPipeline::kVertexBufferIndex;
109         vkAttrib.format = attrib_type_to_vkformat(attrib.cpuType());
110         vkAttrib.offset = vertexAttributeOffset;
111         vertexAttributeOffset += attrib.sizeAlign4();
112         attributeDescs->push_back(vkAttrib);
113     }
114 
115     size_t instanceAttributeOffset = 0;
116     for (auto attrib : instanceAttrs) {
117         VkVertexInputAttributeDescription vkAttrib;
118         vkAttrib.location = attribIndex++;
119         vkAttrib.binding = VulkanGraphicsPipeline::kInstanceBufferIndex;
120         vkAttrib.format = attrib_type_to_vkformat(attrib.cpuType());
121         vkAttrib.offset = instanceAttributeOffset;
122         instanceAttributeOffset += attrib.sizeAlign4();
123         attributeDescs->push_back(vkAttrib);
124     }
125 
126     if (bindingDescs && !vertexAttrs.empty()) {
127         bindingDescs->push_back() = {
128                 VulkanGraphicsPipeline::kVertexBufferIndex,
129                 (uint32_t) vertexAttributeOffset,
130                 VK_VERTEX_INPUT_RATE_VERTEX
131         };
132     }
133     if (bindingDescs && !instanceAttrs.empty()) {
134         bindingDescs->push_back() = {
135                 VulkanGraphicsPipeline::kInstanceBufferIndex,
136                 (uint32_t) instanceAttributeOffset,
137                 VK_VERTEX_INPUT_RATE_INSTANCE
138         };
139     }
140 
141     memset(vertexInputInfo, 0, sizeof(VkPipelineVertexInputStateCreateInfo));
142     vertexInputInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
143     vertexInputInfo->pNext = nullptr;
144     vertexInputInfo->flags = 0;
145     vertexInputInfo->vertexBindingDescriptionCount = bindingDescs ? bindingDescs->size() : 0;
146     vertexInputInfo->pVertexBindingDescriptions =
147             bindingDescs && !bindingDescs->empty() ? bindingDescs->begin() : VK_NULL_HANDLE;
148     vertexInputInfo->vertexAttributeDescriptionCount = attributeDescs ? attributeDescs->size() : 0;
149     vertexInputInfo->pVertexAttributeDescriptions =
150             attributeDescs && !attributeDescs->empty() ? attributeDescs->begin() : VK_NULL_HANDLE;
151 }
152 
153 static VkPrimitiveTopology primitive_type_to_vk_topology(PrimitiveType primitiveType) {
154     switch (primitiveType) {
155         case PrimitiveType::kTriangles:
156             return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
157         case PrimitiveType::kTriangleStrip:
158             return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
159         case PrimitiveType::kPoints:
160             return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
161     }
162     SkUNREACHABLE;
163 }
164 
165 static void setup_input_assembly_state(PrimitiveType primitiveType,
166                                        VkPipelineInputAssemblyStateCreateInfo* inputAssemblyInfo) {
167     memset(inputAssemblyInfo, 0, sizeof(VkPipelineInputAssemblyStateCreateInfo));
168     inputAssemblyInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
169     inputAssemblyInfo->pNext = nullptr;
170     inputAssemblyInfo->flags = 0;
171     inputAssemblyInfo->primitiveRestartEnable = false;
172     inputAssemblyInfo->topology = primitive_type_to_vk_topology(primitiveType);
173 }
174 
175 static VkStencilOp stencil_op_to_vk_stencil_op(StencilOp op) {
176     static const VkStencilOp gTable[] = {
177         VK_STENCIL_OP_KEEP,                 // kKeep
178         VK_STENCIL_OP_ZERO,                 // kZero
179         VK_STENCIL_OP_REPLACE,              // kReplace
180         VK_STENCIL_OP_INVERT,               // kInvert
181         VK_STENCIL_OP_INCREMENT_AND_WRAP,   // kIncWrap
182         VK_STENCIL_OP_DECREMENT_AND_WRAP,   // kDecWrap
183         VK_STENCIL_OP_INCREMENT_AND_CLAMP,  // kIncClamp
184         VK_STENCIL_OP_DECREMENT_AND_CLAMP,  // kDecClamp
185     };
186     static_assert(std::size(gTable) == kStencilOpCount);
187     static_assert(0 == (int)StencilOp::kKeep);
188     static_assert(1 == (int)StencilOp::kZero);
189     static_assert(2 == (int)StencilOp::kReplace);
190     static_assert(3 == (int)StencilOp::kInvert);
191     static_assert(4 == (int)StencilOp::kIncWrap);
192     static_assert(5 == (int)StencilOp::kDecWrap);
193     static_assert(6 == (int)StencilOp::kIncClamp);
194     static_assert(7 == (int)StencilOp::kDecClamp);
195     SkASSERT(op < (StencilOp)kStencilOpCount);
196     return gTable[(int)op];
197 }
198 
199 static VkCompareOp compare_op_to_vk_compare_op(CompareOp op) {
200     static const VkCompareOp gTable[] = {
201         VK_COMPARE_OP_ALWAYS,              // kAlways
202         VK_COMPARE_OP_NEVER,               // kNever
203         VK_COMPARE_OP_GREATER,             // kGreater
204         VK_COMPARE_OP_GREATER_OR_EQUAL,    // kGEqual
205         VK_COMPARE_OP_LESS,                // kLess
206         VK_COMPARE_OP_LESS_OR_EQUAL,       // kLEqual
207         VK_COMPARE_OP_EQUAL,               // kEqual
208         VK_COMPARE_OP_NOT_EQUAL,           // kNotEqual
209     };
210     static_assert(std::size(gTable) == kCompareOpCount);
211     static_assert(0 == (int)CompareOp::kAlways);
212     static_assert(1 == (int)CompareOp::kNever);
213     static_assert(2 == (int)CompareOp::kGreater);
214     static_assert(3 == (int)CompareOp::kGEqual);
215     static_assert(4 == (int)CompareOp::kLess);
216     static_assert(5 == (int)CompareOp::kLEqual);
217     static_assert(6 == (int)CompareOp::kEqual);
218     static_assert(7 == (int)CompareOp::kNotEqual);
219     SkASSERT(op < (CompareOp)kCompareOpCount);
220 
221     return gTable[(int)op];
222 }
223 
224 static void setup_stencil_op_state(VkStencilOpState* opState,
225                                    const DepthStencilSettings::Face& face,
226                                    uint32_t referenceValue) {
227     opState->failOp = stencil_op_to_vk_stencil_op(face.fStencilFailOp);
228     opState->passOp = stencil_op_to_vk_stencil_op(face.fDepthStencilPassOp);
229     opState->depthFailOp = stencil_op_to_vk_stencil_op(face.fDepthFailOp);
230     opState->compareOp = compare_op_to_vk_compare_op(face.fCompareOp);
231     opState->compareMask = face.fReadMask; // TODO - check this.
232     opState->writeMask = face.fWriteMask;
233     opState->reference = referenceValue;
234 }
235 
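// Translates graphite DepthStencilSettings into VkPipelineDepthStencilStateCreateInfo. Depth
// bounds testing is always disabled, and front/back stencil op state is only filled out when the
// stencil test is enabled.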
236 static void setup_depth_stencil_state(const DepthStencilSettings& stencilSettings,
237                                       VkPipelineDepthStencilStateCreateInfo* stencilInfo) {
238     SkASSERT(stencilSettings.fDepthTestEnabled ||
239              stencilSettings.fDepthCompareOp == CompareOp::kAlways);
240 
241     memset(stencilInfo, 0, sizeof(VkPipelineDepthStencilStateCreateInfo));
242     stencilInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
243     stencilInfo->pNext = nullptr;
244     stencilInfo->flags = 0;
245     stencilInfo->depthTestEnable = stencilSettings.fDepthTestEnabled;
246     stencilInfo->depthWriteEnable = stencilSettings.fDepthWriteEnabled;
247     stencilInfo->depthCompareOp = compare_op_to_vk_compare_op(stencilSettings.fDepthCompareOp);
248     stencilInfo->depthBoundsTestEnable = VK_FALSE; // Default value TODO - Confirm
249     stencilInfo->stencilTestEnable = stencilSettings.fStencilTestEnabled;
250     if (stencilSettings.fStencilTestEnabled) {
251         setup_stencil_op_state(&stencilInfo->front,
252                                stencilSettings.fFrontStencil,
253                                stencilSettings.fStencilReferenceValue);
254         setup_stencil_op_state(&stencilInfo->back,
255                                stencilSettings.fBackStencil,
256                                stencilSettings.fStencilReferenceValue);
257     }
258     stencilInfo->minDepthBounds = 0.0f;
259     stencilInfo->maxDepthBounds = 1.0f;
260 }
261 
262 static void setup_viewport_scissor_state(VkPipelineViewportStateCreateInfo* viewportInfo) {
263     memset(viewportInfo, 0, sizeof(VkPipelineViewportStateCreateInfo));
264     viewportInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
265     viewportInfo->pNext = nullptr;
266     viewportInfo->flags = 0;
267 
268     viewportInfo->viewportCount = 1;
269     viewportInfo->pViewports = nullptr; // This is set dynamically with a draw pass command
270 
271     viewportInfo->scissorCount = 1;
272     viewportInfo->pScissors = nullptr; // This is set dynamically with a draw pass command
273 
274     SkASSERT(viewportInfo->viewportCount == viewportInfo->scissorCount);
275 }
276 
277 static void setup_multisample_state(int numSamples,
278                                     VkPipelineMultisampleStateCreateInfo* multisampleInfo) {
279     memset(multisampleInfo, 0, sizeof(VkPipelineMultisampleStateCreateInfo));
280     multisampleInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
281     multisampleInfo->pNext = nullptr;
282     multisampleInfo->flags = 0;
283     SkAssertResult(skgpu::SampleCountToVkSampleCount(numSamples,
284                                                      &multisampleInfo->rasterizationSamples));
285     multisampleInfo->sampleShadingEnable = VK_FALSE;
286     multisampleInfo->minSampleShading = 0.0f;
287     multisampleInfo->pSampleMask = nullptr;
288     multisampleInfo->alphaToCoverageEnable = VK_FALSE;
289     multisampleInfo->alphaToOneEnable = VK_FALSE;
290 }
291 
292 static VkBlendFactor blend_coeff_to_vk_blend(skgpu::BlendCoeff coeff) {
293     switch (coeff) {
294         case skgpu::BlendCoeff::kZero:
295             return VK_BLEND_FACTOR_ZERO;
296         case skgpu::BlendCoeff::kOne:
297             return VK_BLEND_FACTOR_ONE;
298         case skgpu::BlendCoeff::kSC:
299             return VK_BLEND_FACTOR_SRC_COLOR;
300         case skgpu::BlendCoeff::kISC:
301             return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
302         case skgpu::BlendCoeff::kDC:
303             return VK_BLEND_FACTOR_DST_COLOR;
304         case skgpu::BlendCoeff::kIDC:
305             return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
306         case skgpu::BlendCoeff::kSA:
307             return VK_BLEND_FACTOR_SRC_ALPHA;
308         case skgpu::BlendCoeff::kISA:
309             return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
310         case skgpu::BlendCoeff::kDA:
311             return VK_BLEND_FACTOR_DST_ALPHA;
312         case skgpu::BlendCoeff::kIDA:
313             return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
314         case skgpu::BlendCoeff::kConstC:
315             return VK_BLEND_FACTOR_CONSTANT_COLOR;
316         case skgpu::BlendCoeff::kIConstC:
317             return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
318         case skgpu::BlendCoeff::kS2C:
319             return VK_BLEND_FACTOR_SRC1_COLOR;
320         case skgpu::BlendCoeff::kIS2C:
321             return VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR;
322         case skgpu::BlendCoeff::kS2A:
323             return VK_BLEND_FACTOR_SRC1_ALPHA;
324         case skgpu::BlendCoeff::kIS2A:
325             return VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA;
326         case skgpu::BlendCoeff::kIllegal:
327             return VK_BLEND_FACTOR_ZERO;
328     }
329     SkUNREACHABLE;
330 }
331 
332 static VkBlendOp blend_equation_to_vk_blend_op(skgpu::BlendEquation equation) {
333     static const VkBlendOp gTable[] = {
334         // Basic blend ops
335         VK_BLEND_OP_ADD,
336         VK_BLEND_OP_SUBTRACT,
337         VK_BLEND_OP_REVERSE_SUBTRACT,
338 
339         // Advanced blend ops
340         VK_BLEND_OP_SCREEN_EXT,
341         VK_BLEND_OP_OVERLAY_EXT,
342         VK_BLEND_OP_DARKEN_EXT,
343         VK_BLEND_OP_LIGHTEN_EXT,
344         VK_BLEND_OP_COLORDODGE_EXT,
345         VK_BLEND_OP_COLORBURN_EXT,
346         VK_BLEND_OP_HARDLIGHT_EXT,
347         VK_BLEND_OP_SOFTLIGHT_EXT,
348         VK_BLEND_OP_DIFFERENCE_EXT,
349         VK_BLEND_OP_EXCLUSION_EXT,
350         VK_BLEND_OP_MULTIPLY_EXT,
351         VK_BLEND_OP_HSL_HUE_EXT,
352         VK_BLEND_OP_HSL_SATURATION_EXT,
353         VK_BLEND_OP_HSL_COLOR_EXT,
354         VK_BLEND_OP_HSL_LUMINOSITY_EXT,
355 
356         // Illegal.
357         VK_BLEND_OP_ADD,
358     };
359     static_assert(0 == (int)skgpu::BlendEquation::kAdd);
360     static_assert(1 == (int)skgpu::BlendEquation::kSubtract);
361     static_assert(2 == (int)skgpu::BlendEquation::kReverseSubtract);
362     static_assert(3 == (int)skgpu::BlendEquation::kScreen);
363     static_assert(4 == (int)skgpu::BlendEquation::kOverlay);
364     static_assert(5 == (int)skgpu::BlendEquation::kDarken);
365     static_assert(6 == (int)skgpu::BlendEquation::kLighten);
366     static_assert(7 == (int)skgpu::BlendEquation::kColorDodge);
367     static_assert(8 == (int)skgpu::BlendEquation::kColorBurn);
368     static_assert(9 == (int)skgpu::BlendEquation::kHardLight);
369     static_assert(10 == (int)skgpu::BlendEquation::kSoftLight);
370     static_assert(11 == (int)skgpu::BlendEquation::kDifference);
371     static_assert(12 == (int)skgpu::BlendEquation::kExclusion);
372     static_assert(13 == (int)skgpu::BlendEquation::kMultiply);
373     static_assert(14 == (int)skgpu::BlendEquation::kHSLHue);
374     static_assert(15 == (int)skgpu::BlendEquation::kHSLSaturation);
375     static_assert(16 == (int)skgpu::BlendEquation::kHSLColor);
376     static_assert(17 == (int)skgpu::BlendEquation::kHSLLuminosity);
377     static_assert(std::size(gTable) == skgpu::kBlendEquationCnt);
378 
379     SkASSERT((unsigned)equation < skgpu::kBlendEquationCnt);
380     return gTable[(int)equation];
381 }
382 
383 static void setup_color_blend_state(const skgpu::BlendInfo& blendInfo,
384                                     VkPipelineColorBlendStateCreateInfo* colorBlendInfo,
385                                     VkPipelineColorBlendAttachmentState* attachmentState) {
386     skgpu::BlendEquation equation = blendInfo.fEquation;
387     skgpu::BlendCoeff srcCoeff = blendInfo.fSrcBlend;
388     skgpu::BlendCoeff dstCoeff = blendInfo.fDstBlend;
389     bool blendOff = skgpu::BlendShouldDisable(equation, srcCoeff, dstCoeff);
390 
391     memset(attachmentState, 0, sizeof(VkPipelineColorBlendAttachmentState));
392     attachmentState->blendEnable = !blendOff;
393     if (!blendOff) {
394         attachmentState->srcColorBlendFactor = blend_coeff_to_vk_blend(srcCoeff);
395         attachmentState->dstColorBlendFactor = blend_coeff_to_vk_blend(dstCoeff);
396         attachmentState->colorBlendOp = blend_equation_to_vk_blend_op(equation);
397         attachmentState->srcAlphaBlendFactor = blend_coeff_to_vk_blend(srcCoeff);
398         attachmentState->dstAlphaBlendFactor = blend_coeff_to_vk_blend(dstCoeff);
399         attachmentState->alphaBlendOp = blend_equation_to_vk_blend_op(equation);
400     }
401 
402     if (!blendInfo.fWritesColor) {
403         attachmentState->colorWriteMask = 0;
404     } else {
405         attachmentState->colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
406                                           VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
407     }
408 
409     memset(colorBlendInfo, 0, sizeof(VkPipelineColorBlendStateCreateInfo));
410     colorBlendInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
411     colorBlendInfo->pNext = nullptr;
412     colorBlendInfo->flags = 0;
413     colorBlendInfo->logicOpEnable = VK_FALSE;
414     colorBlendInfo->attachmentCount = 1;
415     colorBlendInfo->pAttachments = attachmentState;
416     // colorBlendInfo->blendConstants is set dynamically
417 }
418 
419 static void setup_raster_state(bool isWireframe,
420                                VkPipelineRasterizationStateCreateInfo* rasterInfo) {
421     memset(rasterInfo, 0, sizeof(VkPipelineRasterizationStateCreateInfo));
422     rasterInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
423     rasterInfo->pNext = nullptr;
424     rasterInfo->flags = 0;
425     rasterInfo->depthClampEnable = VK_FALSE;
426     rasterInfo->rasterizerDiscardEnable = VK_FALSE;
427     rasterInfo->polygonMode = isWireframe ? VK_POLYGON_MODE_LINE : VK_POLYGON_MODE_FILL;
428     rasterInfo->cullMode = VK_CULL_MODE_NONE;
429     rasterInfo->frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
430     rasterInfo->depthBiasEnable = VK_FALSE;
431     rasterInfo->depthBiasConstantFactor = 0.0f;
432     rasterInfo->depthBiasClamp = 0.0f;
433     rasterInfo->depthBiasSlopeFactor = 0.0f;
434     rasterInfo->lineWidth = 1.0f;
435 }
436 
437 static void setup_shader_stage_info(VkShaderStageFlagBits stage,
438                                     VkShaderModule shaderModule,
439                                     VkPipelineShaderStageCreateInfo* shaderStageInfo) {
440     memset(shaderStageInfo, 0, sizeof(VkPipelineShaderStageCreateInfo));
441     shaderStageInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
442     shaderStageInfo->pNext = nullptr;
443     shaderStageInfo->flags = 0;
444     shaderStageInfo->stage = stage;
445     shaderStageInfo->module = shaderModule;
446     shaderStageInfo->pName = "main";
447     shaderStageInfo->pSpecializationInfo = nullptr;
448 }
449 
450 static VkDescriptorSetLayout descriptor_data_to_layout(const VulkanSharedContext* sharedContext,
451         const SkSpan<DescriptorData>& descriptorData) {
452     // descriptorData can be empty to indicate that we should create a mock placeholder layout
453     // with no descriptors.
454     VkDescriptorSetLayout setLayout;
455     DescriptorDataToVkDescSetLayout(sharedContext, descriptorData, &setLayout);
456     if (setLayout == VK_NULL_HANDLE) {
457         SKGPU_LOG_E("Failed to create descriptor set layout; pipeline creation will fail.\n");
458         return VK_NULL_HANDLE;
459     }
460     return setLayout;
461 }
462 
463 static void destroy_desc_set_layouts(const VulkanSharedContext* sharedContext,
464                                      skia_private::TArray<VkDescriptorSetLayout>& setLayouts) {
465     for (int i = 0; i < setLayouts.size(); i++) {
466         if (setLayouts[i] != VK_NULL_HANDLE) {
467             VULKAN_CALL(sharedContext->interface(),
468             DestroyDescriptorSetLayout(sharedContext->device(),
469                                        setLayouts[i],
470                                        nullptr));
471         }
472     }
473 }
474 
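// Builds the descriptor set layout for the uniform buffers (render step, paint, and gradient
// buffer), preferring storage buffers for step/paint uniforms when the device supports them.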
475 static bool uniform_desc_set_layout(VkDescriptorSetLayout& outLayout,
476                                     const VulkanSharedContext* sharedContext,
477                                     bool hasStepUniforms,
478                                     bool hasPaintUniforms,
479                                     bool hasGradientBuffer) {
480     // Define a container with size reserved for up to kNumUniformBuffers descriptors. Only add
481     // DescriptorData for uniforms that actually are used and need to be included in the layout.
482     skia_private::STArray<
483             VulkanGraphicsPipeline::kNumUniformBuffers, DescriptorData> uniformDescriptors;
484 
485     DescriptorType uniformBufferType =
486             sharedContext->caps()->storageBufferSupport() ? DescriptorType::kStorageBuffer
487                                                           : DescriptorType::kUniformBuffer;
488     if (hasStepUniforms) {
489         uniformDescriptors.push_back({
490                 uniformBufferType, /*count=*/1,
491                 VulkanGraphicsPipeline::kRenderStepUniformBufferIndex,
492                 PipelineStageFlags::kVertexShader | PipelineStageFlags::kFragmentShader});
493     }
494     if (hasPaintUniforms) {
495         uniformDescriptors.push_back({
496                 uniformBufferType, /*count=*/1,
497                 VulkanGraphicsPipeline::kPaintUniformBufferIndex,
498                 PipelineStageFlags::kFragmentShader});
499     }
500     if (hasGradientBuffer) {
501         uniformDescriptors.push_back({
502                 DescriptorType::kStorageBuffer,
503                 /*count=*/1,
504                 VulkanGraphicsPipeline::kGradientBufferIndex,
505                 PipelineStageFlags::kFragmentShader});
506     }
507 
508     // If no uniforms are used, still request a mock VkDescriptorSetLayout handle by passing in the
509     // unpopulated span of uniformDescriptors to descriptor set layout creation.
510     outLayout = descriptor_data_to_layout(sharedContext, {uniformDescriptors});
511     return outLayout != VK_NULL_HANDLE;
512 }
513 
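// Builds the descriptor set layout for the combined texture/sampler bindings, wiring any
// immutable samplers into their corresponding binding indices.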
514 static bool texture_sampler_desc_set_layout(VkDescriptorSetLayout& outLayout,
515                                             const VulkanSharedContext* sharedContext,
516                                             const int numTextureSamplers,
517                                             SkSpan<sk_sp<VulkanSampler>> immutableSamplers) {
518     SkASSERT(numTextureSamplers >= 0);
519     // The immutable sampler span size must equal the total number of texture/samplers such that
520     // we can use the index of a sampler as its binding index (or we just have none, which
521     // enables us to skip some of this logic entirely).
522     SkASSERT(immutableSamplers.empty() ||
523              SkTo<int>(immutableSamplers.size()) == numTextureSamplers);
524 
525     skia_private::TArray<DescriptorData> textureSamplerDescs(numTextureSamplers);
526     for (int i = 0; i < numTextureSamplers; i++) {
527         Sampler* immutableSampler = nullptr;
528         if (!immutableSamplers.empty() && immutableSamplers[i]) {
529             immutableSampler = immutableSamplers[i].get();
530         }
531         textureSamplerDescs.push_back({DescriptorType::kCombinedTextureSampler,
532                                         /*count=*/1,
533                                         /*bindingIdx=*/i,
534                                         PipelineStageFlags::kFragmentShader,
535                                         immutableSampler});
536     }
537 
538     // If no texture/samplers are used, still request a mock VkDescriptorSetLayout handle by
539     // passing in the unpopulated span of textureSamplerDescs to descriptor set layout creation.
540     outLayout = descriptor_data_to_layout(sharedContext, {textureSamplerDescs});
541     return outLayout != VK_NULL_HANDLE;
542 }
543 
544 static bool input_attachment_desc_set_layout(VkDescriptorSetLayout& outLayout,
545                                              const VulkanSharedContext* sharedContext,
546                                              int numInputAttachments) {
547     // For now, we expect to have either 0 or 1 input attachment (used to load MSAA from resolve).
548     SkASSERT(numInputAttachments == 0 || numInputAttachments == 1);
549 
550     skia_private::TArray<DescriptorData> inputAttachmentDescs(numInputAttachments);
551     if (numInputAttachments == 1) {
552         inputAttachmentDescs.push_back(VulkanGraphicsPipeline::kInputAttachmentDescriptor);
553     }
554 
555     // If no input attachments are used, still request a mock VkDescriptorSetLayout handle by
556     // passing in the unpopulated span of inputAttachmentDescs to descriptor set layout creation.
557     outLayout = descriptor_data_to_layout(sharedContext, {inputAttachmentDescs});
558     return outLayout != VK_NULL_HANDLE;
559 }
560 
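// Creates a VkPipelineLayout from the uniform, texture/sampler, and input attachment descriptor
// set layouts plus an optional push constant range. Returns VK_NULL_HANDLE if any descriptor set
// layout or the pipeline layout itself cannot be created.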
561 static VkPipelineLayout setup_pipeline_layout(const VulkanSharedContext* sharedContext,
562                                               uint32_t pushConstantSize,
563                                               VkShaderStageFlagBits pushConstantPipelineStageFlags,
564                                               bool hasStepUniforms,
565                                               bool hasPaintUniforms,
566                                               bool hasGradientBuffer,
567                                               int numTextureSamplers,
568                                               int numInputAttachments,
569                                               SkSpan<sk_sp<VulkanSampler>> immutableSamplers) {
570     // Create a container with the anticipated amount (kNumDescSets) of VkDescriptorSetLayout
571     // handles which will be used to create the pipeline layout.
572     skia_private::STArray<
573             VulkanGraphicsPipeline::kNumDescSets, VkDescriptorSetLayout> setLayouts;
574     setLayouts.push_back_n(VulkanGraphicsPipeline::kNumDescSets, VkDescriptorSetLayout());
575 
576     // Populate the container with actual descriptor set layout handles. Each index should contain
577     // either a valid/real or a mock/placeholder layout handle. Mock VkDescriptorSetLayouts do not
578     // actually contain any descriptors, but are needed as placeholders to maintain expected
579     // descriptor set binding indices. This is because VK_NULL_HANDLE is a valid
580     // VkDescriptorSetLayout value iff the graphicsPipelineLibrary feature is enabled, which is not
581     // the case for all targeted devices (see
582     // VUID-VkPipelineLayoutCreateInfo-graphicsPipelineLibrary-06753). If any of the helpers
583     // encounter an error (i.e., return false), return a null VkPipelineLayout.
584     if (!uniform_desc_set_layout(
585                 setLayouts[VulkanGraphicsPipeline::kUniformBufferDescSetIndex],
586                 sharedContext,
587                 hasStepUniforms,
588                 hasPaintUniforms,
589                 hasGradientBuffer) ||
590         !texture_sampler_desc_set_layout(
591                 setLayouts[VulkanGraphicsPipeline::kTextureBindDescSetIndex],
592                 sharedContext,
593                 numTextureSamplers,
594                 immutableSamplers) ||
595         !input_attachment_desc_set_layout(
596                 setLayouts[VulkanGraphicsPipeline::kInputAttachmentDescSetIndex],
597                 sharedContext,
598                 numInputAttachments)) {
599         destroy_desc_set_layouts(sharedContext, setLayouts);
600         return VK_NULL_HANDLE;
601     }
602 
603     // Generate a pipeline layout using the now-populated descriptor set layout array
604     VkPushConstantRange pushConstantRange;
605     if (pushConstantSize) {
606         pushConstantRange.offset = 0;
607         pushConstantRange.size = pushConstantSize;
608         pushConstantRange.stageFlags = pushConstantPipelineStageFlags;
609     }
610     VkPipelineLayoutCreateInfo layoutCreateInfo;
611     memset(&layoutCreateInfo, 0, sizeof(VkPipelineLayoutCreateInfo));
612     layoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
613     layoutCreateInfo.pNext = nullptr;
614     layoutCreateInfo.flags = 0;
615     layoutCreateInfo.setLayoutCount = setLayouts.size();
616     layoutCreateInfo.pSetLayouts = setLayouts.begin();
617     layoutCreateInfo.pushConstantRangeCount = pushConstantSize ? 1 : 0;
618     layoutCreateInfo.pPushConstantRanges = pushConstantSize ? &pushConstantRange : nullptr;
619 
620     VkResult result;
621     VkPipelineLayout layout;
622     VULKAN_CALL_RESULT(sharedContext,
623                        result,
624                        CreatePipelineLayout(sharedContext->device(),
625                                             &layoutCreateInfo,
626                                             /*const VkAllocationCallbacks*=*/nullptr,
627                                             &layout));
628 
629     // DescriptorSetLayouts can be deleted after the pipeline layout is created.
630     destroy_desc_set_layouts(sharedContext, setLayouts);
631 
632     return result == VK_SUCCESS ? layout : VK_NULL_HANDLE;
633 }
634 
635 static void destroy_shader_modules(const VulkanSharedContext* sharedContext,
636                                    VkShaderModule vsModule,
637                                    VkShaderModule fsModule) {
638     if (vsModule != VK_NULL_HANDLE) {
639         VULKAN_CALL(sharedContext->interface(),
640                     DestroyShaderModule(sharedContext->device(), vsModule, nullptr));
641     }
642     if (fsModule != VK_NULL_HANDLE) {
643         VULKAN_CALL(sharedContext->interface(),
644                     DestroyShaderModule(sharedContext->device(), fsModule, nullptr));
645     }
646 }
647 
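// Marks viewport, scissor, and blend constants as dynamic state so they are supplied by draw
// pass commands rather than baked into the pipeline.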
648 static void setup_dynamic_state(VkPipelineDynamicStateCreateInfo* dynamicInfo,
649                                 VkDynamicState* dynamicStates) {
650     memset(dynamicInfo, 0, sizeof(VkPipelineDynamicStateCreateInfo));
651     dynamicInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
652     dynamicInfo->pNext = VK_NULL_HANDLE;
653     dynamicInfo->flags = 0;
654     dynamicStates[0] = VK_DYNAMIC_STATE_VIEWPORT;
655     dynamicStates[1] = VK_DYNAMIC_STATE_SCISSOR;
656     dynamicStates[2] = VK_DYNAMIC_STATE_BLEND_CONSTANTS;
657     dynamicInfo->dynamicStateCount = 3;
658     dynamicInfo->pDynamicStates = dynamicStates;
659 }
660 
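// Creates a complete graphics pipeline for the given GraphicsPipelineDesc/RenderPassDesc pair:
// generates and compiles the SkSL to SPIR-V shader modules, gathers any immutable samplers,
// assembles the fixed-function state and pipeline layout, and creates the VkPipeline against a
// compatible render pass.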
661 sk_sp<VulkanGraphicsPipeline> VulkanGraphicsPipeline::Make(
662         VulkanResourceProvider* rsrcProvider,
663         const RuntimeEffectDictionary* runtimeDict,
664         const UniqueKey& pipelineKey,
665         const GraphicsPipelineDesc& pipelineDesc,
666         const RenderPassDesc& renderPassDesc,
667         SkEnumBitMask<PipelineCreationFlags> pipelineCreationFlags,
668         uint32_t compilationID) {
669     SkASSERT(rsrcProvider);
670     const VulkanSharedContext* sharedContext = rsrcProvider->vulkanSharedContext();
671 
672     SkSL::ProgramSettings settings;
673     settings.fSharpenTextures = true;
674     settings.fForceNoRTFlip = true;
675 
676     ShaderErrorHandler* errorHandler = sharedContext->caps()->shaderErrorHandler();
677 
678     const RenderStep* step = sharedContext->rendererProvider()->lookup(pipelineDesc.renderStepID());
679     const bool useStorageBuffers = sharedContext->caps()->storageBufferSupport();
680 
681     if (step->vertexAttributes().size() + step->instanceAttributes().size() >
682         sharedContext->vulkanCaps().maxVertexAttributes()) {
683         SKGPU_LOG_W("Requested more than the supported number of vertex attributes");
684         return nullptr;
685     }
686 
687     skia_private::TArray<SamplerDesc> descContainer {};
688     std::unique_ptr<ShaderInfo> shaderInfo = ShaderInfo::Make(sharedContext->caps(),
689                                                               sharedContext->shaderCodeDictionary(),
690                                                               runtimeDict,
691                                                               step,
692                                                               pipelineDesc.paintParamsID(),
693                                                               useStorageBuffers,
694                                                               renderPassDesc.fWriteSwizzle,
695                                                               &descContainer);
696 
697     // Populate an array of sampler ptrs where a sampler's index within the array indicates its
698     // binding index within the descriptor set. Initialize all values to nullptr, which represents a
699     // "regular", dynamic sampler at that index.
700     skia_private::TArray<sk_sp<VulkanSampler>> immutableSamplers;
701     immutableSamplers.push_back_n(shaderInfo->numFragmentTexturesAndSamplers());
702     SkASSERT(rsrcProvider);
703     // This logic relies upon Vulkan using combined texture/sampler bindings, which is necessary for
704     // ycbcr samplers per the Vulkan spec.
705     SkASSERT(!sharedContext->caps()->resourceBindingRequirements().fSeparateTextureAndSamplerBinding
706              && shaderInfo->numFragmentTexturesAndSamplers() == descContainer.size());
707     for (int i = 0; i < descContainer.size(); i++) {
708         // If a SamplerDesc is not equivalent to the default-initialized SamplerDesc, that indicates
709         // the usage of an immutable sampler. That sampler desc should then be used to obtain an
710         // actual immutable sampler from the resource provider and added at the proper index within
711         // immutableSamplers for inclusion in the pipeline layout.
712         if (descContainer.at(i) != SamplerDesc()) {
713             sk_sp<Sampler> immutableSampler =
714                     rsrcProvider->findOrCreateCompatibleSampler(descContainer.at(i));
715             sk_sp<VulkanSampler> vulkanSampler =
716                     sk_ref_sp<VulkanSampler>(static_cast<VulkanSampler*>(immutableSampler.get()));
717             SkASSERT(vulkanSampler);
718             immutableSamplers[i] = std::move(vulkanSampler);
719         }
720     }
721 
722     const std::string& fsSkSL = shaderInfo->fragmentSkSL();
723 
724     const bool hasFragmentSkSL = !fsSkSL.empty();
725     std::string vsSPIRV, fsSPIRV;
726     VkShaderModule fsModule = VK_NULL_HANDLE, vsModule = VK_NULL_HANDLE;
727     SkSL::Program::Interface vsInterface, fsInterface;
728     if (hasFragmentSkSL) {
729         if (!skgpu::SkSLToSPIRV(sharedContext->caps()->shaderCaps(),
730                                 fsSkSL,
731                                 SkSL::ProgramKind::kGraphiteFragment,
732                                 settings,
733                                 &fsSPIRV,
734                                 &fsInterface,
735                                 errorHandler)) {
736             return nullptr;
737         }
738 
739         fsModule = createVulkanShaderModule(sharedContext, fsSPIRV, VK_SHADER_STAGE_FRAGMENT_BIT);
740         if (!fsModule) {
741             return nullptr;
742         }
743     }
744 
745     const std::string& vsSkSL = shaderInfo->vertexSkSL();
746     if (!skgpu::SkSLToSPIRV(sharedContext->caps()->shaderCaps(),
747                             vsSkSL,
748                             SkSL::ProgramKind::kGraphiteVertex,
749                             settings,
750                             &vsSPIRV,
751                             &vsInterface,
752                             errorHandler)) {
753         return nullptr;
754     }
755 
756     vsModule = createVulkanShaderModule(sharedContext, vsSPIRV, VK_SHADER_STAGE_VERTEX_BIT);
757     if (!vsModule) {
758         // Clean up the other shader module before returning.
759         destroy_shader_modules(sharedContext, VK_NULL_HANDLE, fsModule);
760         return nullptr;
761     }
762 
763     VkPipelineVertexInputStateCreateInfo vertexInputInfo;
764     skia_private::STArray<2, VkVertexInputBindingDescription, true> bindingDescs;
765     skia_private::STArray<16, VkVertexInputAttributeDescription> attributeDescs;
766     setup_vertex_input_state(step->vertexAttributes(),
767                              step->instanceAttributes(),
768                              &vertexInputInfo,
769                              &bindingDescs,
770                              &attributeDescs);
771 
772     VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo;
773     setup_input_assembly_state(step->primitiveType(), &inputAssemblyInfo);
774 
775     VkPipelineDepthStencilStateCreateInfo depthStencilInfo;
776     setup_depth_stencil_state(step->depthStencilSettings(), &depthStencilInfo);
777 
778     VkPipelineViewportStateCreateInfo viewportInfo;
779     setup_viewport_scissor_state(&viewportInfo);
780 
781     VkPipelineMultisampleStateCreateInfo multisampleInfo;
782     setup_multisample_state(renderPassDesc.fColorAttachment.fTextureInfo.numSamples(),
783                             &multisampleInfo);
784 
785     // We will only have one color blend attachment per pipeline.
786     VkPipelineColorBlendAttachmentState attachmentStates[1];
787     VkPipelineColorBlendStateCreateInfo colorBlendInfo;
788     setup_color_blend_state(shaderInfo->blendInfo(), &colorBlendInfo, attachmentStates);
789 
790     VkPipelineRasterizationStateCreateInfo rasterInfo;
791     // TODO: Check for wire frame mode once that is an available context option within graphite.
792     setup_raster_state(/*isWireframe=*/false, &rasterInfo);
793 
794     VkPipelineShaderStageCreateInfo pipelineShaderStages[2];
795     setup_shader_stage_info(VK_SHADER_STAGE_VERTEX_BIT,
796                             vsModule,
797                             &pipelineShaderStages[0]);
798     if (hasFragmentSkSL) {
799         setup_shader_stage_info(VK_SHADER_STAGE_FRAGMENT_BIT,
800                                 fsModule,
801                                 &pipelineShaderStages[1]);
802     }
803 
804     // TODO: Query RenderPassDesc for input attachment information. For now, we only use one for
805     // loading MSAA from resolve so we can simply pass in 0 when not doing that.
806     VkPipelineLayout pipelineLayout =
807             setup_pipeline_layout(sharedContext,
808                                   VulkanResourceProvider::kIntrinsicConstantSize,
809                                   VulkanResourceProvider::kIntrinsicConstantStageFlags,
810                                   !step->uniforms().empty(),
811                                   shaderInfo->hasPaintUniforms(),
812                                   shaderInfo->hasGradientBuffer(),
813                                   shaderInfo->numFragmentTexturesAndSamplers(),
814                                   /*numInputAttachments=*/0,
815                                   SkSpan<sk_sp<VulkanSampler>>(immutableSamplers));
816     if (pipelineLayout == VK_NULL_HANDLE) {
817         destroy_shader_modules(sharedContext, vsModule, fsModule);
818         return nullptr;
819     }
820 
821     VkDynamicState dynamicStates[3];
822     VkPipelineDynamicStateCreateInfo dynamicInfo;
823     setup_dynamic_state(&dynamicInfo, dynamicStates);
824 
825     bool loadMsaaFromResolve = renderPassDesc.fColorResolveAttachment.fTextureInfo.isValid() &&
826                                renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad;
827 
828     sk_sp<VulkanRenderPass> compatibleRenderPass =
829             rsrcProvider->findOrCreateRenderPass(renderPassDesc, /*compatibleOnly=*/true);
830 
831     VkGraphicsPipelineCreateInfo pipelineCreateInfo;
832     memset(&pipelineCreateInfo, 0, sizeof(VkGraphicsPipelineCreateInfo));
833     pipelineCreateInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
834     pipelineCreateInfo.pNext = nullptr;
835     pipelineCreateInfo.flags = 0;
836     pipelineCreateInfo.stageCount = hasFragmentSkSL ? 2 : 1;
837     pipelineCreateInfo.pStages = &pipelineShaderStages[0];
838     pipelineCreateInfo.pVertexInputState = &vertexInputInfo;
839     pipelineCreateInfo.pInputAssemblyState = &inputAssemblyInfo;
840     pipelineCreateInfo.pTessellationState = nullptr;
841     pipelineCreateInfo.pViewportState = &viewportInfo;
842     pipelineCreateInfo.pRasterizationState = &rasterInfo;
843     pipelineCreateInfo.pMultisampleState = &multisampleInfo;
844     pipelineCreateInfo.pDepthStencilState = &depthStencilInfo;
845     pipelineCreateInfo.pColorBlendState = &colorBlendInfo;
846     pipelineCreateInfo.pDynamicState = &dynamicInfo;
847     pipelineCreateInfo.layout = pipelineLayout;
848     pipelineCreateInfo.renderPass = compatibleRenderPass->renderPass();
849     pipelineCreateInfo.subpass = loadMsaaFromResolve ? 1 : 0;
850     pipelineCreateInfo.basePipelineHandle = VK_NULL_HANDLE;
851     pipelineCreateInfo.basePipelineIndex = -1;
852 
853     VkPipeline vkPipeline;
854     VkResult result;
855     {
856         TRACE_EVENT0_ALWAYS("skia.shaders", "VkCreateGraphicsPipeline");
857         VULKAN_CALL_RESULT(sharedContext,
858                            result,
859                            CreateGraphicsPipelines(sharedContext->device(),
860                                                    rsrcProvider->pipelineCache(),
861                                                    /*createInfoCount=*/1,
862                                                    &pipelineCreateInfo,
863                                                    /*pAllocator=*/nullptr,
864                                                    &vkPipeline));
865     }
866     if (result != VK_SUCCESS) {
867         SkDebugf("Failed to create pipeline. Error: %d\n", result);
868         return nullptr;
869     }
870 
871     // After creating the pipeline object, we can clean up the VkShaderModule(s).
872     destroy_shader_modules(sharedContext, vsModule, fsModule);
873 
874     PipelineInfo pipelineInfo{ *shaderInfo, pipelineCreationFlags,
875                                pipelineKey.hash(), compilationID };
876 #if defined(GPU_TEST_UTILS)
877     pipelineInfo.fNativeVertexShader   = "SPIR-V disassembly not available";
878     pipelineInfo.fNativeFragmentShader = "SPIR-V disassembly not available";
879 #endif
880 
881     return sk_sp<VulkanGraphicsPipeline>(
882             new VulkanGraphicsPipeline(sharedContext,
883                                        pipelineInfo,
884                                        pipelineLayout,
885                                        vkPipeline,
886                                        /*ownsPipelineLayout=*/true,
887                                        std::move(immutableSamplers)));
888 }
889 
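// Compiles the internal MSAA-load vertex and fragment shaders to SPIR-V, creates their shader
// modules and stage infos, and builds the pipeline layout (one input attachment plus a push
// constant range) that MakeLoadMSAAPipeline uses.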
890 bool VulkanGraphicsPipeline::InitializeMSAALoadPipelineStructs(
891         const VulkanSharedContext* sharedContext,
892         VkShaderModule* outVertexShaderModule,
893         VkShaderModule* outFragShaderModule,
894         VkPipelineShaderStageCreateInfo* outShaderStageInfo,
895         VkPipelineLayout* outPipelineLayout) {
896     SkSL::ProgramSettings settings;
897     settings.fForceNoRTFlip = true;
898     std::string vsSPIRV, fsSPIRV;
899     ShaderErrorHandler* errorHandler = sharedContext->caps()->shaderErrorHandler();
900 
901     std::string vertShaderText;
902     vertShaderText.append(
903             "layout(vulkan,  push_constant) uniform vertexUniformBuffer {"
904             "half4 uPosXform;"
905             "};"
906 
907             "// MSAA Load Program VS\n"
908             "void main() {"
909             "float2 position = float2(sk_VertexID >> 1, sk_VertexID & 1);"
910             "sk_Position.xy = position * uPosXform.xy + uPosXform.zw;"
911             "sk_Position.zw = half2(0, 1);"
912             "}");
913 
914     std::string fragShaderText;
915     fragShaderText.append(
916             "layout(vulkan, input_attachment_index=0, set=" +
917             std::to_string(VulkanGraphicsPipeline::kInputAttachmentDescSetIndex) +
918             ", binding=0) subpassInput uInput;"
919 
920             "// MSAA Load Program FS\n"
921             "void main() {"
922             "sk_FragColor = subpassLoad(uInput);"
923             "}");
924 
925     SkSL::Program::Interface vsInterface, fsInterface;
926     if (!skgpu::SkSLToSPIRV(sharedContext->caps()->shaderCaps(),
927                             vertShaderText,
928                             SkSL::ProgramKind::kGraphiteVertex,
929                             settings,
930                             &vsSPIRV,
931                             &vsInterface,
932                             errorHandler)) {
933         return false;
934     }
935     if (!skgpu::SkSLToSPIRV(sharedContext->caps()->shaderCaps(),
936                             fragShaderText,
937                             SkSL::ProgramKind::kGraphiteFragment,
938                             settings,
939                             &fsSPIRV,
940                             &fsInterface,
941                             errorHandler)) {
942         return false;
943     }
944     *outFragShaderModule =
945             createVulkanShaderModule(sharedContext, fsSPIRV, VK_SHADER_STAGE_FRAGMENT_BIT);
946     if (*outFragShaderModule == VK_NULL_HANDLE) {
947         return false;
948     }
949 
950     *outVertexShaderModule =
951             createVulkanShaderModule(sharedContext, vsSPIRV, VK_SHADER_STAGE_VERTEX_BIT);
952     if (*outVertexShaderModule == VK_NULL_HANDLE) {
953         destroy_shader_modules(sharedContext, VK_NULL_HANDLE, *outFragShaderModule);
954         return false;
955     }
956 
957     setup_shader_stage_info(VK_SHADER_STAGE_VERTEX_BIT,
958                             *outVertexShaderModule,
959                             &outShaderStageInfo[0]);
960 
961     setup_shader_stage_info(VK_SHADER_STAGE_FRAGMENT_BIT,
962                             *outFragShaderModule,
963                             &outShaderStageInfo[1]);
964 
965     // The load msaa pipeline takes no step or paint uniforms and no instance attributes. It only
966     // references one input attachment texture (which does not require a sampler) and one vertex
967     // attribute (NDC position)
968     skia_private::TArray<DescriptorData> inputAttachmentDescriptors(1);
969     inputAttachmentDescriptors.push_back(VulkanGraphicsPipeline::kInputAttachmentDescriptor);
970     // TODO: Do we need to consider the potential usage of immutable YCbCr samplers here?
971     *outPipelineLayout = setup_pipeline_layout(sharedContext,
972                                                /*pushConstantSize=*/32,
973                                                (VkShaderStageFlagBits)VK_SHADER_STAGE_VERTEX_BIT,
974                                                /*hasStepUniforms=*/false,
975                                                /*hasPaintUniforms=*/false,
976                                                /*hasGradientBuffer=*/false,
977                                                /*numTextureSamplers=*/0,
978                                                /*numInputAttachments=*/1,
979                                                /*immutableSamplers=*/{});
980 
981     if (*outPipelineLayout == VK_NULL_HANDLE) {
982         destroy_shader_modules(sharedContext, *outVertexShaderModule, *outFragShaderModule);
983         return false;
984     }
985     return true;
986 }
987 
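// Creates the internal pipeline that loads MSAA contents from the resolve attachment (read as an
// input attachment), reusing the shader stages and pipeline layout produced by
// InitializeMSAALoadPipelineStructs. The resulting pipeline does not own its layout.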
988 sk_sp<VulkanGraphicsPipeline> VulkanGraphicsPipeline::MakeLoadMSAAPipeline(
989         const VulkanSharedContext* sharedContext,
990         VkShaderModule vsModule,
991         VkShaderModule fsModule,
992         VkPipelineShaderStageCreateInfo* pipelineShaderStages,
993         VkPipelineLayout pipelineLayout,
994         sk_sp<VulkanRenderPass> compatibleRenderPass,
995         VkPipelineCache pipelineCache,
996         const TextureInfo& dstColorAttachmentTexInfo) {
997 
998     int numSamples = dstColorAttachmentTexInfo.numSamples();
999 
1000     // Create vertex attribute list
1001     SkSpan<const Attribute> loadMSAAVertexAttribs = {};
1002 
1003     VkPipelineVertexInputStateCreateInfo vertexInputInfo;
1004     skia_private::STArray<2, VkVertexInputBindingDescription, true> bindingDescs;
1005     skia_private::STArray<16, VkVertexInputAttributeDescription> attributeDescs;
1006     setup_vertex_input_state(loadMSAAVertexAttribs,
1007                              /*instanceAttrs=*/{}, // Load msaa pipeline takes no instance attribs
1008                              &vertexInputInfo,
1009                              &bindingDescs,
1010                              &attributeDescs);
1011 
1012     VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo;
1013     setup_input_assembly_state(PrimitiveType::kTriangleStrip, &inputAssemblyInfo);
1014 
1015     VkPipelineDepthStencilStateCreateInfo depthStencilInfo;
1016     setup_depth_stencil_state(/*stencilSettings=*/{}, &depthStencilInfo);
1017 
1018     VkPipelineViewportStateCreateInfo viewportInfo;
1019     setup_viewport_scissor_state(&viewportInfo);
1020 
1021     VkPipelineMultisampleStateCreateInfo multisampleInfo;
1022     setup_multisample_state(numSamples, &multisampleInfo);
1023 
1024     // We will only have one color blend attachment per pipeline.
1025     VkPipelineColorBlendAttachmentState attachmentStates[1];
1026     VkPipelineColorBlendStateCreateInfo colorBlendInfo;
1027     setup_color_blend_state({}, &colorBlendInfo, attachmentStates);
1028 
1029     VkPipelineRasterizationStateCreateInfo rasterInfo;
1030     // TODO: Check for wire frame mode once that is an available context option within graphite.
1031     setup_raster_state(/*isWireframe=*/false, &rasterInfo);
1032 
1033     VkDynamicState dynamicStates[3];
1034     VkPipelineDynamicStateCreateInfo dynamicInfo;
1035     setup_dynamic_state(&dynamicInfo, dynamicStates);
1036 
1037     VkGraphicsPipelineCreateInfo pipelineCreateInfo;
1038     memset(&pipelineCreateInfo, 0, sizeof(VkGraphicsPipelineCreateInfo));
1039     pipelineCreateInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
1040     pipelineCreateInfo.pNext = nullptr;
1041     pipelineCreateInfo.flags = 0;
1042     pipelineCreateInfo.stageCount = 2;
1043     pipelineCreateInfo.pStages = pipelineShaderStages;
1044     pipelineCreateInfo.pVertexInputState = &vertexInputInfo;
1045     pipelineCreateInfo.pInputAssemblyState = &inputAssemblyInfo;
1046     pipelineCreateInfo.pTessellationState = nullptr;
1047     pipelineCreateInfo.pViewportState = &viewportInfo;
1048     pipelineCreateInfo.pRasterizationState = &rasterInfo;
1049     pipelineCreateInfo.pMultisampleState = &multisampleInfo;
1050     pipelineCreateInfo.pDepthStencilState = &depthStencilInfo;
1051     pipelineCreateInfo.pColorBlendState = &colorBlendInfo;
1052     pipelineCreateInfo.pDynamicState = &dynamicInfo;
1053     pipelineCreateInfo.layout = pipelineLayout;
1054     pipelineCreateInfo.renderPass = compatibleRenderPass->renderPass();
1055 
1056     VkPipeline vkPipeline;
1057     VkResult result;
1058     {
1059         TRACE_EVENT0_ALWAYS("skia.shaders", "CreateGraphicsPipeline");
1060         SkASSERT(pipelineCache != VK_NULL_HANDLE);
1061         VULKAN_CALL_RESULT(sharedContext,
1062                            result,
1063                            CreateGraphicsPipelines(sharedContext->device(),
1064                                                    pipelineCache,
1065                                                    /*createInfoCount=*/1,
1066                                                    &pipelineCreateInfo,
1067                                                    /*pAllocator=*/nullptr,
1068                                                    &vkPipeline));
1069     }
1070     if (result != VK_SUCCESS) {
1071         SkDebugf("Failed to create pipeline. Error: %d\n", result);
1072         return nullptr;
1073     }
1074 
1075     // This is an internal shader, so don't bother filling in the shader code metadata
1076     PipelineInfo pipelineInfo{};
1077     return sk_sp<VulkanGraphicsPipeline>(
1078             new VulkanGraphicsPipeline(sharedContext,
1079                                        pipelineInfo,
1080                                        pipelineLayout,
1081                                        vkPipeline,
1082                                        /*ownsPipelineLayout=*/false,
1083                                        /*immutableSamplers=*/{}));
1084 }
1085 
1086 VulkanGraphicsPipeline::VulkanGraphicsPipeline(
1087         const VulkanSharedContext* sharedContext,
1088         const PipelineInfo& pipelineInfo,
1089         VkPipelineLayout pipelineLayout,
1090         VkPipeline pipeline,
1091         bool ownsPipelineLayout,
1092         skia_private::TArray<sk_sp<VulkanSampler>> immutableSamplers)
1093     : GraphicsPipeline(sharedContext, pipelineInfo)
1094     , fPipelineLayout(pipelineLayout)
1095     , fPipeline(pipeline)
1096     , fOwnsPipelineLayout(ownsPipelineLayout)
1097     , fImmutableSamplers(std::move(immutableSamplers)) {}
1098 
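// Destroys the VkPipeline and, when owned by this pipeline, the VkPipelineLayout.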
1099 void VulkanGraphicsPipeline::freeGpuData() {
1100     auto sharedCtxt = static_cast<const VulkanSharedContext*>(this->sharedContext());
1101     if (fPipeline != VK_NULL_HANDLE) {
1102         VULKAN_CALL(sharedCtxt->interface(),
1103             DestroyPipeline(sharedCtxt->device(), fPipeline, nullptr));
1104     }
1105     if (fOwnsPipelineLayout && fPipelineLayout != VK_NULL_HANDLE) {
1106         VULKAN_CALL(sharedCtxt->interface(),
1107                     DestroyPipelineLayout(sharedCtxt->device(), fPipelineLayout, nullptr));
1108     }
1109 }
1110 
1111 } // namespace skgpu::graphite
1112