/*
 * Copyright 2023 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/vk/VulkanGraphicsPipeline.h"

#include "include/gpu/ShaderErrorHandler.h"
#include "include/gpu/graphite/TextureInfo.h"
#include "src/core/SkSLTypeShared.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/SkSLToBackend.h"
#include "src/gpu/graphite/Attribute.h"
#include "src/gpu/graphite/ContextUtils.h"
#include "src/gpu/graphite/GraphicsPipelineDesc.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/RenderPassDesc.h"
#include "src/gpu/graphite/RendererProvider.h"
#include "src/gpu/graphite/ResourceTypes.h"
#include "src/gpu/graphite/RuntimeEffectDictionary.h"
#include "src/gpu/graphite/ShaderInfo.h"
#include "src/gpu/graphite/vk/VulkanCaps.h"
#include "src/gpu/graphite/vk/VulkanGraphicsPipeline.h"
#include "src/gpu/graphite/vk/VulkanRenderPass.h"
#include "src/gpu/graphite/vk/VulkanResourceProvider.h"
#include "src/gpu/graphite/vk/VulkanSharedContext.h"
#include "src/gpu/vk/VulkanUtilsPriv.h"
#include "src/sksl/SkSLProgramKind.h"
#include "src/sksl/SkSLProgramSettings.h"
#include "src/sksl/ir/SkSLProgram.h"

namespace skgpu::graphite {

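// Map a graphite VertexAttribType to the VkFormat used to describe that vertex attribute to
// Vulkan.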
static inline VkFormat attrib_type_to_vkformat(VertexAttribType type) {
    switch (type) {
        case VertexAttribType::kFloat:
            return VK_FORMAT_R32_SFLOAT;
        case VertexAttribType::kFloat2:
            return VK_FORMAT_R32G32_SFLOAT;
        case VertexAttribType::kFloat3:
            return VK_FORMAT_R32G32B32_SFLOAT;
        case VertexAttribType::kFloat4:
            return VK_FORMAT_R32G32B32A32_SFLOAT;
        case VertexAttribType::kHalf:
            return VK_FORMAT_R16_SFLOAT;
        case VertexAttribType::kHalf2:
            return VK_FORMAT_R16G16_SFLOAT;
        case VertexAttribType::kHalf4:
            return VK_FORMAT_R16G16B16A16_SFLOAT;
        case VertexAttribType::kInt2:
            return VK_FORMAT_R32G32_SINT;
        case VertexAttribType::kInt3:
            return VK_FORMAT_R32G32B32_SINT;
        case VertexAttribType::kInt4:
            return VK_FORMAT_R32G32B32A32_SINT;
        case VertexAttribType::kUInt2:
            return VK_FORMAT_R32G32_UINT;
        case VertexAttribType::kByte:
            return VK_FORMAT_R8_SINT;
        case VertexAttribType::kByte2:
            return VK_FORMAT_R8G8_SINT;
        case VertexAttribType::kByte4:
            return VK_FORMAT_R8G8B8A8_SINT;
        case VertexAttribType::kUByte:
            return VK_FORMAT_R8_UINT;
        case VertexAttribType::kUByte2:
            return VK_FORMAT_R8G8_UINT;
        case VertexAttribType::kUByte4:
            return VK_FORMAT_R8G8B8A8_UINT;
        case VertexAttribType::kUByte_norm:
            return VK_FORMAT_R8_UNORM;
        case VertexAttribType::kUByte4_norm:
            return VK_FORMAT_R8G8B8A8_UNORM;
        case VertexAttribType::kShort2:
            return VK_FORMAT_R16G16_SINT;
        case VertexAttribType::kShort4:
            return VK_FORMAT_R16G16B16A16_SINT;
        case VertexAttribType::kUShort2:
            return VK_FORMAT_R16G16_UINT;
        case VertexAttribType::kUShort2_norm:
            return VK_FORMAT_R16G16_UNORM;
        case VertexAttribType::kInt:
            return VK_FORMAT_R32_SINT;
        case VertexAttribType::kUInt:
            return VK_FORMAT_R32_UINT;
        case VertexAttribType::kUShort_norm:
            return VK_FORMAT_R16_UNORM;
        case VertexAttribType::kUShort4_norm:
            return VK_FORMAT_R16G16B16A16_UNORM;
    }
    SK_ABORT("Unknown vertex attrib type");
}

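// Translate a RenderStep's vertex and instance attributes into Vulkan binding and attribute
// descriptions, and fill out the vertex input state create info.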
static void setup_vertex_input_state(
        const SkSpan<const Attribute>& vertexAttrs,
        const SkSpan<const Attribute>& instanceAttrs,
        VkPipelineVertexInputStateCreateInfo* vertexInputInfo,
        skia_private::STArray<2, VkVertexInputBindingDescription, true>* bindingDescs,
        skia_private::STArray<16, VkVertexInputAttributeDescription>* attributeDescs) {
    // Setup attribute & binding descriptions
    int attribIndex = 0;
    size_t vertexAttributeOffset = 0;
    for (auto attrib : vertexAttrs) {
        VkVertexInputAttributeDescription vkAttrib;
        vkAttrib.location = attribIndex++;
        vkAttrib.binding = VulkanGraphicsPipeline::kVertexBufferIndex;
        vkAttrib.format = attrib_type_to_vkformat(attrib.cpuType());
        vkAttrib.offset = vertexAttributeOffset;
        vertexAttributeOffset += attrib.sizeAlign4();
        attributeDescs->push_back(vkAttrib);
    }

    size_t instanceAttributeOffset = 0;
    for (auto attrib : instanceAttrs) {
        VkVertexInputAttributeDescription vkAttrib;
        vkAttrib.location = attribIndex++;
        vkAttrib.binding = VulkanGraphicsPipeline::kInstanceBufferIndex;
        vkAttrib.format = attrib_type_to_vkformat(attrib.cpuType());
        vkAttrib.offset = instanceAttributeOffset;
        instanceAttributeOffset += attrib.sizeAlign4();
        attributeDescs->push_back(vkAttrib);
    }

    if (bindingDescs && !vertexAttrs.empty()) {
        bindingDescs->push_back() = {
                VulkanGraphicsPipeline::kVertexBufferIndex,
                (uint32_t) vertexAttributeOffset,
                VK_VERTEX_INPUT_RATE_VERTEX
        };
    }
    if (bindingDescs && !instanceAttrs.empty()) {
        bindingDescs->push_back() = {
                VulkanGraphicsPipeline::kInstanceBufferIndex,
                (uint32_t) instanceAttributeOffset,
                VK_VERTEX_INPUT_RATE_INSTANCE
        };
    }

    memset(vertexInputInfo, 0, sizeof(VkPipelineVertexInputStateCreateInfo));
    vertexInputInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
    vertexInputInfo->pNext = nullptr;
    vertexInputInfo->flags = 0;
    vertexInputInfo->vertexBindingDescriptionCount = bindingDescs ? bindingDescs->size() : 0;
    vertexInputInfo->pVertexBindingDescriptions =
            bindingDescs && !bindingDescs->empty() ? bindingDescs->begin() : VK_NULL_HANDLE;
    vertexInputInfo->vertexAttributeDescriptionCount = attributeDescs ? attributeDescs->size() : 0;
    vertexInputInfo->pVertexAttributeDescriptions =
            attributeDescs && !attributeDescs->empty() ? attributeDescs->begin() : VK_NULL_HANDLE;
}

static VkPrimitiveTopology primitive_type_to_vk_topology(PrimitiveType primitiveType) {
    switch (primitiveType) {
        case PrimitiveType::kTriangles:
            return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
        case PrimitiveType::kTriangleStrip:
            return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
        case PrimitiveType::kPoints:
            return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
    }
    SkUNREACHABLE;
}

static void setup_input_assembly_state(PrimitiveType primitiveType,
                                       VkPipelineInputAssemblyStateCreateInfo* inputAssemblyInfo) {
    memset(inputAssemblyInfo, 0, sizeof(VkPipelineInputAssemblyStateCreateInfo));
    inputAssemblyInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
    inputAssemblyInfo->pNext = nullptr;
    inputAssemblyInfo->flags = 0;
    inputAssemblyInfo->primitiveRestartEnable = false;
    inputAssemblyInfo->topology = primitive_type_to_vk_topology(primitiveType);
}

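// The tables below map graphite's StencilOp and CompareOp enums to their Vulkan equivalents;
// the static_asserts tie the table order to the enum values.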
static VkStencilOp stencil_op_to_vk_stencil_op(StencilOp op) {
    static const VkStencilOp gTable[] = {
        VK_STENCIL_OP_KEEP,                 // kKeep
        VK_STENCIL_OP_ZERO,                 // kZero
        VK_STENCIL_OP_REPLACE,              // kReplace
        VK_STENCIL_OP_INVERT,               // kInvert
        VK_STENCIL_OP_INCREMENT_AND_WRAP,   // kIncWrap
        VK_STENCIL_OP_DECREMENT_AND_WRAP,   // kDecWrap
        VK_STENCIL_OP_INCREMENT_AND_CLAMP,  // kIncClamp
        VK_STENCIL_OP_DECREMENT_AND_CLAMP,  // kDecClamp
    };
    static_assert(std::size(gTable) == kStencilOpCount);
    static_assert(0 == (int)StencilOp::kKeep);
    static_assert(1 == (int)StencilOp::kZero);
    static_assert(2 == (int)StencilOp::kReplace);
    static_assert(3 == (int)StencilOp::kInvert);
    static_assert(4 == (int)StencilOp::kIncWrap);
    static_assert(5 == (int)StencilOp::kDecWrap);
    static_assert(6 == (int)StencilOp::kIncClamp);
    static_assert(7 == (int)StencilOp::kDecClamp);
    SkASSERT(op < (StencilOp)kStencilOpCount);
    return gTable[(int)op];
}

static VkCompareOp compare_op_to_vk_compare_op(CompareOp op) {
    static const VkCompareOp gTable[] = {
        VK_COMPARE_OP_ALWAYS,              // kAlways
        VK_COMPARE_OP_NEVER,               // kNever
        VK_COMPARE_OP_GREATER,             // kGreater
        VK_COMPARE_OP_GREATER_OR_EQUAL,    // kGEqual
        VK_COMPARE_OP_LESS,                // kLess
        VK_COMPARE_OP_LESS_OR_EQUAL,       // kLEqual
        VK_COMPARE_OP_EQUAL,               // kEqual
        VK_COMPARE_OP_NOT_EQUAL,           // kNotEqual
    };
    static_assert(std::size(gTable) == kCompareOpCount);
    static_assert(0 == (int)CompareOp::kAlways);
    static_assert(1 == (int)CompareOp::kNever);
    static_assert(2 == (int)CompareOp::kGreater);
    static_assert(3 == (int)CompareOp::kGEqual);
    static_assert(4 == (int)CompareOp::kLess);
    static_assert(5 == (int)CompareOp::kLEqual);
    static_assert(6 == (int)CompareOp::kEqual);
    static_assert(7 == (int)CompareOp::kNotEqual);
    SkASSERT(op < (CompareOp)kCompareOpCount);

    return gTable[(int)op];
}

static void setup_stencil_op_state(VkStencilOpState* opState,
                                   const DepthStencilSettings::Face& face,
                                   uint32_t referenceValue) {
    opState->failOp = stencil_op_to_vk_stencil_op(face.fStencilFailOp);
    opState->passOp = stencil_op_to_vk_stencil_op(face.fDepthStencilPassOp);
    opState->depthFailOp = stencil_op_to_vk_stencil_op(face.fDepthFailOp);
    opState->compareOp = compare_op_to_vk_compare_op(face.fCompareOp);
    opState->compareMask = face.fReadMask; // TODO - check this.
    opState->writeMask = face.fWriteMask;
    opState->reference = referenceValue;
}

static void setup_depth_stencil_state(const DepthStencilSettings& stencilSettings,
                                      VkPipelineDepthStencilStateCreateInfo* stencilInfo) {
    SkASSERT(stencilSettings.fDepthTestEnabled ||
             stencilSettings.fDepthCompareOp == CompareOp::kAlways);

    memset(stencilInfo, 0, sizeof(VkPipelineDepthStencilStateCreateInfo));
    stencilInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
    stencilInfo->pNext = nullptr;
    stencilInfo->flags = 0;
    stencilInfo->depthTestEnable = stencilSettings.fDepthTestEnabled;
    stencilInfo->depthWriteEnable = stencilSettings.fDepthWriteEnabled;
    stencilInfo->depthCompareOp = compare_op_to_vk_compare_op(stencilSettings.fDepthCompareOp);
    stencilInfo->depthBoundsTestEnable = VK_FALSE; // Default value TODO - Confirm
    stencilInfo->stencilTestEnable = stencilSettings.fStencilTestEnabled;
    if (stencilSettings.fStencilTestEnabled) {
        setup_stencil_op_state(&stencilInfo->front,
                               stencilSettings.fFrontStencil,
                               stencilSettings.fStencilReferenceValue);
        setup_stencil_op_state(&stencilInfo->back,
                               stencilSettings.fBackStencil,
                               stencilSettings.fStencilReferenceValue);
    }
    stencilInfo->minDepthBounds = 0.0f;
    stencilInfo->maxDepthBounds = 1.0f;
}

static void setup_viewport_scissor_state(VkPipelineViewportStateCreateInfo* viewportInfo) {
    memset(viewportInfo, 0, sizeof(VkPipelineViewportStateCreateInfo));
    viewportInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
    viewportInfo->pNext = nullptr;
    viewportInfo->flags = 0;

    viewportInfo->viewportCount = 1;
    viewportInfo->pViewports = nullptr; // This is set dynamically with a draw pass command

    viewportInfo->scissorCount = 1;
    viewportInfo->pScissors = nullptr; // This is set dynamically with a draw pass command

    SkASSERT(viewportInfo->viewportCount == viewportInfo->scissorCount);
}

static void setup_multisample_state(int numSamples,
                                    VkPipelineMultisampleStateCreateInfo* multisampleInfo) {
    memset(multisampleInfo, 0, sizeof(VkPipelineMultisampleStateCreateInfo));
    multisampleInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
    multisampleInfo->pNext = nullptr;
    multisampleInfo->flags = 0;
    SkAssertResult(skgpu::SampleCountToVkSampleCount(numSamples,
                                                     &multisampleInfo->rasterizationSamples));
    multisampleInfo->sampleShadingEnable = VK_FALSE;
    multisampleInfo->minSampleShading = 0.0f;
    multisampleInfo->pSampleMask = nullptr;
    multisampleInfo->alphaToCoverageEnable = VK_FALSE;
    multisampleInfo->alphaToOneEnable = VK_FALSE;
}

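// Convert graphite blend coefficients and equations to their Vulkan counterparts. The advanced
// blend equations map to the ops provided by VK_EXT_blend_operation_advanced.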
static VkBlendFactor blend_coeff_to_vk_blend(skgpu::BlendCoeff coeff) {
    switch (coeff) {
        case skgpu::BlendCoeff::kZero:
            return VK_BLEND_FACTOR_ZERO;
        case skgpu::BlendCoeff::kOne:
            return VK_BLEND_FACTOR_ONE;
        case skgpu::BlendCoeff::kSC:
            return VK_BLEND_FACTOR_SRC_COLOR;
        case skgpu::BlendCoeff::kISC:
            return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
        case skgpu::BlendCoeff::kDC:
            return VK_BLEND_FACTOR_DST_COLOR;
        case skgpu::BlendCoeff::kIDC:
            return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
        case skgpu::BlendCoeff::kSA:
            return VK_BLEND_FACTOR_SRC_ALPHA;
        case skgpu::BlendCoeff::kISA:
            return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
        case skgpu::BlendCoeff::kDA:
            return VK_BLEND_FACTOR_DST_ALPHA;
        case skgpu::BlendCoeff::kIDA:
            return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
        case skgpu::BlendCoeff::kConstC:
            return VK_BLEND_FACTOR_CONSTANT_COLOR;
        case skgpu::BlendCoeff::kIConstC:
            return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
        case skgpu::BlendCoeff::kS2C:
            return VK_BLEND_FACTOR_SRC1_COLOR;
        case skgpu::BlendCoeff::kIS2C:
            return VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR;
        case skgpu::BlendCoeff::kS2A:
            return VK_BLEND_FACTOR_SRC1_ALPHA;
        case skgpu::BlendCoeff::kIS2A:
            return VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA;
        case skgpu::BlendCoeff::kIllegal:
            return VK_BLEND_FACTOR_ZERO;
    }
    SkUNREACHABLE;
}

static VkBlendOp blend_equation_to_vk_blend_op(skgpu::BlendEquation equation) {
    static const VkBlendOp gTable[] = {
        // Basic blend ops
        VK_BLEND_OP_ADD,
        VK_BLEND_OP_SUBTRACT,
        VK_BLEND_OP_REVERSE_SUBTRACT,

        // Advanced blend ops
        VK_BLEND_OP_SCREEN_EXT,
        VK_BLEND_OP_OVERLAY_EXT,
        VK_BLEND_OP_DARKEN_EXT,
        VK_BLEND_OP_LIGHTEN_EXT,
        VK_BLEND_OP_COLORDODGE_EXT,
        VK_BLEND_OP_COLORBURN_EXT,
        VK_BLEND_OP_HARDLIGHT_EXT,
        VK_BLEND_OP_SOFTLIGHT_EXT,
        VK_BLEND_OP_DIFFERENCE_EXT,
        VK_BLEND_OP_EXCLUSION_EXT,
        VK_BLEND_OP_MULTIPLY_EXT,
        VK_BLEND_OP_HSL_HUE_EXT,
        VK_BLEND_OP_HSL_SATURATION_EXT,
        VK_BLEND_OP_HSL_COLOR_EXT,
        VK_BLEND_OP_HSL_LUMINOSITY_EXT,

        // Illegal.
        VK_BLEND_OP_ADD,
    };
    static_assert(0 == (int)skgpu::BlendEquation::kAdd);
    static_assert(1 == (int)skgpu::BlendEquation::kSubtract);
    static_assert(2 == (int)skgpu::BlendEquation::kReverseSubtract);
    static_assert(3 == (int)skgpu::BlendEquation::kScreen);
    static_assert(4 == (int)skgpu::BlendEquation::kOverlay);
    static_assert(5 == (int)skgpu::BlendEquation::kDarken);
    static_assert(6 == (int)skgpu::BlendEquation::kLighten);
    static_assert(7 == (int)skgpu::BlendEquation::kColorDodge);
    static_assert(8 == (int)skgpu::BlendEquation::kColorBurn);
    static_assert(9 == (int)skgpu::BlendEquation::kHardLight);
    static_assert(10 == (int)skgpu::BlendEquation::kSoftLight);
    static_assert(11 == (int)skgpu::BlendEquation::kDifference);
    static_assert(12 == (int)skgpu::BlendEquation::kExclusion);
    static_assert(13 == (int)skgpu::BlendEquation::kMultiply);
    static_assert(14 == (int)skgpu::BlendEquation::kHSLHue);
    static_assert(15 == (int)skgpu::BlendEquation::kHSLSaturation);
    static_assert(16 == (int)skgpu::BlendEquation::kHSLColor);
    static_assert(17 == (int)skgpu::BlendEquation::kHSLLuminosity);
    static_assert(std::size(gTable) == skgpu::kBlendEquationCnt);

    SkASSERT((unsigned)equation < skgpu::kBlendEquationCnt);
    return gTable[(int)equation];
}

static void setup_color_blend_state(const skgpu::BlendInfo& blendInfo,
                                    VkPipelineColorBlendStateCreateInfo* colorBlendInfo,
                                    VkPipelineColorBlendAttachmentState* attachmentState) {
    skgpu::BlendEquation equation = blendInfo.fEquation;
    skgpu::BlendCoeff srcCoeff = blendInfo.fSrcBlend;
    skgpu::BlendCoeff dstCoeff = blendInfo.fDstBlend;
    bool blendOff = skgpu::BlendShouldDisable(equation, srcCoeff, dstCoeff);

    memset(attachmentState, 0, sizeof(VkPipelineColorBlendAttachmentState));
    attachmentState->blendEnable = !blendOff;
    if (!blendOff) {
        attachmentState->srcColorBlendFactor = blend_coeff_to_vk_blend(srcCoeff);
        attachmentState->dstColorBlendFactor = blend_coeff_to_vk_blend(dstCoeff);
        attachmentState->colorBlendOp = blend_equation_to_vk_blend_op(equation);
        attachmentState->srcAlphaBlendFactor = blend_coeff_to_vk_blend(srcCoeff);
        attachmentState->dstAlphaBlendFactor = blend_coeff_to_vk_blend(dstCoeff);
        attachmentState->alphaBlendOp = blend_equation_to_vk_blend_op(equation);
    }

    if (!blendInfo.fWritesColor) {
        attachmentState->colorWriteMask = 0;
    } else {
        attachmentState->colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
                                          VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
    }

    memset(colorBlendInfo, 0, sizeof(VkPipelineColorBlendStateCreateInfo));
    colorBlendInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
    colorBlendInfo->pNext = nullptr;
    colorBlendInfo->flags = 0;
    colorBlendInfo->logicOpEnable = VK_FALSE;
    colorBlendInfo->attachmentCount = 1;
    colorBlendInfo->pAttachments = attachmentState;
    // colorBlendInfo->blendConstants is set dynamically
}

static void setup_raster_state(bool isWireframe,
                               VkPipelineRasterizationStateCreateInfo* rasterInfo) {
    memset(rasterInfo, 0, sizeof(VkPipelineRasterizationStateCreateInfo));
    rasterInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
    rasterInfo->pNext = nullptr;
    rasterInfo->flags = 0;
    rasterInfo->depthClampEnable = VK_FALSE;
    rasterInfo->rasterizerDiscardEnable = VK_FALSE;
    rasterInfo->polygonMode = isWireframe ? VK_POLYGON_MODE_LINE : VK_POLYGON_MODE_FILL;
    rasterInfo->cullMode = VK_CULL_MODE_NONE;
    rasterInfo->frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
    rasterInfo->depthBiasEnable = VK_FALSE;
    rasterInfo->depthBiasConstantFactor = 0.0f;
    rasterInfo->depthBiasClamp = 0.0f;
    rasterInfo->depthBiasSlopeFactor = 0.0f;
    rasterInfo->lineWidth = 1.0f;
}

static void setup_shader_stage_info(VkShaderStageFlagBits stage,
                                    VkShaderModule shaderModule,
                                    VkPipelineShaderStageCreateInfo* shaderStageInfo) {
    memset(shaderStageInfo, 0, sizeof(VkPipelineShaderStageCreateInfo));
    shaderStageInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
    shaderStageInfo->pNext = nullptr;
    shaderStageInfo->flags = 0;
    shaderStageInfo->stage = stage;
    shaderStageInfo->module = shaderModule;
    shaderStageInfo->pName = "main";
    shaderStageInfo->pSpecializationInfo = nullptr;
}

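// Helpers for building the VkDescriptorSetLayouts that back a pipeline's VkPipelineLayout.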
static VkDescriptorSetLayout descriptor_data_to_layout(
        const VulkanSharedContext* sharedContext, const SkSpan<DescriptorData>& descriptorData) {
    // descriptorData can be empty to indicate that we should create a mock placeholder layout
    // with no descriptors.
    VkDescriptorSetLayout setLayout;
    DescriptorDataToVkDescSetLayout(sharedContext, descriptorData, &setLayout);
    if (setLayout == VK_NULL_HANDLE) {
        SKGPU_LOG_E("Failed to create descriptor set layout; pipeline creation will fail.\n");
        return VK_NULL_HANDLE;
    }
    return setLayout;
}

static void destroy_desc_set_layouts(const VulkanSharedContext* sharedContext,
                                     skia_private::TArray<VkDescriptorSetLayout>& setLayouts) {
    for (int i = 0; i < setLayouts.size(); i++) {
        if (setLayouts[i] != VK_NULL_HANDLE) {
            VULKAN_CALL(sharedContext->interface(),
                        DestroyDescriptorSetLayout(sharedContext->device(),
                                                   setLayouts[i],
                                                   nullptr));
        }
    }
}

static bool input_attachment_desc_set_layout(VkDescriptorSetLayout& outLayout,
                                             const VulkanSharedContext* sharedContext,
                                             bool mockOnly) {
    skia_private::STArray<1, DescriptorData> inputAttachmentDesc;

    if (!mockOnly) {
        inputAttachmentDesc.push_back(VulkanGraphicsPipeline::kInputAttachmentDescriptor);
    }

    // If mockOnly is true (meaning no input attachment descriptor is actually needed), then still
    // request a mock VkDescriptorSetLayout handle by passing in the unpopulated span.
    outLayout = descriptor_data_to_layout(sharedContext, {inputAttachmentDesc});
    return outLayout != VK_NULL_HANDLE;
}

static bool uniform_desc_set_layout(VkDescriptorSetLayout& outLayout,
                                    const VulkanSharedContext* sharedContext,
                                    bool hasStepUniforms,
                                    bool hasPaintUniforms,
                                    bool hasGradientBuffer) {
    // Define a container with size reserved for up to kNumUniformBuffers descriptors. Only add
    // DescriptorData for uniforms that actually are used and need to be included in the layout.
    skia_private::STArray<
            VulkanGraphicsPipeline::kNumUniformBuffers, DescriptorData> uniformDescriptors;

    DescriptorType uniformBufferType =
            sharedContext->caps()->storageBufferSupport() ? DescriptorType::kStorageBuffer
                                                          : DescriptorType::kUniformBuffer;
    if (hasStepUniforms) {
        uniformDescriptors.push_back({
                uniformBufferType, /*count=*/1,
                VulkanGraphicsPipeline::kRenderStepUniformBufferIndex,
                PipelineStageFlags::kVertexShader | PipelineStageFlags::kFragmentShader});
    }
    if (hasPaintUniforms) {
        uniformDescriptors.push_back({
                uniformBufferType, /*count=*/1,
                VulkanGraphicsPipeline::kPaintUniformBufferIndex,
                PipelineStageFlags::kFragmentShader});
    }
    if (hasGradientBuffer) {
        uniformDescriptors.push_back({
                DescriptorType::kStorageBuffer,
                /*count=*/1,
                VulkanGraphicsPipeline::kGradientBufferIndex,
                PipelineStageFlags::kFragmentShader});
    }

    // If no uniforms are used, still request a mock VkDescriptorSetLayout handle by passing in the
    // unpopulated span of uniformDescriptors to descriptor set layout creation.
    outLayout = descriptor_data_to_layout(sharedContext, {uniformDescriptors});
    return outLayout != VK_NULL_HANDLE;
}

static bool texture_sampler_desc_set_layout(VkDescriptorSetLayout& outLayout,
                                            const VulkanSharedContext* sharedContext,
                                            const int numTextureSamplers,
                                            SkSpan<sk_sp<VulkanSampler>> immutableSamplers) {
    SkASSERT(numTextureSamplers >= 0);
    // The immutable sampler span size must equal the total number of texture/samplers so that we
    // can use the index of a sampler as its binding index (or the span is empty, which lets us
    // skip some of this logic entirely).
    SkASSERT(immutableSamplers.empty() ||
             SkTo<int>(immutableSamplers.size()) == numTextureSamplers);

    skia_private::TArray<DescriptorData> textureSamplerDescs(numTextureSamplers);
    for (int i = 0; i < numTextureSamplers; i++) {
        Sampler* immutableSampler = nullptr;
        if (!immutableSamplers.empty() && immutableSamplers[i]) {
            immutableSampler = immutableSamplers[i].get();
        }
        textureSamplerDescs.push_back({DescriptorType::kCombinedTextureSampler,
                                       /*count=*/1,
                                       /*bindingIdx=*/i,
                                       PipelineStageFlags::kFragmentShader,
                                       immutableSampler});
    }

    // If no texture/samplers are used, still request a mock VkDescriptorSetLayout handle by
    // passing in the unpopulated span of textureSamplerDescs to descriptor set layout creation.
    outLayout = descriptor_data_to_layout(sharedContext, {textureSamplerDescs});
    return outLayout != VK_NULL_HANDLE;
}

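// Assemble the full VkPipelineLayout: one descriptor set layout per expected set index (real or
// mock) plus an optional push constant range.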
static VkPipelineLayout setup_pipeline_layout(const VulkanSharedContext* sharedContext,
                                              uint32_t pushConstantSize,
                                              VkShaderStageFlagBits pushConstantPipelineStageFlags,
                                              bool hasStepUniforms,
                                              bool hasPaintUniforms,
                                              bool hasGradientBuffer,
                                              int numTextureSamplers,
                                              bool loadMsaaFromResolve,
                                              SkSpan<sk_sp<VulkanSampler>> immutableSamplers) {
    // Create a container with the max anticipated number (kMaxNumDescSets) of
    // VkDescriptorSetLayout handles which will be used to create the pipeline layout.
    skia_private::STArray<
            VulkanGraphicsPipeline::kMaxNumDescSets, VkDescriptorSetLayout> setLayouts;
    setLayouts.push_back_n(VulkanGraphicsPipeline::kMaxNumDescSets, VkDescriptorSetLayout());

    // Populate the container with actual descriptor set layout handles. Each index should contain
    // either a valid/real or a mock/placeholder layout handle. Mock VkDescriptorSetLayouts do not
    // actually contain any descriptors, but are needed as placeholders to maintain expected
    // descriptor set binding indices. This is because VK_NULL_HANDLE is a valid
    // VkDescriptorSetLayout value iff the graphicsPipelineLibrary feature is enabled, which is not
    // the case for all targeted devices (see
    // VUID-VkPipelineLayoutCreateInfo-graphicsPipelineLibrary-06753). If any of the helpers
    // encounter an error (i.e., return false), return a null VkPipelineLayout.
    if (!input_attachment_desc_set_layout(
                setLayouts[VulkanGraphicsPipeline::kDstAsInputDescSetIndex],
                sharedContext,
                /*mockOnly=*/false) || // We always add an input attachment descriptor
        !uniform_desc_set_layout(
                setLayouts[VulkanGraphicsPipeline::kUniformBufferDescSetIndex],
                sharedContext,
                hasStepUniforms,
                hasPaintUniforms,
                hasGradientBuffer) ||
        !texture_sampler_desc_set_layout(
                setLayouts[VulkanGraphicsPipeline::kTextureBindDescSetIndex],
                sharedContext,
                numTextureSamplers,
                immutableSamplers) ||
        !input_attachment_desc_set_layout(
                setLayouts[VulkanGraphicsPipeline::kLoadMsaaFromResolveInputDescSetIndex],
                sharedContext,
                /*mockOnly=*/!loadMsaaFromResolve)) { // Actual descriptor needed iff loading MSAA
        destroy_desc_set_layouts(sharedContext, setLayouts);
        return VK_NULL_HANDLE;
    }

    // Generate a pipeline layout using the now-populated descriptor set layout array
    VkPushConstantRange pushConstantRange;
    if (pushConstantSize) {
        pushConstantRange.offset = 0;
        pushConstantRange.size = pushConstantSize;
        pushConstantRange.stageFlags = pushConstantPipelineStageFlags;
    }
    VkPipelineLayoutCreateInfo layoutCreateInfo;
    memset(&layoutCreateInfo, 0, sizeof(VkPipelineLayoutCreateInfo));
    layoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    layoutCreateInfo.pNext = nullptr;
    layoutCreateInfo.flags = 0;
    layoutCreateInfo.setLayoutCount = setLayouts.size();
    layoutCreateInfo.pSetLayouts = setLayouts.begin();
    layoutCreateInfo.pushConstantRangeCount = pushConstantSize ? 1 : 0;
    layoutCreateInfo.pPushConstantRanges = pushConstantSize ? &pushConstantRange : nullptr;

    VkResult result;
    VkPipelineLayout layout;
    VULKAN_CALL_RESULT(sharedContext,
                       result,
                       CreatePipelineLayout(sharedContext->device(),
                                            &layoutCreateInfo,
                                            /*const VkAllocationCallbacks*=*/nullptr,
                                            &layout));

    // DescriptorSetLayouts can be deleted after the pipeline layout is created.
    destroy_desc_set_layouts(sharedContext, setLayouts);

    return result == VK_SUCCESS ? layout : VK_NULL_HANDLE;
}

static void destroy_shader_modules(const VulkanSharedContext* sharedContext,
                                   VkShaderModule vsModule,
                                   VkShaderModule fsModule) {
    if (vsModule != VK_NULL_HANDLE) {
        VULKAN_CALL(sharedContext->interface(),
                    DestroyShaderModule(sharedContext->device(), vsModule, nullptr));
    }
    if (fsModule != VK_NULL_HANDLE) {
        VULKAN_CALL(sharedContext->interface(),
                    DestroyShaderModule(sharedContext->device(), fsModule, nullptr));
    }
}

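// Viewport, scissor, and blend constants are supplied at draw time, so they are declared as
// dynamic pipeline state here.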
static void setup_dynamic_state(VkPipelineDynamicStateCreateInfo* dynamicInfo,
                                VkDynamicState* dynamicStates) {
    memset(dynamicInfo, 0, sizeof(VkPipelineDynamicStateCreateInfo));
    dynamicInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
    dynamicInfo->pNext = VK_NULL_HANDLE;
    dynamicInfo->flags = 0;
    dynamicStates[0] = VK_DYNAMIC_STATE_VIEWPORT;
    dynamicStates[1] = VK_DYNAMIC_STATE_SCISSOR;
    dynamicStates[2] = VK_DYNAMIC_STATE_BLEND_CONSTANTS;
    dynamicInfo->dynamicStateCount = 3;
    dynamicInfo->pDynamicStates = dynamicStates;
}

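// Build a complete graphics pipeline for the given RenderStep/paint combination: gather the
// generated SkSL, compile it to SPIR-V shader modules, create the pipeline layout, and then
// create the VkPipeline against a compatible render pass.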
sk_sp<VulkanGraphicsPipeline> VulkanGraphicsPipeline::Make(
        VulkanResourceProvider* rsrcProvider,
        const RuntimeEffectDictionary* runtimeDict,
        const UniqueKey& pipelineKey,
        const GraphicsPipelineDesc& pipelineDesc,
        const RenderPassDesc& renderPassDesc,
        SkEnumBitMask<PipelineCreationFlags> pipelineCreationFlags,
        uint32_t compilationID) {
    SkASSERT(rsrcProvider);
    const VulkanSharedContext* sharedContext = rsrcProvider->vulkanSharedContext();

    SkSL::ProgramSettings settings;
    settings.fSharpenTextures = true;
    settings.fForceNoRTFlip = true;

    ShaderErrorHandler* errorHandler = sharedContext->caps()->shaderErrorHandler();

    const RenderStep* step = sharedContext->rendererProvider()->lookup(pipelineDesc.renderStepID());
    const bool useStorageBuffers = sharedContext->caps()->storageBufferSupport();

    if (step->vertexAttributes().size() + step->instanceAttributes().size() >
        sharedContext->vulkanCaps().maxVertexAttributes()) {
        SKGPU_LOG_W("Requested more than the supported number of vertex attributes");
        return nullptr;
    }

    skia_private::TArray<SamplerDesc> descContainer {};
    std::unique_ptr<ShaderInfo> shaderInfo =
            ShaderInfo::Make(sharedContext->caps(),
                             sharedContext->shaderCodeDictionary(),
                             runtimeDict,
                             step,
                             pipelineDesc.paintParamsID(),
                             useStorageBuffers,
                             renderPassDesc.fWriteSwizzle,
                             renderPassDesc.fDstReadStrategyIfRequired,
                             &descContainer);

    // Populate an array of sampler ptrs where a sampler's index within the array indicates its
    // binding index within the descriptor set. Initialize all values to nullptr, which represents
    // a "regular", dynamic sampler at that index.
    skia_private::TArray<sk_sp<VulkanSampler>> immutableSamplers;
    immutableSamplers.push_back_n(shaderInfo->numFragmentTexturesAndSamplers());
    SkASSERT(rsrcProvider);
    // This logic relies upon Vulkan using combined texture/sampler bindings, which is necessary
    // for ycbcr samplers per the Vulkan spec.
    SkASSERT(!sharedContext->caps()->resourceBindingRequirements().fSeparateTextureAndSamplerBinding
             && shaderInfo->numFragmentTexturesAndSamplers() == descContainer.size());
    for (int i = 0; i < descContainer.size(); i++) {
        // If a SamplerDesc is not equivalent to the default-initialized SamplerDesc, that
        // indicates the usage of an immutable sampler. That sampler desc should then be used to
        // obtain an actual immutable sampler from the resource provider and added at the proper
        // index within immutableSamplers for inclusion in the pipeline layout.
        if (descContainer.at(i) != SamplerDesc()) {
            sk_sp<Sampler> immutableSampler =
                    rsrcProvider->findOrCreateCompatibleSampler(descContainer.at(i));
            sk_sp<VulkanSampler> vulkanSampler =
                    sk_ref_sp<VulkanSampler>(static_cast<VulkanSampler*>(immutableSampler.get()));
            SkASSERT(vulkanSampler);
            immutableSamplers[i] = std::move(vulkanSampler);
        }
    }

    const std::string& fsSkSL = shaderInfo->fragmentSkSL();

    const bool hasFragmentSkSL = !fsSkSL.empty();
    std::string vsSPIRV, fsSPIRV;
    VkShaderModule fsModule = VK_NULL_HANDLE, vsModule = VK_NULL_HANDLE;
    SkSL::Program::Interface vsInterface, fsInterface;
    if (hasFragmentSkSL) {
        if (!skgpu::SkSLToSPIRV(sharedContext->caps()->shaderCaps(),
                                fsSkSL,
                                SkSL::ProgramKind::kGraphiteFragment,
                                settings,
                                &fsSPIRV,
                                &fsInterface,
                                errorHandler)) {
            return nullptr;
        }

        fsModule = createVulkanShaderModule(sharedContext, fsSPIRV, VK_SHADER_STAGE_FRAGMENT_BIT);
        if (!fsModule) {
            return nullptr;
        }
    }

    const std::string& vsSkSL = shaderInfo->vertexSkSL();
    if (!skgpu::SkSLToSPIRV(sharedContext->caps()->shaderCaps(),
                            vsSkSL,
                            SkSL::ProgramKind::kGraphiteVertex,
                            settings,
                            &vsSPIRV,
                            &vsInterface,
                            errorHandler)) {
        return nullptr;
    }

    vsModule = createVulkanShaderModule(sharedContext, vsSPIRV, VK_SHADER_STAGE_VERTEX_BIT);
    if (!vsModule) {
        // Clean up the other shader module before returning.
        destroy_shader_modules(sharedContext, VK_NULL_HANDLE, fsModule);
        return nullptr;
    }

    VkPipelineVertexInputStateCreateInfo vertexInputInfo;
    skia_private::STArray<2, VkVertexInputBindingDescription, true> bindingDescs;
    skia_private::STArray<16, VkVertexInputAttributeDescription> attributeDescs;
    setup_vertex_input_state(step->vertexAttributes(),
                             step->instanceAttributes(),
                             &vertexInputInfo,
                             &bindingDescs,
                             &attributeDescs);

    VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo;
    setup_input_assembly_state(step->primitiveType(), &inputAssemblyInfo);

    VkPipelineDepthStencilStateCreateInfo depthStencilInfo;
    setup_depth_stencil_state(step->depthStencilSettings(), &depthStencilInfo);

    VkPipelineViewportStateCreateInfo viewportInfo;
    setup_viewport_scissor_state(&viewportInfo);

    VkPipelineMultisampleStateCreateInfo multisampleInfo;
    setup_multisample_state(renderPassDesc.fColorAttachment.fTextureInfo.numSamples(),
                            &multisampleInfo);

    // We will only have one color blend attachment per pipeline.
    VkPipelineColorBlendAttachmentState attachmentStates[1];
    VkPipelineColorBlendStateCreateInfo colorBlendInfo;
    setup_color_blend_state(shaderInfo->blendInfo(), &colorBlendInfo, attachmentStates);

    VkPipelineRasterizationStateCreateInfo rasterInfo;
    // TODO: Check for wire frame mode once that is an available context option within graphite.
    setup_raster_state(/*isWireframe=*/false, &rasterInfo);

    VkPipelineShaderStageCreateInfo pipelineShaderStages[2];
    setup_shader_stage_info(VK_SHADER_STAGE_VERTEX_BIT,
                            vsModule,
                            &pipelineShaderStages[0]);
    if (hasFragmentSkSL) {
        setup_shader_stage_info(VK_SHADER_STAGE_FRAGMENT_BIT,
                                fsModule,
                                &pipelineShaderStages[1]);
    }

    // TODO: Query RenderPassDesc for input attachment information. For now, we only use one for
    // loading MSAA from resolve, so we can simply pass in false when not doing that.
    VkPipelineLayout pipelineLayout =
            setup_pipeline_layout(sharedContext,
                                  VulkanResourceProvider::kIntrinsicConstantSize,
                                  VulkanResourceProvider::kIntrinsicConstantStageFlags,
                                  !step->uniforms().empty(),
                                  shaderInfo->hasPaintUniforms(),
                                  shaderInfo->hasGradientBuffer(),
                                  shaderInfo->numFragmentTexturesAndSamplers(),
                                  /*loadMsaaFromResolve=*/false,
                                  SkSpan<sk_sp<VulkanSampler>>(immutableSamplers));
    if (pipelineLayout == VK_NULL_HANDLE) {
        destroy_shader_modules(sharedContext, vsModule, fsModule);
        return nullptr;
    }

    VkDynamicState dynamicStates[3];
    VkPipelineDynamicStateCreateInfo dynamicInfo;
    setup_dynamic_state(&dynamicInfo, dynamicStates);

    bool loadMsaaFromResolve = renderPassDesc.fColorResolveAttachment.fTextureInfo.isValid() &&
                               renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad;

    sk_sp<VulkanRenderPass> compatibleRenderPass =
            rsrcProvider->findOrCreateRenderPass(renderPassDesc, /*compatibleOnly=*/true);

    VkGraphicsPipelineCreateInfo pipelineCreateInfo;
    memset(&pipelineCreateInfo, 0, sizeof(VkGraphicsPipelineCreateInfo));
    pipelineCreateInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    pipelineCreateInfo.pNext = nullptr;
    pipelineCreateInfo.flags = 0;
    pipelineCreateInfo.stageCount = hasFragmentSkSL ? 2 : 1;
    pipelineCreateInfo.pStages = &pipelineShaderStages[0];
    pipelineCreateInfo.pVertexInputState = &vertexInputInfo;
    pipelineCreateInfo.pInputAssemblyState = &inputAssemblyInfo;
    pipelineCreateInfo.pTessellationState = nullptr;
    pipelineCreateInfo.pViewportState = &viewportInfo;
    pipelineCreateInfo.pRasterizationState = &rasterInfo;
    pipelineCreateInfo.pMultisampleState = &multisampleInfo;
    pipelineCreateInfo.pDepthStencilState = &depthStencilInfo;
    pipelineCreateInfo.pColorBlendState = &colorBlendInfo;
    pipelineCreateInfo.pDynamicState = &dynamicInfo;
    pipelineCreateInfo.layout = pipelineLayout;
    pipelineCreateInfo.renderPass = compatibleRenderPass->renderPass();
    pipelineCreateInfo.subpass = loadMsaaFromResolve ? 1 : 0;
    pipelineCreateInfo.basePipelineHandle = VK_NULL_HANDLE;
    pipelineCreateInfo.basePipelineIndex = -1;

    VkPipeline vkPipeline;
    VkResult result;
    {
        TRACE_EVENT0_ALWAYS("skia.shaders", "VkCreateGraphicsPipeline");
        VULKAN_CALL_RESULT(sharedContext,
                           result,
                           CreateGraphicsPipelines(sharedContext->device(),
                                                   rsrcProvider->pipelineCache(),
                                                   /*createInfoCount=*/1,
                                                   &pipelineCreateInfo,
                                                   /*pAllocator=*/nullptr,
                                                   &vkPipeline));
    }
    if (result != VK_SUCCESS) {
        SkDebugf("Failed to create pipeline. Error: %d\n", result);
        return nullptr;
    }

    // After creating the pipeline object, we can clean up the VkShaderModule(s).
    destroy_shader_modules(sharedContext, vsModule, fsModule);

    PipelineInfo pipelineInfo{ *shaderInfo, pipelineCreationFlags,
                               pipelineKey.hash(), compilationID };
#if defined(GPU_TEST_UTILS)
    pipelineInfo.fNativeVertexShader   = "SPIR-V disassembly not available";
    pipelineInfo.fNativeFragmentShader = "SPIR-V disassembly not available";
#endif

    return sk_sp<VulkanGraphicsPipeline>(
            new VulkanGraphicsPipeline(sharedContext,
                                       pipelineInfo,
                                       pipelineLayout,
                                       vkPipeline,
                                       /*ownsPipelineLayout=*/true,
                                       std::move(immutableSamplers)));
}

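// Create the shader modules, shader stage infos, and pipeline layout that are shared by all
// load-MSAA-from-resolve pipelines.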
bool VulkanGraphicsPipeline::InitializeMSAALoadPipelineStructs(
        const VulkanSharedContext* sharedContext,
        VkShaderModule* outVertexShaderModule,
        VkShaderModule* outFragShaderModule,
        VkPipelineShaderStageCreateInfo* outShaderStageInfo,
        VkPipelineLayout* outPipelineLayout) {
    SkSL::ProgramSettings settings;
    settings.fForceNoRTFlip = true;
    std::string vsSPIRV, fsSPIRV;
    ShaderErrorHandler* errorHandler = sharedContext->caps()->shaderErrorHandler();

    std::string vertShaderText;
    vertShaderText.append(
            "layout(vulkan, push_constant) uniform vertexUniformBuffer {"
                "half4 uPosXform;"
            "};"

            // MSAA Load Program VS
            "void main() {"
                "float2 position = float2(sk_VertexID >> 1, sk_VertexID & 1);"
                "sk_Position.xy = position * uPosXform.xy + uPosXform.zw;"
                "sk_Position.zw = half2(0, 1);"
            "}");

    std::string fragShaderText;
    fragShaderText.append(
            "layout(vulkan, input_attachment_index=0, set=" +
            std::to_string(VulkanGraphicsPipeline::kLoadMsaaFromResolveInputDescSetIndex) +
            ", binding=0) subpassInput uInput;"

            // MSAA Load Program FS
            "void main() {"
                "sk_FragColor = subpassLoad(uInput);"
            "}");

    SkSL::Program::Interface vsInterface, fsInterface;
    if (!skgpu::SkSLToSPIRV(sharedContext->caps()->shaderCaps(),
                            vertShaderText,
                            SkSL::ProgramKind::kGraphiteVertex,
                            settings,
                            &vsSPIRV,
                            &vsInterface,
                            errorHandler)) {
        return false;
    }
    if (!skgpu::SkSLToSPIRV(sharedContext->caps()->shaderCaps(),
                            fragShaderText,
                            SkSL::ProgramKind::kGraphiteFragment,
                            settings,
                            &fsSPIRV,
                            &fsInterface,
                            errorHandler)) {
        return false;
    }
    *outFragShaderModule =
            createVulkanShaderModule(sharedContext, fsSPIRV, VK_SHADER_STAGE_FRAGMENT_BIT);
    if (*outFragShaderModule == VK_NULL_HANDLE) {
        return false;
    }

    *outVertexShaderModule =
            createVulkanShaderModule(sharedContext, vsSPIRV, VK_SHADER_STAGE_VERTEX_BIT);
    if (*outVertexShaderModule == VK_NULL_HANDLE) {
        destroy_shader_modules(sharedContext, VK_NULL_HANDLE, *outFragShaderModule);
        return false;
    }

    setup_shader_stage_info(VK_SHADER_STAGE_VERTEX_BIT,
                            *outVertexShaderModule,
                            &outShaderStageInfo[0]);

    setup_shader_stage_info(VK_SHADER_STAGE_FRAGMENT_BIT,
                            *outFragShaderModule,
                            &outShaderStageInfo[1]);

    // The load msaa pipeline takes no step or paint uniforms and no instance attributes. It only
    // references one input attachment texture (which does not require a sampler) and one vertex
    // attribute (NDC position).
    skia_private::TArray<DescriptorData> inputAttachmentDescriptors(1);
    inputAttachmentDescriptors.push_back(VulkanGraphicsPipeline::kInputAttachmentDescriptor);
    // TODO: Do we need to consider the potential usage of immutable YCbCr samplers here?
    *outPipelineLayout = setup_pipeline_layout(sharedContext,
                                               /*pushConstantSize=*/32,
                                               (VkShaderStageFlagBits)VK_SHADER_STAGE_VERTEX_BIT,
                                               /*hasStepUniforms=*/false,
                                               /*hasPaintUniforms=*/false,
                                               /*hasGradientBuffer=*/false,
                                               /*numTextureSamplers=*/0,
                                               /*loadMsaaFromResolve=*/true,
                                               /*immutableSamplers=*/{});

    if (*outPipelineLayout == VK_NULL_HANDLE) {
        destroy_shader_modules(sharedContext, *outVertexShaderModule, *outFragShaderModule);
        return false;
    }
    return true;
}

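// Create the internal pipeline that copies the resolve attachment's contents into the MSAA
// color attachment via an input attachment read at the start of a render pass.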
sk_sp<VulkanGraphicsPipeline> VulkanGraphicsPipeline::MakeLoadMSAAPipeline(
        const VulkanSharedContext* sharedContext,
        VkShaderModule vsModule,
        VkShaderModule fsModule,
        VkPipelineShaderStageCreateInfo* pipelineShaderStages,
        VkPipelineLayout pipelineLayout,
        sk_sp<VulkanRenderPass> compatibleRenderPass,
        VkPipelineCache pipelineCache,
        const TextureInfo& dstColorAttachmentTexInfo) {

    int numSamples = dstColorAttachmentTexInfo.numSamples();

    // Create vertex attribute list
    SkSpan<const Attribute> loadMSAAVertexAttribs = {};

    VkPipelineVertexInputStateCreateInfo vertexInputInfo;
    skia_private::STArray<2, VkVertexInputBindingDescription, true> bindingDescs;
    skia_private::STArray<16, VkVertexInputAttributeDescription> attributeDescs;
    setup_vertex_input_state(loadMSAAVertexAttribs,
                             /*instanceAttrs=*/{}, // Load msaa pipeline takes no instance attribs
                             &vertexInputInfo,
                             &bindingDescs,
                             &attributeDescs);

    VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo;
    setup_input_assembly_state(PrimitiveType::kTriangleStrip, &inputAssemblyInfo);

    VkPipelineDepthStencilStateCreateInfo depthStencilInfo;
    setup_depth_stencil_state(/*stencilSettings=*/{}, &depthStencilInfo);

    VkPipelineViewportStateCreateInfo viewportInfo;
    setup_viewport_scissor_state(&viewportInfo);

    VkPipelineMultisampleStateCreateInfo multisampleInfo;
    setup_multisample_state(numSamples, &multisampleInfo);

    // We will only have one color blend attachment per pipeline.
    VkPipelineColorBlendAttachmentState attachmentStates[1];
    VkPipelineColorBlendStateCreateInfo colorBlendInfo;
    setup_color_blend_state({}, &colorBlendInfo, attachmentStates);

    VkPipelineRasterizationStateCreateInfo rasterInfo;
    // TODO: Check for wire frame mode once that is an available context option within graphite.
    setup_raster_state(/*isWireframe=*/false, &rasterInfo);

    VkDynamicState dynamicStates[3];
    VkPipelineDynamicStateCreateInfo dynamicInfo;
    setup_dynamic_state(&dynamicInfo, dynamicStates);

    VkGraphicsPipelineCreateInfo pipelineCreateInfo;
    memset(&pipelineCreateInfo, 0, sizeof(VkGraphicsPipelineCreateInfo));
    pipelineCreateInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    pipelineCreateInfo.pNext = nullptr;
    pipelineCreateInfo.flags = 0;
    pipelineCreateInfo.stageCount = 2;
    pipelineCreateInfo.pStages = pipelineShaderStages;
    pipelineCreateInfo.pVertexInputState = &vertexInputInfo;
    pipelineCreateInfo.pInputAssemblyState = &inputAssemblyInfo;
    pipelineCreateInfo.pTessellationState = nullptr;
    pipelineCreateInfo.pViewportState = &viewportInfo;
    pipelineCreateInfo.pRasterizationState = &rasterInfo;
    pipelineCreateInfo.pMultisampleState = &multisampleInfo;
    pipelineCreateInfo.pDepthStencilState = &depthStencilInfo;
    pipelineCreateInfo.pColorBlendState = &colorBlendInfo;
    pipelineCreateInfo.pDynamicState = &dynamicInfo;
    pipelineCreateInfo.layout = pipelineLayout;
    pipelineCreateInfo.renderPass = compatibleRenderPass->renderPass();

    VkPipeline vkPipeline;
    VkResult result;
    {
        TRACE_EVENT0_ALWAYS("skia.shaders", "CreateGraphicsPipeline");
        SkASSERT(pipelineCache != VK_NULL_HANDLE);
        VULKAN_CALL_RESULT(sharedContext,
                           result,
                           CreateGraphicsPipelines(sharedContext->device(),
                                                   pipelineCache,
                                                   /*createInfoCount=*/1,
                                                   &pipelineCreateInfo,
                                                   /*pAllocator=*/nullptr,
                                                   &vkPipeline));
    }
    if (result != VK_SUCCESS) {
        SkDebugf("Failed to create pipeline. Error: %d\n", result);
        return nullptr;
    }

    // This is an internal shader, so don't bother filling in the shader code metadata
    PipelineInfo pipelineInfo{};
    return sk_sp<VulkanGraphicsPipeline>(
            new VulkanGraphicsPipeline(sharedContext,
                                       pipelineInfo,
                                       pipelineLayout,
                                       vkPipeline,
                                       /*ownsPipelineLayout=*/false,
                                       /*immutableSamplers=*/{}));
}

VulkanGraphicsPipeline::VulkanGraphicsPipeline(
        const VulkanSharedContext* sharedContext,
        const PipelineInfo& pipelineInfo,
        VkPipelineLayout pipelineLayout,
        VkPipeline pipeline,
        bool ownsPipelineLayout,
        skia_private::TArray<sk_sp<VulkanSampler>> immutableSamplers)
    : GraphicsPipeline(sharedContext, pipelineInfo)
    , fPipelineLayout(pipelineLayout)
    , fPipeline(pipeline)
    , fOwnsPipelineLayout(ownsPipelineLayout)
    , fImmutableSamplers(std::move(immutableSamplers)) {}

void VulkanGraphicsPipeline::freeGpuData() {
    auto sharedCtxt = static_cast<const VulkanSharedContext*>(this->sharedContext());
    if (fPipeline != VK_NULL_HANDLE) {
        VULKAN_CALL(sharedCtxt->interface(),
                    DestroyPipeline(sharedCtxt->device(), fPipeline, nullptr));
    }
    if (fOwnsPipelineLayout && fPipelineLayout != VK_NULL_HANDLE) {
        VULKAN_CALL(sharedCtxt->interface(),
                    DestroyPipelineLayout(sharedCtxt->device(), fPipelineLayout, nullptr));
    }
}

} // namespace skgpu::graphite