// Copyright 2018 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "dawn_native/vulkan/RenderPipelineVk.h"

#include "dawn_native/CreatePipelineAsyncTask.h"
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/PipelineLayoutVk.h"
#include "dawn_native/vulkan/RenderPassCache.h"
#include "dawn_native/vulkan/ShaderModuleVk.h"
#include "dawn_native/vulkan/TextureVk.h"
#include "dawn_native/vulkan/UtilsVulkan.h"
#include "dawn_native/vulkan/VulkanError.h"

namespace dawn_native { namespace vulkan {

    namespace {

        VkVertexInputRate VulkanInputRate(wgpu::VertexStepMode stepMode) {
            switch (stepMode) {
                case wgpu::VertexStepMode::Vertex:
                    return VK_VERTEX_INPUT_RATE_VERTEX;
                case wgpu::VertexStepMode::Instance:
                    return VK_VERTEX_INPUT_RATE_INSTANCE;
            }
            UNREACHABLE();
        }

        VkFormat VulkanVertexFormat(wgpu::VertexFormat format) {
            switch (format) {
                case wgpu::VertexFormat::Uint8x2:
                    return VK_FORMAT_R8G8_UINT;
                case wgpu::VertexFormat::Uint8x4:
                    return VK_FORMAT_R8G8B8A8_UINT;
                case wgpu::VertexFormat::Sint8x2:
                    return VK_FORMAT_R8G8_SINT;
                case wgpu::VertexFormat::Sint8x4:
                    return VK_FORMAT_R8G8B8A8_SINT;
                case wgpu::VertexFormat::Unorm8x2:
                    return VK_FORMAT_R8G8_UNORM;
                case wgpu::VertexFormat::Unorm8x4:
                    return VK_FORMAT_R8G8B8A8_UNORM;
                case wgpu::VertexFormat::Snorm8x2:
                    return VK_FORMAT_R8G8_SNORM;
                case wgpu::VertexFormat::Snorm8x4:
                    return VK_FORMAT_R8G8B8A8_SNORM;
                case wgpu::VertexFormat::Uint16x2:
                    return VK_FORMAT_R16G16_UINT;
                case wgpu::VertexFormat::Uint16x4:
                    return VK_FORMAT_R16G16B16A16_UINT;
                case wgpu::VertexFormat::Sint16x2:
                    return VK_FORMAT_R16G16_SINT;
                case wgpu::VertexFormat::Sint16x4:
                    return VK_FORMAT_R16G16B16A16_SINT;
                case wgpu::VertexFormat::Unorm16x2:
                    return VK_FORMAT_R16G16_UNORM;
                case wgpu::VertexFormat::Unorm16x4:
                    return VK_FORMAT_R16G16B16A16_UNORM;
                case wgpu::VertexFormat::Snorm16x2:
                    return VK_FORMAT_R16G16_SNORM;
                case wgpu::VertexFormat::Snorm16x4:
                    return VK_FORMAT_R16G16B16A16_SNORM;
                case wgpu::VertexFormat::Float16x2:
                    return VK_FORMAT_R16G16_SFLOAT;
                case wgpu::VertexFormat::Float16x4:
                    return VK_FORMAT_R16G16B16A16_SFLOAT;
                case wgpu::VertexFormat::Float32:
                    return VK_FORMAT_R32_SFLOAT;
                case wgpu::VertexFormat::Float32x2:
                    return VK_FORMAT_R32G32_SFLOAT;
                case wgpu::VertexFormat::Float32x3:
                    return VK_FORMAT_R32G32B32_SFLOAT;
                case wgpu::VertexFormat::Float32x4:
                    return VK_FORMAT_R32G32B32A32_SFLOAT;
                case wgpu::VertexFormat::Uint32:
                    return VK_FORMAT_R32_UINT;
                case wgpu::VertexFormat::Uint32x2:
                    return VK_FORMAT_R32G32_UINT;
                case wgpu::VertexFormat::Uint32x3:
                    return VK_FORMAT_R32G32B32_UINT;
                case wgpu::VertexFormat::Uint32x4:
                    return VK_FORMAT_R32G32B32A32_UINT;
                case wgpu::VertexFormat::Sint32:
                    return VK_FORMAT_R32_SINT;
                case wgpu::VertexFormat::Sint32x2:
                    return VK_FORMAT_R32G32_SINT;
                case wgpu::VertexFormat::Sint32x3:
                    return VK_FORMAT_R32G32B32_SINT;
                case wgpu::VertexFormat::Sint32x4:
                    return VK_FORMAT_R32G32B32A32_SINT;
                default:
                    UNREACHABLE();
            }
        }

        VkPrimitiveTopology VulkanPrimitiveTopology(wgpu::PrimitiveTopology topology) {
            switch (topology) {
                case wgpu::PrimitiveTopology::PointList:
                    return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
                case wgpu::PrimitiveTopology::LineList:
                    return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
                case wgpu::PrimitiveTopology::LineStrip:
                    return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
                case wgpu::PrimitiveTopology::TriangleList:
                    return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
                case wgpu::PrimitiveTopology::TriangleStrip:
                    return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
            }
            UNREACHABLE();
        }

        bool ShouldEnablePrimitiveRestart(wgpu::PrimitiveTopology topology) {
            // Primitive restart is always enabled in WebGPU, but the Vulkan validation rules
            // require that primitive restart only be enabled on primitive topologies that
            // support restarting.
            switch (topology) {
                case wgpu::PrimitiveTopology::PointList:
                case wgpu::PrimitiveTopology::LineList:
                case wgpu::PrimitiveTopology::TriangleList:
                    return false;
                case wgpu::PrimitiveTopology::LineStrip:
                case wgpu::PrimitiveTopology::TriangleStrip:
                    return true;
            }
            UNREACHABLE();
        }

        VkFrontFace VulkanFrontFace(wgpu::FrontFace face) {
            switch (face) {
                case wgpu::FrontFace::CCW:
                    return VK_FRONT_FACE_COUNTER_CLOCKWISE;
                case wgpu::FrontFace::CW:
                    return VK_FRONT_FACE_CLOCKWISE;
            }
            UNREACHABLE();
        }

        VkCullModeFlagBits VulkanCullMode(wgpu::CullMode mode) {
            switch (mode) {
                case wgpu::CullMode::None:
                    return VK_CULL_MODE_NONE;
                case wgpu::CullMode::Front:
                    return VK_CULL_MODE_FRONT_BIT;
                case wgpu::CullMode::Back:
                    return VK_CULL_MODE_BACK_BIT;
            }
            UNREACHABLE();
        }

        VkBlendFactor VulkanBlendFactor(wgpu::BlendFactor factor) {
            switch (factor) {
                case wgpu::BlendFactor::Zero:
                    return VK_BLEND_FACTOR_ZERO;
                case wgpu::BlendFactor::One:
                    return VK_BLEND_FACTOR_ONE;
                case wgpu::BlendFactor::Src:
                    return VK_BLEND_FACTOR_SRC_COLOR;
                case wgpu::BlendFactor::OneMinusSrc:
                    return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
                case wgpu::BlendFactor::SrcAlpha:
                    return VK_BLEND_FACTOR_SRC_ALPHA;
                case wgpu::BlendFactor::OneMinusSrcAlpha:
                    return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
                case wgpu::BlendFactor::Dst:
                    return VK_BLEND_FACTOR_DST_COLOR;
                case wgpu::BlendFactor::OneMinusDst:
                    return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
                case wgpu::BlendFactor::DstAlpha:
                    return VK_BLEND_FACTOR_DST_ALPHA;
                case wgpu::BlendFactor::OneMinusDstAlpha:
                    return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
                case wgpu::BlendFactor::SrcAlphaSaturated:
                    return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
                case wgpu::BlendFactor::Constant:
                    return VK_BLEND_FACTOR_CONSTANT_COLOR;
                case wgpu::BlendFactor::OneMinusConstant:
                    return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
            }
            UNREACHABLE();
        }

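        // Maps a WebGPU blend operation onto the equivalent Vulkan blend op. Note that for
        // VK_BLEND_OP_MIN and VK_BLEND_OP_MAX, Vulkan ignores the source and destination
        // blend factors.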
        VkBlendOp VulkanBlendOperation(wgpu::BlendOperation operation) {
            switch (operation) {
                case wgpu::BlendOperation::Add:
                    return VK_BLEND_OP_ADD;
                case wgpu::BlendOperation::Subtract:
                    return VK_BLEND_OP_SUBTRACT;
                case wgpu::BlendOperation::ReverseSubtract:
                    return VK_BLEND_OP_REVERSE_SUBTRACT;
                case wgpu::BlendOperation::Min:
                    return VK_BLEND_OP_MIN;
                case wgpu::BlendOperation::Max:
                    return VK_BLEND_OP_MAX;
            }
            UNREACHABLE();
        }

        VkColorComponentFlags VulkanColorWriteMask(wgpu::ColorWriteMask mask,
                                                   bool isDeclaredInFragmentShader) {
            // The Vulkan and Dawn color write masks match, so static_assert that and return the
            // mask unchanged.
            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Red) ==
                              VK_COLOR_COMPONENT_R_BIT,
                          "");
            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Green) ==
                              VK_COLOR_COMPONENT_G_BIT,
                          "");
            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Blue) ==
                              VK_COLOR_COMPONENT_B_BIT,
                          "");
            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Alpha) ==
                              VK_COLOR_COMPONENT_A_BIT,
                          "");

            // According to the Vulkan spec (chapter 14.3), "the input values to blending or color
            // attachment writes are undefined for components which do not correspond to a fragment
            // shader output", so we set the color write mask to 0 to prevent such undefined values
            // from being written into the color attachments.
            return isDeclaredInFragmentShader ? static_cast<VkColorComponentFlags>(mask)
                                              : static_cast<VkColorComponentFlags>(0);
        }

        VkPipelineColorBlendAttachmentState ComputeColorDesc(const ColorTargetState* state,
                                                             bool isDeclaredInFragmentShader) {
            VkPipelineColorBlendAttachmentState attachment;
            attachment.blendEnable = state->blend != nullptr ? VK_TRUE : VK_FALSE;
            if (attachment.blendEnable) {
                attachment.srcColorBlendFactor = VulkanBlendFactor(state->blend->color.srcFactor);
                attachment.dstColorBlendFactor = VulkanBlendFactor(state->blend->color.dstFactor);
                attachment.colorBlendOp = VulkanBlendOperation(state->blend->color.operation);
                attachment.srcAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.srcFactor);
                attachment.dstAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.dstFactor);
                attachment.alphaBlendOp = VulkanBlendOperation(state->blend->alpha.operation);
            } else {
                // Swiftshader's Vulkan implementation appears to expect these values to be valid
                // even when blending is not enabled.
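                // These values are equivalent to blending being disabled: source * ONE plus
                // destination * ZERO under an ADD operation simply passes the source color
                // through.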
                attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
                attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
                attachment.colorBlendOp = VK_BLEND_OP_ADD;
                attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
                attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
                attachment.alphaBlendOp = VK_BLEND_OP_ADD;
            }
            attachment.colorWriteMask =
                VulkanColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
            return attachment;
        }

        VkStencilOp VulkanStencilOp(wgpu::StencilOperation op) {
            switch (op) {
                case wgpu::StencilOperation::Keep:
                    return VK_STENCIL_OP_KEEP;
                case wgpu::StencilOperation::Zero:
                    return VK_STENCIL_OP_ZERO;
                case wgpu::StencilOperation::Replace:
                    return VK_STENCIL_OP_REPLACE;
                case wgpu::StencilOperation::IncrementClamp:
                    return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
                case wgpu::StencilOperation::DecrementClamp:
                    return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
                case wgpu::StencilOperation::Invert:
                    return VK_STENCIL_OP_INVERT;
                case wgpu::StencilOperation::IncrementWrap:
                    return VK_STENCIL_OP_INCREMENT_AND_WRAP;
                case wgpu::StencilOperation::DecrementWrap:
                    return VK_STENCIL_OP_DECREMENT_AND_WRAP;
            }
            UNREACHABLE();
        }

        VkPipelineDepthStencilStateCreateInfo ComputeDepthStencilDesc(
            const DepthStencilState* descriptor) {
            VkPipelineDepthStencilStateCreateInfo depthStencilState;
            depthStencilState.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
            depthStencilState.pNext = nullptr;
            depthStencilState.flags = 0;

            // Depth writes only occur if the depth test is enabled, so the test must be kept on
            // whenever depth writes are requested.
            depthStencilState.depthTestEnable =
                (descriptor->depthCompare == wgpu::CompareFunction::Always &&
                 !descriptor->depthWriteEnabled)
                    ? VK_FALSE
                    : VK_TRUE;
            depthStencilState.depthWriteEnable = descriptor->depthWriteEnabled ? VK_TRUE : VK_FALSE;
            depthStencilState.depthCompareOp = ToVulkanCompareOp(descriptor->depthCompare);
            depthStencilState.depthBoundsTestEnable = false;
            depthStencilState.minDepthBounds = 0.0f;
            depthStencilState.maxDepthBounds = 1.0f;

            depthStencilState.stencilTestEnable =
                StencilTestEnabled(descriptor) ? VK_TRUE : VK_FALSE;

            depthStencilState.front.failOp = VulkanStencilOp(descriptor->stencilFront.failOp);
            depthStencilState.front.passOp = VulkanStencilOp(descriptor->stencilFront.passOp);
            depthStencilState.front.depthFailOp =
                VulkanStencilOp(descriptor->stencilFront.depthFailOp);
            depthStencilState.front.compareOp = ToVulkanCompareOp(descriptor->stencilFront.compare);

            depthStencilState.back.failOp = VulkanStencilOp(descriptor->stencilBack.failOp);
            depthStencilState.back.passOp = VulkanStencilOp(descriptor->stencilBack.passOp);
            depthStencilState.back.depthFailOp =
                VulkanStencilOp(descriptor->stencilBack.depthFailOp);
            depthStencilState.back.compareOp = ToVulkanCompareOp(descriptor->stencilBack.compare);

            // Dawn doesn't have separate front and back stencil masks.
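            // WebGPU exposes a single stencilReadMask and stencilWriteMask that apply to both
            // faces, so the same values are used for the front-facing and back-facing state.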
            depthStencilState.front.compareMask = descriptor->stencilReadMask;
            depthStencilState.back.compareMask = descriptor->stencilReadMask;
            depthStencilState.front.writeMask = descriptor->stencilWriteMask;
            depthStencilState.back.writeMask = descriptor->stencilWriteMask;

            // The stencil reference is always dynamic.
            depthStencilState.front.reference = 0;
            depthStencilState.back.reference = 0;

            return depthStencilState;
        }

    }  // anonymous namespace

    // static
    Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
        Device* device,
        const RenderPipelineDescriptor* descriptor) {
        return AcquireRef(new RenderPipeline(device, descriptor));
    }

    MaybeError RenderPipeline::Initialize() {
        Device* device = ToBackend(GetDevice());

        // There are at most 2 shader stages in a render pipeline: vertex and fragment.
        std::array<VkPipelineShaderStageCreateInfo, 2> shaderStages;
        std::array<std::vector<OverridableConstantScalar>, 2> specializationDataEntriesPerStages;
        std::array<std::vector<VkSpecializationMapEntry>, 2> specializationMapEntriesPerStages;
        std::array<VkSpecializationInfo, 2> specializationInfoPerStages;
        uint32_t stageCount = 0;

        for (auto stage : IterateStages(this->GetStageMask())) {
            VkPipelineShaderStageCreateInfo shaderStage;

            const ProgrammableStage& programmableStage = GetStage(stage);
            DAWN_TRY_ASSIGN(shaderStage.module,
                            ToBackend(programmableStage.module)
                                ->GetTransformedModuleHandle(programmableStage.entryPoint.c_str(),
                                                             ToBackend(GetLayout())));

            shaderStage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
            shaderStage.pNext = nullptr;
            shaderStage.flags = 0;
            shaderStage.pSpecializationInfo = nullptr;
            shaderStage.pName = programmableStage.entryPoint.c_str();

            switch (stage) {
                case dawn_native::SingleShaderStage::Vertex: {
                    shaderStage.stage = VK_SHADER_STAGE_VERTEX_BIT;
                    break;
                }
                case dawn_native::SingleShaderStage::Fragment: {
                    shaderStage.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
                    break;
                }
                default: {
                    // Only the Vertex and Fragment stages are possible in a render pipeline.
                    DAWN_UNREACHABLE();
                    break;
                }
            }

            shaderStage.pSpecializationInfo =
                GetVkSpecializationInfo(programmableStage, &specializationInfoPerStages[stageCount],
                                        &specializationDataEntriesPerStages[stageCount],
                                        &specializationMapEntriesPerStages[stageCount]);

            DAWN_ASSERT(stageCount < 2);
            shaderStages[stageCount] = shaderStage;
            stageCount++;
        }

        PipelineVertexInputStateCreateInfoTemporaryAllocations tempAllocations;
        VkPipelineVertexInputStateCreateInfo vertexInputCreateInfo =
            ComputeVertexInputDesc(&tempAllocations);

        VkPipelineInputAssemblyStateCreateInfo inputAssembly;
        inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
        inputAssembly.pNext = nullptr;
        inputAssembly.flags = 0;
        inputAssembly.topology = VulkanPrimitiveTopology(GetPrimitiveTopology());
        inputAssembly.primitiveRestartEnable = ShouldEnablePrimitiveRestart(GetPrimitiveTopology());

        // A dummy viewport/scissor info. The validation layers force us to provide at least one
        // scissor and one viewport here, even if we choose to make them dynamic.
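        // Because VK_DYNAMIC_STATE_VIEWPORT and VK_DYNAMIC_STATE_SCISSOR are included in the
        // dynamic state below, the real viewport and scissor are set at draw time and these 1x1
        // placeholder values are never used for rendering.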
        VkViewport viewportDesc;
        viewportDesc.x = 0.0f;
        viewportDesc.y = 0.0f;
        viewportDesc.width = 1.0f;
        viewportDesc.height = 1.0f;
        viewportDesc.minDepth = 0.0f;
        viewportDesc.maxDepth = 1.0f;
        VkRect2D scissorRect;
        scissorRect.offset.x = 0;
        scissorRect.offset.y = 0;
        scissorRect.extent.width = 1;
        scissorRect.extent.height = 1;
        VkPipelineViewportStateCreateInfo viewport;
        viewport.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
        viewport.pNext = nullptr;
        viewport.flags = 0;
        viewport.viewportCount = 1;
        viewport.pViewports = &viewportDesc;
        viewport.scissorCount = 1;
        viewport.pScissors = &scissorRect;

        VkPipelineRasterizationStateCreateInfo rasterization;
        rasterization.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
        rasterization.pNext = nullptr;
        rasterization.flags = 0;
        rasterization.depthClampEnable = ShouldClampDepth() ? VK_TRUE : VK_FALSE;
        rasterization.rasterizerDiscardEnable = VK_FALSE;
        rasterization.polygonMode = VK_POLYGON_MODE_FILL;
        rasterization.cullMode = VulkanCullMode(GetCullMode());
        rasterization.frontFace = VulkanFrontFace(GetFrontFace());
        rasterization.depthBiasEnable = IsDepthBiasEnabled();
        rasterization.depthBiasConstantFactor = GetDepthBias();
        rasterization.depthBiasClamp = GetDepthBiasClamp();
        rasterization.depthBiasSlopeFactor = GetDepthBiasSlopeScale();
        rasterization.lineWidth = 1.0f;

        VkPipelineMultisampleStateCreateInfo multisample;
        multisample.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
        multisample.pNext = nullptr;
        multisample.flags = 0;
        multisample.rasterizationSamples = VulkanSampleCount(GetSampleCount());
        multisample.sampleShadingEnable = VK_FALSE;
        multisample.minSampleShading = 0.0f;
        // VkPipelineMultisampleStateCreateInfo.pSampleMask is an array of length
        // ceil(rasterizationSamples / 32). Since we pass a single uint32_t, assert that the
        // sample count never exceeds 32 so that this length is indeed 1.
        ASSERT(multisample.rasterizationSamples <= 32);
        VkSampleMask sampleMask = GetSampleMask();
        multisample.pSampleMask = &sampleMask;
        multisample.alphaToCoverageEnable = IsAlphaToCoverageEnabled();
        multisample.alphaToOneEnable = VK_FALSE;

        VkPipelineDepthStencilStateCreateInfo depthStencilState =
            ComputeDepthStencilDesc(GetDepthStencilState());

        VkPipelineColorBlendStateCreateInfo colorBlend;
        // colorBlend may hold pointers to elements in colorBlendAttachments, so
        // colorBlendAttachments must stay in scope at least as long as colorBlend.
        ityp::array<ColorAttachmentIndex, VkPipelineColorBlendAttachmentState, kMaxColorAttachments>
            colorBlendAttachments;
        if (GetStageMask() & wgpu::ShaderStage::Fragment) {
            // Initialize the "blend state info" that will be chained in the "create info" from the
            // data pre-computed in the ColorState.
            const auto& fragmentOutputsWritten =
                GetStage(SingleShaderStage::Fragment).metadata->fragmentOutputsWritten;
            for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
                const ColorTargetState* target = GetColorTargetState(i);
                colorBlendAttachments[i] = ComputeColorDesc(target, fragmentOutputsWritten[i]);
            }

            colorBlend.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
            colorBlend.pNext = nullptr;
            colorBlend.flags = 0;
            // LogicOp isn't supported so we disable it.
            colorBlend.logicOpEnable = VK_FALSE;
            colorBlend.logicOp = VK_LOGIC_OP_CLEAR;
            colorBlend.attachmentCount = static_cast<uint32_t>(GetColorAttachmentsMask().count());
            colorBlend.pAttachments = colorBlendAttachments.data();
            // The blend constant is always dynamic so we fill in a dummy value.
            colorBlend.blendConstants[0] = 0.0f;
            colorBlend.blendConstants[1] = 0.0f;
            colorBlend.blendConstants[2] = 0.0f;
            colorBlend.blendConstants[3] = 0.0f;
        }

        // Tag all state as dynamic except the stencil compare/write masks and the depth bias.
        VkDynamicState dynamicStates[] = {
            VK_DYNAMIC_STATE_VIEWPORT,     VK_DYNAMIC_STATE_SCISSOR,
            VK_DYNAMIC_STATE_LINE_WIDTH,   VK_DYNAMIC_STATE_BLEND_CONSTANTS,
            VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_STENCIL_REFERENCE,
        };
        VkPipelineDynamicStateCreateInfo dynamic;
        dynamic.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
        dynamic.pNext = nullptr;
        dynamic.flags = 0;
        dynamic.dynamicStateCount = sizeof(dynamicStates) / sizeof(dynamicStates[0]);
        dynamic.pDynamicStates = dynamicStates;

        // Get a VkRenderPass that matches the attachment formats for this pipeline. The load/store
        // ops don't matter, so set them all to LoadOp::Load / StoreOp::Store. Whether the render
        // pass has a resolve target and whether the depth/stencil attachment is read-only also
        // don't matter, so set them both to false.
        VkRenderPass renderPass = VK_NULL_HANDLE;
        {
            RenderPassCacheQuery query;

            for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
                query.SetColor(i, GetColorAttachmentFormat(i), wgpu::LoadOp::Load,
                               wgpu::StoreOp::Store, false);
            }

            if (HasDepthStencilAttachment()) {
                query.SetDepthStencil(GetDepthStencilFormat(), wgpu::LoadOp::Load,
                                      wgpu::StoreOp::Store, wgpu::LoadOp::Load,
                                      wgpu::StoreOp::Store, false);
            }

            query.SetSampleCount(GetSampleCount());

            DAWN_TRY_ASSIGN(renderPass, device->GetRenderPassCache()->GetRenderPass(query));
        }

        // The create info chains in a bunch of things created on the stack here or inside state
        // objects.
        VkGraphicsPipelineCreateInfo createInfo;
        createInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;
        createInfo.stageCount = stageCount;
        createInfo.pStages = shaderStages.data();
        createInfo.pVertexInputState = &vertexInputCreateInfo;
        createInfo.pInputAssemblyState = &inputAssembly;
        createInfo.pTessellationState = nullptr;
        createInfo.pViewportState = &viewport;
        createInfo.pRasterizationState = &rasterization;
        createInfo.pMultisampleState = &multisample;
        createInfo.pDepthStencilState = &depthStencilState;
        createInfo.pColorBlendState =
            (GetStageMask() & wgpu::ShaderStage::Fragment) ? &colorBlend : nullptr;
        createInfo.pDynamicState = &dynamic;
        createInfo.layout = ToBackend(GetLayout())->GetHandle();
        createInfo.renderPass = renderPass;
        createInfo.subpass = 0;
        createInfo.basePipelineHandle = VkPipeline{};
        createInfo.basePipelineIndex = -1;

        DAWN_TRY(CheckVkSuccess(
            device->fn.CreateGraphicsPipelines(device->GetVkDevice(), VkPipelineCache{}, 1,
                                               &createInfo, nullptr, &*mHandle),
            "CreateGraphicsPipeline"));

        SetLabelImpl();

        return {};
    }

    void RenderPipeline::SetLabelImpl() {
        SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_PIPELINE,
                     reinterpret_cast<uint64_t&>(mHandle), "Dawn_RenderPipeline", GetLabel());
    }

    VkPipelineVertexInputStateCreateInfo RenderPipeline::ComputeVertexInputDesc(
        PipelineVertexInputStateCreateInfoTemporaryAllocations* tempAllocations) {
        // Fill in the "binding info" that will be chained in the create info.
        uint32_t bindingCount = 0;
        for (VertexBufferSlot slot : IterateBitSet(GetVertexBufferSlotsUsed())) {
            const VertexBufferInfo& bindingInfo = GetVertexBuffer(slot);

            VkVertexInputBindingDescription* bindingDesc = &tempAllocations->bindings[bindingCount];
            bindingDesc->binding = static_cast<uint8_t>(slot);
            bindingDesc->stride = bindingInfo.arrayStride;
            bindingDesc->inputRate = VulkanInputRate(bindingInfo.stepMode);

            bindingCount++;
        }

        // Fill in the "attribute info" that will be chained in the create info.
        uint32_t attributeCount = 0;
        for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
            const VertexAttributeInfo& attributeInfo = GetAttribute(loc);

            VkVertexInputAttributeDescription* attributeDesc =
                &tempAllocations->attributes[attributeCount];
            attributeDesc->location = static_cast<uint8_t>(loc);
            attributeDesc->binding = static_cast<uint8_t>(attributeInfo.vertexBufferSlot);
            attributeDesc->format = VulkanVertexFormat(attributeInfo.format);
            attributeDesc->offset = attributeInfo.offset;

            attributeCount++;
        }

        // Build the create info.
        VkPipelineVertexInputStateCreateInfo createInfo;
        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;
        createInfo.vertexBindingDescriptionCount = bindingCount;
        createInfo.pVertexBindingDescriptions = tempAllocations->bindings.data();
        createInfo.vertexAttributeDescriptionCount = attributeCount;
        createInfo.pVertexAttributeDescriptions = tempAllocations->attributes.data();
        return createInfo;
    }

    RenderPipeline::~RenderPipeline() = default;

    void RenderPipeline::DestroyImpl() {
        RenderPipelineBase::DestroyImpl();
        if (mHandle != VK_NULL_HANDLE) {
            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
            mHandle = VK_NULL_HANDLE;
        }
    }

    VkPipeline RenderPipeline::GetHandle() const {
        return mHandle;
    }

    void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
                                         WGPUCreateRenderPipelineAsyncCallback callback,
                                         void* userdata) {
        std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
            std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
                                                            userdata);
        CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
    }

}}  // namespace dawn_native::vulkan