1 // Copyright 2017 The Dawn Authors 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 #include "dawn_native/RenderPipeline.h" 16 17 #include "common/BitSetIterator.h" 18 #include "dawn_native/ChainUtils_autogen.h" 19 #include "dawn_native/Commands.h" 20 #include "dawn_native/Device.h" 21 #include "dawn_native/InternalPipelineStore.h" 22 #include "dawn_native/ObjectContentHasher.h" 23 #include "dawn_native/ObjectType_autogen.h" 24 #include "dawn_native/ValidationUtils_autogen.h" 25 #include "dawn_native/VertexFormat.h" 26 27 #include <cmath> 28 #include <sstream> 29 30 namespace dawn_native { AbslFormatConvert(VertexFormatBaseType value,const absl::FormatConversionSpec & spec,absl::FormatSink * s)31 absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert( 32 VertexFormatBaseType value, 33 const absl::FormatConversionSpec& spec, 34 absl::FormatSink* s) { 35 switch (value) { 36 case VertexFormatBaseType::Float: 37 s->Append("Float"); 38 break; 39 case VertexFormatBaseType::Uint: 40 s->Append("Uint"); 41 break; 42 case VertexFormatBaseType::Sint: 43 s->Append("Sint"); 44 break; 45 default: 46 UNREACHABLE(); 47 } 48 return {true}; 49 } 50 AbslFormatConvert(InterStageComponentType value,const absl::FormatConversionSpec & spec,absl::FormatSink * s)51 absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert( 52 InterStageComponentType value, 53 const absl::FormatConversionSpec& spec, 54 
absl::FormatSink* s) { 55 switch (value) { 56 case InterStageComponentType::Float: 57 s->Append("Float"); 58 break; 59 case InterStageComponentType::Uint: 60 s->Append("Uint"); 61 break; 62 case InterStageComponentType::Sint: 63 s->Append("Sint"); 64 break; 65 default: 66 UNREACHABLE(); 67 } 68 return {true}; 69 } 70 AbslFormatConvert(InterpolationType value,const absl::FormatConversionSpec & spec,absl::FormatSink * s)71 absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert( 72 InterpolationType value, 73 const absl::FormatConversionSpec& spec, 74 absl::FormatSink* s) { 75 switch (value) { 76 case InterpolationType::Perspective: 77 s->Append("Perspective"); 78 break; 79 case InterpolationType::Linear: 80 s->Append("Linear"); 81 break; 82 case InterpolationType::Flat: 83 s->Append("Flat"); 84 break; 85 default: 86 UNREACHABLE(); 87 } 88 return {true}; 89 } 90 AbslFormatConvert(InterpolationSampling value,const absl::FormatConversionSpec & spec,absl::FormatSink * s)91 absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert( 92 InterpolationSampling value, 93 const absl::FormatConversionSpec& spec, 94 absl::FormatSink* s) { 95 switch (value) { 96 case InterpolationSampling::None: 97 s->Append("None"); 98 break; 99 case InterpolationSampling::Center: 100 s->Append("Center"); 101 break; 102 case InterpolationSampling::Centroid: 103 s->Append("Centroid"); 104 break; 105 case InterpolationSampling::Sample: 106 s->Append("Sample"); 107 break; 108 default: 109 UNREACHABLE(); 110 } 111 return {true}; 112 } 113 114 // Helper functions 115 namespace { ValidateVertexAttribute(DeviceBase * device,const VertexAttribute * attribute,const EntryPointMetadata & metadata,uint64_t vertexBufferStride,ityp::bitset<VertexAttributeLocation,kMaxVertexAttributes> * attributesSetMask)116 MaybeError ValidateVertexAttribute( 117 DeviceBase* device, 118 const VertexAttribute* attribute, 119 const EntryPointMetadata& metadata, 120 uint64_t 
vertexBufferStride, 121 ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) { 122 DAWN_TRY(ValidateVertexFormat(attribute->format)); 123 const VertexFormatInfo& formatInfo = GetVertexFormatInfo(attribute->format); 124 125 DAWN_INVALID_IF( 126 attribute->shaderLocation >= kMaxVertexAttributes, 127 "Attribute shader location (%u) exceeds the maximum number of vertex attributes " 128 "(%u).", 129 attribute->shaderLocation, kMaxVertexAttributes); 130 131 VertexAttributeLocation location(static_cast<uint8_t>(attribute->shaderLocation)); 132 133 // No underflow is possible because the max vertex format size is smaller than 134 // kMaxVertexBufferArrayStride. 135 ASSERT(kMaxVertexBufferArrayStride >= formatInfo.byteSize); 136 DAWN_INVALID_IF( 137 attribute->offset > kMaxVertexBufferArrayStride - formatInfo.byteSize, 138 "Attribute offset (%u) with format %s (size: %u) doesn't fit in the maximum vertex " 139 "buffer stride (%u).", 140 attribute->offset, attribute->format, formatInfo.byteSize, 141 kMaxVertexBufferArrayStride); 142 143 // No overflow is possible because the offset is already validated to be less 144 // than kMaxVertexBufferArrayStride. 
145 ASSERT(attribute->offset < kMaxVertexBufferArrayStride); 146 DAWN_INVALID_IF( 147 vertexBufferStride > 0 && 148 attribute->offset + formatInfo.byteSize > vertexBufferStride, 149 "Attribute offset (%u) with format %s (size: %u) doesn't fit in the vertex buffer " 150 "stride (%u).", 151 attribute->offset, attribute->format, formatInfo.byteSize, vertexBufferStride); 152 153 DAWN_INVALID_IF(attribute->offset % std::min(4u, formatInfo.byteSize) != 0, 154 "Attribute offset (%u) in not a multiple of %u.", attribute->offset, 155 std::min(4u, formatInfo.byteSize)); 156 157 DAWN_INVALID_IF(metadata.usedVertexInputs[location] && 158 formatInfo.baseType != metadata.vertexInputBaseTypes[location], 159 "Attribute base type (%s) does not match the " 160 "shader's base type (%s) in location (%u).", 161 formatInfo.baseType, metadata.vertexInputBaseTypes[location], 162 attribute->shaderLocation); 163 164 DAWN_INVALID_IF((*attributesSetMask)[location], 165 "Attribute shader location (%u) is used more than once.", 166 attribute->shaderLocation); 167 168 attributesSetMask->set(location); 169 return {}; 170 } 171 ValidateVertexBufferLayout(DeviceBase * device,const VertexBufferLayout * buffer,const EntryPointMetadata & metadata,ityp::bitset<VertexAttributeLocation,kMaxVertexAttributes> * attributesSetMask)172 MaybeError ValidateVertexBufferLayout( 173 DeviceBase* device, 174 const VertexBufferLayout* buffer, 175 const EntryPointMetadata& metadata, 176 ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) { 177 DAWN_TRY(ValidateVertexStepMode(buffer->stepMode)); 178 DAWN_INVALID_IF( 179 buffer->arrayStride > kMaxVertexBufferArrayStride, 180 "Vertex buffer arrayStride (%u) is larger than the maximum array stride (%u).", 181 buffer->arrayStride, kMaxVertexBufferArrayStride); 182 183 DAWN_INVALID_IF(buffer->arrayStride % 4 != 0, 184 "Vertex buffer arrayStride (%u) is not a multiple of 4.", 185 buffer->arrayStride); 186 187 for (uint32_t i = 0; i < 
buffer->attributeCount; ++i) { 188 DAWN_TRY_CONTEXT(ValidateVertexAttribute(device, &buffer->attributes[i], metadata, 189 buffer->arrayStride, attributesSetMask), 190 "validating attributes[%u].", i); 191 } 192 193 return {}; 194 } 195 ValidateVertexState(DeviceBase * device,const VertexState * descriptor,const PipelineLayoutBase * layout)196 MaybeError ValidateVertexState(DeviceBase* device, 197 const VertexState* descriptor, 198 const PipelineLayoutBase* layout) { 199 DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr."); 200 201 DAWN_INVALID_IF( 202 descriptor->bufferCount > kMaxVertexBuffers, 203 "Vertex buffer count (%u) exceeds the maximum number of vertex buffers (%u).", 204 descriptor->bufferCount, kMaxVertexBuffers); 205 206 DAWN_TRY_CONTEXT( 207 ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint, 208 descriptor->constantCount, descriptor->constants, layout, 209 SingleShaderStage::Vertex), 210 "validating vertex stage (module: %s, entryPoint: %s).", descriptor->module, 211 descriptor->entryPoint); 212 const EntryPointMetadata& vertexMetadata = 213 descriptor->module->GetEntryPoint(descriptor->entryPoint); 214 215 ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> attributesSetMask; 216 uint32_t totalAttributesNum = 0; 217 for (uint32_t i = 0; i < descriptor->bufferCount; ++i) { 218 DAWN_TRY_CONTEXT(ValidateVertexBufferLayout(device, &descriptor->buffers[i], 219 vertexMetadata, &attributesSetMask), 220 "validating buffers[%u].", i); 221 totalAttributesNum += descriptor->buffers[i].attributeCount; 222 } 223 224 // Every vertex attribute has a member called shaderLocation, and there are some 225 // requirements for shaderLocation: 1) >=0, 2) values are different across different 226 // attributes, 3) can't exceed kMaxVertexAttributes. So it can ensure that total 227 // attribute number never exceed kMaxVertexAttributes. 
228 ASSERT(totalAttributesNum <= kMaxVertexAttributes); 229 230 // TODO(dawn:563): Specify which inputs were not used in error message. 231 DAWN_INVALID_IF(!IsSubset(vertexMetadata.usedVertexInputs, attributesSetMask), 232 "Pipeline vertex stage uses vertex buffers not in the vertex state"); 233 234 return {}; 235 } 236 ValidatePrimitiveState(const DeviceBase * device,const PrimitiveState * descriptor)237 MaybeError ValidatePrimitiveState(const DeviceBase* device, 238 const PrimitiveState* descriptor) { 239 DAWN_TRY(ValidateSingleSType(descriptor->nextInChain, 240 wgpu::SType::PrimitiveDepthClampingState)); 241 const PrimitiveDepthClampingState* clampInfo = nullptr; 242 FindInChain(descriptor->nextInChain, &clampInfo); 243 if (clampInfo && !device->IsFeatureEnabled(Feature::DepthClamping)) { 244 return DAWN_VALIDATION_ERROR("The depth clamping feature is not supported"); 245 } 246 DAWN_TRY(ValidatePrimitiveTopology(descriptor->topology)); 247 DAWN_TRY(ValidateIndexFormat(descriptor->stripIndexFormat)); 248 DAWN_TRY(ValidateFrontFace(descriptor->frontFace)); 249 DAWN_TRY(ValidateCullMode(descriptor->cullMode)); 250 251 // Pipeline descriptors must have stripIndexFormat == undefined if they are using 252 // non-strip topologies. 
253 if (!IsStripPrimitiveTopology(descriptor->topology)) { 254 DAWN_INVALID_IF( 255 descriptor->stripIndexFormat != wgpu::IndexFormat::Undefined, 256 "StripIndexFormat (%s) is not undefined when using a non-strip primitive " 257 "topology (%s).", 258 descriptor->stripIndexFormat, descriptor->topology); 259 } 260 261 return {}; 262 } 263 ValidateDepthStencilState(const DeviceBase * device,const DepthStencilState * descriptor)264 MaybeError ValidateDepthStencilState(const DeviceBase* device, 265 const DepthStencilState* descriptor) { 266 if (descriptor->nextInChain != nullptr) { 267 return DAWN_VALIDATION_ERROR("nextInChain must be nullptr"); 268 } 269 270 DAWN_TRY(ValidateCompareFunction(descriptor->depthCompare)); 271 DAWN_TRY(ValidateCompareFunction(descriptor->stencilFront.compare)); 272 DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.failOp)); 273 DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.depthFailOp)); 274 DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.passOp)); 275 DAWN_TRY(ValidateCompareFunction(descriptor->stencilBack.compare)); 276 DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.failOp)); 277 DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.depthFailOp)); 278 DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.passOp)); 279 280 const Format* format; 281 DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format)); 282 DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable, 283 "Depth stencil format (%s) is not depth-stencil renderable.", 284 descriptor->format); 285 286 DAWN_INVALID_IF(std::isnan(descriptor->depthBiasSlopeScale) || 287 std::isnan(descriptor->depthBiasClamp), 288 "Either depthBiasSlopeScale (%f) or depthBiasClamp (%f) is NaN.", 289 descriptor->depthBiasSlopeScale, descriptor->depthBiasClamp); 290 291 DAWN_INVALID_IF( 292 !format->HasDepth() && (descriptor->depthCompare != wgpu::CompareFunction::Always || 293 descriptor->depthWriteEnabled), 294 "Depth 
stencil format (%s) doesn't have depth aspect while depthCompare (%s) is " 295 "not %s or depthWriteEnabled (%u) is true.", 296 descriptor->format, descriptor->depthCompare, wgpu::CompareFunction::Always, 297 descriptor->depthWriteEnabled); 298 299 DAWN_INVALID_IF(!format->HasStencil() && StencilTestEnabled(descriptor), 300 "Depth stencil format (%s) doesn't have stencil aspect while stencil " 301 "test or stencil write is enabled.", 302 descriptor->format); 303 304 return {}; 305 } 306 ValidateMultisampleState(const MultisampleState * descriptor)307 MaybeError ValidateMultisampleState(const MultisampleState* descriptor) { 308 DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr."); 309 310 DAWN_INVALID_IF(!IsValidSampleCount(descriptor->count), 311 "Multisample count (%u) is not supported.", descriptor->count); 312 313 DAWN_INVALID_IF(descriptor->alphaToCoverageEnabled && descriptor->count <= 1, 314 "Multisample count (%u) must be > 1 when alphaToCoverage is enabled.", 315 descriptor->count); 316 317 return {}; 318 } 319 ValidateBlendState(DeviceBase * device,const BlendState * descriptor)320 MaybeError ValidateBlendState(DeviceBase* device, const BlendState* descriptor) { 321 DAWN_TRY(ValidateBlendOperation(descriptor->alpha.operation)); 322 DAWN_TRY(ValidateBlendFactor(descriptor->alpha.srcFactor)); 323 DAWN_TRY(ValidateBlendFactor(descriptor->alpha.dstFactor)); 324 DAWN_TRY(ValidateBlendOperation(descriptor->color.operation)); 325 DAWN_TRY(ValidateBlendFactor(descriptor->color.srcFactor)); 326 DAWN_TRY(ValidateBlendFactor(descriptor->color.dstFactor)); 327 return {}; 328 } 329 BlendFactorContainsSrcAlpha(const wgpu::BlendFactor & blendFactor)330 bool BlendFactorContainsSrcAlpha(const wgpu::BlendFactor& blendFactor) { 331 return blendFactor == wgpu::BlendFactor::SrcAlpha || 332 blendFactor == wgpu::BlendFactor::OneMinusSrcAlpha || 333 blendFactor == wgpu::BlendFactor::SrcAlphaSaturated; 334 } 335 ValidateColorTargetState(DeviceBase * 
device,const ColorTargetState * descriptor,bool fragmentWritten,const EntryPointMetadata::FragmentOutputVariableInfo & fragmentOutputVariable)336 MaybeError ValidateColorTargetState( 337 DeviceBase* device, 338 const ColorTargetState* descriptor, 339 bool fragmentWritten, 340 const EntryPointMetadata::FragmentOutputVariableInfo& fragmentOutputVariable) { 341 DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr."); 342 343 if (descriptor->blend) { 344 DAWN_TRY_CONTEXT(ValidateBlendState(device, descriptor->blend), 345 "validating blend state."); 346 } 347 348 DAWN_TRY(ValidateColorWriteMask(descriptor->writeMask)); 349 350 const Format* format; 351 DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format)); 352 DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable, 353 "Color format (%s) is not color renderable.", descriptor->format); 354 355 DAWN_INVALID_IF( 356 descriptor->blend && !(format->GetAspectInfo(Aspect::Color).supportedSampleTypes & 357 SampleTypeBit::Float), 358 "Blending is enabled but color format (%s) is not blendable.", descriptor->format); 359 360 if (fragmentWritten) { 361 DAWN_INVALID_IF(fragmentOutputVariable.baseType != 362 format->GetAspectInfo(Aspect::Color).baseType, 363 "Color format (%s) base type (%s) doesn't match the fragment " 364 "module output type (%s).", 365 descriptor->format, format->GetAspectInfo(Aspect::Color).baseType, 366 fragmentOutputVariable.baseType); 367 368 DAWN_INVALID_IF( 369 fragmentOutputVariable.componentCount < format->componentCount, 370 "The fragment stage has fewer output components (%u) than the color format " 371 "(%s) component count (%u).", 372 fragmentOutputVariable.componentCount, descriptor->format, 373 format->componentCount); 374 375 if (descriptor->blend) { 376 if (fragmentOutputVariable.componentCount < 4u) { 377 // No alpha channel output 378 // Make sure there's no alpha involved in the blending operation 379 DAWN_INVALID_IF( 380 
BlendFactorContainsSrcAlpha(descriptor->blend->color.srcFactor) || 381 BlendFactorContainsSrcAlpha(descriptor->blend->color.dstFactor), 382 "Color blending srcfactor (%s) or dstFactor (%s) is reading alpha " 383 "but it is missing from fragment output.", 384 descriptor->blend->color.srcFactor, descriptor->blend->color.dstFactor); 385 } 386 } 387 } else { 388 DAWN_INVALID_IF( 389 descriptor->writeMask != wgpu::ColorWriteMask::None, 390 "Color target has no corresponding fragment stage output but writeMask (%s) is " 391 "not zero.", 392 descriptor->writeMask); 393 } 394 395 return {}; 396 } 397 ValidateFragmentState(DeviceBase * device,const FragmentState * descriptor,const PipelineLayoutBase * layout)398 MaybeError ValidateFragmentState(DeviceBase* device, 399 const FragmentState* descriptor, 400 const PipelineLayoutBase* layout) { 401 DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr."); 402 403 DAWN_TRY_CONTEXT( 404 ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint, 405 descriptor->constantCount, descriptor->constants, layout, 406 SingleShaderStage::Fragment), 407 "validating fragment stage (module: %s, entryPoint: %s).", descriptor->module, 408 descriptor->entryPoint); 409 410 DAWN_INVALID_IF(descriptor->targetCount > kMaxColorAttachments, 411 "Number of targets (%u) exceeds the maximum (%u).", 412 descriptor->targetCount, kMaxColorAttachments); 413 414 const EntryPointMetadata& fragmentMetadata = 415 descriptor->module->GetEntryPoint(descriptor->entryPoint); 416 for (ColorAttachmentIndex i(uint8_t(0)); 417 i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->targetCount)); ++i) { 418 DAWN_TRY_CONTEXT( 419 ValidateColorTargetState(device, &descriptor->targets[static_cast<uint8_t>(i)], 420 fragmentMetadata.fragmentOutputsWritten[i], 421 fragmentMetadata.fragmentOutputVariables[i]), 422 "validating targets[%u].", static_cast<uint8_t>(i)); 423 } 424 425 return {}; 426 } 427 
        // Validates that the vertex stage's inter-stage outputs exactly match the fragment
        // stage's inputs: same set of locations, and per-location matching base type,
        // component count, interpolation type, and interpolation sampling.
        MaybeError ValidateInterStageMatching(DeviceBase* device,
                                              const VertexState& vertexState,
                                              const FragmentState& fragmentState) {
            const EntryPointMetadata& vertexMetadata =
                vertexState.module->GetEntryPoint(vertexState.entryPoint);
            const EntryPointMetadata& fragmentMetadata =
                fragmentState.module->GetEntryPoint(fragmentState.entryPoint);

            // TODO(dawn:563): Can this message give more details?
            DAWN_INVALID_IF(
                vertexMetadata.usedInterStageVariables != fragmentMetadata.usedInterStageVariables,
                "One or more fragment inputs and vertex outputs are not one-to-one matching");

            // TODO(dawn:802): Validate interpolation types and interpolation sampling types
            for (size_t i : IterateBitSet(vertexMetadata.usedInterStageVariables)) {
                const auto& vertexOutputInfo = vertexMetadata.interStageVariables[i];
                const auto& fragmentInputInfo = fragmentMetadata.interStageVariables[i];
                DAWN_INVALID_IF(
                    vertexOutputInfo.baseType != fragmentInputInfo.baseType,
                    "The base type (%s) of the vertex output at location %u is different from the "
                    "base type (%s) of the fragment input at location %u.",
                    vertexOutputInfo.baseType, i, fragmentInputInfo.baseType, i);

                DAWN_INVALID_IF(
                    vertexOutputInfo.componentCount != fragmentInputInfo.componentCount,
                    "The component count (%u) of the vertex output at location %u is different "
                    "from the component count (%u) of the fragment input at location %u.",
                    vertexOutputInfo.componentCount, i, fragmentInputInfo.componentCount, i);

                DAWN_INVALID_IF(
                    vertexOutputInfo.interpolationType != fragmentInputInfo.interpolationType,
                    "The interpolation type (%s) of the vertex output at location %u is different "
                    "from the interpolation type (%s) of the fragment input at location %u.",
                    vertexOutputInfo.interpolationType, i, fragmentInputInfo.interpolationType, i);

                DAWN_INVALID_IF(
                    vertexOutputInfo.interpolationSampling !=
                        fragmentInputInfo.interpolationSampling,
                    "The interpolation sampling (%s) of the vertex output at location %u is "
                    "different from the interpolation sampling (%s) of the fragment input at "
                    "location %u.",
                    vertexOutputInfo.interpolationSampling, i,
                    fragmentInputInfo.interpolationSampling, i);
            }

            return {};
        }
    }  // anonymous namespace

    // Helper functions

    // Returns the size in bytes of one index of the given format. Undefined is invalid here.
    size_t IndexFormatSize(wgpu::IndexFormat format) {
        switch (format) {
            case wgpu::IndexFormat::Uint16:
                return sizeof(uint16_t);
            case wgpu::IndexFormat::Uint32:
                return sizeof(uint32_t);
            case wgpu::IndexFormat::Undefined:
                break;
        }
        UNREACHABLE();
    }

    // Returns true for the two strip topologies (which require a strip index format).
    bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
        return primitiveTopology == wgpu::PrimitiveTopology::LineStrip ||
               primitiveTopology == wgpu::PrimitiveTopology::TriangleStrip;
    }

    // Top-level validation of a RenderPipelineDescriptor: layout object, vertex state,
    // primitive state, optional depth-stencil state, multisample state, and (when present)
    // the fragment state plus vertex/fragment inter-stage matching. A pipeline must have at
    // least one color target or a depth-stencil target.
    MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
                                                const RenderPipelineDescriptor* descriptor) {
        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");

        if (descriptor->layout != nullptr) {
            DAWN_TRY(device->ValidateObject(descriptor->layout));
        }

        DAWN_TRY_CONTEXT(ValidateVertexState(device, &descriptor->vertex, descriptor->layout),
                         "validating vertex state.");

        DAWN_TRY_CONTEXT(ValidatePrimitiveState(device, &descriptor->primitive),
                         "validating primitive state.");

        if (descriptor->depthStencil) {
            DAWN_TRY_CONTEXT(ValidateDepthStencilState(device, descriptor->depthStencil),
                             "validating depthStencil state.");
        }

        DAWN_TRY_CONTEXT(ValidateMultisampleState(&descriptor->multisample),
                         "validating multisample state.");

        if (descriptor->fragment != nullptr) {
            DAWN_TRY_CONTEXT(
                ValidateFragmentState(device, descriptor->fragment, descriptor->layout),
                "validating fragment state.");

            DAWN_INVALID_IF(descriptor->fragment->targetCount == 0 && !descriptor->depthStencil,
                            "Must have at least one color or depthStencil target.");

            DAWN_TRY(
                ValidateInterStageMatching(device, descriptor->vertex, *(descriptor->fragment)));
        }

        return {};
    }

    // Collects the programmable stages of the descriptor. When there is no fragment stage
    // and the UseDummyFragmentInVertexOnlyPipeline toggle is on, substitutes the device's
    // pre-initialized dummy fragment shader ("fs_empty_main").
    std::vector<StageAndDescriptor> GetRenderStagesAndSetDummyShader(
        DeviceBase* device,
        const RenderPipelineDescriptor* descriptor) {
        std::vector<StageAndDescriptor> stages;
        stages.push_back({SingleShaderStage::Vertex, descriptor->vertex.module,
                          descriptor->vertex.entryPoint, descriptor->vertex.constantCount,
                          descriptor->vertex.constants});
        if (descriptor->fragment != nullptr) {
            stages.push_back({SingleShaderStage::Fragment, descriptor->fragment->module,
                              descriptor->fragment->entryPoint, descriptor->fragment->constantCount,
                              descriptor->fragment->constants});
        } else if (device->IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) {
            InternalPipelineStore* store = device->GetInternalPipelineStore();
            // The dummy fragment shader module should already be initialized
            DAWN_ASSERT(store->dummyFragmentShader != nullptr);
            ShaderModuleBase* dummyFragmentShader = store->dummyFragmentShader.Get();
            stages.push_back(
                {SingleShaderStage::Fragment, dummyFragmentShader, "fs_empty_main", 0, nullptr});
        }
        return stages;
    }

    // Returns true when any front or back stencil setting differs from the no-op defaults
    // (compare Always, all operations Keep), i.e. when stencil testing/writing has effect.
    bool StencilTestEnabled(const DepthStencilState* depthStencil) {
        return depthStencil->stencilBack.compare != wgpu::CompareFunction::Always ||
               depthStencil->stencilBack.failOp != wgpu::StencilOperation::Keep ||
               depthStencil->stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
               depthStencil->stencilBack.passOp != wgpu::StencilOperation::Keep ||
               depthStencil->stencilFront.compare != wgpu::CompareFunction::Always ||
               depthStencil->stencilFront.failOp != wgpu::StencilOperation::Keep ||
               depthStencil->stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
               depthStencil->stencilFront.passOp != wgpu::StencilOperation::Keep;
    }

    // RenderPipelineBase

    // Builds the immutable pipeline state from an already-validated descriptor: per-slot
    // vertex buffer info, per-location attribute info, primitive/multisample state, the
    // depth-stencil state (or no-op defaults when there is no depth-stencil attachment),
    // and a copy of each color target (including its blend state).
    RenderPipelineBase::RenderPipelineBase(DeviceBase* device,
                                           const RenderPipelineDescriptor* descriptor)
        : PipelineBase(device,
                       descriptor->layout,
                       descriptor->label,
                       GetRenderStagesAndSetDummyShader(device, descriptor)),
          mAttachmentState(device->GetOrCreateAttachmentState(descriptor)) {
        mVertexBufferCount = descriptor->vertex.bufferCount;
        const VertexBufferLayout* buffers = descriptor->vertex.buffers;
        for (uint8_t slot = 0; slot < mVertexBufferCount; ++slot) {
            // Buffers with no attributes are not tracked as used slots.
            if (buffers[slot].attributeCount == 0) {
                continue;
            }

            VertexBufferSlot typedSlot(slot);

            mVertexBufferSlotsUsed.set(typedSlot);
            mVertexBufferInfos[typedSlot].arrayStride = buffers[slot].arrayStride;
            mVertexBufferInfos[typedSlot].stepMode = buffers[slot].stepMode;
            mVertexBufferInfos[typedSlot].usedBytesInStride = 0;
            switch (buffers[slot].stepMode) {
                case wgpu::VertexStepMode::Vertex:
                    mVertexBufferSlotsUsedAsVertexBuffer.set(typedSlot);
                    break;
                case wgpu::VertexStepMode::Instance:
                    mVertexBufferSlotsUsedAsInstanceBuffer.set(typedSlot);
                    break;
                default:
                    DAWN_UNREACHABLE();
            }

            for (uint32_t i = 0; i < buffers[slot].attributeCount; ++i) {
                VertexAttributeLocation location = VertexAttributeLocation(
                    static_cast<uint8_t>(buffers[slot].attributes[i].shaderLocation));
                mAttributeLocationsUsed.set(location);
                mAttributeInfos[location].shaderLocation = location;
                mAttributeInfos[location].vertexBufferSlot = typedSlot;
                mAttributeInfos[location].offset = buffers[slot].attributes[i].offset;
                mAttributeInfos[location].format = buffers[slot].attributes[i].format;
                // Compute the access boundary of this attribute by adding attribute format size to
                // attribute offset. Although offset is in uint64_t, such sum must be no larger than
                // maxVertexBufferArrayStride (2048), which is promised by the GPUVertexBufferLayout
                // validation of creating render pipeline. Therefore, calculating in uint16_t will
                // cause no overflow.
                DAWN_ASSERT(buffers[slot].attributes[i].offset <= 2048);
                uint16_t accessBoundary =
                    uint16_t(buffers[slot].attributes[i].offset) +
                    uint16_t(GetVertexFormatInfo(buffers[slot].attributes[i].format).byteSize);
                mVertexBufferInfos[typedSlot].usedBytesInStride =
                    std::max(mVertexBufferInfos[typedSlot].usedBytesInStride, accessBoundary);
            }
        }

        mPrimitive = descriptor->primitive;
        // Depth clamping comes in through an optional chained struct; validation already
        // checked the feature is enabled when it is present.
        const PrimitiveDepthClampingState* clampInfo = nullptr;
        FindInChain(mPrimitive.nextInChain, &clampInfo);
        if (clampInfo) {
            mClampDepth = clampInfo->clampDepth;
        }
        mMultisample = descriptor->multisample;

        if (mAttachmentState->HasDepthStencilAttachment()) {
            mDepthStencil = *descriptor->depthStencil;
            mWritesDepth = mDepthStencil.depthWriteEnabled;
            if (mDepthStencil.stencilWriteMask) {
                // Stencil is only considered written if a non-Keep operation can run on a
                // face that is not culled away.
                if ((mPrimitive.cullMode != wgpu::CullMode::Front &&
                     (mDepthStencil.stencilFront.failOp != wgpu::StencilOperation::Keep ||
                      mDepthStencil.stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
                      mDepthStencil.stencilFront.passOp != wgpu::StencilOperation::Keep)) ||
                    (mPrimitive.cullMode != wgpu::CullMode::Back &&
                     (mDepthStencil.stencilBack.failOp != wgpu::StencilOperation::Keep ||
                      mDepthStencil.stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
                      mDepthStencil.stencilBack.passOp != wgpu::StencilOperation::Keep))) {
                    mWritesStencil = true;
                }
            }
        } else {
            // These default values below are useful for backends to fill information.
            // The values indicate that depth and stencil test are disabled when backends
            // set their own depth stencil states/descriptors according to the values in
            // mDepthStencil.
            mDepthStencil.format = wgpu::TextureFormat::Undefined;
            mDepthStencil.depthWriteEnabled = false;
            mDepthStencil.depthCompare = wgpu::CompareFunction::Always;
            mDepthStencil.stencilBack.compare = wgpu::CompareFunction::Always;
            mDepthStencil.stencilBack.failOp = wgpu::StencilOperation::Keep;
            mDepthStencil.stencilBack.depthFailOp = wgpu::StencilOperation::Keep;
            mDepthStencil.stencilBack.passOp = wgpu::StencilOperation::Keep;
            mDepthStencil.stencilFront.compare = wgpu::CompareFunction::Always;
            mDepthStencil.stencilFront.failOp = wgpu::StencilOperation::Keep;
            mDepthStencil.stencilFront.depthFailOp = wgpu::StencilOperation::Keep;
            mDepthStencil.stencilFront.passOp = wgpu::StencilOperation::Keep;
            mDepthStencil.stencilReadMask = 0xff;
            mDepthStencil.stencilWriteMask = 0xff;
            mDepthStencil.depthBias = 0;
            mDepthStencil.depthBiasSlopeScale = 0.0f;
            mDepthStencil.depthBiasClamp = 0.0f;
        }

        for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
            // Vertex-only render pipelines have no color attachment. For a render pipeline with
            // color attachments, there must be a valid FragmentState.
            ASSERT(descriptor->fragment != nullptr);
            const ColorTargetState* target =
                &descriptor->fragment->targets[static_cast<uint8_t>(i)];
            mTargets[i] = *target;

            // Deep-copy the blend state so mTargets does not point into the descriptor.
            if (target->blend != nullptr) {
                mTargetBlend[i] = *target->blend;
                mTargets[i].blend = &mTargetBlend[i];
            }
        }

        SetContentHash(ComputeContentHash());
        TrackInDevice();
    }

    // Constructor for cache-blueprint pipelines: no descriptor state is captured.
    RenderPipelineBase::RenderPipelineBase(DeviceBase* device) : PipelineBase(device) {
        TrackInDevice();
    }

    // Constructor for error pipelines (used by MakeError).
    RenderPipelineBase::RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
        : PipelineBase(device, tag) {
    }

    RenderPipelineBase::~RenderPipelineBase() = default;

    void RenderPipelineBase::DestroyImpl() {
        if (IsCachedReference()) {
            // Do not uncache the actual cached object if we are a blueprint.
            GetDevice()->UncacheRenderPipeline(this);
        }

        // Remove reference to the attachment state so that we don't have lingering references to
        // it preventing it from being uncached in the device.
        mAttachmentState = nullptr;
    }

    // static
    RenderPipelineBase* RenderPipelineBase::MakeError(DeviceBase* device) {
        // Error pipelines must never be initialized; Initialize() asserts if reached.
        class ErrorRenderPipeline final : public RenderPipelineBase {
          public:
            ErrorRenderPipeline(DeviceBase* device)
                : RenderPipelineBase(device, ObjectBase::kError) {
            }

            MaybeError Initialize() override {
                UNREACHABLE();
                return {};
            }
        };

        return new ErrorRenderPipeline(device);
    }

    ObjectType RenderPipelineBase::GetType() const {
        return ObjectType::RenderPipeline;
    }

    // Accessors below assert !IsError(): error pipelines carry no captured state.

    const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
    RenderPipelineBase::GetAttributeLocationsUsed() const {
        ASSERT(!IsError());
        return mAttributeLocationsUsed;
    }

    const VertexAttributeInfo& RenderPipelineBase::GetAttribute(
        VertexAttributeLocation location) const {
        ASSERT(!IsError());
        ASSERT(mAttributeLocationsUsed[location]);
        return mAttributeInfos[location];
    }

    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
    RenderPipelineBase::GetVertexBufferSlotsUsed() const {
        ASSERT(!IsError());
        return mVertexBufferSlotsUsed;
    }

    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
    RenderPipelineBase::GetVertexBufferSlotsUsedAsVertexBuffer() const {
        ASSERT(!IsError());
        return mVertexBufferSlotsUsedAsVertexBuffer;
    }

    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
    RenderPipelineBase::GetVertexBufferSlotsUsedAsInstanceBuffer() const {
        ASSERT(!IsError());
        return mVertexBufferSlotsUsedAsInstanceBuffer;
    }

    const VertexBufferInfo& RenderPipelineBase::GetVertexBuffer(VertexBufferSlot slot) const {
        ASSERT(!IsError());
        ASSERT(mVertexBufferSlotsUsed[slot]);
        return mVertexBufferInfos[slot];
    }

    uint32_t RenderPipelineBase::GetVertexBufferCount() const {
        ASSERT(!IsError());
        return mVertexBufferCount;
    }

    const ColorTargetState* RenderPipelineBase::GetColorTargetState(
        ColorAttachmentIndex attachmentSlot) const {
        ASSERT(!IsError());
        ASSERT(attachmentSlot < mTargets.size());
        return &mTargets[attachmentSlot];
    }

    const DepthStencilState* RenderPipelineBase::GetDepthStencilState() const {
        ASSERT(!IsError());
        return &mDepthStencil;
    }

    wgpu::PrimitiveTopology RenderPipelineBase::GetPrimitiveTopology() const {
        ASSERT(!IsError());
        return mPrimitive.topology;
    }

    wgpu::IndexFormat RenderPipelineBase::GetStripIndexFormat() const {
        ASSERT(!IsError());
        return mPrimitive.stripIndexFormat;
    }

    wgpu::CullMode RenderPipelineBase::GetCullMode() const {
        ASSERT(!IsError());
        return mPrimitive.cullMode;
    }

    wgpu::FrontFace RenderPipelineBase::GetFrontFace() const {
        ASSERT(!IsError());
        return mPrimitive.frontFace;
    }

    bool RenderPipelineBase::IsDepthBiasEnabled() const {
        ASSERT(!IsError());
        return mDepthStencil.depthBias != 0 || mDepthStencil.depthBiasSlopeScale != 0;
    }

    int32_t RenderPipelineBase::GetDepthBias() const {
        ASSERT(!IsError());
        return mDepthStencil.depthBias;
    }

    float RenderPipelineBase::GetDepthBiasSlopeScale() const {
        ASSERT(!IsError());
        return mDepthStencil.depthBiasSlopeScale;
    }

    float
RenderPipelineBase::GetDepthBiasClamp() const { 816 ASSERT(!IsError()); 817 return mDepthStencil.depthBiasClamp; 818 } 819 ShouldClampDepth() const820 bool RenderPipelineBase::ShouldClampDepth() const { 821 ASSERT(!IsError()); 822 return mClampDepth; 823 } 824 825 ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const826 RenderPipelineBase::GetColorAttachmentsMask() const { 827 ASSERT(!IsError()); 828 return mAttachmentState->GetColorAttachmentsMask(); 829 } 830 HasDepthStencilAttachment() const831 bool RenderPipelineBase::HasDepthStencilAttachment() const { 832 ASSERT(!IsError()); 833 return mAttachmentState->HasDepthStencilAttachment(); 834 } 835 GetColorAttachmentFormat(ColorAttachmentIndex attachment) const836 wgpu::TextureFormat RenderPipelineBase::GetColorAttachmentFormat( 837 ColorAttachmentIndex attachment) const { 838 ASSERT(!IsError()); 839 return mTargets[attachment].format; 840 } 841 GetDepthStencilFormat() const842 wgpu::TextureFormat RenderPipelineBase::GetDepthStencilFormat() const { 843 ASSERT(!IsError()); 844 ASSERT(mAttachmentState->HasDepthStencilAttachment()); 845 return mDepthStencil.format; 846 } 847 GetSampleCount() const848 uint32_t RenderPipelineBase::GetSampleCount() const { 849 ASSERT(!IsError()); 850 return mAttachmentState->GetSampleCount(); 851 } 852 GetSampleMask() const853 uint32_t RenderPipelineBase::GetSampleMask() const { 854 ASSERT(!IsError()); 855 return mMultisample.mask; 856 } 857 IsAlphaToCoverageEnabled() const858 bool RenderPipelineBase::IsAlphaToCoverageEnabled() const { 859 ASSERT(!IsError()); 860 return mMultisample.alphaToCoverageEnabled; 861 } 862 GetAttachmentState() const863 const AttachmentState* RenderPipelineBase::GetAttachmentState() const { 864 ASSERT(!IsError()); 865 866 return mAttachmentState.Get(); 867 } 868 WritesDepth() const869 bool RenderPipelineBase::WritesDepth() const { 870 ASSERT(!IsError()); 871 872 return mWritesDepth; 873 } 874 WritesStencil() const875 bool 
RenderPipelineBase::WritesStencil() const { 876 ASSERT(!IsError()); 877 878 return mWritesStencil; 879 } 880 ComputeContentHash()881 size_t RenderPipelineBase::ComputeContentHash() { 882 ObjectContentHasher recorder; 883 884 // Record modules and layout 885 recorder.Record(PipelineBase::ComputeContentHash()); 886 887 // Hierarchically record the attachment state. 888 // It contains the attachments set, texture formats, and sample count. 889 recorder.Record(mAttachmentState->GetContentHash()); 890 891 // Record attachments 892 for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) { 893 const ColorTargetState& desc = *GetColorTargetState(i); 894 recorder.Record(desc.writeMask); 895 if (desc.blend != nullptr) { 896 recorder.Record(desc.blend->color.operation, desc.blend->color.srcFactor, 897 desc.blend->color.dstFactor); 898 recorder.Record(desc.blend->alpha.operation, desc.blend->alpha.srcFactor, 899 desc.blend->alpha.dstFactor); 900 } 901 } 902 903 if (mAttachmentState->HasDepthStencilAttachment()) { 904 const DepthStencilState& desc = mDepthStencil; 905 recorder.Record(desc.depthWriteEnabled, desc.depthCompare); 906 recorder.Record(desc.stencilReadMask, desc.stencilWriteMask); 907 recorder.Record(desc.stencilFront.compare, desc.stencilFront.failOp, 908 desc.stencilFront.depthFailOp, desc.stencilFront.passOp); 909 recorder.Record(desc.stencilBack.compare, desc.stencilBack.failOp, 910 desc.stencilBack.depthFailOp, desc.stencilBack.passOp); 911 recorder.Record(desc.depthBias, desc.depthBiasSlopeScale, desc.depthBiasClamp); 912 } 913 914 // Record vertex state 915 recorder.Record(mAttributeLocationsUsed); 916 for (VertexAttributeLocation location : IterateBitSet(mAttributeLocationsUsed)) { 917 const VertexAttributeInfo& desc = GetAttribute(location); 918 recorder.Record(desc.shaderLocation, desc.vertexBufferSlot, desc.offset, desc.format); 919 } 920 921 recorder.Record(mVertexBufferSlotsUsed); 922 for (VertexBufferSlot slot : 
IterateBitSet(mVertexBufferSlotsUsed)) { 923 const VertexBufferInfo& desc = GetVertexBuffer(slot); 924 recorder.Record(desc.arrayStride, desc.stepMode); 925 } 926 927 // Record primitive state 928 recorder.Record(mPrimitive.topology, mPrimitive.stripIndexFormat, mPrimitive.frontFace, 929 mPrimitive.cullMode, mClampDepth); 930 931 // Record multisample state 932 // Sample count hashed as part of the attachment state 933 recorder.Record(mMultisample.mask, mMultisample.alphaToCoverageEnabled); 934 935 return recorder.GetContentHash(); 936 } 937 operator ()(const RenderPipelineBase * a,const RenderPipelineBase * b) const938 bool RenderPipelineBase::EqualityFunc::operator()(const RenderPipelineBase* a, 939 const RenderPipelineBase* b) const { 940 // Check the layout and shader stages. 941 if (!PipelineBase::EqualForCache(a, b)) { 942 return false; 943 } 944 945 // Check the attachment state. 946 // It contains the attachments set, texture formats, and sample count. 947 if (a->mAttachmentState.Get() != b->mAttachmentState.Get()) { 948 return false; 949 } 950 951 if (a->mAttachmentState.Get() != nullptr) { 952 for (ColorAttachmentIndex i : 953 IterateBitSet(a->mAttachmentState->GetColorAttachmentsMask())) { 954 const ColorTargetState& descA = *a->GetColorTargetState(i); 955 const ColorTargetState& descB = *b->GetColorTargetState(i); 956 if (descA.writeMask != descB.writeMask) { 957 return false; 958 } 959 if ((descA.blend == nullptr) != (descB.blend == nullptr)) { 960 return false; 961 } 962 if (descA.blend != nullptr) { 963 if (descA.blend->color.operation != descB.blend->color.operation || 964 descA.blend->color.srcFactor != descB.blend->color.srcFactor || 965 descA.blend->color.dstFactor != descB.blend->color.dstFactor) { 966 return false; 967 } 968 if (descA.blend->alpha.operation != descB.blend->alpha.operation || 969 descA.blend->alpha.srcFactor != descB.blend->alpha.srcFactor || 970 descA.blend->alpha.dstFactor != descB.blend->alpha.dstFactor) { 971 return false; 
972 } 973 } 974 } 975 976 // Check depth/stencil state 977 if (a->mAttachmentState->HasDepthStencilAttachment()) { 978 const DepthStencilState& stateA = a->mDepthStencil; 979 const DepthStencilState& stateB = b->mDepthStencil; 980 981 ASSERT(!std::isnan(stateA.depthBiasSlopeScale)); 982 ASSERT(!std::isnan(stateB.depthBiasSlopeScale)); 983 ASSERT(!std::isnan(stateA.depthBiasClamp)); 984 ASSERT(!std::isnan(stateB.depthBiasClamp)); 985 986 if (stateA.depthWriteEnabled != stateB.depthWriteEnabled || 987 stateA.depthCompare != stateB.depthCompare || 988 stateA.depthBias != stateB.depthBias || 989 stateA.depthBiasSlopeScale != stateB.depthBiasSlopeScale || 990 stateA.depthBiasClamp != stateB.depthBiasClamp) { 991 return false; 992 } 993 if (stateA.stencilFront.compare != stateB.stencilFront.compare || 994 stateA.stencilFront.failOp != stateB.stencilFront.failOp || 995 stateA.stencilFront.depthFailOp != stateB.stencilFront.depthFailOp || 996 stateA.stencilFront.passOp != stateB.stencilFront.passOp) { 997 return false; 998 } 999 if (stateA.stencilBack.compare != stateB.stencilBack.compare || 1000 stateA.stencilBack.failOp != stateB.stencilBack.failOp || 1001 stateA.stencilBack.depthFailOp != stateB.stencilBack.depthFailOp || 1002 stateA.stencilBack.passOp != stateB.stencilBack.passOp) { 1003 return false; 1004 } 1005 if (stateA.stencilReadMask != stateB.stencilReadMask || 1006 stateA.stencilWriteMask != stateB.stencilWriteMask) { 1007 return false; 1008 } 1009 } 1010 } 1011 1012 // Check vertex state 1013 if (a->mAttributeLocationsUsed != b->mAttributeLocationsUsed) { 1014 return false; 1015 } 1016 1017 for (VertexAttributeLocation loc : IterateBitSet(a->mAttributeLocationsUsed)) { 1018 const VertexAttributeInfo& descA = a->GetAttribute(loc); 1019 const VertexAttributeInfo& descB = b->GetAttribute(loc); 1020 if (descA.shaderLocation != descB.shaderLocation || 1021 descA.vertexBufferSlot != descB.vertexBufferSlot || descA.offset != descB.offset || 1022 descA.format != 
descB.format) { 1023 return false; 1024 } 1025 } 1026 1027 if (a->mVertexBufferSlotsUsed != b->mVertexBufferSlotsUsed) { 1028 return false; 1029 } 1030 1031 for (VertexBufferSlot slot : IterateBitSet(a->mVertexBufferSlotsUsed)) { 1032 const VertexBufferInfo& descA = a->GetVertexBuffer(slot); 1033 const VertexBufferInfo& descB = b->GetVertexBuffer(slot); 1034 if (descA.arrayStride != descB.arrayStride || descA.stepMode != descB.stepMode) { 1035 return false; 1036 } 1037 } 1038 1039 // Check primitive state 1040 { 1041 const PrimitiveState& stateA = a->mPrimitive; 1042 const PrimitiveState& stateB = b->mPrimitive; 1043 if (stateA.topology != stateB.topology || 1044 stateA.stripIndexFormat != stateB.stripIndexFormat || 1045 stateA.frontFace != stateB.frontFace || stateA.cullMode != stateB.cullMode || 1046 a->mClampDepth != b->mClampDepth) { 1047 return false; 1048 } 1049 } 1050 1051 // Check multisample state 1052 { 1053 const MultisampleState& stateA = a->mMultisample; 1054 const MultisampleState& stateB = b->mMultisample; 1055 // Sample count already checked as part of the attachment state. 1056 if (stateA.mask != stateB.mask || 1057 stateA.alphaToCoverageEnabled != stateB.alphaToCoverageEnabled) { 1058 return false; 1059 } 1060 } 1061 1062 return true; 1063 } 1064 1065 } // namespace dawn_native 1066