// Copyright 2017 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "dawn_native/PipelineLayout.h"

#include "common/Assert.h"
#include "common/BitSetIterator.h"
#include "common/ityp_stack_vec.h"
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/Device.h"
#include "dawn_native/ObjectContentHasher.h"
#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/ShaderModule.h"

namespace dawn_native {

    MaybeError ValidatePipelineLayoutDescriptor(
        DeviceBase* device,
        const PipelineLayoutDescriptor* descriptor,
        PipelineCompatibilityToken pipelineCompatibilityToken) {
        if (descriptor->nextInChain != nullptr) {
            return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
        }

        if (descriptor->bindGroupLayoutCount > kMaxBindGroups) {
            return DAWN_VALIDATION_ERROR("too many bind group layouts");
        }

        BindingCounts bindingCounts = {};
        for (uint32_t i = 0; i < descriptor->bindGroupLayoutCount; ++i) {
            DAWN_TRY(device->ValidateObject(descriptor->bindGroupLayouts[i]));
            if (descriptor->bindGroupLayouts[i]->GetPipelineCompatibilityToken() !=
                pipelineCompatibilityToken) {
                return DAWN_VALIDATION_ERROR(
                    "cannot create a pipeline layout using a bind group layout that was created "
                    "as part of a pipeline's default layout");
            }
            AccumulateBindingCounts(&bindingCounts,
                                    descriptor->bindGroupLayouts[i]->GetBindingCountInfo());
        }

        DAWN_TRY(ValidateBindingCounts(bindingCounts));
        return {};
    }

    // PipelineLayoutBase

    PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
                                           const PipelineLayoutDescriptor* descriptor,
                                           ApiObjectBase::UntrackedByDeviceTag tag)
        : ApiObjectBase(device, descriptor->label) {
        ASSERT(descriptor->bindGroupLayoutCount <= kMaxBindGroups);
        for (BindGroupIndex group(0); group < BindGroupIndex(descriptor->bindGroupLayoutCount);
             ++group) {
            mBindGroupLayouts[group] = descriptor->bindGroupLayouts[static_cast<uint32_t>(group)];
            mMask.set(group);
        }
    }

    PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
                                           const PipelineLayoutDescriptor* descriptor)
        : PipelineLayoutBase(device, descriptor, kUntrackedByDevice) {
        TrackInDevice();
    }

    PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device)
        : ApiObjectBase(device, kLabelNotImplemented) {
        TrackInDevice();
    }

    PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
        : ApiObjectBase(device, tag) {
    }

    PipelineLayoutBase::~PipelineLayoutBase() = default;

    void PipelineLayoutBase::DestroyImpl() {
        if (IsCachedReference()) {
            // Do not uncache the actual cached object if we are a blueprint.
            GetDevice()->UncachePipelineLayout(this);
        }
    }

    // static
    PipelineLayoutBase* PipelineLayoutBase::MakeError(DeviceBase* device) {
        return new PipelineLayoutBase(device, ObjectBase::kError);
    }

    // static
    ResultOrError<Ref<PipelineLayoutBase>> PipelineLayoutBase::CreateDefault(
        DeviceBase* device,
        std::vector<StageAndDescriptor> stages) {
        using EntryMap = std::map<BindingNumber, BindGroupLayoutEntry>;

        // Merges two entries at the same location, if they are allowed to be merged.
        auto MergeEntries = [](BindGroupLayoutEntry* modifiedEntry,
                               const BindGroupLayoutEntry& mergedEntry) -> MaybeError {
            // Visibility is excluded because we take the OR across stages.
            bool compatible =
                modifiedEntry->binding == mergedEntry.binding &&
                modifiedEntry->buffer.type == mergedEntry.buffer.type &&
                modifiedEntry->sampler.type == mergedEntry.sampler.type &&
                // Compatibility between these sample types is checked below.
                (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) ==
                    (mergedEntry.texture.sampleType != wgpu::TextureSampleType::Undefined) &&
                modifiedEntry->storageTexture.access == mergedEntry.storageTexture.access;

            // Minimum buffer binding size excluded because we take the maximum seen across stages.
            if (modifiedEntry->buffer.type != wgpu::BufferBindingType::Undefined) {
                compatible = compatible && modifiedEntry->buffer.hasDynamicOffset ==
                                               mergedEntry.buffer.hasDynamicOffset;
            }

            if (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) {
                // Sample types are compatible if they are exactly equal,
                // or if the |modifiedEntry| is Float and the |mergedEntry| is UnfilterableFloat.
                // Note that the |mergedEntry| never has type Float. Texture bindings all start
                // as UnfilterableFloat and are promoted to Float if they are statically used with
                // a sampler.
                ASSERT(mergedEntry.texture.sampleType != wgpu::TextureSampleType::Float);
                bool compatibleSampleTypes =
                    modifiedEntry->texture.sampleType == mergedEntry.texture.sampleType ||
                    (modifiedEntry->texture.sampleType == wgpu::TextureSampleType::Float &&
                     mergedEntry.texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat);
                compatible =
                    compatible && compatibleSampleTypes &&
                    modifiedEntry->texture.viewDimension == mergedEntry.texture.viewDimension &&
                    modifiedEntry->texture.multisampled == mergedEntry.texture.multisampled;
            }

            if (modifiedEntry->storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
                compatible =
                    compatible &&
                    modifiedEntry->storageTexture.format == mergedEntry.storageTexture.format &&
                    modifiedEntry->storageTexture.viewDimension ==
                        mergedEntry.storageTexture.viewDimension;
            }

            // Check if any properties are incompatible with existing entry
            // If compatible, we will merge some properties
            if (!compatible) {
                return DAWN_VALIDATION_ERROR(
                    "Duplicate binding in default pipeline layout initialization "
                    "not compatible with previous declaration");
            }

            // Use the max |minBufferBindingSize| we find.
            modifiedEntry->buffer.minBindingSize =
                std::max(modifiedEntry->buffer.minBindingSize, mergedEntry.buffer.minBindingSize);

            // Use the OR of all the stages at which we find this binding.
            modifiedEntry->visibility |= mergedEntry.visibility;

            return {};
        };

        // Does the trivial conversions from a ShaderBindingInfo to a BindGroupLayoutEntry
        auto ConvertMetadataToEntry =
            [](const ShaderBindingInfo& shaderBinding,
               const ExternalTextureBindingLayout* externalTextureBindingEntry)
            -> BindGroupLayoutEntry {
            BindGroupLayoutEntry entry = {};
            switch (shaderBinding.bindingType) {
                case BindingInfoType::Buffer:
                    entry.buffer.type = shaderBinding.buffer.type;
                    entry.buffer.hasDynamicOffset = shaderBinding.buffer.hasDynamicOffset;
                    entry.buffer.minBindingSize = shaderBinding.buffer.minBindingSize;
                    break;
                case BindingInfoType::Sampler:
                    if (shaderBinding.sampler.isComparison) {
                        entry.sampler.type = wgpu::SamplerBindingType::Comparison;
                    } else {
                        entry.sampler.type = wgpu::SamplerBindingType::Filtering;
                    }
                    break;
                case BindingInfoType::Texture:
                    switch (shaderBinding.texture.compatibleSampleTypes) {
                        case SampleTypeBit::Depth:
                            entry.texture.sampleType = wgpu::TextureSampleType::Depth;
                            break;
                        case SampleTypeBit::Sint:
                            entry.texture.sampleType = wgpu::TextureSampleType::Sint;
                            break;
                        case SampleTypeBit::Uint:
                            entry.texture.sampleType = wgpu::TextureSampleType::Uint;
                            break;
                        case SampleTypeBit::Float:
                        case SampleTypeBit::UnfilterableFloat:
                        case SampleTypeBit::None:
                            UNREACHABLE();
                            break;
                        default:
                            if (shaderBinding.texture.compatibleSampleTypes ==
                                (SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat)) {
                                // Default to UnfilterableFloat. It will be promoted to Float if it
                                // is used with a sampler.
                                entry.texture.sampleType =
                                    wgpu::TextureSampleType::UnfilterableFloat;
                            } else {
                                UNREACHABLE();
                            }
                    }
                    entry.texture.viewDimension = shaderBinding.texture.viewDimension;
                    entry.texture.multisampled = shaderBinding.texture.multisampled;
                    break;
                case BindingInfoType::StorageTexture:
                    entry.storageTexture.access = shaderBinding.storageTexture.access;
                    entry.storageTexture.format = shaderBinding.storageTexture.format;
                    entry.storageTexture.viewDimension = shaderBinding.storageTexture.viewDimension;
                    break;
                case BindingInfoType::ExternalTexture:
                    entry.nextInChain = externalTextureBindingEntry;
                    break;
            }
            return entry;
        };

        PipelineCompatibilityToken pipelineCompatibilityToken =
            device->GetNextPipelineCompatibilityToken();
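
        // Note: the compatibility token ties the BGLs created below to this default pipeline
        // layout. ValidatePipelineLayoutDescriptor (above) rejects any bind group layout whose
        // token does not match the token it is given, which keeps default-layout BGLs from being
        // reused in an explicitly created pipeline layout.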

        // Creates the BGL from the entries for a stage, checking it is valid.
        auto CreateBGL = [](DeviceBase* device, const EntryMap& entries,
                            PipelineCompatibilityToken pipelineCompatibilityToken)
            -> ResultOrError<Ref<BindGroupLayoutBase>> {
            std::vector<BindGroupLayoutEntry> entryVec;
            entryVec.reserve(entries.size());
            for (auto& it : entries) {
                entryVec.push_back(it.second);
            }

            BindGroupLayoutDescriptor desc = {};
            desc.entries = entryVec.data();
            desc.entryCount = entryVec.size();

            if (device->IsValidationEnabled()) {
                DAWN_TRY_CONTEXT(ValidateBindGroupLayoutDescriptor(device, &desc), "validating %s",
                                 &desc);
            }
            return device->GetOrCreateBindGroupLayout(&desc, pipelineCompatibilityToken);
        };

        ASSERT(!stages.empty());

        // Data which BindGroupLayoutDescriptor will point to for creation
        ityp::array<BindGroupIndex, std::map<BindingNumber, BindGroupLayoutEntry>, kMaxBindGroups>
            entryData = {};

        // External texture binding layouts are chained structs that are set as a pointer within
        // the bind group layout entry. We declare an entry here so that it can be used when needed
        // in each BindGroupLayoutEntry and so it can stay alive until the call to
        // GetOrCreateBindGroupLayout. Because ExternalTextureBindingLayout is an empty struct,
        // there's no issue with using the same struct multiple times.
        ExternalTextureBindingLayout externalTextureBindingLayout;

        // Loops over all the reflected BindGroupLayoutEntries from shaders.
        for (const StageAndDescriptor& stage : stages) {
            const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);

            for (BindGroupIndex group(0); group < metadata.bindings.size(); ++group) {
                for (const auto& bindingIt : metadata.bindings[group]) {
                    BindingNumber bindingNumber = bindingIt.first;
                    const ShaderBindingInfo& shaderBinding = bindingIt.second;

                    // Create the BindGroupLayoutEntry
                    BindGroupLayoutEntry entry =
                        ConvertMetadataToEntry(shaderBinding, &externalTextureBindingLayout);
                    entry.binding = static_cast<uint32_t>(bindingNumber);
                    entry.visibility = StageBit(stage.shaderStage);

                    // Add it to our map of all entries, if there is an existing entry, then we
                    // need to merge, if we can.
                    const auto& insertion = entryData[group].insert({bindingNumber, entry});
                    if (!insertion.second) {
                        DAWN_TRY(MergeEntries(&insertion.first->second, entry));
                    }
                }
            }

            // Promote any Unfilterable textures used with a sampler to Filtering.
            for (const EntryPointMetadata::SamplerTexturePair& pair :
                 metadata.samplerTexturePairs) {
                BindGroupLayoutEntry* entry = &entryData[pair.texture.group][pair.texture.binding];
                if (entry->texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat) {
                    entry->texture.sampleType = wgpu::TextureSampleType::Float;
                }
            }
        }

        // Create the bind group layouts. We need to keep track of the last non-empty BGL because
        // Dawn doesn't yet know that an empty BGL and a null BGL are the same thing.
        // TODO(cwallez@chromium.org): remove this when Dawn knows that empty and null BGL are the
        // same.
        BindGroupIndex pipelineBGLCount = BindGroupIndex(0);
        ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups> bindGroupLayouts = {};
        for (BindGroupIndex group(0); group < kMaxBindGroupsTyped; ++group) {
            DAWN_TRY_ASSIGN(bindGroupLayouts[group],
                            CreateBGL(device, entryData[group], pipelineCompatibilityToken));
            if (entryData[group].size() != 0) {
                pipelineBGLCount = group + BindGroupIndex(1);
            }
        }

        // Create the deduced pipeline layout and check that it is valid.
        ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bgls = {};
        for (BindGroupIndex group(0); group < pipelineBGLCount; ++group) {
            bgls[group] = bindGroupLayouts[group].Get();
        }

        PipelineLayoutDescriptor desc = {};
        desc.bindGroupLayouts = bgls.data();
        desc.bindGroupLayoutCount = static_cast<uint32_t>(pipelineBGLCount);

        DAWN_TRY(ValidatePipelineLayoutDescriptor(device, &desc, pipelineCompatibilityToken));

        Ref<PipelineLayoutBase> result;
        DAWN_TRY_ASSIGN(result, device->GetOrCreatePipelineLayout(&desc));
        ASSERT(!result->IsError());

        // Sanity check in debug that the pipeline layout is compatible with the current
        // pipeline.
        for (const StageAndDescriptor& stage : stages) {
            const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
            ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get())
                       .IsSuccess());
        }

        return std::move(result);
    }

    ObjectType PipelineLayoutBase::GetType() const {
        return ObjectType::PipelineLayout;
    }

    const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) const {
        ASSERT(!IsError());
        ASSERT(group < kMaxBindGroupsTyped);
        ASSERT(mMask[group]);
        const BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
        ASSERT(bgl != nullptr);
        return bgl;
    }

    BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) {
        ASSERT(!IsError());
        ASSERT(group < kMaxBindGroupsTyped);
        ASSERT(mMask[group]);
        BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
        ASSERT(bgl != nullptr);
        return bgl;
    }

    const BindGroupLayoutMask& PipelineLayoutBase::GetBindGroupLayoutsMask() const {
        ASSERT(!IsError());
        return mMask;
    }

    BindGroupLayoutMask PipelineLayoutBase::InheritedGroupsMask(
        const PipelineLayoutBase* other) const {
        ASSERT(!IsError());
        return {(1 << static_cast<uint32_t>(GroupsInheritUpTo(other))) - 1u};
    }

    BindGroupIndex PipelineLayoutBase::GroupsInheritUpTo(const PipelineLayoutBase* other) const {
        ASSERT(!IsError());

        for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
            if (!mMask[i] || mBindGroupLayouts[i].Get() != other->mBindGroupLayouts[i].Get()) {
                return i;
            }
        }
        return kMaxBindGroupsTyped;
    }

    size_t PipelineLayoutBase::ComputeContentHash() {
        ObjectContentHasher recorder;
        recorder.Record(mMask);

        for (BindGroupIndex group : IterateBitSet(mMask)) {
            recorder.Record(GetBindGroupLayout(group)->GetContentHash());
        }

        return recorder.GetContentHash();
    }
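
    // Equality for the device cache: two pipeline layouts are the same when they use the same set
    // of groups and the same BindGroupLayout objects. The BGLs are themselves deduplicated by the
    // device, so comparing them by pointer is sufficient here.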
    bool PipelineLayoutBase::EqualityFunc::operator()(const PipelineLayoutBase* a,
                                                      const PipelineLayoutBase* b) const {
        if (a->mMask != b->mMask) {
            return false;
        }

        for (BindGroupIndex group : IterateBitSet(a->mMask)) {
            if (a->GetBindGroupLayout(group) != b->GetBindGroupLayout(group)) {
                return false;
            }
        }

        return true;
    }

}  // namespace dawn_native