// Copyright 2019 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "dawn_native/CommandValidation.h"

#include "common/BitSetIterator.h"
#include "dawn_native/BindGroup.h"
#include "dawn_native/Buffer.h"
#include "dawn_native/CommandBufferStateTracker.h"
#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
#include "dawn_native/PassResourceUsage.h"
#include "dawn_native/QuerySet.h"
#include "dawn_native/RenderBundle.h"
#include "dawn_native/RenderPipeline.h"
#include "dawn_native/ValidationUtils_autogen.h"

namespace dawn_native {

    // Performs validation of the "synchronization scope" rules of WebGPU.
    MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& scope) {
        // Buffers can only be used as single-write or multiple read.
        for (size_t i = 0; i < scope.bufferUsages.size(); ++i) {
            const wgpu::BufferUsage usage = scope.bufferUsages[i];
            bool readOnly = IsSubset(usage, kReadOnlyBufferUsages);
            bool singleUse = wgpu::HasZeroOrOneBits(usage);

            DAWN_INVALID_IF(!readOnly && !singleUse,
                            "%s usage (%s) includes writable usage and another usage in the same "
                            "synchronization scope.",
                            scope.buffers[i], usage);
        }
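
        // For example (illustrative): a buffer used only as Storage in a scope passes (single
        // use), and Vertex | Uniform passes (all read-only usages), but Storage | Vertex fails
        // the check above because it combines a writable usage with another usage.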

        // Check that every single subresource is used as either a single-write usage or a
        // combination of readonly usages.
        for (size_t i = 0; i < scope.textureUsages.size(); ++i) {
            const TextureSubresourceUsage& textureUsage = scope.textureUsages[i];
            MaybeError error = {};
            textureUsage.Iterate([&](const SubresourceRange&, const wgpu::TextureUsage& usage) {
                bool readOnly = IsSubset(usage, kReadOnlyTextureUsages);
                bool singleUse = wgpu::HasZeroOrOneBits(usage);
                if (!readOnly && !singleUse && !error.IsError()) {
                    error = DAWN_FORMAT_VALIDATION_ERROR(
                        "%s usage (%s) includes writable usage and another usage in the same "
                        "synchronization scope.",
                        scope.textures[i], usage);
                }
            });
            DAWN_TRY(std::move(error));
        }
        return {};
    }

    MaybeError ValidateTimestampQuery(QuerySetBase* querySet, uint32_t queryIndex) {
        DAWN_INVALID_IF(querySet->GetQueryType() != wgpu::QueryType::Timestamp,
                        "The type of %s is not %s.", querySet, wgpu::QueryType::Timestamp);

        DAWN_INVALID_IF(queryIndex >= querySet->GetQueryCount(),
                        "Query index (%u) exceeds the number of queries (%u) in %s.", queryIndex,
                        querySet->GetQueryCount(), querySet);

        return {};
    }

    MaybeError ValidateWriteBuffer(const DeviceBase* device,
                                   const BufferBase* buffer,
                                   uint64_t bufferOffset,
                                   uint64_t size) {
        DAWN_TRY(device->ValidateObject(buffer));

        DAWN_INVALID_IF(bufferOffset % 4 != 0, "BufferOffset (%u) is not a multiple of 4.",
                        bufferOffset);

        DAWN_INVALID_IF(size % 4 != 0, "Size (%u) is not a multiple of 4.", size);

        uint64_t bufferSize = buffer->GetSize();
        DAWN_INVALID_IF(bufferOffset > bufferSize || size > (bufferSize - bufferOffset),
                        "Write range (bufferOffset: %u, size: %u) does not fit in %s size (%u).",
                        bufferOffset, size, buffer, bufferSize);

        DAWN_INVALID_IF(!(buffer->GetUsage() & wgpu::BufferUsage::CopyDst),
                        "%s usage (%s) does not include %s.", buffer, buffer->GetUsage(),
                        wgpu::BufferUsage::CopyDst);

        return {};
    }

    bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length) {
        uint32_t maxStart = std::max(startA, startB);
        uint32_t minStart = std::min(startA, startB);
        return static_cast<uint64_t>(minStart) + static_cast<uint64_t>(length) >
               static_cast<uint64_t>(maxStart);
    }

    template <typename A, typename B>
    DAWN_FORCE_INLINE uint64_t Safe32x32(A a, B b) {
        static_assert(std::is_same<A, uint32_t>::value, "'a' must be uint32_t");
        static_assert(std::is_same<B, uint32_t>::value, "'b' must be uint32_t");
        return uint64_t(a) * uint64_t(b);
    }

    ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
                                                       const Extent3D& copySize,
                                                       uint32_t bytesPerRow,
                                                       uint32_t rowsPerImage) {
        ASSERT(copySize.width % blockInfo.width == 0);
        ASSERT(copySize.height % blockInfo.height == 0);
        uint32_t widthInBlocks = copySize.width / blockInfo.width;
        uint32_t heightInBlocks = copySize.height / blockInfo.height;
        uint64_t bytesInLastRow = Safe32x32(widthInBlocks, blockInfo.byteSize);

        if (copySize.depthOrArrayLayers == 0) {
            return 0;
        }
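
        // Worked example of the computation below (illustrative values, assuming an RGBA8
        // texture so blocks are 1x1 texels of 4 bytes): copying 8x8x2 texels with
        // bytesPerRow = 256 and rowsPerImage = 8 gives bytesInLastRow = 32,
        // bytesPerImage = 256 * 8 = 2048, and
        // requiredBytesInCopy = 2048 * (2 - 1) + 256 * (8 - 1) + 32 = 3872.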

        // Check for potential overflows for the rest of the computations. We have the following
        // inequalities:
        //
        //   bytesInLastRow <= bytesPerRow
        //   heightInBlocks <= rowsPerImage
        //
        // So:
        //
        //   bytesInLastImage  = bytesPerRow * (heightInBlocks - 1) + bytesInLastRow
        //                    <= bytesPerRow * heightInBlocks
        //                    <= bytesPerRow * rowsPerImage
        //                    <= bytesPerImage
        //
        // This means that if the computation of depth * bytesPerImage doesn't overflow, none of
        // the computations for requiredBytesInCopy will. (and it's not a very pessimizing check)
        ASSERT(copySize.depthOrArrayLayers <= 1 || (bytesPerRow != wgpu::kCopyStrideUndefined &&
                                                    rowsPerImage != wgpu::kCopyStrideUndefined));
        uint64_t bytesPerImage = Safe32x32(bytesPerRow, rowsPerImage);
        DAWN_INVALID_IF(
            bytesPerImage > std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
            "The number of bytes per image (%u) exceeds the maximum (%u) when copying %u images.",
            bytesPerImage, std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
            copySize.depthOrArrayLayers);

        uint64_t requiredBytesInCopy = bytesPerImage * (copySize.depthOrArrayLayers - 1);
        if (heightInBlocks > 0) {
            ASSERT(heightInBlocks <= 1 || bytesPerRow != wgpu::kCopyStrideUndefined);
            uint64_t bytesInLastImage = Safe32x32(bytesPerRow, heightInBlocks - 1) + bytesInLastRow;
            requiredBytesInCopy += bytesInLastImage;
        }
        return requiredBytesInCopy;
    }

    MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
                                            uint64_t offset,
                                            uint64_t size) {
        uint64_t bufferSize = buffer->GetSize();
        bool fitsInBuffer = offset <= bufferSize && (size <= (bufferSize - offset));
        DAWN_INVALID_IF(!fitsInBuffer,
                        "Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset,
                        size, buffer.Get(), bufferSize);

        return {};
    }

    // Replace wgpu::kCopyStrideUndefined with real values, so backends don't have to think about
    // it.
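    // For example (illustrative): for a 7x1x1 copy of RGBA8 data (1x1 blocks of 4 bytes) with
    // both values left undefined, bytesPerRow defaults to Align(28, kTextureBytesPerRowAlignment)
    // and rowsPerImage defaults to 1.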
    void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
                                              const TexelBlockInfo& blockInfo,
                                              const Extent3D& copyExtent) {
        ASSERT(layout != nullptr);
        ASSERT(copyExtent.height % blockInfo.height == 0);
        uint32_t heightInBlocks = copyExtent.height / blockInfo.height;

        if (layout->bytesPerRow == wgpu::kCopyStrideUndefined) {
            ASSERT(copyExtent.width % blockInfo.width == 0);
            uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
            uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;

            ASSERT(heightInBlocks <= 1 && copyExtent.depthOrArrayLayers <= 1);
            layout->bytesPerRow = Align(bytesInLastRow, kTextureBytesPerRowAlignment);
        }
        if (layout->rowsPerImage == wgpu::kCopyStrideUndefined) {
            ASSERT(copyExtent.depthOrArrayLayers <= 1);
            layout->rowsPerImage = heightInBlocks;
        }
    }

    MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
                                         uint64_t byteSize,
                                         const TexelBlockInfo& blockInfo,
                                         const Extent3D& copyExtent) {
        ASSERT(copyExtent.height % blockInfo.height == 0);
        uint32_t heightInBlocks = copyExtent.height / blockInfo.height;

        // TODO(dawn:563): Right now kCopyStrideUndefined will be formatted as a large value in
        // the validation message. Investigate ways to make it print as a more readable symbol.
        DAWN_INVALID_IF(
            copyExtent.depthOrArrayLayers > 1 &&
                (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
                 layout.rowsPerImage == wgpu::kCopyStrideUndefined),
            "Copy depth (%u) is > 1, but bytesPerRow (%u) or rowsPerImage (%u) are not specified.",
            copyExtent.depthOrArrayLayers, layout.bytesPerRow, layout.rowsPerImage);

        DAWN_INVALID_IF(heightInBlocks > 1 && layout.bytesPerRow == wgpu::kCopyStrideUndefined,
                        "HeightInBlocks (%u) is > 1, but bytesPerRow is not specified.",
                        heightInBlocks);

        // Validation for other members in layout:
        ASSERT(copyExtent.width % blockInfo.width == 0);
        uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
        ASSERT(Safe32x32(widthInBlocks, blockInfo.byteSize) <=
               std::numeric_limits<uint32_t>::max());
        uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;

        // These != wgpu::kCopyStrideUndefined checks are technically redundant with the > checks,
        // but they should get optimized out.
        DAWN_INVALID_IF(
            layout.bytesPerRow != wgpu::kCopyStrideUndefined &&
                bytesInLastRow > layout.bytesPerRow,
            "The byte size of each row (%u) is > bytesPerRow (%u).", bytesInLastRow,
            layout.bytesPerRow);

        DAWN_INVALID_IF(layout.rowsPerImage != wgpu::kCopyStrideUndefined &&
                            heightInBlocks > layout.rowsPerImage,
                        "The height of each image in blocks (%u) is > rowsPerImage (%u).",
                        heightInBlocks, layout.rowsPerImage);
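
        // For example (illustrative): a 16x16 copy of RGBA8 data with bytesPerRow = 32 fails the
        // first check above because bytesInLastRow = 16 * 4 = 64 > 32.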

        // We compute required bytes in copy after validating texel block alignments
        // because the divisibility conditions are necessary for the algorithm to be valid,
        // also the bytesPerRow bound is necessary to avoid overflows.
        uint64_t requiredBytesInCopy;
        DAWN_TRY_ASSIGN(requiredBytesInCopy,
                        ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow,
                                                   layout.rowsPerImage));

        bool fitsInData =
            layout.offset <= byteSize && (requiredBytesInCopy <= (byteSize - layout.offset));
        DAWN_INVALID_IF(
            !fitsInData,
            "Required size for texture data layout (%u) exceeds the linear data size (%u) with "
            "offset (%u).",
            requiredBytesInCopy, byteSize, layout.offset);

        return {};
    }

    MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
                                       const ImageCopyBuffer& imageCopyBuffer) {
        DAWN_TRY(device->ValidateObject(imageCopyBuffer.buffer));
        if (imageCopyBuffer.layout.bytesPerRow != wgpu::kCopyStrideUndefined) {
            DAWN_INVALID_IF(imageCopyBuffer.layout.bytesPerRow % kTextureBytesPerRowAlignment != 0,
                            "bytesPerRow (%u) is not a multiple of %u.",
                            imageCopyBuffer.layout.bytesPerRow, kTextureBytesPerRowAlignment);
        }

        return {};
    }

    MaybeError ValidateImageCopyTexture(DeviceBase const* device,
                                        const ImageCopyTexture& textureCopy,
                                        const Extent3D& copySize) {
        const TextureBase* texture = textureCopy.texture;
        DAWN_TRY(device->ValidateObject(texture));
        DAWN_INVALID_IF(textureCopy.mipLevel >= texture->GetNumMipLevels(),
                        "MipLevel (%u) is greater than the number of mip levels (%u) in %s.",
                        textureCopy.mipLevel, texture->GetNumMipLevels(), texture);

        DAWN_TRY(ValidateTextureAspect(textureCopy.aspect));
        DAWN_INVALID_IF(
            SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
            "%s format (%s) does not have the selected aspect (%s).", texture,
            texture->GetFormat().format, textureCopy.aspect);

        if (texture->GetSampleCount() > 1 || texture->GetFormat().HasDepthOrStencil()) {
            Extent3D subresourceSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
            ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
            DAWN_INVALID_IF(
                textureCopy.origin.x != 0 || textureCopy.origin.y != 0 ||
                    subresourceSize.width != copySize.width ||
                    subresourceSize.height != copySize.height,
                "Copy origin (%s) and size (%s) do not cover the entire subresource (origin: "
                "[x: 0, y: 0], size: %s) of %s. The entire subresource must be copied when the "
                "format (%s) is a depth/stencil format or the sample count (%u) is > 1.",
                &textureCopy.origin, &copySize, &subresourceSize, texture,
                texture->GetFormat().format, texture->GetSampleCount());
        }

        return {};
    }

    MaybeError ValidateTextureCopyRange(DeviceBase const* device,
                                        const ImageCopyTexture& textureCopy,
                                        const Extent3D& copySize) {
        const TextureBase* texture = textureCopy.texture;

        ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);

        // Validation for the copy being in-bounds:
        Extent3D mipSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
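        // For example (illustrative): a 4x4 copy at origin (30, 30, 0) of a 32x32 mip level is
        // rejected by the bounds check below because 30 + 4 > 32.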
        // For 1D/2D textures, include the array layer as depth so it can be checked with other
        // dimensions.
        if (texture->GetDimension() != wgpu::TextureDimension::e3D) {
            mipSize.depthOrArrayLayers = texture->GetArrayLayers();
        }
        // All texture dimensions are in uint32_t so by doing checks in uint64_t we avoid
        // overflows.
        DAWN_INVALID_IF(
            static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
                    static_cast<uint64_t>(mipSize.width) ||
                static_cast<uint64_t>(textureCopy.origin.y) +
                        static_cast<uint64_t>(copySize.height) >
                    static_cast<uint64_t>(mipSize.height) ||
                static_cast<uint64_t>(textureCopy.origin.z) +
                        static_cast<uint64_t>(copySize.depthOrArrayLayers) >
                    static_cast<uint64_t>(mipSize.depthOrArrayLayers),
            "Texture copy range (origin: %s, copySize: %s) touches outside of %s mip level %u "
            "size (%s).",
            &textureCopy.origin, &copySize, texture, textureCopy.mipLevel, &mipSize);

        // Validation for the texel block alignments:
        const Format& format = textureCopy.texture->GetFormat();
        if (format.isCompressed) {
            const TexelBlockInfo& blockInfo = format.GetAspectInfo(textureCopy.aspect).block;
            DAWN_INVALID_IF(
                textureCopy.origin.x % blockInfo.width != 0,
                "Texture copy origin.x (%u) is not a multiple of compressed texture format block "
                "width (%u).",
                textureCopy.origin.x, blockInfo.width);
            DAWN_INVALID_IF(
                textureCopy.origin.y % blockInfo.height != 0,
                "Texture copy origin.y (%u) is not a multiple of compressed texture format block "
                "height (%u).",
                textureCopy.origin.y, blockInfo.height);
            DAWN_INVALID_IF(
                copySize.width % blockInfo.width != 0,
                "copySize.width (%u) is not a multiple of compressed texture format block width "
                "(%u).",
                copySize.width, blockInfo.width);
            DAWN_INVALID_IF(
                copySize.height % blockInfo.height != 0,
                "copySize.height (%u) is not a multiple of compressed texture format block "
                "height (%u).",
                copySize.height, blockInfo.height);
        }

        return {};
    }

    // Always returns a single aspect (color, stencil, depth, or ith plane for multi-planar
    // formats).
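    // For example (illustrative): TextureAspect::All on a format with more than one aspect (such
    // as a combined depth-stencil format) is rejected below, while TextureAspect::DepthOnly
    // resolves to Aspect::Depth.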
    ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view) {
        const Format& format = view.texture->GetFormat();
        switch (view.aspect) {
            case wgpu::TextureAspect::All: {
                DAWN_INVALID_IF(
                    !HasOneBit(format.aspects),
                    "More than a single aspect (%s) is selected for multi-planar format (%s) in "
                    "%s <-> linear data copy.",
                    view.aspect, format.format, view.texture);

                Aspect single = format.aspects;
                return single;
            }
            case wgpu::TextureAspect::DepthOnly:
                ASSERT(format.aspects & Aspect::Depth);
                return Aspect::Depth;
            case wgpu::TextureAspect::StencilOnly:
                ASSERT(format.aspects & Aspect::Stencil);
                return Aspect::Stencil;
            case wgpu::TextureAspect::Plane0Only:
            case wgpu::TextureAspect::Plane1Only:
                break;
        }
        UNREACHABLE();
    }

    MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst) {
        Aspect aspectUsed;
        DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(dst));
        DAWN_INVALID_IF(aspectUsed == Aspect::Depth, "Cannot copy into the depth aspect of %s.",
                        dst.texture);

        return {};
    }

    MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
                                                              const ImageCopyTexture& dst,
                                                              const Extent3D& copySize) {
        const uint32_t srcSamples = src.texture->GetSampleCount();
        const uint32_t dstSamples = dst.texture->GetSampleCount();

        DAWN_INVALID_IF(
            srcSamples != dstSamples,
            "Source %s sample count (%u) and destination %s sample count (%u) do not match.",
            src.texture, srcSamples, dst.texture, dstSamples);

        // Metal cannot select a single aspect for texture-to-texture copies.
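        // For example (illustrative): copying only the StencilOnly aspect of a combined
        // depth-stencil texture to another texture is rejected by the checks below, since the
        // selected aspect must cover every aspect of the format.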
        const Format& format = src.texture->GetFormat();
        DAWN_INVALID_IF(
            SelectFormatAspects(format, src.aspect) != format.aspects,
            "Source %s aspect (%s) doesn't select all the aspects of the source format (%s).",
            src.texture, src.aspect, format.format);

        DAWN_INVALID_IF(
            SelectFormatAspects(format, dst.aspect) != format.aspects,
            "Destination %s aspect (%s) doesn't select all the aspects of the destination format "
            "(%s).",
            dst.texture, dst.aspect, format.format);

        if (src.texture == dst.texture && src.mipLevel == dst.mipLevel) {
            wgpu::TextureDimension dimension = src.texture->GetDimension();
            ASSERT(dimension != wgpu::TextureDimension::e1D);
            DAWN_INVALID_IF(
                (dimension == wgpu::TextureDimension::e2D &&
                 IsRangeOverlapped(src.origin.z, dst.origin.z, copySize.depthOrArrayLayers)) ||
                    dimension == wgpu::TextureDimension::e3D,
                "Cannot copy between overlapping subresources of %s.", src.texture);
        }

        return {};
    }

    MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
                                                        const ImageCopyTexture& dst,
                                                        const Extent3D& copySize) {
        // Metal requires texture-to-texture copies to have the same format.
        DAWN_INVALID_IF(src.texture->GetFormat().format != dst.texture->GetFormat().format,
                        "Source %s format (%s) and destination %s format (%s) do not match.",
                        src.texture, src.texture->GetFormat().format, dst.texture,
                        dst.texture->GetFormat().format);

        return ValidateTextureToTextureCopyCommonRestrictions(src, dst, copySize);
    }

    MaybeError ValidateCanUseAs(const TextureBase* texture, wgpu::TextureUsage usage) {
        ASSERT(wgpu::HasZeroOrOneBits(usage));
        DAWN_INVALID_IF(!(texture->GetUsage() & usage), "%s usage (%s) doesn't include %s.",
                        texture, texture->GetUsage(), usage);

        return {};
    }

    MaybeError ValidateInternalCanUseAs(const TextureBase* texture, wgpu::TextureUsage usage) {
        ASSERT(wgpu::HasZeroOrOneBits(usage));
        DAWN_INVALID_IF(!(texture->GetInternalUsage() & usage),
                        "%s internal usage (%s) doesn't include %s.", texture,
                        texture->GetInternalUsage(), usage);

        return {};
    }

    MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
        ASSERT(wgpu::HasZeroOrOneBits(usage));
        DAWN_INVALID_IF(!(buffer->GetUsage() & usage), "%s usage (%s) doesn't include %s.", buffer,
                        buffer->GetUsage(), usage);

        return {};
    }

}  // namespace dawn_native