// Copyright 2018 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "dawn_native/vulkan/TextureVk.h"

#include "common/Assert.h"
#include "common/Math.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/EnumMaskIterator.h"
#include "dawn_native/Error.h"
#include "dawn_native/VulkanBackend.h"
#include "dawn_native/vulkan/AdapterVk.h"
#include "dawn_native/vulkan/BufferVk.h"
#include "dawn_native/vulkan/CommandRecordingContext.h"
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/ResourceHeapVk.h"
#include "dawn_native/vulkan/ResourceMemoryAllocatorVk.h"
#include "dawn_native/vulkan/StagingBufferVk.h"
#include "dawn_native/vulkan/UtilsVulkan.h"
#include "dawn_native/vulkan/VulkanError.h"

namespace dawn_native { namespace vulkan {

    namespace {
        // Converts a Dawn texture dimension to a Vulkan image view type.
        // Contrary to image types, image view types include arrayness and cubemapness.
        VkImageViewType VulkanImageViewType(wgpu::TextureViewDimension dimension) {
            switch (dimension) {
                case wgpu::TextureViewDimension::e2D:
                    return VK_IMAGE_VIEW_TYPE_2D;
                case wgpu::TextureViewDimension::e2DArray:
                    return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
                case wgpu::TextureViewDimension::Cube:
                    return VK_IMAGE_VIEW_TYPE_CUBE;
                case wgpu::TextureViewDimension::CubeArray:
                    return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
                case wgpu::TextureViewDimension::e3D:
                    return VK_IMAGE_VIEW_TYPE_3D;

                case wgpu::TextureViewDimension::e1D:
                case wgpu::TextureViewDimension::Undefined:
                    break;
            }
            UNREACHABLE();
        }

        // Computes which Vulkan access flags could be required for the given Dawn usage.
        // TODO(crbug.com/dawn/269): We shouldn't need any access usages for srcAccessMask when
        // the previous usage is readonly because an execution dependency is sufficient.
        VkAccessFlags VulkanAccessFlags(wgpu::TextureUsage usage, const Format& format) {
            VkAccessFlags flags = 0;

            if (usage & wgpu::TextureUsage::CopySrc) {
                flags |= VK_ACCESS_TRANSFER_READ_BIT;
            }
            if (usage & wgpu::TextureUsage::CopyDst) {
                flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
            }
            if (usage & wgpu::TextureUsage::TextureBinding) {
                flags |= VK_ACCESS_SHADER_READ_BIT;
            }
            if (usage & wgpu::TextureUsage::StorageBinding) {
                flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
            }
            if (usage & wgpu::TextureUsage::RenderAttachment) {
                if (format.HasDepthOrStencil()) {
                    flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
                             VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
                } else {
                    flags |=
                        VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
                }
            }
            if (usage & kReadOnlyRenderAttachment) {
                flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
            }
            if (usage & kPresentTextureUsage) {
                // The present usage is only used internally by the swapchain and is never used in
                // combination with other usages.
                ASSERT(usage == kPresentTextureUsage);
                // The Vulkan spec has the following note:
                //
                //   When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
                //   VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
                //   processing, or perform any visibility operations (as vkQueuePresentKHR performs
                //   automatic visibility operations). To achieve this, the dstAccessMask member of
                //   the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
                //   should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
                //
                // So on the transition to Present we don't need an access flag. The other
                // direction doesn't matter because swapchain textures always start a new frame
                // as uninitialized.
                flags |= 0;
            }

            return flags;
        }

        // Computes which Vulkan pipeline stages can access a texture in the given Dawn usage.
        VkPipelineStageFlags VulkanPipelineStage(wgpu::TextureUsage usage, const Format& format) {
            VkPipelineStageFlags flags = 0;

            if (usage == wgpu::TextureUsage::None) {
                // This only happens when a texture is initially created (and for srcAccessMask) in
                // which case there is no need to wait on anything to stop accessing this texture.
                return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
            }
            if (usage & (wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) {
                flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
            }
            if (usage & wgpu::TextureUsage::TextureBinding) {
                // TODO(crbug.com/dawn/851): Only transition to the usage we care about to avoid
                // introducing FS -> VS dependencies that would prevent parallelization on tiler
                // GPUs
                flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                         VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
            }
            if (usage & wgpu::TextureUsage::StorageBinding) {
                flags |=
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
            }
            if (usage & (wgpu::TextureUsage::RenderAttachment | kReadOnlyRenderAttachment)) {
                if (format.HasDepthOrStencil()) {
                    flags |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
                             VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
                } else {
                    flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
                }
            }
            if (usage & kPresentTextureUsage) {
                // The present usage is only used internally by the swapchain and is never used in
                // combination with other usages.
                ASSERT(usage == kPresentTextureUsage);
                // The Vulkan spec has the following note:
                //
                //   When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
                //   VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
                //   processing, or perform any visibility operations (as vkQueuePresentKHR performs
                //   automatic visibility operations). To achieve this, the dstAccessMask member of
                //   the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
                //   should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
                //
                // So on the transition to Present we use the "bottom of pipe" stage. The other
                // direction doesn't matter because swapchain textures always start a new frame
                // as uninitialized.
                flags |= VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
            }

            // A zero value isn't a valid pipeline stage mask
            ASSERT(flags != 0);
            return flags;
        }

        VkImageMemoryBarrier BuildMemoryBarrier(const Texture* texture,
                                                wgpu::TextureUsage lastUsage,
                                                wgpu::TextureUsage usage,
                                                const SubresourceRange& range) {
            VkImageMemoryBarrier barrier;
            barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
            barrier.pNext = nullptr;
            barrier.srcAccessMask = VulkanAccessFlags(lastUsage, texture->GetFormat());
            barrier.dstAccessMask = VulkanAccessFlags(usage, texture->GetFormat());
            barrier.oldLayout = VulkanImageLayout(texture, lastUsage);
            barrier.newLayout = VulkanImageLayout(texture, usage);
            barrier.image = texture->GetHandle();
            barrier.subresourceRange.aspectMask = VulkanAspectMask(range.aspects);
            barrier.subresourceRange.baseMipLevel = range.baseMipLevel;
            barrier.subresourceRange.levelCount = range.levelCount;
            barrier.subresourceRange.baseArrayLayer = range.baseArrayLayer;
            barrier.subresourceRange.layerCount = range.layerCount;

            barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
            barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
            return barrier;
        }

        void FillVulkanCreateInfoSizesAndType(const Texture& texture, VkImageCreateInfo* info) {
            const Extent3D& size = texture.GetSize();

            info->mipLevels = texture.GetNumMipLevels();
            info->samples = VulkanSampleCount(texture.GetSampleCount());

            // Fill in the image type, and paper over differences in how the array layer count is
            // specified between WebGPU and Vulkan.
            switch (texture.GetDimension()) {
                case wgpu::TextureDimension::e2D:
                    info->imageType = VK_IMAGE_TYPE_2D;
                    info->extent = {size.width, size.height, 1};
                    info->arrayLayers = size.depthOrArrayLayers;
                    break;

                case wgpu::TextureDimension::e3D:
                    info->imageType = VK_IMAGE_TYPE_3D;
                    info->extent = {size.width, size.height, size.depthOrArrayLayers};
                    info->arrayLayers = 1;
                    break;

                case wgpu::TextureDimension::e1D:
                    UNREACHABLE();
            }
        }

    }  // namespace


    // Converts a Dawn texture format to the corresponding Vulkan format.
    VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format) {
        switch (format) {
            case wgpu::TextureFormat::R8Unorm:
                return VK_FORMAT_R8_UNORM;
            case wgpu::TextureFormat::R8Snorm:
                return VK_FORMAT_R8_SNORM;
            case wgpu::TextureFormat::R8Uint:
                return VK_FORMAT_R8_UINT;
            case wgpu::TextureFormat::R8Sint:
                return VK_FORMAT_R8_SINT;

            case wgpu::TextureFormat::R16Uint:
                return VK_FORMAT_R16_UINT;
            case wgpu::TextureFormat::R16Sint:
                return VK_FORMAT_R16_SINT;
            case wgpu::TextureFormat::R16Float:
                return VK_FORMAT_R16_SFLOAT;
            case wgpu::TextureFormat::RG8Unorm:
                return VK_FORMAT_R8G8_UNORM;
            case wgpu::TextureFormat::RG8Snorm:
                return VK_FORMAT_R8G8_SNORM;
            case wgpu::TextureFormat::RG8Uint:
                return VK_FORMAT_R8G8_UINT;
            case wgpu::TextureFormat::RG8Sint:
                return VK_FORMAT_R8G8_SINT;

            case wgpu::TextureFormat::R32Uint:
                return VK_FORMAT_R32_UINT;
            case wgpu::TextureFormat::R32Sint:
                return VK_FORMAT_R32_SINT;
            case wgpu::TextureFormat::R32Float:
                return VK_FORMAT_R32_SFLOAT;
            case wgpu::TextureFormat::RG16Uint:
                return VK_FORMAT_R16G16_UINT;
            case wgpu::TextureFormat::RG16Sint:
                return VK_FORMAT_R16G16_SINT;
            case wgpu::TextureFormat::RG16Float:
                return VK_FORMAT_R16G16_SFLOAT;
            case wgpu::TextureFormat::RGBA8Unorm:
                return VK_FORMAT_R8G8B8A8_UNORM;
            case wgpu::TextureFormat::RGBA8UnormSrgb:
                return VK_FORMAT_R8G8B8A8_SRGB;
            case wgpu::TextureFormat::RGBA8Snorm:
                return VK_FORMAT_R8G8B8A8_SNORM;
            case wgpu::TextureFormat::RGBA8Uint:
                return VK_FORMAT_R8G8B8A8_UINT;
            case wgpu::TextureFormat::RGBA8Sint:
                return VK_FORMAT_R8G8B8A8_SINT;
            case wgpu::TextureFormat::BGRA8Unorm:
                return VK_FORMAT_B8G8R8A8_UNORM;
            case wgpu::TextureFormat::BGRA8UnormSrgb:
                return VK_FORMAT_B8G8R8A8_SRGB;
            case wgpu::TextureFormat::RGB10A2Unorm:
                return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
            case wgpu::TextureFormat::RG11B10Ufloat:
                return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
            case wgpu::TextureFormat::RGB9E5Ufloat:
                return VK_FORMAT_E5B9G9R9_UFLOAT_PACK32;

            case wgpu::TextureFormat::RG32Uint:
                return VK_FORMAT_R32G32_UINT;
            case wgpu::TextureFormat::RG32Sint:
                return VK_FORMAT_R32G32_SINT;
            case wgpu::TextureFormat::RG32Float:
                return VK_FORMAT_R32G32_SFLOAT;
            case wgpu::TextureFormat::RGBA16Uint:
                return VK_FORMAT_R16G16B16A16_UINT;
            case wgpu::TextureFormat::RGBA16Sint:
                return VK_FORMAT_R16G16B16A16_SINT;
            case wgpu::TextureFormat::RGBA16Float:
                return VK_FORMAT_R16G16B16A16_SFLOAT;

            case wgpu::TextureFormat::RGBA32Uint:
                return VK_FORMAT_R32G32B32A32_UINT;
            case wgpu::TextureFormat::RGBA32Sint:
                return VK_FORMAT_R32G32B32A32_SINT;
            case wgpu::TextureFormat::RGBA32Float:
                return VK_FORMAT_R32G32B32A32_SFLOAT;

            case wgpu::TextureFormat::Depth16Unorm:
                return VK_FORMAT_D16_UNORM;
            case wgpu::TextureFormat::Depth32Float:
                return VK_FORMAT_D32_SFLOAT;
            case wgpu::TextureFormat::Depth24Plus:
                return VK_FORMAT_D32_SFLOAT;
            case wgpu::TextureFormat::Depth24PlusStencil8:
                // Depth24PlusStencil8 can map to either of these two formats because WebGPU only
                // requires that one of them be supported. The VulkanUseD32S8 toggle combines the
                // environment's preference (defaulting to D32S8) with availability information so
                // we know the chosen format is available.
                if (device->IsToggleEnabled(Toggle::VulkanUseD32S8)) {
                    return VK_FORMAT_D32_SFLOAT_S8_UINT;
                } else {
                    return VK_FORMAT_D24_UNORM_S8_UINT;
                }

            case wgpu::TextureFormat::BC1RGBAUnorm:
                return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
                return VK_FORMAT_BC1_RGBA_SRGB_BLOCK;
            case wgpu::TextureFormat::BC2RGBAUnorm:
                return VK_FORMAT_BC2_UNORM_BLOCK;
            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
                return VK_FORMAT_BC2_SRGB_BLOCK;
            case wgpu::TextureFormat::BC3RGBAUnorm:
                return VK_FORMAT_BC3_UNORM_BLOCK;
            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
                return VK_FORMAT_BC3_SRGB_BLOCK;
            case wgpu::TextureFormat::BC4RSnorm:
                return VK_FORMAT_BC4_SNORM_BLOCK;
            case wgpu::TextureFormat::BC4RUnorm:
                return VK_FORMAT_BC4_UNORM_BLOCK;
            case wgpu::TextureFormat::BC5RGSnorm:
                return VK_FORMAT_BC5_SNORM_BLOCK;
            case wgpu::TextureFormat::BC5RGUnorm:
                return VK_FORMAT_BC5_UNORM_BLOCK;
            case wgpu::TextureFormat::BC6HRGBFloat:
                return VK_FORMAT_BC6H_SFLOAT_BLOCK;
            case wgpu::TextureFormat::BC6HRGBUfloat:
                return VK_FORMAT_BC6H_UFLOAT_BLOCK;
            case wgpu::TextureFormat::BC7RGBAUnorm:
                return VK_FORMAT_BC7_UNORM_BLOCK;
            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
                return VK_FORMAT_BC7_SRGB_BLOCK;

            case wgpu::TextureFormat::ETC2RGB8Unorm:
                return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
                return VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK;
            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
                return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
                return VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK;
            case wgpu::TextureFormat::ETC2RGBA8Unorm:
                return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
                return VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK;
            case wgpu::TextureFormat::EACR11Unorm:
                return VK_FORMAT_EAC_R11_UNORM_BLOCK;
            case wgpu::TextureFormat::EACR11Snorm:
                return VK_FORMAT_EAC_R11_SNORM_BLOCK;
            case wgpu::TextureFormat::EACRG11Unorm:
                return VK_FORMAT_EAC_R11G11_UNORM_BLOCK;
            case wgpu::TextureFormat::EACRG11Snorm:
                return VK_FORMAT_EAC_R11G11_SNORM_BLOCK;

            case wgpu::TextureFormat::ASTC4x4Unorm:
                return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
                return VK_FORMAT_ASTC_4x4_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC5x4Unorm:
                return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
                return VK_FORMAT_ASTC_5x4_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC5x5Unorm:
                return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
                return VK_FORMAT_ASTC_5x5_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC6x5Unorm:
                return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
                return VK_FORMAT_ASTC_6x5_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC6x6Unorm:
                return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
                return VK_FORMAT_ASTC_6x6_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC8x5Unorm:
                return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
                return VK_FORMAT_ASTC_8x5_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC8x6Unorm:
                return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
                return VK_FORMAT_ASTC_8x6_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC8x8Unorm:
                return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
                return VK_FORMAT_ASTC_8x8_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC10x5Unorm:
                return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
                return VK_FORMAT_ASTC_10x5_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC10x6Unorm:
                return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
                return VK_FORMAT_ASTC_10x6_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC10x8Unorm:
                return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
                return VK_FORMAT_ASTC_10x8_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC10x10Unorm:
                return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
                return VK_FORMAT_ASTC_10x10_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC12x10Unorm:
                return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
                return VK_FORMAT_ASTC_12x10_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC12x12Unorm:
                return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
                return VK_FORMAT_ASTC_12x12_SRGB_BLOCK;

            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
            // TODO(dawn:666): implement stencil8
            case wgpu::TextureFormat::Stencil8:
            // TODO(dawn:690): implement depth24unorm-stencil8
            case wgpu::TextureFormat::Depth24UnormStencil8:
            // TODO(dawn:690): implement depth32float-stencil8
            case wgpu::TextureFormat::Depth32FloatStencil8:
            case wgpu::TextureFormat::Undefined:
                break;
        }
        UNREACHABLE();
    }

    // Converts the Dawn usage flags to Vulkan usage flags. Also needs the format to choose
    // between color and depth attachment usages.
    VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format) {
        VkImageUsageFlags flags = 0;

        if (usage & wgpu::TextureUsage::CopySrc) {
            flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
        }
        if (usage & wgpu::TextureUsage::CopyDst) {
            flags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
        }
        if (usage & wgpu::TextureUsage::TextureBinding) {
            flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
            // If the sampled texture is a depth/stencil texture, its image layout will be set
            // to DEPTH_STENCIL_READ_ONLY_OPTIMAL in order to support readonly depth/stencil
            // attachments. That layout requires DEPTH_STENCIL_ATTACHMENT_BIT image usage.
            if (format.HasDepthOrStencil() && format.isRenderable) {
                flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
            }
        }
        if (usage & wgpu::TextureUsage::StorageBinding) {
            flags |= VK_IMAGE_USAGE_STORAGE_BIT;
        }
        if (usage & wgpu::TextureUsage::RenderAttachment) {
            if (format.HasDepthOrStencil()) {
                flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
            } else {
                flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
            }
        }
        if (usage & kReadOnlyRenderAttachment) {
            flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
        }

        return flags;
    }

    // Chooses which Vulkan image layout should be used for the given Dawn usage. Note that this
    // layout must match the layout given to various Vulkan operations as well as the layout given
    // to descriptor set writes.
    VkImageLayout VulkanImageLayout(const Texture* texture, wgpu::TextureUsage usage) {
        if (usage == wgpu::TextureUsage::None) {
            return VK_IMAGE_LAYOUT_UNDEFINED;
        }

        if (!wgpu::HasZeroOrOneBits(usage)) {
            // TextureBinding | kReadOnlyRenderAttachment is the only possible multi-bit usage; if
            // more appear we might need additional special-casing.
            ASSERT(usage == (wgpu::TextureUsage::TextureBinding | kReadOnlyRenderAttachment));

            // WebGPU requires both aspects to be readonly if the attachment's format has both
            // depth and stencil aspects. Vulkan 1.0 supports readonly for both aspects too via
            // the DEPTH_STENCIL_READ_ONLY image layout. Vulkan 1.1 and above can support readonly
            // for a single aspect via the DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL and
            // DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL layouts, but Vulkan 1.0 cannot, and
            // WebGPU doesn't need that currently.
            return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
        }

        // Usage has a single bit so we can switch on its value directly.
        switch (usage) {
            case wgpu::TextureUsage::CopyDst:
                return VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;

                // The layout returned here is the one that will be used at bindgroup creation time.
                // The bindgroup's layout must match the runtime layout of the image when it is
                // used via the bindgroup, but we don't know exactly what it will be yet. So we
                // have to prepare for the pessimistic case.
            case wgpu::TextureUsage::TextureBinding:
                // Only VK_IMAGE_LAYOUT_GENERAL can do sampling and storage access of a texture at
                // the same time.
                if (texture->GetInternalUsage() & wgpu::TextureUsage::StorageBinding) {
                    return VK_IMAGE_LAYOUT_GENERAL;
                }
                // The sampled image can be used as a readonly depth/stencil attachment at the same
                // time if it is a depth/stencil renderable format, so the image layout needs to be
                // VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL.
                if (texture->GetFormat().HasDepthOrStencil() && texture->GetFormat().isRenderable) {
                    return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
                }
                return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

                // Vulkan texture copy functions require the image to be in _one_ known layout.
                // Depending on whether parts of the texture have been transitioned to only CopySrc
                // or a combination with something else, the texture could be in a combination of
                // GENERAL and TRANSFER_SRC_OPTIMAL. This would be a problem, so we make CopySrc use
                // GENERAL.
                // TODO(crbug.com/dawn/851): We no longer need to transition resources all at
                // once and can instead track subresources so we should lift this limitation.
            case wgpu::TextureUsage::CopySrc:
                // Read-only and write-only storage textures must use the general layout because
                // load and store operations on storage images can only be done on images in the
                // VK_IMAGE_LAYOUT_GENERAL layout.
            case wgpu::TextureUsage::StorageBinding:
                return VK_IMAGE_LAYOUT_GENERAL;

            case wgpu::TextureUsage::RenderAttachment:
                if (texture->GetFormat().HasDepthOrStencil()) {
                    return VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
                } else {
                    return VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
                }

            case kPresentTextureUsage:
                return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

            case wgpu::TextureUsage::None:
                break;
        }
        UNREACHABLE();
    }

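    // Maps a Dawn sample count to the corresponding Vulkan sample count flag.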
    VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount) {
        switch (sampleCount) {
            case 1:
                return VK_SAMPLE_COUNT_1_BIT;
            case 4:
                return VK_SAMPLE_COUNT_4_BIT;
        }
        UNREACHABLE();
    }

    MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase*,
                                               const TextureDescriptor* descriptor) {
        DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
                        "Texture dimension (%s) is not %s.", descriptor->dimension,
                        wgpu::TextureDimension::e2D);

        DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
                        descriptor->mipLevelCount);

        DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
                        "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);

        DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
                        descriptor->sampleCount);

        return {};
    }

    bool IsSampleCountSupported(const dawn_native::vulkan::Device* device,
                                const VkImageCreateInfo& imageCreateInfo) {
        ASSERT(device);

        VkPhysicalDevice physicalDevice = ToBackend(device->GetAdapter())->GetPhysicalDevice();
        VkImageFormatProperties properties;
        if (device->fn.GetPhysicalDeviceImageFormatProperties(
                physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType,
                imageCreateInfo.tiling, imageCreateInfo.usage, imageCreateInfo.flags,
                &properties) != VK_SUCCESS) {
            UNREACHABLE();
        }

        return properties.sampleCounts & imageCreateInfo.samples;
    }

    // static
    ResultOrError<Ref<Texture>> Texture::Create(Device* device,
                                                const TextureDescriptor* descriptor,
                                                VkImageUsageFlags extraUsages) {
        Ref<Texture> texture =
            AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
        DAWN_TRY(texture->InitializeAsInternalTexture(extraUsages));
        return std::move(texture);
    }

    // static
    ResultOrError<Texture*> Texture::CreateFromExternal(
        Device* device,
        const ExternalImageDescriptorVk* descriptor,
        const TextureDescriptor* textureDescriptor,
        external_memory::Service* externalMemoryService) {
        Ref<Texture> texture =
            AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedInternal));
        DAWN_TRY(texture->InitializeFromExternal(descriptor, externalMemoryService));
        return texture.Detach();
    }

    // static
    Ref<Texture> Texture::CreateForSwapChain(Device* device,
                                             const TextureDescriptor* descriptor,
                                             VkImage nativeImage) {
        Ref<Texture> texture =
            AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
        texture->InitializeForSwapChain(nativeImage);
        return texture;
    }

    Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
        : TextureBase(device, descriptor, state),
          // A usage of none will make sure the texture is transitioned before its first use as
          // required by the Vulkan spec.
          mSubresourceLastUsages(ComputeAspectsForSubresourceStorage(),
                                 GetArrayLayers(),
                                 GetNumMipLevels(),
                                 wgpu::TextureUsage::None) {
    }

    MaybeError Texture::InitializeAsInternalTexture(VkImageUsageFlags extraUsages) {
        Device* device = ToBackend(GetDevice());

        // Create the Vulkan image "container". We don't need to check that the format supports
        // the combination of sample count, usage, etc. because validation should already have
        // been done in the Dawn frontend based on the minimum supported formats in the Vulkan
        // spec.
        VkImageCreateInfo createInfo = {};
        FillVulkanCreateInfoSizesAndType(*this, &createInfo);

        createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;
        createInfo.format = VulkanImageFormat(device, GetFormat().format);
        createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
        createInfo.usage = VulkanImageUsage(GetInternalUsage(), GetFormat()) | extraUsages;
        createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
        createInfo.queueFamilyIndexCount = 0;
        createInfo.pQueueFamilyIndices = nullptr;
        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

        ASSERT(IsSampleCountSupported(device, createInfo));

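        // Mark the image as cube-compatible when it could back a cube map view, which requires
        // at least 6 square array layers.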
        if (GetArrayLayers() >= 6 && GetWidth() == GetHeight()) {
            createInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
        }

        // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT because the Vulkan images that are used
        // in vkCmdClearColorImage() must have been created with this flag, which is also required
        // for the implementation of robust resource initialization.
        createInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

        DAWN_TRY(CheckVkSuccess(
            device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
            "CreateImage"));

        // Create the image memory and associate it with the container
        VkMemoryRequirements requirements;
        device->fn.GetImageMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);

        DAWN_TRY_ASSIGN(mMemoryAllocation, device->GetResourceMemoryAllocator()->Allocate(
                                               requirements, MemoryKind::Opaque));

        DAWN_TRY(CheckVkSuccess(
            device->fn.BindImageMemory(device->GetVkDevice(), mHandle,
                                       ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
                                       mMemoryAllocation.GetOffset()),
            "BindImageMemory"));

        if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
            DAWN_TRY(ClearTexture(ToBackend(GetDevice())->GetPendingRecordingContext(),
                                  GetAllSubresources(), TextureBase::ClearValue::NonZero));
        }

        SetLabelImpl();

        return {};
    }

690 
691     // Internally managed, but imported from external handle
InitializeFromExternal(const ExternalImageDescriptorVk * descriptor,external_memory::Service * externalMemoryService)692     MaybeError Texture::InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
693                                                external_memory::Service* externalMemoryService) {
694         VkFormat format = VulkanImageFormat(ToBackend(GetDevice()), GetFormat().format);
695         VkImageUsageFlags usage = VulkanImageUsage(GetInternalUsage(), GetFormat());
696         DAWN_INVALID_IF(!externalMemoryService->SupportsCreateImage(descriptor, format, usage),
697                         "Creating an image from external memory is not supported.");
698 
699         mExternalState = ExternalState::PendingAcquire;
700 
701         mPendingAcquireOldLayout = descriptor->releasedOldLayout;
702         mPendingAcquireNewLayout = descriptor->releasedNewLayout;
703 
704         VkImageCreateInfo baseCreateInfo = {};
705         FillVulkanCreateInfoSizesAndType(*this, &baseCreateInfo);
706 
707         baseCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
708         baseCreateInfo.pNext = nullptr;
709         baseCreateInfo.format = format;
710         baseCreateInfo.usage = usage;
711         baseCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
712         baseCreateInfo.queueFamilyIndexCount = 0;
713         baseCreateInfo.pQueueFamilyIndices = nullptr;
714 
715         // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally beause the Vulkan images
716         // that are used in vkCmdClearColorImage() must have been created with this flag, which is
717         // also required for the implementation of robust resource initialization.
718         baseCreateInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
719 
720         DAWN_TRY_ASSIGN(mHandle, externalMemoryService->CreateImage(descriptor, baseCreateInfo));
721 
722         SetLabelHelper("Dawn_ExternalTexture");
723 
724         return {};
725     }
726 
    void Texture::InitializeForSwapChain(VkImage nativeImage) {
        mHandle = nativeImage;
        SetLabelHelper("Dawn_SwapChainTexture");
    }

    MaybeError Texture::BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
                                           VkSemaphore signalSemaphore,
                                           VkDeviceMemory externalMemoryAllocation,
                                           std::vector<VkSemaphore> waitSemaphores) {
        Device* device = ToBackend(GetDevice());
        DAWN_TRY(CheckVkSuccess(
            device->fn.BindImageMemory(device->GetVkDevice(), mHandle, externalMemoryAllocation, 0),
            "BindImageMemory (external)"));

        // Don't clear the imported texture if it is already initialized
        if (descriptor->isInitialized) {
            SetIsSubresourceContentInitialized(true, GetAllSubresources());
        }

        // Success, acquire all the external objects.
        mExternalAllocation = externalMemoryAllocation;
        mSignalSemaphore = signalSemaphore;
        mWaitRequirements = std::move(waitSemaphores);
        return {};
    }

    MaybeError Texture::ExportExternalTexture(VkImageLayout desiredLayout,
                                              VkSemaphore* signalSemaphore,
                                              VkImageLayout* releasedOldLayout,
                                              VkImageLayout* releasedNewLayout) {
        Device* device = ToBackend(GetDevice());

        DAWN_INVALID_IF(mExternalState == ExternalState::Released,
                        "Can't export a signal semaphore from signaled texture %s.", this);

        DAWN_INVALID_IF(
            mExternalAllocation == VK_NULL_HANDLE,
            "Can't export a signal semaphore from destroyed or non-external texture %s.", this);

        ASSERT(mSignalSemaphore != VK_NULL_HANDLE);

        // Release the texture
        mExternalState = ExternalState::Released;

        ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
        wgpu::TextureUsage usage = mSubresourceLastUsages.Get(Aspect::Color, 0, 0);

        VkImageMemoryBarrier barrier;
        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barrier.pNext = nullptr;
        barrier.image = GetHandle();
        barrier.subresourceRange.aspectMask = VulkanAspectMask(GetFormat().aspects);
        barrier.subresourceRange.baseMipLevel = 0;
        barrier.subresourceRange.levelCount = 1;
        barrier.subresourceRange.baseArrayLayer = 0;
        barrier.subresourceRange.layerCount = 1;

        barrier.srcAccessMask = VulkanAccessFlags(usage, GetFormat());
        barrier.dstAccessMask = 0;  // The barrier must be paired with another barrier that will
                                    // specify the dst access mask on the importing queue.

        barrier.oldLayout = VulkanImageLayout(this, usage);
        if (desiredLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
            // VK_IMAGE_LAYOUT_UNDEFINED is invalid here. We use it as a
            // special value to indicate no layout transition should be done.
            barrier.newLayout = barrier.oldLayout;
        } else {
            barrier.newLayout = desiredLayout;
        }

        barrier.srcQueueFamilyIndex = device->GetGraphicsQueueFamily();
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;

        VkPipelineStageFlags srcStages = VulkanPipelineStage(usage, GetFormat());
        VkPipelineStageFlags dstStages =
            VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;  // We don't know when the importing queue will need
                                                // the texture, so pass
                                                // VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT to ensure
                                                // the barrier happens-before any usage in the
                                                // importing queue.

        CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
        device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
                                      nullptr, 0, nullptr, 1, &barrier);

        // Queue submit to signal we are done with the texture
        recordingContext->signalSemaphores.push_back(mSignalSemaphore);
        DAWN_TRY(device->SubmitPendingCommands());

        // Write out the layouts and signal semaphore
        *releasedOldLayout = barrier.oldLayout;
        *releasedNewLayout = barrier.newLayout;
        *signalSemaphore = mSignalSemaphore;

        mSignalSemaphore = VK_NULL_HANDLE;

        // Destroy the texture so it can't be used again
        Destroy();
        return {};
    }

    Texture::~Texture() {
    }

    void Texture::SetLabelHelper(const char* prefix) {
        SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_IMAGE,
                     reinterpret_cast<uint64_t&>(mHandle), prefix, GetLabel());
    }

    void Texture::SetLabelImpl() {
        SetLabelHelper("Dawn_InternalTexture");
    }

    void Texture::DestroyImpl() {
        if (GetTextureState() == TextureState::OwnedInternal) {
            Device* device = ToBackend(GetDevice());

            // For textures created from a VkImage, the allocation is kInvalid so the Device knows
            // to skip the deallocation of the (absence of) VkDeviceMemory.
            device->GetResourceMemoryAllocator()->Deallocate(&mMemoryAllocation);

            if (mHandle != VK_NULL_HANDLE) {
                device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
            }

            if (mExternalAllocation != VK_NULL_HANDLE) {
                device->GetFencedDeleter()->DeleteWhenUnused(mExternalAllocation);
            }

            mHandle = VK_NULL_HANDLE;
            mExternalAllocation = VK_NULL_HANDLE;
            // If a signal semaphore exists it should be requested before we delete the texture
            ASSERT(mSignalSemaphore == VK_NULL_HANDLE);
        }
        // For Vulkan, we currently run the base destruction code after the internal changes
        // because of the dependency on the texture state, which the base code overwrites too
        // early.
        TextureBase::DestroyImpl();
    }

    VkImage Texture::GetHandle() const {
        return mHandle;
    }

    VkImageAspectFlags Texture::GetVkAspectMask(wgpu::TextureAspect aspect) const {
        // TODO(enga): These masks could be precomputed.
        switch (aspect) {
            case wgpu::TextureAspect::All:
                return VulkanAspectMask(GetFormat().aspects);
            case wgpu::TextureAspect::DepthOnly:
                ASSERT(GetFormat().aspects & Aspect::Depth);
                return VulkanAspectMask(Aspect::Depth);
            case wgpu::TextureAspect::StencilOnly:
                ASSERT(GetFormat().aspects & Aspect::Stencil);
                return VulkanAspectMask(Aspect::Stencil);
            case wgpu::TextureAspect::Plane0Only:
            case wgpu::TextureAspect::Plane1Only:
                break;
        }
        UNREACHABLE();
    }

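    // Patches the pending barrier for a texture imported from external memory: performs the
    // queue family acquire from the external queue and, if needed, appends an extra barrier to
    // reach the layout that the transition originally requested.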
    void Texture::TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
                                                  std::vector<VkImageMemoryBarrier>* barriers,
                                                  size_t transitionBarrierStart) {
        ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);

        // transitionBarrierStart specifies the index where barriers for the current transition
        // start in the vector. barriers->size() - transitionBarrierStart is the number of barriers
        // that we have already added into the vector during the current transition.
        ASSERT(barriers->size() - transitionBarrierStart <= 1);

        if (mExternalState == ExternalState::PendingAcquire) {
            if (barriers->size() == transitionBarrierStart) {
                barriers->push_back(BuildMemoryBarrier(
                    this, wgpu::TextureUsage::None, wgpu::TextureUsage::None,
                    SubresourceRange::SingleMipAndLayer(0, 0, GetFormat().aspects)));
            }

            VkImageMemoryBarrier* barrier = &(*barriers)[transitionBarrierStart];
            // Transfer the texture from the external queue to the graphics queue
            barrier->srcQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
            barrier->dstQueueFamilyIndex = ToBackend(GetDevice())->GetGraphicsQueueFamily();

            // srcAccessMask means nothing when importing. Queue transfers require a barrier on
            // both the importing and exporting queues. The exporting queue should have specified
            // this.
            barrier->srcAccessMask = 0;

            // This should be the first barrier after import.
            ASSERT(barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED);

            // Save the desired layout. We may need to transition through an intermediate
            // |mPendingAcquireNewLayout| first.
            VkImageLayout desiredLayout = barrier->newLayout;

            bool isInitialized = IsSubresourceContentInitialized(GetAllSubresources());

            // We don't care about the pending old layout if the texture is uninitialized. The
            // driver is free to discard it. Likewise, we don't care about the pending new layout
            // if the texture is uninitialized: we can skip that layout transition.
            if (!isInitialized) {
                barrier->oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
                barrier->newLayout = desiredLayout;
            } else {
                barrier->oldLayout = mPendingAcquireOldLayout;
                barrier->newLayout = mPendingAcquireNewLayout;
            }

            // If these are unequal, we need another barrier to transition the layout.
            if (barrier->newLayout != desiredLayout) {
                VkImageMemoryBarrier layoutBarrier;
                layoutBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
                layoutBarrier.pNext = nullptr;
                layoutBarrier.image = GetHandle();
                layoutBarrier.subresourceRange = barrier->subresourceRange;

                // Transition from the acquired new layout to the desired layout.
                layoutBarrier.oldLayout = barrier->newLayout;
                layoutBarrier.newLayout = desiredLayout;

                // We already transitioned these.
                layoutBarrier.srcAccessMask = 0;
                layoutBarrier.dstAccessMask = 0;
                layoutBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
                layoutBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;

                barriers->push_back(layoutBarrier);
            }

            mExternalState = ExternalState::Acquired;
        }

        mLastExternalState = mExternalState;

        recordingContext->waitSemaphores.insert(recordingContext->waitSemaphores.end(),
                                                mWaitRequirements.begin(), mWaitRequirements.end());
        mWaitRequirements.clear();
    }

    bool Texture::CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage) {
        // Reuse the texture directly and avoid encoding barriers when it isn't needed.
        bool lastReadOnly = IsSubset(lastUsage, kReadOnlyTextureUsages);
        if (lastReadOnly && lastUsage == usage && mLastExternalState == mExternalState) {
            return true;
        }
        return false;
    }

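    // Vulkan 1.0 doesn't allow transitioning the depth and stencil aspects of a combined
    // depth/stencil format separately, so their barriers have to be combined.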
    bool Texture::ShouldCombineDepthStencilBarriers() const {
        return GetFormat().aspects == (Aspect::Depth | Aspect::Stencil);
    }

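    // Returns the aspects used to track per-subresource usage: depth and stencil are tracked as
    // a single combined aspect when their barriers are combined.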
    Aspect Texture::ComputeAspectsForSubresourceStorage() const {
        if (ShouldCombineDepthStencilBarriers()) {
            return Aspect::CombinedDepthStencil;
        }
        return GetFormat().aspects;
    }

    void Texture::TransitionUsageForPass(CommandRecordingContext* recordingContext,
                                         const TextureSubresourceUsage& textureUsages,
                                         std::vector<VkImageMemoryBarrier>* imageBarriers,
                                         VkPipelineStageFlags* srcStages,
                                         VkPipelineStageFlags* dstStages) {
        // Base Vulkan doesn't support transitioning depth and stencil separately. We work around
        // this limitation by combining the usages in the two planes of `textureUsages` into a
        // single plane in a new SubresourceStorage<TextureUsage>. The barriers will be produced
        // for DEPTH | STENCIL since the SubresourceRange uses Aspect::CombinedDepthStencil.
        if (ShouldCombineDepthStencilBarriers()) {
            SubresourceStorage<wgpu::TextureUsage> combinedUsages(
                Aspect::CombinedDepthStencil, GetArrayLayers(), GetNumMipLevels());
            textureUsages.Iterate([&](const SubresourceRange& range, wgpu::TextureUsage usage) {
                SubresourceRange updateRange = range;
                updateRange.aspects = Aspect::CombinedDepthStencil;

                combinedUsages.Update(
                    updateRange, [&](const SubresourceRange&, wgpu::TextureUsage* combinedUsage) {
                        *combinedUsage |= usage;
                    });
            });

            TransitionUsageForPassImpl(recordingContext, combinedUsages, imageBarriers, srcStages,
                                       dstStages);
        } else {
            TransitionUsageForPassImpl(recordingContext, textureUsages, imageBarriers, srcStages,
                                       dstStages);
        }
    }

    void Texture::TransitionUsageForPassImpl(
        CommandRecordingContext* recordingContext,
        const SubresourceStorage<wgpu::TextureUsage>& subresourceUsages,
        std::vector<VkImageMemoryBarrier>* imageBarriers,
        VkPipelineStageFlags* srcStages,
        VkPipelineStageFlags* dstStages) {
        size_t transitionBarrierStart = imageBarriers->size();
        const Format& format = GetFormat();

        wgpu::TextureUsage allUsages = wgpu::TextureUsage::None;
        wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;

        // TODO(crbug.com/dawn/814): support 1D textures.
        ASSERT(GetDimension() != wgpu::TextureDimension::e1D);

        mSubresourceLastUsages.Merge(
            subresourceUsages, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage,
                                   const wgpu::TextureUsage& newUsage) {
                if (newUsage == wgpu::TextureUsage::None ||
                    CanReuseWithoutBarrier(*lastUsage, newUsage)) {
                    return;
                }

                imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, newUsage, range));

                allLastUsages |= *lastUsage;
                allUsages |= newUsage;

                *lastUsage = newUsage;
            });

        if (mExternalState != ExternalState::InternalOnly) {
            TweakTransitionForExternalUsage(recordingContext, imageBarriers,
                                            transitionBarrierStart);
        }

        *srcStages |= VulkanPipelineStage(allLastUsages, format);
        *dstStages |= VulkanPipelineStage(allUsages, format);
    }

1056     void Texture::TransitionUsageNow(CommandRecordingContext* recordingContext,
1057                                      wgpu::TextureUsage usage,
1058                                      const SubresourceRange& range) {
1059         std::vector<VkImageMemoryBarrier> barriers;
1060 
1061         VkPipelineStageFlags srcStages = 0;
1062         VkPipelineStageFlags dstStages = 0;
1063 
1064         TransitionUsageAndGetResourceBarrier(usage, range, &barriers, &srcStages, &dstStages);
1065 
1066         if (mExternalState != ExternalState::InternalOnly) {
1067             TweakTransitionForExternalUsage(recordingContext, &barriers, 0);
1068         }
1069 
1070         if (!barriers.empty()) {
1071             ASSERT(srcStages != 0 && dstStages != 0);
1072             ToBackend(GetDevice())
1073                 ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
1074                                         nullptr, 0, nullptr, barriers.size(), barriers.data());
1075         }
1076     }
1077 
1078     void Texture::TransitionUsageAndGetResourceBarrier(
1079         wgpu::TextureUsage usage,
1080         const SubresourceRange& range,
1081         std::vector<VkImageMemoryBarrier>* imageBarriers,
1082         VkPipelineStageFlags* srcStages,
1083         VkPipelineStageFlags* dstStages) {
1084         // Base Vulkan doesn't support transitioning depth and stencil separately. We work around
1085         // this limitation by modifying the range to be on CombinedDepthStencil. The barriers will
1086         // be produced for DEPTH | STENCIL since the SubresourceRange uses
1087         // Aspect::CombinedDepthStencil.
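        // For example, a transition requested on only the Depth aspect is widened here so that the
        // resulting barrier covers both the depth and stencil planes of the image.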
1088         if (ShouldCombineDepthStencilBarriers()) {
1089             SubresourceRange updatedRange = range;
1090             updatedRange.aspects = Aspect::CombinedDepthStencil;
1091 
1093             TransitionUsageAndGetResourceBarrierImpl(usage, updatedRange, imageBarriers, srcStages,
1094                                                      dstStages);
1095         } else {
1096             TransitionUsageAndGetResourceBarrierImpl(usage, range, imageBarriers, srcStages,
1097                                                      dstStages);
1098         }
1099     }
1100 
1101     void Texture::TransitionUsageAndGetResourceBarrierImpl(
1102         wgpu::TextureUsage usage,
1103         const SubresourceRange& range,
1104         std::vector<VkImageMemoryBarrier>* imageBarriers,
1105         VkPipelineStageFlags* srcStages,
1106         VkPipelineStageFlags* dstStages) {
1107         ASSERT(imageBarriers != nullptr);
1108         const Format& format = GetFormat();
1109 
1110         wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
1111         mSubresourceLastUsages.Update(
1112             range, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage) {
1113                 if (CanReuseWithoutBarrier(*lastUsage, usage)) {
1114                     return;
1115                 }
1116 
1117                 imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, usage, range));
1118 
1119                 allLastUsages |= *lastUsage;
1120                 *lastUsage = usage;
1121             });
1122 
1123         *srcStages |= VulkanPipelineStage(allLastUsages, format);
1124         *dstStages |= VulkanPipelineStage(usage, format);
1125     }
1126 
1127     MaybeError Texture::ClearTexture(CommandRecordingContext* recordingContext,
1128                                      const SubresourceRange& range,
1129                                      TextureBase::ClearValue clearValue) {
1130         Device* device = ToBackend(GetDevice());
1131 
1132         const bool isZero = clearValue == TextureBase::ClearValue::Zero;
1133         uint32_t uClearColor = isZero ? 0 : 1;
1134         int32_t sClearColor = isZero ? 0 : 1;
1135         float fClearColor = isZero ? 0.f : 1.f;
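        // ClearValue::Zero clears every aspect to 0 / 0.0f; any other clear value clears to 1 / 1.0f.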
1136 
1137         TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
1138 
1139         VkImageSubresourceRange imageRange = {};
1140         imageRange.levelCount = 1;
1141         imageRange.layerCount = 1;
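        // Clears are recorded one mip level and one array layer at a time (the loops below update
        // baseMipLevel and baseArrayLayer) so that already-initialized subresources can be skipped.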
1142 
1143         if (GetFormat().isCompressed) {
1144             if (range.aspects == Aspect::None) {
1145                 return {};
1146             }
1147             // Compressed formats can't be cleared directly, so clear the texture with a copy from a buffer.
1148             ASSERT(range.aspects == Aspect::Color);
1149             const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(range.aspects).block;
1150 
1151             Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
1152 
1153             uint32_t bytesPerRow =
1154                 Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
1155                       device->GetOptimalBytesPerRowAlignment());
1156             uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
1157                                   largestMipSize.depthOrArrayLayers;
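            // Illustrative sizing example (hypothetical format): with 4x4 texel blocks of 8 bytes
            // and a 64x64 mip, bytesPerRow = Align((64 / 4) * 8, optimalBytesPerRowAlignment) and
            // bufferSize = bytesPerRow * (64 / 4) * depthOrArrayLayers.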
1158             DynamicUploader* uploader = device->GetDynamicUploader();
1159             UploadHandle uploadHandle;
1160             DAWN_TRY_ASSIGN(uploadHandle,
1161                             uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
1162                                                blockInfo.byteSize));
1163             memset(uploadHandle.mappedBuffer, uClearColor, bufferSize);
1164 
1165             std::vector<VkBufferImageCopy> regions;
1166             for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
1167                  ++level) {
1168                 Extent3D copySize = GetMipLevelPhysicalSize(level);
1169                 imageRange.baseMipLevel = level;
1170                 for (uint32_t layer = range.baseArrayLayer;
1171                      layer < range.baseArrayLayer + range.layerCount; ++layer) {
1172                     if (clearValue == TextureBase::ClearValue::Zero &&
1173                         IsSubresourceContentInitialized(
1174                             SubresourceRange::SingleMipAndLayer(level, layer, range.aspects))) {
1175                         // Skip lazy clears if already initialized.
1176                         continue;
1177                     }
1178 
1179                     TextureDataLayout dataLayout;
1180                     dataLayout.offset = uploadHandle.startOffset;
1181                     dataLayout.rowsPerImage = copySize.height / blockInfo.height;
1182                     dataLayout.bytesPerRow = bytesPerRow;
1183                     TextureCopy textureCopy;
1184                     textureCopy.aspect = range.aspects;
1185                     textureCopy.mipLevel = level;
1186                     textureCopy.origin = {0, 0, layer};
1187                     textureCopy.texture = this;
1188 
1189                     regions.push_back(
1190                         ComputeBufferImageCopyRegion(dataLayout, textureCopy, copySize));
1191                 }
1192             }
1193             device->fn.CmdCopyBufferToImage(
1194                 recordingContext->commandBuffer,
1195                 ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(), GetHandle(),
1196                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, regions.size(), regions.data());
1197         } else {
1198             for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
1199                  ++level) {
1200                 imageRange.baseMipLevel = level;
1201                 for (uint32_t layer = range.baseArrayLayer;
1202                      layer < range.baseArrayLayer + range.layerCount; ++layer) {
1203                     Aspect aspects = Aspect::None;
1204                     for (Aspect aspect : IterateEnumMask(range.aspects)) {
1205                         if (clearValue == TextureBase::ClearValue::Zero &&
1206                             IsSubresourceContentInitialized(
1207                                 SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
1208                             // Skip lazy clears if already initialized.
1209                             continue;
1210                         }
1211                         aspects |= aspect;
1212                     }
1213 
1214                     if (aspects == Aspect::None) {
1215                         continue;
1216                     }
1217 
1218                     imageRange.aspectMask = VulkanAspectMask(aspects);
1219                     imageRange.baseArrayLayer = layer;
1220 
1221                     if (aspects &
1222                         (Aspect::Depth | Aspect::Stencil | Aspect::CombinedDepthStencil)) {
1223                         VkClearDepthStencilValue clearDepthStencilValue[1];
1224                         clearDepthStencilValue[0].depth = fClearColor;
1225                         clearDepthStencilValue[0].stencil = uClearColor;
1226                         device->fn.CmdClearDepthStencilImage(
1227                             recordingContext->commandBuffer, GetHandle(),
1228                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clearDepthStencilValue, 1,
1229                             &imageRange);
1230                     } else {
1231                         ASSERT(aspects == Aspect::Color);
1232                         VkClearColorValue clearColorValue;
1233                         switch (GetFormat().GetAspectInfo(Aspect::Color).baseType) {
1234                             case wgpu::TextureComponentType::Float:
1235                                 clearColorValue.float32[0] = fClearColor;
1236                                 clearColorValue.float32[1] = fClearColor;
1237                                 clearColorValue.float32[2] = fClearColor;
1238                                 clearColorValue.float32[3] = fClearColor;
1239                                 break;
1240                             case wgpu::TextureComponentType::Sint:
1241                                 clearColorValue.int32[0] = sClearColor;
1242                                 clearColorValue.int32[1] = sClearColor;
1243                                 clearColorValue.int32[2] = sClearColor;
1244                                 clearColorValue.int32[3] = sClearColor;
1245                                 break;
1246                             case wgpu::TextureComponentType::Uint:
1247                                 clearColorValue.uint32[0] = uClearColor;
1248                                 clearColorValue.uint32[1] = uClearColor;
1249                                 clearColorValue.uint32[2] = uClearColor;
1250                                 clearColorValue.uint32[3] = uClearColor;
1251                                 break;
1252                             case wgpu::TextureComponentType::DepthComparison:
1253                                 UNREACHABLE();
1254                         }
1255                         device->fn.CmdClearColorImage(recordingContext->commandBuffer, GetHandle(),
1256                                                       VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1257                                                       &clearColorValue, 1, &imageRange);
1258                     }
1259                 }
1260             }
1261         }
1262 
1263         if (clearValue == TextureBase::ClearValue::Zero) {
1264             SetIsSubresourceContentInitialized(true, range);
1265             device->IncrementLazyClearCountForTesting();
1266         }
1267         return {};
1268     }
1269 
1270     void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
1271                                                       const SubresourceRange& range) {
1272         if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
1273             return;
1274         }
1275         if (!IsSubresourceContentInitialized(range)) {
1276             // If the subresource has not been initialized, clear it to black as it could contain
1277             // dirty bits from recycled memory.
1278             GetDevice()->ConsumedError(
1279                 ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
1280         }
1281     }
1282 
1283     VkImageLayout Texture::GetCurrentLayoutForSwapChain() const {
1284         return VulkanImageLayout(this, mSubresourceLastUsages.Get(Aspect::Color, 0, 0));
1285     }
1286 
1287     // static
1288     ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
1289                                                         const TextureViewDescriptor* descriptor) {
1290         Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
1291         DAWN_TRY(view->Initialize(descriptor));
1292         return view;
1293     }
1294 
1295     MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
1296         if ((GetTexture()->GetUsage() &
1297              ~(wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) == 0) {
1298             // If the texture has no usage other than CopySrc and CopyDst, it can't actually be
1299             // used as a render pass attachment or as a sampled/storage texture. The Vulkan
1300             // validation layers warn when such a VkImageView is created, so return early.
1301             return {};
1302         }
1303 
1304         // The texture could have been destroyed by the time we make a view.
1305         if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
1306             return {};
1307         }
1308 
1309         Device* device = ToBackend(GetTexture()->GetDevice());
1310 
1311         VkImageViewCreateInfo createInfo;
1312         createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
1313         createInfo.pNext = nullptr;
1314         createInfo.flags = 0;
1315         createInfo.image = ToBackend(GetTexture())->GetHandle();
1316         createInfo.viewType = VulkanImageViewType(descriptor->dimension);
1317         createInfo.format = VulkanImageFormat(device, descriptor->format);
1318         createInfo.components = VkComponentMapping{VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
1319                                                    VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
1320 
1321         const SubresourceRange& subresources = GetSubresourceRange();
1322         createInfo.subresourceRange.baseMipLevel = subresources.baseMipLevel;
1323         createInfo.subresourceRange.levelCount = subresources.levelCount;
1324         createInfo.subresourceRange.baseArrayLayer = subresources.baseArrayLayer;
1325         createInfo.subresourceRange.layerCount = subresources.layerCount;
1326         createInfo.subresourceRange.aspectMask = VulkanAspectMask(subresources.aspects);
1327 
1328         DAWN_TRY(CheckVkSuccess(
1329             device->fn.CreateImageView(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
1330             "CreateImageView"));
1331 
1332         SetLabelImpl();
1333 
1334         return {};
1335     }
1336 
1337     TextureView::~TextureView() {
1338     }
1339 
1340     void TextureView::DestroyImpl() {
1341         Device* device = ToBackend(GetTexture()->GetDevice());
1342 
1343         if (mHandle != VK_NULL_HANDLE) {
1344             device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
1345             mHandle = VK_NULL_HANDLE;
1346         }
1347     }
1348 
1349     VkImageView TextureView::GetHandle() const {
1350         return mHandle;
1351     }
1352 
1353     void TextureView::SetLabelImpl() {
1354         SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_IMAGE_VIEW,
1355                      reinterpret_cast<uint64_t&>(mHandle), "Dawn_InternalTextureView", GetLabel());
1356     }
1357 
1358 }}  // namespace dawn_native::vulkan
1359