// Copyright 2018 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "VkImage.hpp"

#include "VkBuffer.hpp"
#include "VkDevice.hpp"
#include "VkDeviceMemory.hpp"
#include "VkImageView.hpp"
#include "VkStringify.hpp"
#include "VkStructConversion.hpp"
#include "Device/ASTC_Decoder.hpp"
#include "Device/BC_Decoder.hpp"
#include "Device/Blitter.hpp"
#include "Device/ETC_Decoder.hpp"

#ifdef __ANDROID__
#	include "System/GrallocAndroid.hpp"
#	include "VkDeviceMemoryExternalAndroid.hpp"
#endif

#include <cstring>

namespace {

ETC_Decoder::InputType GetInputType(const vk::Format &format)
{
	switch(format)
	{
	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
		return ETC_Decoder::ETC_R_UNSIGNED;
	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
		return ETC_Decoder::ETC_R_SIGNED;
	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
		return ETC_Decoder::ETC_RG_UNSIGNED;
	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
		return ETC_Decoder::ETC_RG_SIGNED;
	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
		return ETC_Decoder::ETC_RGB;
	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
		return ETC_Decoder::ETC_RGB_PUNCHTHROUGH_ALPHA;
	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
		return ETC_Decoder::ETC_RGBA;
	default:
		UNSUPPORTED("format: %d", int(format));
		return ETC_Decoder::ETC_RGBA;
	}
}

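// Returns the 'n' of the BCn block-compression family that the format belongs to.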
int GetBCn(const vk::Format &format)
{
	switch(format)
	{
	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
		return 1;
	case VK_FORMAT_BC2_UNORM_BLOCK:
	case VK_FORMAT_BC2_SRGB_BLOCK:
		return 2;
	case VK_FORMAT_BC3_UNORM_BLOCK:
	case VK_FORMAT_BC3_SRGB_BLOCK:
		return 3;
	case VK_FORMAT_BC4_UNORM_BLOCK:
	case VK_FORMAT_BC4_SNORM_BLOCK:
		return 4;
	case VK_FORMAT_BC5_UNORM_BLOCK:
	case VK_FORMAT_BC5_SNORM_BLOCK:
		return 5;
	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
		return 6;
	case VK_FORMAT_BC7_UNORM_BLOCK:
	case VK_FORMAT_BC7_SRGB_BLOCK:
		return 7;
	default:
		UNSUPPORTED("format: %d", int(format));
		return 0;
	}
}

// Returns true for BC1 if we have an RGB format, false for RGBA
// Returns true for BC4, BC5, BC6H if we have an unsigned format, false for signed
// Ignored by BC2, BC3, and BC7
bool GetNoAlphaOrUnsigned(const vk::Format &format)
{
	switch(format)
	{
	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
	case VK_FORMAT_BC4_UNORM_BLOCK:
	case VK_FORMAT_BC5_UNORM_BLOCK:
	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
		return true;
	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
	case VK_FORMAT_BC2_UNORM_BLOCK:
	case VK_FORMAT_BC2_SRGB_BLOCK:
	case VK_FORMAT_BC3_UNORM_BLOCK:
	case VK_FORMAT_BC3_SRGB_BLOCK:
	case VK_FORMAT_BC4_SNORM_BLOCK:
	case VK_FORMAT_BC5_SNORM_BLOCK:
	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
	case VK_FORMAT_BC7_SRGB_BLOCK:
	case VK_FORMAT_BC7_UNORM_BLOCK:
		return false;
	default:
		UNSUPPORTED("format: %d", int(format));
		return false;
	}
}

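// Returns the format to use for the image, taking any structures in the
// VkImageCreateInfo pNext chain into account.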
VkFormat GetImageFormat(const VkImageCreateInfo *pCreateInfo)
{
	const auto *nextInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
	while(nextInfo)
	{
		// Casting to an int since some structures, such as VK_STRUCTURE_TYPE_NATIVE_BUFFER_ANDROID and
		// VK_STRUCTURE_TYPE_SWAPCHAIN_IMAGE_CREATE_INFO_ANDROID, are not enumerated in the official Vulkan headers.
		switch((int)(nextInfo->sType))
		{
#ifdef __ANDROID__
		case VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID:
			{
				const VkExternalFormatANDROID *externalFormatAndroid = reinterpret_cast<const VkExternalFormatANDROID *>(nextInfo);

				// VkExternalFormatANDROID: "If externalFormat is zero, the effect is as if the VkExternalFormatANDROID structure was not present."
				if(externalFormatAndroid->externalFormat == 0)
				{
					break;
				}

				const VkFormat correspondingVkFormat = AHardwareBufferExternalMemory::GetVkFormatFromAHBFormat(externalFormatAndroid->externalFormat);
				ASSERT(pCreateInfo->format == VK_FORMAT_UNDEFINED || pCreateInfo->format == correspondingVkFormat);
				return correspondingVkFormat;
			}
			break;
		case VK_STRUCTURE_TYPE_NATIVE_BUFFER_ANDROID:
			break;
		case VK_STRUCTURE_TYPE_SWAPCHAIN_IMAGE_CREATE_INFO_ANDROID:
			break;
#endif
		// We support these extensions, but they don't affect the image format.
		case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO:
		case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
		case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO:
		case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO:
			break;
		case VK_STRUCTURE_TYPE_MAX_ENUM:
			// dEQP tests that this value is ignored.
			break;
		default:
			UNSUPPORTED("pCreateInfo->pNext->sType = %s", vk::Stringify(nextInfo->sType).c_str());
			break;
		}

		nextInfo = nextInfo->pNext;
	}

	return pCreateInfo->format;
}

}  // anonymous namespace

namespace vk {

Image::Image(const VkImageCreateInfo *pCreateInfo, void *mem, Device *device)
    : device(device)
    , flags(pCreateInfo->flags)
    , imageType(pCreateInfo->imageType)
    , format(GetImageFormat(pCreateInfo))
    , extent(pCreateInfo->extent)
    , mipLevels(pCreateInfo->mipLevels)
    , arrayLayers(pCreateInfo->arrayLayers)
    , samples(pCreateInfo->samples)
    , tiling(pCreateInfo->tiling)
    , usage(pCreateInfo->usage)
{
	if(format.isCompressed())
	{
		VkImageCreateInfo compressedImageCreateInfo = *pCreateInfo;
		compressedImageCreateInfo.format = format.getDecompressedFormat();
		decompressedImage = new(mem) Image(&compressedImageCreateInfo, nullptr, device);
	}

	const auto *externalInfo = GetExtendedStruct<VkExternalMemoryImageCreateInfo>(pCreateInfo->pNext, VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
	if(externalInfo)
	{
		supportedExternalMemoryHandleTypes = externalInfo->handleTypes;
	}
}

void Image::destroy(const VkAllocationCallbacks *pAllocator)
{
	if(decompressedImage)
	{
		vk::freeHostMemory(decompressedImage, pAllocator);
	}
}

size_t Image::ComputeRequiredAllocationSize(const VkImageCreateInfo *pCreateInfo)
{
	return Format(pCreateInfo->format).isCompressed() ? sizeof(Image) : 0;
}

const VkMemoryRequirements Image::getMemoryRequirements() const
{
	VkMemoryRequirements memoryRequirements;
	memoryRequirements.alignment = vk::MEMORY_REQUIREMENTS_OFFSET_ALIGNMENT;
	memoryRequirements.memoryTypeBits = vk::MEMORY_TYPE_GENERIC_BIT;
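	// Compressed images are decompressed into a companion image which shares the
	// same allocation, so its storage must be included in the reported size.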
	memoryRequirements.size = getStorageSize(format.getAspects()) +
	                          (decompressedImage ? decompressedImage->getStorageSize(decompressedImage->format.getAspects()) : 0);
	return memoryRequirements;
}

void Image::getMemoryRequirements(VkMemoryRequirements2 *pMemoryRequirements) const
{
	VkBaseOutStructure *extensionRequirements = reinterpret_cast<VkBaseOutStructure *>(pMemoryRequirements->pNext);
	while(extensionRequirements)
	{
		switch(extensionRequirements->sType)
		{
		case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
			{
				auto *requirements = reinterpret_cast<VkMemoryDedicatedRequirements *>(extensionRequirements);
				device->getRequirements(requirements);
#if SWIFTSHADER_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER
				if(getSupportedExternalMemoryHandleTypes() == VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)
				{
					requirements->prefersDedicatedAllocation = VK_TRUE;
					requirements->requiresDedicatedAllocation = VK_TRUE;
				}
#endif
			}
			break;
		default:
			UNSUPPORTED("pMemoryRequirements->pNext sType = %s", vk::Stringify(extensionRequirements->sType).c_str());
			break;
		}

		extensionRequirements = extensionRequirements->pNext;
	}

	pMemoryRequirements->memoryRequirements = getMemoryRequirements();
}

size_t Image::getSizeInBytes(const VkImageSubresourceRange &subresourceRange) const
{
	size_t size = 0;
	uint32_t lastLayer = getLastLayerIndex(subresourceRange);
	uint32_t lastMipLevel = getLastMipLevel(subresourceRange);
	uint32_t layerCount = lastLayer - subresourceRange.baseArrayLayer + 1;
	uint32_t mipLevelCount = lastMipLevel - subresourceRange.baseMipLevel + 1;

	auto aspect = static_cast<VkImageAspectFlagBits>(subresourceRange.aspectMask);

	if(layerCount > 1)
	{
		if(mipLevelCount < mipLevels)  // Compute the size for all layers except the last one, then add only the relevant mip level sizes for the last layer
		{
			size = (layerCount - 1) * getLayerSize(aspect);
			for(uint32_t mipLevel = subresourceRange.baseMipLevel; mipLevel <= lastMipLevel; ++mipLevel)
			{
				size += getMultiSampledLevelSize(aspect, mipLevel);
			}
		}
		else  // All mip levels used, compute full layer sizes
		{
			size = layerCount * getLayerSize(aspect);
		}
	}
	else  // Single layer, add all mip levels in the subresource range
	{
		for(uint32_t mipLevel = subresourceRange.baseMipLevel; mipLevel <= lastMipLevel; ++mipLevel)
		{
			size += getMultiSampledLevelSize(aspect, mipLevel);
		}
	}

	return size;
}

bool Image::canBindToMemory(DeviceMemory *pDeviceMemory) const
{
	return pDeviceMemory->checkExternalMemoryHandleType(supportedExternalMemoryHandleTypes);
}

void Image::bind(DeviceMemory *pDeviceMemory, VkDeviceSize pMemoryOffset)
{
	deviceMemory = pDeviceMemory;
	memoryOffset = pMemoryOffset;
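	// The decompressed copy of a compressed image shares its allocation, placed
	// directly after the compressed data (see getMemoryRequirements()).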
	if(decompressedImage)
	{
		decompressedImage->deviceMemory = deviceMemory;
		decompressedImage->memoryOffset = memoryOffset + getStorageSize(format.getAspects());
	}
}

#ifdef __ANDROID__
VkResult Image::prepareForExternalUseANDROID() const
{
	void *nativeBuffer = nullptr;
	VkExtent3D extent = getMipLevelExtent(VK_IMAGE_ASPECT_COLOR_BIT, 0);

	buffer_handle_t importedBufferHandle = nullptr;
	if(GrallocModule::getInstance()->import(backingMemory.nativeHandle, &importedBufferHandle) != 0)
	{
		return VK_ERROR_OUT_OF_DATE_KHR;
	}
	if(!importedBufferHandle)
	{
		return VK_ERROR_OUT_OF_DATE_KHR;
	}

	if(GrallocModule::getInstance()->lock(importedBufferHandle, GRALLOC_USAGE_SW_WRITE_OFTEN, 0, 0, extent.width, extent.height, &nativeBuffer) != 0)
	{
		return VK_ERROR_OUT_OF_DATE_KHR;
	}

	if(!nativeBuffer)
	{
		return VK_ERROR_OUT_OF_DATE_KHR;
	}

	int imageRowBytes = rowPitchBytes(VK_IMAGE_ASPECT_COLOR_BIT, 0);
	int bufferRowBytes = backingMemory.stride * getFormat().bytes();
	ASSERT(imageRowBytes <= bufferRowBytes);

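	// Copy the image to the gralloc buffer row by row, since the buffer's stride
	// can be larger than the image's row pitch.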
	uint8_t *srcBuffer = static_cast<uint8_t *>(deviceMemory->getOffsetPointer(0));
	uint8_t *dstBuffer = static_cast<uint8_t *>(nativeBuffer);
	for(uint32_t i = 0; i < extent.height; i++)
	{
		memcpy(dstBuffer + (i * bufferRowBytes), srcBuffer + (i * imageRowBytes), imageRowBytes);
	}

	if(GrallocModule::getInstance()->unlock(importedBufferHandle) != 0)
	{
		return VK_ERROR_OUT_OF_DATE_KHR;
	}

	if(GrallocModule::getInstance()->release(importedBufferHandle) != 0)
	{
		return VK_ERROR_OUT_OF_DATE_KHR;
	}

	return VK_SUCCESS;
}

VkDeviceMemory Image::getExternalMemory() const
{
	return backingMemory.externalMemory ? *deviceMemory : VkDeviceMemory{ VK_NULL_HANDLE };
}
#endif

void Image::getSubresourceLayout(const VkImageSubresource *pSubresource, VkSubresourceLayout *pLayout) const
{
	// By spec, aspectMask has a single bit set.
	if(!((pSubresource->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
	     (pSubresource->aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
	     (pSubresource->aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ||
	     (pSubresource->aspectMask == VK_IMAGE_ASPECT_PLANE_0_BIT) ||
	     (pSubresource->aspectMask == VK_IMAGE_ASPECT_PLANE_1_BIT) ||
	     (pSubresource->aspectMask == VK_IMAGE_ASPECT_PLANE_2_BIT)))
	{
		UNSUPPORTED("aspectMask %X", pSubresource->aspectMask);
	}

	auto aspect = static_cast<VkImageAspectFlagBits>(pSubresource->aspectMask);
	pLayout->offset = getSubresourceOffset(aspect, pSubresource->mipLevel, pSubresource->arrayLayer);
	pLayout->size = getMultiSampledLevelSize(aspect, pSubresource->mipLevel);
	pLayout->rowPitch = rowPitchBytes(aspect, pSubresource->mipLevel);
	pLayout->depthPitch = slicePitchBytes(aspect, pSubresource->mipLevel);
	pLayout->arrayPitch = getLayerSize(aspect);
}

void Image::copyTo(Image *dstImage, const VkImageCopy2KHR &region) const
{
	static constexpr VkImageAspectFlags CombinedDepthStencilAspects =
	    VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
	if((region.srcSubresource.aspectMask == CombinedDepthStencilAspects) &&
	   (region.dstSubresource.aspectMask == CombinedDepthStencilAspects))
	{
		// Depth and stencil can be specified together; copy each aspect separately.
		VkImageCopy2KHR singleAspectRegion = region;
		singleAspectRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
		singleAspectRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
		copySingleAspectTo(dstImage, singleAspectRegion);
		singleAspectRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
		singleAspectRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
		copySingleAspectTo(dstImage, singleAspectRegion);
		return;
	}

	copySingleAspectTo(dstImage, region);
}

void Image::copySingleAspectTo(Image *dstImage, const VkImageCopy2KHR &region) const
{
	// Image copy does not perform any conversion; it simply copies memory from
	// one image to another image that has the same number of bytes per pixel.

	if(!((region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
	     (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
	     (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ||
	     (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_0_BIT) ||
	     (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_1_BIT) ||
	     (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_2_BIT)))
	{
		UNSUPPORTED("srcSubresource.aspectMask %X", region.srcSubresource.aspectMask);
	}

	if(!((region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
	     (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
	     (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ||
	     (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_0_BIT) ||
	     (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_1_BIT) ||
	     (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_2_BIT)))
	{
		UNSUPPORTED("dstSubresource.aspectMask %X", region.dstSubresource.aspectMask);
	}

	VkImageAspectFlagBits srcAspect = static_cast<VkImageAspectFlagBits>(region.srcSubresource.aspectMask);
	VkImageAspectFlagBits dstAspect = static_cast<VkImageAspectFlagBits>(region.dstSubresource.aspectMask);

	Format srcFormat = getFormat(srcAspect);
	Format dstFormat = dstImage->getFormat(dstAspect);
	int bytesPerBlock = srcFormat.bytesPerBlock();
	ASSERT(bytesPerBlock == dstFormat.bytesPerBlock());
	ASSERT(samples == dstImage->samples);

	VkExtent3D srcExtent = getMipLevelExtent(srcAspect, region.srcSubresource.mipLevel);
	VkExtent3D dstExtent = dstImage->getMipLevelExtent(dstAspect, region.dstSubresource.mipLevel);
	VkExtent3D copyExtent = imageExtentInBlocks(region.extent, srcAspect);

	VkImageType srcImageType = imageType;
	VkImageType dstImageType = dstImage->getImageType();
	bool one3D = (srcImageType == VK_IMAGE_TYPE_3D) != (dstImageType == VK_IMAGE_TYPE_3D);
	bool both3D = (srcImageType == VK_IMAGE_TYPE_3D) && (dstImageType == VK_IMAGE_TYPE_3D);

	// Texel layout pitches, using the VkSubresourceLayout nomenclature.
	int srcRowPitch = rowPitchBytes(srcAspect, region.srcSubresource.mipLevel);
	int srcDepthPitch = slicePitchBytes(srcAspect, region.srcSubresource.mipLevel);
	int dstRowPitch = dstImage->rowPitchBytes(dstAspect, region.dstSubresource.mipLevel);
	int dstDepthPitch = dstImage->slicePitchBytes(dstAspect, region.dstSubresource.mipLevel);
	VkDeviceSize srcArrayPitch = getLayerSize(srcAspect);
	VkDeviceSize dstArrayPitch = dstImage->getLayerSize(dstAspect);
	// These are the pitches used when iterating over the layers that are being copied by the
	// vkCmdCopyImage command. They can differ from the above array pitches because the spec states that:
	// "If one image is VK_IMAGE_TYPE_3D and the other image is VK_IMAGE_TYPE_2D with multiple
	// layers, then each slice is copied to or from a different layer."
	VkDeviceSize srcLayerPitch = (srcImageType == VK_IMAGE_TYPE_3D) ? srcDepthPitch : srcArrayPitch;
	VkDeviceSize dstLayerPitch = (dstImageType == VK_IMAGE_TYPE_3D) ? dstDepthPitch : dstArrayPitch;

	// If one image is 3D, extent.depth must match the layer count. If both images are 2D,
	// depth is 1 but the source and destination subresource layer count must match.
	uint32_t layerCount = one3D ? copyExtent.depth : region.srcSubresource.layerCount;

	// Copies between 2D and 3D images are treated as layers, so only use depth as the slice count when
	// both images are 3D.
	// Multisample images are currently implemented similar to 3D images by storing one sample per slice.
	// TODO(b/160600347): Store samples consecutively.
	uint32_t sliceCount = both3D ? copyExtent.depth : samples;

	bool isSingleSlice = (sliceCount == 1);
	bool isSingleRow = (copyExtent.height == 1) && isSingleSlice;
	// In order to copy multiple rows using a single memcpy call, we
	// must be copying entire rows, and both the source and destination
	// rows must have the same size in bytes
	bool isEntireRow = (region.extent.width == srcExtent.width) &&
	                   (region.extent.width == dstExtent.width) &&
	                   // For non-compressed formats, blockWidth is 1. For compressed
	                   // formats, rowPitchBytes returns the number of bytes for a row of
	                   // blocks, so we have to divide by the block width, which means:
	                   // srcRowPitchBytes / srcBlockWidth == dstRowPitchBytes / dstBlockWidth
	                   // And, to avoid potentially non-exact integer division, for example if a
	                   // block has 16 bytes and represents 5 texels, we change the equation to:
	                   // srcRowPitchBytes * dstBlockWidth == dstRowPitchBytes * srcBlockWidth
	                   ((srcRowPitch * dstFormat.blockWidth()) ==
	                    (dstRowPitch * srcFormat.blockWidth()));
	// In order to copy multiple slices using a single memcpy call, we
	// must be copying entire slices, and both the source and destination
	// slices must have the same size in bytes
	bool isEntireSlice = isEntireRow &&
	                     (copyExtent.height == srcExtent.height) &&
	                     (copyExtent.height == dstExtent.height) &&
	                     (srcDepthPitch == dstDepthPitch);

	const uint8_t *srcLayer = static_cast<const uint8_t *>(getTexelPointer(region.srcOffset, ImageSubresource(region.srcSubresource)));
	uint8_t *dstLayer = static_cast<uint8_t *>(dstImage->getTexelPointer(region.dstOffset, ImageSubresource(region.dstSubresource)));

	for(uint32_t layer = 0; layer < layerCount; layer++)
	{
		if(isSingleRow)  // Copy one row
		{
			size_t copySize = copyExtent.width * bytesPerBlock;
			ASSERT((srcLayer + copySize) < end());
			ASSERT((dstLayer + copySize) < dstImage->end());
			memcpy(dstLayer, srcLayer, copySize);
		}
		else if(isEntireRow && isSingleSlice)  // Copy one slice
		{
			size_t copySize = copyExtent.height * srcRowPitch;
			ASSERT((srcLayer + copySize) < end());
			ASSERT((dstLayer + copySize) < dstImage->end());
			memcpy(dstLayer, srcLayer, copySize);
		}
		else if(isEntireSlice)  // Copy multiple slices
		{
			size_t copySize = sliceCount * srcDepthPitch;
			ASSERT((srcLayer + copySize) < end());
			ASSERT((dstLayer + copySize) < dstImage->end());
			memcpy(dstLayer, srcLayer, copySize);
		}
		else if(isEntireRow)  // Copy slice by slice
		{
			size_t sliceSize = copyExtent.height * srcRowPitch;
			const uint8_t *srcSlice = srcLayer;
			uint8_t *dstSlice = dstLayer;

			for(uint32_t z = 0; z < sliceCount; z++)
			{
				ASSERT((srcSlice + sliceSize) < end());
				ASSERT((dstSlice + sliceSize) < dstImage->end());

				memcpy(dstSlice, srcSlice, sliceSize);

				dstSlice += dstDepthPitch;
				srcSlice += srcDepthPitch;
			}
		}
		else  // Copy row by row
		{
			size_t rowSize = copyExtent.width * bytesPerBlock;
			const uint8_t *srcSlice = srcLayer;
			uint8_t *dstSlice = dstLayer;

			for(uint32_t z = 0; z < sliceCount; z++)
			{
				const uint8_t *srcRow = srcSlice;
				uint8_t *dstRow = dstSlice;

				for(uint32_t y = 0; y < copyExtent.height; y++)
				{
					ASSERT((srcRow + rowSize) < end());
					ASSERT((dstRow + rowSize) < dstImage->end());

					memcpy(dstRow, srcRow, rowSize);

					srcRow += srcRowPitch;
					dstRow += dstRowPitch;
				}

				srcSlice += srcDepthPitch;
				dstSlice += dstDepthPitch;
			}
		}

		srcLayer += srcLayerPitch;
		dstLayer += dstLayerPitch;
	}

	dstImage->contentsChanged(ImageSubresourceRange(region.dstSubresource));
}

void Image::copy(Buffer *buffer, const VkBufferImageCopy2KHR &region, bool bufferIsSource)
{
	switch(region.imageSubresource.aspectMask)
	{
	case VK_IMAGE_ASPECT_COLOR_BIT:
	case VK_IMAGE_ASPECT_DEPTH_BIT:
	case VK_IMAGE_ASPECT_STENCIL_BIT:
	case VK_IMAGE_ASPECT_PLANE_0_BIT:
	case VK_IMAGE_ASPECT_PLANE_1_BIT:
	case VK_IMAGE_ASPECT_PLANE_2_BIT:
		break;
	default:
		UNSUPPORTED("aspectMask %x", int(region.imageSubresource.aspectMask));
		break;
	}

	auto aspect = static_cast<VkImageAspectFlagBits>(region.imageSubresource.aspectMask);
	Format copyFormat = getFormat(aspect);

	VkExtent3D imageExtent = imageExtentInBlocks(region.imageExtent, aspect);

	if(imageExtent.width == 0 || imageExtent.height == 0 || imageExtent.depth == 0)
	{
		return;
	}

	VkExtent2D bufferExtent = bufferExtentInBlocks(Extent2D(imageExtent), region);
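	// bufferExtent already accounts for a bufferRowLength and bufferImageHeight
	// larger than the copied extent, so the buffer pitches derive from it directly.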
	int bytesPerBlock = copyFormat.bytesPerBlock();
	int bufferRowPitchBytes = bufferExtent.width * bytesPerBlock;
	int bufferSlicePitchBytes = bufferExtent.height * bufferRowPitchBytes;
	ASSERT(samples == 1);

	uint8_t *bufferMemory = static_cast<uint8_t *>(buffer->getOffsetPointer(region.bufferOffset));
	uint8_t *imageMemory = static_cast<uint8_t *>(getTexelPointer(region.imageOffset, ImageSubresource(region.imageSubresource)));
	uint8_t *srcMemory = bufferIsSource ? bufferMemory : imageMemory;
	uint8_t *dstMemory = bufferIsSource ? imageMemory : bufferMemory;
	int imageRowPitchBytes = rowPitchBytes(aspect, region.imageSubresource.mipLevel);
	int imageSlicePitchBytes = slicePitchBytes(aspect, region.imageSubresource.mipLevel);

	int srcSlicePitchBytes = bufferIsSource ? bufferSlicePitchBytes : imageSlicePitchBytes;
	int dstSlicePitchBytes = bufferIsSource ? imageSlicePitchBytes : bufferSlicePitchBytes;
	int srcRowPitchBytes = bufferIsSource ? bufferRowPitchBytes : imageRowPitchBytes;
	int dstRowPitchBytes = bufferIsSource ? imageRowPitchBytes : bufferRowPitchBytes;

	VkDeviceSize copySize = imageExtent.width * bytesPerBlock;

	VkDeviceSize imageLayerSize = getLayerSize(aspect);
	VkDeviceSize srcLayerSize = bufferIsSource ? bufferSlicePitchBytes : imageLayerSize;
	VkDeviceSize dstLayerSize = bufferIsSource ? imageLayerSize : bufferSlicePitchBytes;

	for(uint32_t i = 0; i < region.imageSubresource.layerCount; i++)
	{
		uint8_t *srcLayerMemory = srcMemory;
		uint8_t *dstLayerMemory = dstMemory;
		for(uint32_t z = 0; z < imageExtent.depth; z++)
		{
			uint8_t *srcSliceMemory = srcLayerMemory;
			uint8_t *dstSliceMemory = dstLayerMemory;
			for(uint32_t y = 0; y < imageExtent.height; y++)
			{
				ASSERT(((bufferIsSource ? dstSliceMemory : srcSliceMemory) + copySize) < end());
				ASSERT(((bufferIsSource ? srcSliceMemory : dstSliceMemory) + copySize) < buffer->end());
				memcpy(dstSliceMemory, srcSliceMemory, copySize);
				srcSliceMemory += srcRowPitchBytes;
				dstSliceMemory += dstRowPitchBytes;
			}
			srcLayerMemory += srcSlicePitchBytes;
			dstLayerMemory += dstSlicePitchBytes;
		}

		srcMemory += srcLayerSize;
		dstMemory += dstLayerSize;
	}

	if(bufferIsSource)
	{
		contentsChanged(ImageSubresourceRange(region.imageSubresource));
	}
}

void Image::copyTo(Buffer *dstBuffer, const VkBufferImageCopy2KHR &region)
{
	copy(dstBuffer, region, false);
}

void Image::copyFrom(Buffer *srcBuffer, const VkBufferImageCopy2KHR &region)
{
	copy(srcBuffer, region, true);
}

void *Image::getTexelPointer(const VkOffset3D &offset, const VkImageSubresource &subresource) const
{
	VkImageAspectFlagBits aspect = static_cast<VkImageAspectFlagBits>(subresource.aspectMask);
	return deviceMemory->getOffsetPointer(getMemoryOffset(aspect) +
	                                      texelOffsetBytesInStorage(offset, subresource) +
	                                      getSubresourceOffset(aspect, subresource.mipLevel, subresource.arrayLayer));
}

VkExtent3D Image::imageExtentInBlocks(const VkExtent3D &extent, VkImageAspectFlagBits aspect) const
{
	VkExtent3D adjustedExtent = extent;
	Format usedFormat = getFormat(aspect);
	if(usedFormat.isCompressed())
	{
		// When using a compressed format, we use the block as the base unit, instead of the texel
		int blockWidth = usedFormat.blockWidth();
		int blockHeight = usedFormat.blockHeight();

		// Mip level allocations round up to the next block for compressed textures
		adjustedExtent.width = ((adjustedExtent.width + blockWidth - 1) / blockWidth);
		adjustedExtent.height = ((adjustedExtent.height + blockHeight - 1) / blockHeight);
	}
	return adjustedExtent;
}

VkOffset3D Image::imageOffsetInBlocks(const VkOffset3D &offset, VkImageAspectFlagBits aspect) const
{
	VkOffset3D adjustedOffset = offset;
	Format usedFormat = getFormat(aspect);
	if(usedFormat.isCompressed())
	{
		// When using a compressed format, we use the block as the base unit, instead of the texel
		int blockWidth = usedFormat.blockWidth();
		int blockHeight = usedFormat.blockHeight();

		ASSERT(((offset.x % blockWidth) == 0) && ((offset.y % blockHeight) == 0));  // We can't offset within a block

		adjustedOffset.x /= blockWidth;
		adjustedOffset.y /= blockHeight;
	}
	return adjustedOffset;
}

VkExtent2D Image::bufferExtentInBlocks(const VkExtent2D &extent, const VkBufferImageCopy2KHR &region) const
{
	VkExtent2D adjustedExtent = extent;
	VkImageAspectFlagBits aspect = static_cast<VkImageAspectFlagBits>(region.imageSubresource.aspectMask);
	Format usedFormat = getFormat(aspect);

	if(region.bufferRowLength != 0)
	{
		adjustedExtent.width = region.bufferRowLength;

		if(usedFormat.isCompressed())
		{
			int blockWidth = usedFormat.blockWidth();
			ASSERT((adjustedExtent.width % blockWidth == 0) || (adjustedExtent.width + region.imageOffset.x == extent.width));
			adjustedExtent.width = (region.bufferRowLength + blockWidth - 1) / blockWidth;
		}
	}

	if(region.bufferImageHeight != 0)
	{
		adjustedExtent.height = region.bufferImageHeight;

		if(usedFormat.isCompressed())
		{
			int blockHeight = usedFormat.blockHeight();
			ASSERT((adjustedExtent.height % blockHeight == 0) || (adjustedExtent.height + region.imageOffset.y == extent.height));
			adjustedExtent.height = (region.bufferImageHeight + blockHeight - 1) / blockHeight;
		}
	}

	return adjustedExtent;
}

int Image::borderSize() const
{
	// We don't add a border to compressed cube textures; the border is added when the texture is decompressed
	return (isCubeCompatible() && !format.isCompressed()) ? 1 : 0;
}

VkDeviceSize Image::texelOffsetBytesInStorage(const VkOffset3D &offset, const VkImageSubresource &subresource) const
{
	VkImageAspectFlagBits aspect = static_cast<VkImageAspectFlagBits>(subresource.aspectMask);
	VkOffset3D adjustedOffset = imageOffsetInBlocks(offset, aspect);
	int border = borderSize();
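	// The cube border surrounds each 2D face, so only the x and y coordinates are offset by it.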
	return adjustedOffset.z * slicePitchBytes(aspect, subresource.mipLevel) +
	       (adjustedOffset.y + border) * rowPitchBytes(aspect, subresource.mipLevel) +
	       (adjustedOffset.x + border) * getFormat(aspect).bytesPerBlock();
}

VkExtent3D Image::getMipLevelExtent(VkImageAspectFlagBits aspect, uint32_t mipLevel) const
{
	VkExtent3D mipLevelExtent;
	mipLevelExtent.width = extent.width >> mipLevel;
	mipLevelExtent.height = extent.height >> mipLevel;
	mipLevelExtent.depth = extent.depth >> mipLevel;

	if(mipLevelExtent.width == 0) { mipLevelExtent.width = 1; }
	if(mipLevelExtent.height == 0) { mipLevelExtent.height = 1; }
	if(mipLevelExtent.depth == 0) { mipLevelExtent.depth = 1; }

	switch(aspect)
	{
	case VK_IMAGE_ASPECT_COLOR_BIT:
	case VK_IMAGE_ASPECT_DEPTH_BIT:
	case VK_IMAGE_ASPECT_STENCIL_BIT:
	case VK_IMAGE_ASPECT_PLANE_0_BIT:  // Vulkan 1.1 Table 31. Plane Format Compatibility Table: plane 0 of all defined formats is full resolution.
		break;
	case VK_IMAGE_ASPECT_PLANE_1_BIT:
	case VK_IMAGE_ASPECT_PLANE_2_BIT:
		switch(format)
		{
		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
		case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
			ASSERT(mipLevelExtent.width % 2 == 0 && mipLevelExtent.height % 2 == 0);  // Vulkan 1.1: "Images in this format must be defined with a width and height that is a multiple of two."
			// Vulkan 1.1 Table 31. Plane Format Compatibility Table:
			// Half-resolution U and V planes.
			mipLevelExtent.width /= 2;
			mipLevelExtent.height /= 2;
			break;
		default:
			UNSUPPORTED("format %d", int(format));
		}
		break;
	default:
		UNSUPPORTED("aspect %x", int(aspect));
	}

	return mipLevelExtent;
}

size_t Image::rowPitchBytes(VkImageAspectFlagBits aspect, uint32_t mipLevel) const
{
	if(deviceMemory && deviceMemory->hasExternalImagePlanes())
	{
		return deviceMemory->externalImageRowPitchBytes(aspect);
	}

	// Depth and stencil pitches should be computed separately
	ASSERT((aspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) !=
	       (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));

	VkExtent3D mipLevelExtent = getMipLevelExtent(aspect, mipLevel);
	Format usedFormat = getFormat(aspect);
	if(usedFormat.isCompressed())
	{
		VkExtent3D extentInBlocks = imageExtentInBlocks(mipLevelExtent, aspect);
		return extentInBlocks.width * usedFormat.bytesPerBlock();
	}

	return usedFormat.pitchB(mipLevelExtent.width, borderSize());
}

size_t Image::slicePitchBytes(VkImageAspectFlagBits aspect, uint32_t mipLevel) const
{
	// Depth and stencil slice pitches should be computed separately
	ASSERT((aspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) !=
	       (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));

	VkExtent3D mipLevelExtent = getMipLevelExtent(aspect, mipLevel);
	Format usedFormat = getFormat(aspect);
	if(usedFormat.isCompressed())
	{
		VkExtent3D extentInBlocks = imageExtentInBlocks(mipLevelExtent, aspect);
		return extentInBlocks.height * extentInBlocks.width * usedFormat.bytesPerBlock();
	}

	return usedFormat.sliceB(mipLevelExtent.width, mipLevelExtent.height, borderSize());
}

Format Image::getFormat(VkImageAspectFlagBits aspect) const
{
	return format.getAspectFormat(aspect);
}

bool Image::isCubeCompatible() const
{
	bool cubeCompatible = (flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT);
	ASSERT(!cubeCompatible || (imageType == VK_IMAGE_TYPE_2D));  // VUID-VkImageCreateInfo-flags-00949
	ASSERT(!cubeCompatible || (arrayLayers >= 6));               // VUID-VkImageCreateInfo-imageType-00954

	return cubeCompatible;
}

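// Returns a pointer past the end of the image's bound device memory,
// used only by the ASSERTs which bounds-check copies.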
uint8_t *Image::end() const
{
	return reinterpret_cast<uint8_t *>(deviceMemory->getOffsetPointer(deviceMemory->getCommittedMemoryInBytes() + 1));
}

VkDeviceSize Image::getMemoryOffset(VkImageAspectFlagBits aspect) const
{
	if(deviceMemory && deviceMemory->hasExternalImagePlanes())
	{
		return deviceMemory->externalImageMemoryOffset(aspect);
	}

	return memoryOffset;
}

VkDeviceSize Image::getAspectOffset(VkImageAspectFlagBits aspect) const
{
	switch(format)
	{
	case VK_FORMAT_D16_UNORM_S8_UINT:
	case VK_FORMAT_D24_UNORM_S8_UINT:
	case VK_FORMAT_D32_SFLOAT_S8_UINT:
		if(aspect == VK_IMAGE_ASPECT_STENCIL_BIT)
		{
			// Offset by the depth buffer to get to the stencil buffer
			return getStorageSize(VK_IMAGE_ASPECT_DEPTH_BIT);
		}
		break;

	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
		if(aspect == VK_IMAGE_ASPECT_PLANE_2_BIT)
		{
			return getStorageSize(VK_IMAGE_ASPECT_PLANE_1_BIT) + getStorageSize(VK_IMAGE_ASPECT_PLANE_0_BIT);
		}
		// Fall through to 2PLANE case:
	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
	case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
		if(aspect == VK_IMAGE_ASPECT_PLANE_1_BIT)
		{
			return getStorageSize(VK_IMAGE_ASPECT_PLANE_0_BIT);
		}
		else
		{
			ASSERT(aspect == VK_IMAGE_ASPECT_PLANE_0_BIT);

			return 0;
		}
		break;

	default:
		break;
	}

	return 0;
}

VkDeviceSize Image::getSubresourceOffset(VkImageAspectFlagBits aspect, uint32_t mipLevel, uint32_t layer) const
{
	// "If the image is disjoint, then the offset is relative to the base address of the plane.
	//  If the image is non-disjoint, then the offset is relative to the base address of the image."
	// Multi-plane external images are essentially disjoint.
	bool disjoint = (flags & VK_IMAGE_CREATE_DISJOINT_BIT) || (deviceMemory && deviceMemory->hasExternalImagePlanes());
	VkDeviceSize offset = !disjoint ? getAspectOffset(aspect) : 0;

	for(uint32_t i = 0; i < mipLevel; i++)
	{
		offset += getMultiSampledLevelSize(aspect, i);
	}

	return offset + layer * getLayerOffset(aspect, mipLevel);
}

VkDeviceSize Image::getMipLevelSize(VkImageAspectFlagBits aspect, uint32_t mipLevel) const
{
	return slicePitchBytes(aspect, mipLevel) * getMipLevelExtent(aspect, mipLevel).depth;
}

VkDeviceSize Image::getMultiSampledLevelSize(VkImageAspectFlagBits aspect, uint32_t mipLevel) const
{
	return getMipLevelSize(aspect, mipLevel) * samples;
}

bool Image::is3DSlice() const
{
	return ((imageType == VK_IMAGE_TYPE_3D) && (flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT));
}

VkDeviceSize Image::getLayerOffset(VkImageAspectFlagBits aspect, uint32_t mipLevel) const
{
	if(is3DSlice())
	{
		// When the VkImageSubresourceRange structure is used to select a subset of the slices of a 3D
		// image's mip level in order to create a 2D or 2D array image view of a 3D image created with
		// VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT, baseArrayLayer and layerCount specify the first
		// slice index and the number of slices to include in the created image view.
		ASSERT(samples == VK_SAMPLE_COUNT_1_BIT);

		// Offset to the proper slice of the 3D image's mip level
		return slicePitchBytes(aspect, mipLevel);
	}

	return getLayerSize(aspect);
}

VkDeviceSize Image::getLayerSize(VkImageAspectFlagBits aspect) const
{
	VkDeviceSize layerSize = 0;

	for(uint32_t mipLevel = 0; mipLevel < mipLevels; ++mipLevel)
	{
		layerSize += getMultiSampledLevelSize(aspect, mipLevel);
	}

	return layerSize;
}

VkDeviceSize Image::getStorageSize(VkImageAspectFlags aspectMask) const
{
	if((aspectMask & ~(VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT |
	                   VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT)) != 0)
	{
		UNSUPPORTED("aspectMask %x", int(aspectMask));
	}

	VkDeviceSize storageSize = 0;

	if(aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_COLOR_BIT);
	if(aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_DEPTH_BIT);
	if(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_STENCIL_BIT);
	if(aspectMask & VK_IMAGE_ASPECT_PLANE_0_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_PLANE_0_BIT);
	if(aspectMask & VK_IMAGE_ASPECT_PLANE_1_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_PLANE_1_BIT);
	if(aspectMask & VK_IMAGE_ASPECT_PLANE_2_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_PLANE_2_BIT);

	return arrayLayers * storageSize;
}

const Image *Image::getSampledImage(const vk::Format &imageViewFormat) const
{
	bool isImageViewCompressed = imageViewFormat.isCompressed();
	if(decompressedImage && !isImageViewCompressed)
	{
		ASSERT(flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT);
		ASSERT(format.bytesPerBlock() == imageViewFormat.bytesPerBlock());
	}
	// If the ImageView's format is compressed, then we do need to decompress the image so that
	// it may be sampled properly by texture sampling functions, which don't support compressed
	// textures. If the ImageView's format is NOT compressed, then we reinterpret cast the
	// compressed image into the ImageView's format, so we must return the compressed image as is.
	return (decompressedImage && isImageViewCompressed) ? decompressedImage : this;
}

void Image::blitTo(Image *dstImage, const VkImageBlit2KHR &region, VkFilter filter) const
{
	prepareForSampling(ImageSubresourceRange(region.srcSubresource));
	device->getBlitter()->blit(decompressedImage ? decompressedImage : this, dstImage, region, filter);
}

void Image::copyTo(uint8_t *dst, unsigned int dstPitch) const
{
	device->getBlitter()->copy(this, dst, dstPitch);
}

void Image::resolveTo(Image *dstImage, const VkImageResolve2KHR &region) const
{
	device->getBlitter()->resolve(this, dstImage, region);
}

void Image::resolveDepthStencilTo(const ImageView *src, ImageView *dst, VkResolveModeFlagBits depthResolveMode, VkResolveModeFlagBits stencilResolveMode) const
{
	device->getBlitter()->resolveDepthStencil(src, dst, depthResolveMode, stencilResolveMode);
}

uint32_t Image::getLastLayerIndex(const VkImageSubresourceRange &subresourceRange) const
{
	return ((subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS) ? arrayLayers : (subresourceRange.baseArrayLayer + subresourceRange.layerCount)) - 1;
}

uint32_t Image::getLastMipLevel(const VkImageSubresourceRange &subresourceRange) const
{
	return ((subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS) ? mipLevels : (subresourceRange.baseMipLevel + subresourceRange.levelCount)) - 1;
}

void Image::clear(const void *pixelData, VkFormat pixelFormat, const vk::Format &viewFormat, const VkImageSubresourceRange &subresourceRange, const VkRect2D *renderArea)
{
	device->getBlitter()->clear(pixelData, pixelFormat, this, viewFormat, subresourceRange, renderArea);
}

void Image::clear(const VkClearColorValue &color, const VkImageSubresourceRange &subresourceRange)
{
	ASSERT(subresourceRange.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);

	clear(color.float32, format.getClearFormat(), format, subresourceRange, nullptr);
}

void Image::clear(const VkClearDepthStencilValue &color, const VkImageSubresourceRange &subresourceRange)
{
	ASSERT((subresourceRange.aspectMask & ~(VK_IMAGE_ASPECT_DEPTH_BIT |
	                                        VK_IMAGE_ASPECT_STENCIL_BIT)) == 0);

	if(subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT)
	{
		VkImageSubresourceRange depthSubresourceRange = subresourceRange;
		depthSubresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
		clear(&color.depth, VK_FORMAT_D32_SFLOAT, format, depthSubresourceRange, nullptr);
	}

	if(subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)
	{
		VkImageSubresourceRange stencilSubresourceRange = subresourceRange;
		stencilSubresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
		clear(&color.stencil, VK_FORMAT_S8_UINT, format, stencilSubresourceRange, nullptr);
	}
}

void Image::clear(const VkClearValue &clearValue, const vk::Format &viewFormat, const VkRect2D &renderArea, const VkImageSubresourceRange &subresourceRange)
{
	ASSERT((subresourceRange.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
	       (subresourceRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT |
	                                       VK_IMAGE_ASPECT_STENCIL_BIT)));

	if(subresourceRange.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT)
	{
		clear(clearValue.color.float32, viewFormat.getClearFormat(), viewFormat, subresourceRange, &renderArea);
	}
	else
	{
		if(subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT)
		{
			VkImageSubresourceRange depthSubresourceRange = subresourceRange;
			depthSubresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
			clear(&clearValue.depthStencil.depth, VK_FORMAT_D32_SFLOAT, viewFormat, depthSubresourceRange, &renderArea);
		}

		if(subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)
		{
			VkImageSubresourceRange stencilSubresourceRange = subresourceRange;
			stencilSubresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
			clear(&clearValue.depthStencil.stencil, VK_FORMAT_S8_UINT, viewFormat, stencilSubresourceRange, &renderArea);
		}
	}
}

bool Image::requiresPreprocessing() const
{
	return isCubeCompatible() || decompressedImage;
}

void Image::contentsChanged(const VkImageSubresourceRange &subresourceRange, ContentsChangedContext contentsChangedContext)
{
	// If this function is called after (possibly) writing to this image from a shader,
	// the image must have VK_IMAGE_USAGE_STORAGE_BIT set for the write operation to be
	// valid. Otherwise, we can't have legally written to this image, so we know we can
	// skip updating dirtySubresources.
	if((contentsChangedContext == USING_STORAGE) && !(usage & VK_IMAGE_USAGE_STORAGE_BIT))
	{
		return;
	}

	// If this isn't a cube or a compressed image, we'll never need dirtySubresources,
	// so we can skip updating it
	if(!requiresPreprocessing())
	{
		return;
	}

	uint32_t lastLayer = getLastLayerIndex(subresourceRange);
	uint32_t lastMipLevel = getLastMipLevel(subresourceRange);

	VkImageSubresource subresource = {
		subresourceRange.aspectMask,
		subresourceRange.baseMipLevel,
		subresourceRange.baseArrayLayer
	};

	marl::lock lock(mutex);
	for(subresource.arrayLayer = subresourceRange.baseArrayLayer;
	    subresource.arrayLayer <= lastLayer;
	    subresource.arrayLayer++)
	{
		for(subresource.mipLevel = subresourceRange.baseMipLevel;
		    subresource.mipLevel <= lastMipLevel;
		    subresource.mipLevel++)
		{
			dirtySubresources.insert(subresource);
		}
	}
}

void Image::prepareForSampling(const VkImageSubresourceRange &subresourceRange) const
{
	// If this isn't a cube or a compressed image, there's nothing to do
	if(!requiresPreprocessing())
	{
		return;
	}

	uint32_t lastLayer = getLastLayerIndex(subresourceRange);
	uint32_t lastMipLevel = getLastMipLevel(subresourceRange);

	VkImageSubresource subresource = {
		subresourceRange.aspectMask,
		subresourceRange.baseMipLevel,
		subresourceRange.baseArrayLayer
	};

	marl::lock lock(mutex);

	if(dirtySubresources.empty())
	{
		return;
	}

	// First, decompress all relevant dirty subregions
	if(decompressedImage)
	{
		for(subresource.mipLevel = subresourceRange.baseMipLevel;
		    subresource.mipLevel <= lastMipLevel;
		    subresource.mipLevel++)
		{
			for(subresource.arrayLayer = subresourceRange.baseArrayLayer;
			    subresource.arrayLayer <= lastLayer;
			    subresource.arrayLayer++)
			{
				auto it = dirtySubresources.find(subresource);
				if(it != dirtySubresources.end())
				{
					decompress(subresource);
				}
			}
		}
	}

	// Second, update cubemap borders
	if(isCubeCompatible())
	{
		for(subresource.mipLevel = subresourceRange.baseMipLevel;
		    subresource.mipLevel <= lastMipLevel;
		    subresource.mipLevel++)
		{
			for(subresource.arrayLayer = subresourceRange.baseArrayLayer;
			    subresource.arrayLayer <= lastLayer;
			    subresource.arrayLayer++)
			{
				auto it = dirtySubresources.find(subresource);
				if(it != dirtySubresources.end())
				{
					// Since cube faces affect each other's borders, we update all 6 layers.

					subresource.arrayLayer -= subresource.arrayLayer % 6;  // Round down to a multiple of 6.

					if(subresource.arrayLayer + 5 <= lastLayer)
					{
						device->getBlitter()->updateBorders(decompressedImage ? decompressedImage : this, subresource);
					}

					subresource.arrayLayer += 5;  // Together with the loop increment, advances to the next cube.
				}
			}
		}
	}

	// Finally, mark all updated subregions clean
	for(subresource.mipLevel = subresourceRange.baseMipLevel;
	    subresource.mipLevel <= lastMipLevel;
	    subresource.mipLevel++)
	{
		for(subresource.arrayLayer = subresourceRange.baseArrayLayer;
		    subresource.arrayLayer <= lastLayer;
		    subresource.arrayLayer++)
		{
			auto it = dirtySubresources.find(subresource);
			if(it != dirtySubresources.end())
			{
				dirtySubresources.erase(it);
			}
		}
	}
}

void Image::decompress(const VkImageSubresource &subresource) const
{
	switch(format)
	{
	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
		decodeETC2(subresource);
		break;
	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
	case VK_FORMAT_BC2_UNORM_BLOCK:
	case VK_FORMAT_BC2_SRGB_BLOCK:
	case VK_FORMAT_BC3_UNORM_BLOCK:
	case VK_FORMAT_BC3_SRGB_BLOCK:
	case VK_FORMAT_BC4_UNORM_BLOCK:
	case VK_FORMAT_BC4_SNORM_BLOCK:
	case VK_FORMAT_BC5_UNORM_BLOCK:
	case VK_FORMAT_BC5_SNORM_BLOCK:
	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
	case VK_FORMAT_BC7_UNORM_BLOCK:
	case VK_FORMAT_BC7_SRGB_BLOCK:
		decodeBC(subresource);
		break;
	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
		decodeASTC(subresource);
		break;
	default:
		UNSUPPORTED("Compressed format %d", (VkFormat)format);
		break;
	}
}

void Image::decodeETC2(const VkImageSubresource &subresource) const
{
	ASSERT(decompressedImage);

	ETC_Decoder::InputType inputType = GetInputType(format);

	int bytes = decompressedImage->format.bytes();
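	// ETC2 R8G8B8 formats decompress into an RGBA image, so the alpha channel
	// must be filled in as fully opaque (0xFF).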
	bool fakeAlpha = (format == VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK) || (format == VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK);
	size_t sizeToWrite = 0;

	VkExtent3D mipLevelExtent = getMipLevelExtent(static_cast<VkImageAspectFlagBits>(subresource.aspectMask), subresource.mipLevel);

	int pitchB = decompressedImage->rowPitchBytes(VK_IMAGE_ASPECT_COLOR_BIT, subresource.mipLevel);

	if(fakeAlpha)
	{
		// To avoid overflow in case of cube textures, which are offset in memory to account for the border,
		// compute the size from the first pixel to the last pixel, excluding any padding or border before
		// the first pixel or after the last pixel.
		sizeToWrite = ((mipLevelExtent.height - 1) * pitchB) + (mipLevelExtent.width * bytes);
	}

	for(int32_t depth = 0; depth < static_cast<int32_t>(mipLevelExtent.depth); depth++)
	{
		uint8_t *source = static_cast<uint8_t *>(getTexelPointer({ 0, 0, depth }, subresource));
		uint8_t *dest = static_cast<uint8_t *>(decompressedImage->getTexelPointer({ 0, 0, depth }, subresource));

		if(fakeAlpha)
		{
			ASSERT((dest + sizeToWrite) < decompressedImage->end());
			memset(dest, 0xFF, sizeToWrite);
		}

		ETC_Decoder::Decode(source, dest, mipLevelExtent.width, mipLevelExtent.height,
		                    pitchB, bytes, inputType);
	}
}

void Image::decodeBC(const VkImageSubresource &subresource) const
{
	ASSERT(decompressedImage);

	int n = GetBCn(format);
	int noAlphaU = GetNoAlphaOrUnsigned(format);

	int bytes = decompressedImage->format.bytes();

	VkExtent3D mipLevelExtent = getMipLevelExtent(static_cast<VkImageAspectFlagBits>(subresource.aspectMask), subresource.mipLevel);

	int pitchB = decompressedImage->rowPitchBytes(VK_IMAGE_ASPECT_COLOR_BIT, subresource.mipLevel);

	for(int32_t depth = 0; depth < static_cast<int32_t>(mipLevelExtent.depth); depth++)
	{
		uint8_t *source = static_cast<uint8_t *>(getTexelPointer({ 0, 0, depth }, subresource));
		uint8_t *dest = static_cast<uint8_t *>(decompressedImage->getTexelPointer({ 0, 0, depth }, subresource));

		BC_Decoder::Decode(source, dest, mipLevelExtent.width, mipLevelExtent.height,
		                   pitchB, bytes, n, noAlphaU);
	}
}

void Image::decodeASTC(const VkImageSubresource &subresource) const
{
	ASSERT(decompressedImage);

	int xBlockSize = format.blockWidth();
	int yBlockSize = format.blockHeight();
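	// The ASTC formats supported here are all 2D block formats, so the block depth is 1.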
	int zBlockSize = 1;
	bool isUnsigned = format.isUnsignedComponent(0);

	int bytes = decompressedImage->format.bytes();

	VkExtent3D mipLevelExtent = getMipLevelExtent(static_cast<VkImageAspectFlagBits>(subresource.aspectMask), subresource.mipLevel);

	int xblocks = (mipLevelExtent.width + xBlockSize - 1) / xBlockSize;
	int yblocks = (mipLevelExtent.height + yBlockSize - 1) / yBlockSize;
	int zblocks = (zBlockSize > 1) ? (mipLevelExtent.depth + zBlockSize - 1) / zBlockSize : 1;

	if(xblocks <= 0 || yblocks <= 0 || zblocks <= 0)
	{
		return;
	}

	int pitchB = decompressedImage->rowPitchBytes(VK_IMAGE_ASPECT_COLOR_BIT, subresource.mipLevel);
	int sliceB = decompressedImage->slicePitchBytes(VK_IMAGE_ASPECT_COLOR_BIT, subresource.mipLevel);

	for(int32_t depth = 0; depth < static_cast<int32_t>(mipLevelExtent.depth); depth++)
	{
		uint8_t *source = static_cast<uint8_t *>(getTexelPointer({ 0, 0, depth }, subresource));
		uint8_t *dest = static_cast<uint8_t *>(decompressedImage->getTexelPointer({ 0, 0, depth }, subresource));

		ASTC_Decoder::Decode(source, dest, mipLevelExtent.width, mipLevelExtent.height, mipLevelExtent.depth, bytes, pitchB, sliceB,
		                     xBlockSize, yBlockSize, zBlockSize, xblocks, yblocks, zblocks, isUnsigned);
	}
}

}  // namespace vk