/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkImage.h"

#include "include/gpu/vk/VulkanMutableTextureState.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkImageView.h"
#include "src/gpu/ganesh/vk/GrVkTexture.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"
#include "src/gpu/vk/VulkanMemory.h"
#include "src/gpu/vk/VulkanMutableTextureStatePriv.h"
#include "src/gpu/vk/VulkanUtilsPriv.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

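// Creates a single-mip stencil attachment. Stencil attachments are always budgeted,
// unprotected, and non-memoryless; transfer-dst usage is included so the attachment can be
// cleared or written via transfer commands.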
sk_sp<GrVkImage> GrVkImage::MakeStencil(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        VkFormat format) {
    VkImageUsageFlags vkUsageFlags =
            VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kStencilAttachment,
                           sampleCnt,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           GrProtected::kNo,
                           GrMemoryless::kNo,
                           skgpu::Budgeted::kYes);
}

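// Creates a multisampled color attachment. Memoryless attachments are marked transient so the
// allocator can back them with lazily allocated memory; Vulkan forbids combining transient
// usage with transfer usage, so transfer src/dst are only added in the non-memoryless case.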
sk_sp<GrVkImage> GrVkImage::MakeMSAA(GrVkGpu* gpu,
                                     SkISize dimensions,
                                     int numSamples,
                                     VkFormat format,
                                     GrProtected isProtected,
                                     GrMemoryless memoryless) {
    SkASSERT(numSamples > 1);

    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    if (memoryless == GrMemoryless::kYes) {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
    } else {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    }
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kColorAttachment,
                           numSamples,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           isProtected,
                           memoryless,
                           skgpu::Budgeted::kYes);
}

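// Creates a sampled texture. All textures get transfer src/dst usage so they can be uploaded
// to and read back; renderable textures additionally get color attachment and input attachment
// usage.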
sk_sp<GrVkImage> GrVkImage::MakeTexture(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        VkFormat format,
                                        uint32_t mipLevels,
                                        GrRenderable renderable,
                                        int numSamples,
                                        skgpu::Budgeted budgeted,
                                        GrProtected isProtected) {
    UsageFlags usageFlags = UsageFlags::kTexture;
    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                     VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (renderable == GrRenderable::kYes) {
        usageFlags |= UsageFlags::kColorAttachment;
        vkUsageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        // We always make our render targets support being used as input attachments.
        vkUsageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    }

    return GrVkImage::Make(gpu,
                           dimensions,
                           usageFlags,
                           numSamples,
                           format,
                           mipLevels,
                           vkUsageFlags,
                           isProtected,
                           GrMemoryless::kNo,
                           budgeted);
}

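// Builds the image views required by the requested usages: a single-mip framebuffer view for
// color or stencil attachment use, and a view covering every mip level for texture (sampled)
// use.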
static bool make_views(GrVkGpu* gpu,
                       const GrVkImageInfo& info,
                       GrAttachment::UsageFlags attachmentUsages,
                       sk_sp<const GrVkImageView>* framebufferView,
                       sk_sp<const GrVkImageView>* textureView) {
    GrVkImageView::Type viewType;
    if (attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) {
        // If we have stencil usage then we shouldn't have any other usages.
        SkASSERT(attachmentUsages == GrAttachment::UsageFlags::kStencilAttachment);
        viewType = GrVkImageView::kStencil_Type;
    } else {
        viewType = GrVkImageView::kColor_Type;
    }

    if (SkToBool(attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) ||
        SkToBool(attachmentUsages & GrAttachment::UsageFlags::kColorAttachment)) {
        // Attachments can only have a single mip level.
        *framebufferView = GrVkImageView::Make(
                gpu, info.fImage, info.fFormat, viewType, 1, info.fYcbcrConversionInfo);
        if (!*framebufferView) {
            return false;
        }
    }

    if (attachmentUsages & GrAttachment::UsageFlags::kTexture) {
        *textureView = GrVkImageView::Make(gpu,
                                           info.fImage,
                                           info.fFormat,
                                           viewType,
                                           info.fLevelCount,
                                           info.fYcbcrConversionInfo);
        if (!*textureView) {
            return false;
        }
    }
    return true;
}

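// Shared factory behind the Make* helpers above: creates the VkImage plus its memory via
// InitImageInfo, builds the needed views, and wraps everything in a GrVkImage. On failure any
// partially created Vulkan objects are destroyed before returning null.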
sk_sp<GrVkImage> GrVkImage::Make(GrVkGpu* gpu,
                                 SkISize dimensions,
                                 UsageFlags attachmentUsages,
                                 int sampleCnt,
                                 VkFormat format,
                                 uint32_t mipLevels,
                                 VkImageUsageFlags vkUsageFlags,
                                 GrProtected isProtected,
                                 GrMemoryless memoryless,
                                 skgpu::Budgeted budgeted) {
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = format;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = mipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = vkUsageFlags;
    imageDesc.fIsProtected = isProtected;

    GrVkImageInfo info;
    if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
        return nullptr;
    }

    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
        GrVkImage::DestroyImageInfo(gpu, &info);
        return nullptr;
    }

    auto mutableState = sk_make_sp<skgpu::MutableTextureState>(
            skgpu::MutableTextureStates::MakeVulkan(info.fImageLayout, info.fCurrentQueueFamily));
    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          budgeted,
                                          /*label=*/"MakeVkImage"));
}

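// Wraps an externally created VkImage. When the wrap is for an external secondary command
// buffer, no views are created (and init() skips creating an owning resource) since the image
// is never accessed directly in that case.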
sk_sp<GrVkImage> GrVkImage::MakeWrapped(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        const GrVkImageInfo& info,
                                        sk_sp<skgpu::MutableTextureState> mutableState,
                                        UsageFlags attachmentUsages,
                                        GrWrapOwnership ownership,
                                        GrWrapCacheable cacheable,
                                        std::string_view label,
                                        bool forSecondaryCB) {
    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!forSecondaryCB) {
        if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
            return nullptr;
        }
    }

    GrBackendObjectOwnership backendOwnership = kBorrow_GrWrapOwnership == ownership
                                                        ? GrBackendObjectOwnership::kBorrowed
                                                        : GrBackendObjectOwnership::kOwned;

    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          backendOwnership,
                                          cacheable,
                                          forSecondaryCB,
                                          label));
}

GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<skgpu::MutableTextureState> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     skgpu::Budgeted budgeted,
                     std::string_view label)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo,
                       info.fProtected,
                       label,
                       info.fAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag
                               ? GrMemoryless::kYes
                               : GrMemoryless::kNo)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(false) {
    this->init(gpu, false);
    this->registerWithCache(budgeted);
}

GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<skgpu::MutableTextureState> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     GrBackendObjectOwnership ownership,
                     GrWrapCacheable cacheable,
                     bool forSecondaryCB,
                     std::string_view label)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo,
                       info.fProtected,
                       label)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(GrBackendObjectOwnership::kBorrowed == ownership) {
    this->init(gpu, forSecondaryCB);
    this->registerWithCacheWrapped(cacheable);
}

void GrVkImage::init(GrVkGpu* gpu, bool forSecondaryCB) {
    SkASSERT(skgpu::MutableTextureStates::GetVkImageLayout(fMutableState.get()) ==
             fInfo.fImageLayout);
    SkASSERT(skgpu::MutableTextureStates::GetVkQueueFamilyIndex(fMutableState.get()) ==
             fInfo.fCurrentQueueFamily);
#ifdef SK_DEBUG
    if (fInfo.fImageUsageFlags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
        SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT));
    } else {
        if (fInfo.fAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag) {
            SkASSERT(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     !SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        } else {
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT));
            SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        }
    }
    // We can't transfer the image to the graphics queue from a non-graphics queue, since we
    // can't release the image from the original queue without access to that queue. This
    // limits the kinds of queue indices we can handle.
    if (fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (fInfo.fCurrentQueueFamily != gpu->queueIndex()) {
                SkASSERT(false);
            }
        } else {
            SkASSERT(false);
        }
    }
#endif
    if (forSecondaryCB) {
        fResource = nullptr;
    } else if (fIsBorrowed) {
        fResource = new BorrowedResource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    } else {
        SkASSERT(VK_NULL_HANDLE != fInfo.fAlloc.fMemory);
        fResource = new Resource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    }
}

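// Maps an image layout to the last pipeline stage that could have written to the image while
// it was in that layout. This becomes the source stage mask of the transition barrier.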
VkPipelineStageFlags GrVkImage::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

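// Maps an image layout to the write accesses that must be made available before transitioning
// out of that layout; read-only layouts yield an empty mask.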
VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't
    // include color attachment or depth/stencil writes), so we ignore
    // VK_ACCESS_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in the preinitialized or general
    // layout, and the image is linear.
    // TODO: Add a check for linear here so we are not always adding host to general, and we
    // should only be in preinitialized if we are linear.
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        // There are no writes that need to be made available.
        flags = 0;
    }
    return flags;
}

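// Returns the aspect(s) a barrier on an image of this format must cover: stencil only,
// depth + stencil, or (for all other formats Ganesh uses) color.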
VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT:  // fallthrough
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

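// Issues a VkImageMemoryBarrier that transitions this image to newLayout and, if requested,
// transfers queue family ownership. The source stage and access masks are derived from the
// current layout via the helpers above; the caller supplies the destination masks. For
// example, a hypothetical caller preparing the image to be sampled might pass
// VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL / VK_ACCESS_SHADER_READ_BIT /
// VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT with VK_QUEUE_FAMILY_IGNORED.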
void GrVkImage::setImageLayoutAndQueueIndex(const GrVkGpu* gpu,
                                            VkImageLayout newLayout,
                                            VkAccessFlags dstAccessMask,
                                            VkPipelineStageFlags dstStageMask,
                                            bool byRegion,
                                            uint32_t newQueueFamilyIndex) {
    // Enable the following block to test new devices to confirm their lazy images stay at 0
    // memory use.
#if 0
    if (fInfo.fAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag) {
        VkDeviceSize size;
        VK_CALL(gpu, GetDeviceMemoryCommitment(gpu->device(), fInfo.fAlloc.fMemory, &size));

        SkDebugf("Lazy Image. This: %p, image: %d, size: %d\n", this, fInfo.fImage, size);
    }
#endif
    SkASSERT(!gpu->isDeviceLost());
    SkASSERT(newLayout == this->currentLayout() ||
             (VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
              VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout));
    VkImageLayout currentLayout = this->currentLayout();
    uint32_t currentQueueIndex = this->currentQueueFamilyIndex();

#ifdef SK_DEBUG
    if (fInfo.fSharingMode == VK_SHARING_MODE_CONCURRENT) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
        } else {
            SkASSERT(newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED);
        }
    } else {
        SkASSERT(fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE);
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED ||
            currentQueueIndex == gpu->queueIndex()) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT ||
                     currentQueueIndex == gpu->queueIndex());
        } else if (newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                   newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == gpu->queueIndex());
        }
    }
#endif

    if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            newQueueFamilyIndex = gpu->queueIndex();
        }
        if (currentQueueIndex == VK_QUEUE_FAMILY_IGNORED) {
            currentQueueIndex = gpu->queueIndex();
        }
    }

    // If the old and new layouts are the same and the layout is read-only, there is no need
    // for a barrier unless we also need to switch queues.
    if (newLayout == currentLayout && currentQueueIndex == newQueueFamilyIndex &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,      // sType
        nullptr,                                     // pNext
        srcAccessMask,                               // srcAccessMask
        dstAccessMask,                               // dstAccessMask
        currentLayout,                               // oldLayout
        newLayout,                                   // newLayout
        currentQueueIndex,                           // srcQueueFamilyIndex
        newQueueFamilyIndex,                         // dstQueueFamilyIndex
        fInfo.fImage,                                // image
        { aspectFlags, 0, fInfo.fLevelCount, 0, 1 }  // subresourceRange
    };
    SkASSERT(srcAccessMask == imageMemoryBarrier.srcAccessMask);
    gpu->addImageMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                               &imageMemoryBarrier);

    this->updateImageLayout(newLayout);
    this->setQueueFamilyIndex(newQueueFamilyIndex);
}

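// Creates a VkImage matching imageDesc, allocates and binds its memory, and fills out info on
// success. Linear-tiled images start out VK_IMAGE_LAYOUT_PREINITIALIZED; everything else
// starts VK_IMAGE_LAYOUT_UNDEFINED.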
bool GrVkImage::InitImageInfo(GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
    if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
        return false;
    }
    if ((imageDesc.fIsProtected == GrProtected::kYes) &&
        !gpu->vkCaps().supportsProtectedContent()) {
        return false;
    }

    bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create the image
    VkSampleCountFlagBits vkSamples;
    if (!skgpu::SampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
        return false;
    }

    SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
             VK_SAMPLE_COUNT_1_BIT == vkSamples);

    VkImageCreateFlags createflags = 0;
    if (imageDesc.fIsProtected == GrProtected::kYes || gpu->protectedContext()) {
        createflags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }
    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        nullptr,                                     // pNext
        createflags,                                 // VkImageCreateFlags
        imageDesc.fImageType,                        // VkImageType
        imageDesc.fFormat,                           // VkFormat
        { imageDesc.fWidth, imageDesc.fHeight, 1 },  // VkExtent3D
        imageDesc.fLevels,                           // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageDesc.fImageTiling,                      // VkImageTiling
        imageDesc.fUsageFlags,                       // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyCount
        nullptr,                                     // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    VkImage image = VK_NULL_HANDLE;
    VkResult result;
    GR_VK_CALL_RESULT(gpu, result, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
    if (result != VK_SUCCESS) {
        return false;
    }

    skgpu::Protected isProtected = gpu->protectedContext() ? skgpu::Protected::kYes
                                                           : skgpu::Protected::kNo;
    bool forceDedicatedMemory = gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory();
    bool useLazyAllocation =
            SkToBool(imageDesc.fUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);

    auto checkResult = [gpu, isProtected, forceDedicatedMemory, useLazyAllocation](
                               VkResult result) {
        GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::AllocImageMemory"
                                 " (isProtected:%d, forceDedicatedMemory:%d,"
                                 " useLazyAllocation:%d)",
                                 (int)isProtected, (int)forceDedicatedMemory,
                                 (int)useLazyAllocation);
        return gpu->checkVkResult(result);
    };
    auto allocator = gpu->memoryAllocator();
    skgpu::VulkanAlloc alloc;
    if (!skgpu::VulkanMemory::AllocImageMemory(allocator,
                                               image,
                                               isProtected,
                                               forceDedicatedMemory,
                                               useLazyAllocation,
                                               checkResult,
                                               &alloc) ||
        (useLazyAllocation &&
         !SkToBool(alloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag))) {
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    // Bind the image to its memory
    GR_VK_CALL_RESULT(gpu, result, BindImageMemory(gpu->device(),
                                                   image,
                                                   alloc.fMemory,
                                                   alloc.fOffset));
    if (result != VK_SUCCESS) {
        skgpu::VulkanMemory::FreeImageMemory(allocator, alloc);
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageDesc.fImageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = imageDesc.fFormat;
    info->fImageUsageFlags = imageDesc.fUsageFlags;
    info->fSampleCount = imageDesc.fSamples;
    info->fLevelCount = imageDesc.fLevels;
    info->fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
    info->fProtected =
            (createflags & VK_IMAGE_CREATE_PROTECTED_BIT) ? GrProtected::kYes : GrProtected::kNo;
    info->fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    return true;
}

void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    VK_CALL(gpu, DestroyImage(gpu->device(), info->fImage, nullptr));
    skgpu::VulkanMemory::FreeImageMemory(gpu->memoryAllocator(), info->fAlloc);
}

GrVkImage::~GrVkImage() {
    // Should have been released first.
    SkASSERT(!fResource);
    SkASSERT(!fFramebufferView);
    SkASSERT(!fTextureView);
}

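// Prepares the image for presentation: unless the image originally came from an external or
// foreign queue, it is transitioned to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR (when the device
// supports swapchains). In all cases ownership returns to the queue family the image started
// on.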
void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
    VkImageLayout layout = this->currentLayout();
    if (fInitialQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInitialQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (gpu->vkCaps().supportsSwapchain()) {
            layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
        }
    }
    this->setImageLayoutAndQueueIndex(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::prepareForExternal(GrVkGpu* gpu) {
    this->setImageLayoutAndQueueIndex(gpu, this->currentLayout(), 0,
                                      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::releaseImage() {
    if (fResource) {
        fResource->unref();
        fResource = nullptr;
    }
    fFramebufferView.reset();
    fTextureView.reset();
    fCachedBlendingInputDescSet.reset();
    fCachedMSAALoadInputDescSet.reset();
}

void GrVkImage::onRelease() {
    this->releaseImage();
    GrAttachment::onRelease();
}

void GrVkImage::onAbandon() {
    this->releaseImage();
    GrAttachment::onAbandon();
}

void GrVkImage::setResourceRelease(sk_sp<RefCntedReleaseProc> releaseHelper) {
    SkASSERT(fResource);
    // Forward the release proc on to GrVkImage::Resource.
    fResource->setRelease(std::move(releaseHelper));
}

void GrVkImage::Resource::freeGPUData() const {
    this->invokeReleaseProc();
    VK_CALL(fGpu, DestroyImage(fGpu->device(), fImage, nullptr));
    skgpu::VulkanMemory::FreeImageMemory(fGpu->memoryAllocator(), fAlloc);
}

void GrVkImage::BorrowedResource::freeGPUData() const {
    this->invokeReleaseProc();
}

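// Points a descriptor set's input-attachment binding (GrVkUniformHandler::kInputBinding) at
// the given image view. Used by the two functions below, which read the image back as an input
// attachment for dst-read blending and for the MSAA load path.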
static void write_input_desc_set(GrVkGpu* gpu,
                                 VkImageView view,
                                 VkImageLayout layout,
                                 VkDescriptorSet descSet) {
    VkDescriptorImageInfo imageInfo;
    memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
    imageInfo.sampler = VK_NULL_HANDLE;
    imageInfo.imageView = view;
    imageInfo.imageLayout = layout;

    VkWriteDescriptorSet writeInfo;
    memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
    writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writeInfo.pNext = nullptr;
    writeInfo.dstSet = descSet;
    writeInfo.dstBinding = GrVkUniformHandler::kInputBinding;
    writeInfo.dstArrayElement = 0;
    writeInfo.descriptorCount = 1;
    writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    writeInfo.pImageInfo = &imageInfo;
    writeInfo.pBufferInfo = nullptr;
    writeInfo.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
}

gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForBlending(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedBlendingInputDescSet) {
        return fCachedBlendingInputDescSet;
    }

    fCachedBlendingInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedBlendingInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_GENERAL,
                         *fCachedBlendingInputDescSet->descriptorSet());

    return fCachedBlendingInputDescSet;
}

gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForMSAALoad(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedMSAALoadInputDescSet) {
        return fCachedMSAALoadInputDescSet;
    }

    fCachedMSAALoadInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedMSAALoadInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                         *fCachedMSAALoadInputDescSet->descriptorSet());

    return fCachedMSAALoadInputDescSet;
}

GrVkGpu* GrVkImage::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}

#if defined(GR_TEST_UTILS)
void GrVkImage::setCurrentQueueFamilyToGraphicsQueue(GrVkGpu* gpu) {
    skgpu::MutableTextureStates::SetVkQueueFamilyIndex(fMutableState.get(), gpu->queueIndex());
}
#endif