/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkImage.h"

#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkImageView.h"
#include "src/gpu/ganesh/vk/GrVkTexture.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"
#include "src/gpu/vk/VulkanMemory.h"
#include "src/gpu/vk/VulkanUtilsPriv.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

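// Creates a single-mip stencil attachment. Stencil attachments are never protected or
// memoryless and are always created with transfer-dst usage.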
sk_sp<GrVkImage> GrVkImage::MakeStencil(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        VkFormat format) {
    VkImageUsageFlags vkUsageFlags =
            VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kStencilAttachment,
                           sampleCnt,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           GrProtected::kNo,
                           GrMemoryless::kNo,
                           skgpu::Budgeted::kYes);
}

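// Creates a multisampled color attachment. Memoryless attachments are marked transient and
// backed by lazily allocated memory, so they get no transfer usage; all others support being
// both a transfer source and destination.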
sk_sp<GrVkImage> GrVkImage::MakeMSAA(GrVkGpu* gpu,
                                     SkISize dimensions,
                                     int numSamples,
                                     VkFormat format,
                                     GrProtected isProtected,
                                     GrMemoryless memoryless) {
    SkASSERT(numSamples > 1);

    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    if (memoryless == GrMemoryless::kYes) {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
    } else {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    }
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kColorAttachment,
                           numSamples,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           isProtected,
                           memoryless,
                           skgpu::Budgeted::kYes);
}

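// Creates a sampleable texture image. Renderable textures additionally get color attachment
// and input attachment usage.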
sk_sp<GrVkImage> GrVkImage::MakeTexture(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        VkFormat format,
                                        uint32_t mipLevels,
                                        GrRenderable renderable,
                                        int numSamples,
                                        skgpu::Budgeted budgeted,
                                        GrProtected isProtected) {
    UsageFlags usageFlags = UsageFlags::kTexture;
    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                     VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (renderable == GrRenderable::kYes) {
        usageFlags |= UsageFlags::kColorAttachment;
        vkUsageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        // We always create our render targets so they can be used as input attachments.
        vkUsageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    }

    return GrVkImage::Make(gpu,
                           dimensions,
                           usageFlags,
                           numSamples,
                           format,
                           mipLevels,
                           vkUsageFlags,
                           isProtected,
                           GrMemoryless::kNo,
                           budgeted);
}

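// Creates the image views required by the requested usages: a single-mip framebuffer view for
// color/stencil attachment usage and a view covering the full mip chain for texture usage.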
static bool make_views(GrVkGpu* gpu,
                       const GrVkImageInfo& info,
                       GrAttachment::UsageFlags attachmentUsages,
                       sk_sp<const GrVkImageView>* framebufferView,
                       sk_sp<const GrVkImageView>* textureView) {
    GrVkImageView::Type viewType;
    if (attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) {
        // If we have stencil usage, we shouldn't have any other usages.
        SkASSERT(attachmentUsages == GrAttachment::UsageFlags::kStencilAttachment);
        viewType = GrVkImageView::kStencil_Type;
    } else {
        viewType = GrVkImageView::kColor_Type;
    }

    if (SkToBool(attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) ||
        SkToBool(attachmentUsages & GrAttachment::UsageFlags::kColorAttachment)) {
        // Attachments can only have one mip level.
        *framebufferView = GrVkImageView::Make(
                gpu, info.fImage, info.fFormat, viewType, 1, info.fYcbcrConversionInfo);
        if (!*framebufferView) {
            return false;
        }
    }

    if (attachmentUsages & GrAttachment::UsageFlags::kTexture) {
        *textureView = GrVkImageView::Make(gpu,
                                           info.fImage,
                                           info.fFormat,
                                           viewType,
                                           info.fLevelCount,
                                           info.fYcbcrConversionInfo);
        if (!*textureView) {
            return false;
        }
    }
    return true;
}

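// Creates a new VkImage and its backing memory, then wraps them, along with the required
// views, in a GrVkImage. On any failure the partially created Vulkan objects are destroyed
// and nullptr is returned.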
sk_sp<GrVkImage> GrVkImage::Make(GrVkGpu* gpu,
                                 SkISize dimensions,
                                 UsageFlags attachmentUsages,
                                 int sampleCnt,
                                 VkFormat format,
                                 uint32_t mipLevels,
                                 VkImageUsageFlags vkUsageFlags,
                                 GrProtected isProtected,
                                 GrMemoryless memoryless,
                                 skgpu::Budgeted budgeted) {
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = format;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = mipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = vkUsageFlags;
    imageDesc.fIsProtected = isProtected;

    GrVkImageInfo info;
    if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
        return nullptr;
    }

    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
        GrVkImage::DestroyImageInfo(gpu, &info);
        return nullptr;
    }

    sk_sp<skgpu::MutableTextureStateRef> mutableState(
            new skgpu::MutableTextureStateRef(info.fImageLayout, info.fCurrentQueueFamily));
    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          budgeted,
                                          /*label=*/"MakeVkImage"));
}

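// Wraps an externally created VkImage. View creation is skipped when the image will only be
// used with an external secondary command buffer.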
sk_sp<GrVkImage> GrVkImage::MakeWrapped(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        const GrVkImageInfo& info,
                                        sk_sp<skgpu::MutableTextureStateRef> mutableState,
                                        UsageFlags attachmentUsages,
                                        GrWrapOwnership ownership,
                                        GrWrapCacheable cacheable,
                                        std::string_view label,
                                        bool forSecondaryCB) {
    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!forSecondaryCB) {
        if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
            return nullptr;
        }
    }

    GrBackendObjectOwnership backendOwnership = kBorrow_GrWrapOwnership == ownership
            ? GrBackendObjectOwnership::kBorrowed
            : GrBackendObjectOwnership::kOwned;

    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          backendOwnership,
                                          cacheable,
                                          forSecondaryCB,
                                          label));
}

GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<skgpu::MutableTextureStateRef> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     skgpu::Budgeted budgeted,
                     std::string_view label)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo,
                       info.fProtected,
                       label,
                       info.fAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag
                               ? GrMemoryless::kYes
                               : GrMemoryless::kNo)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(false) {
    this->init(gpu, false);
    this->registerWithCache(budgeted);
}

GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<skgpu::MutableTextureStateRef> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     GrBackendObjectOwnership ownership,
                     GrWrapCacheable cacheable,
                     bool forSecondaryCB,
                     std::string_view label)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo,
                       info.fProtected,
                       label)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(GrBackendObjectOwnership::kBorrowed == ownership) {
    this->init(gpu, forSecondaryCB);
    this->registerWithCacheWrapped(cacheable);
}

void GrVkImage::init(GrVkGpu* gpu, bool forSecondaryCB) {
    SkASSERT(fMutableState->getImageLayout() == fInfo.fImageLayout);
    SkASSERT(fMutableState->getQueueFamilyIndex() == fInfo.fCurrentQueueFamily);
#ifdef SK_DEBUG
    if (fInfo.fImageUsageFlags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
        SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT));
    } else {
        if (fInfo.fAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag) {
            SkASSERT(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     !SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        } else {
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT));
            SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        }
    }
    // We can't transfer an image from a non-graphics queue to the graphics queue, since we can't
    // release the image from the original queue without access to that queue. This limits the
    // kinds of queue family indices we can handle.
    if (fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (fInfo.fCurrentQueueFamily != gpu->queueIndex()) {
                SkASSERT(false);
            }
        } else {
            SkASSERT(false);
        }
    }
#endif
    if (forSecondaryCB) {
        fResource = nullptr;
    } else if (fIsBorrowed) {
        fResource = new BorrowedResource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    } else {
        SkASSERT(VK_NULL_HANDLE != fInfo.fAlloc.fMemory);
        fResource = new Resource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    }
}

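// Maps an image layout to the pipeline stage(s) whose work must complete before the layout can
// change, for use as the srcStageMask of an image memory barrier.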
VkPipelineStageFlags GrVkImage::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't include
    // color attachment or depth/stencil writes), so we ignore VK_ACCESS_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in the preinitialized or general
    // layout and the image is linear.
    // TODO: Add a check for linear tiling here so we are not always adding host access to the
    // general layout; we should only be in the preinitialized layout if the image is linear.
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        // There are no writes that need to be made available.
        flags = 0;
    }
    return flags;
}

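// Returns the aspect flags (color, depth, and/or stencil) implied by the image format.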
VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT:  // fallthrough
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

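// Records an image memory barrier that transitions the image to the given layout and/or queue
// family, then updates the tracked mutable state. Redundant transitions to read-only layouts
// are skipped.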
void GrVkImage::setImageLayoutAndQueueIndex(const GrVkGpu* gpu,
                                            VkImageLayout newLayout,
                                            VkAccessFlags dstAccessMask,
                                            VkPipelineStageFlags dstStageMask,
                                            bool byRegion,
                                            uint32_t newQueueFamilyIndex) {
    // Enable the following block to test new devices to confirm their lazy images stay at zero
    // memory use.
#if 0
    if (fInfo.fAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag) {
        VkDeviceSize size;
        VK_CALL(gpu, GetDeviceMemoryCommitment(gpu->device(), fInfo.fAlloc.fMemory, &size));

        SkDebugf("Lazy Image. This: %p, image: %d, size: %d\n", this, fInfo.fImage, size);
    }
#endif
    SkASSERT(!gpu->isDeviceLost());
    SkASSERT(newLayout == this->currentLayout() ||
             (VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
              VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout));
    VkImageLayout currentLayout = this->currentLayout();
    uint32_t currentQueueIndex = this->currentQueueFamilyIndex();

#ifdef SK_DEBUG
    if (fInfo.fSharingMode == VK_SHARING_MODE_CONCURRENT) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
        } else {
            SkASSERT(newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED);
        }
    } else {
        SkASSERT(fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE);
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED ||
            currentQueueIndex == gpu->queueIndex()) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT ||
                     currentQueueIndex == gpu->queueIndex());
        } else if (newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                   newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == gpu->queueIndex());
        }
    }
#endif

    if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            newQueueFamilyIndex = gpu->queueIndex();
        }
        if (currentQueueIndex == VK_QUEUE_FAMILY_IGNORED) {
            currentQueueIndex = gpu->queueIndex();
        }
    }

    // If the old and new layouts are the same and the layout is read-only, there is no need to
    // put in a barrier unless we also need to switch queues.
    if (newLayout == currentLayout && currentQueueIndex == newQueueFamilyIndex &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,      // sType
        nullptr,                                     // pNext
        srcAccessMask,                               // srcAccessMask
        dstAccessMask,                               // dstAccessMask
        currentLayout,                               // oldLayout
        newLayout,                                   // newLayout
        currentQueueIndex,                           // srcQueueFamilyIndex
        newQueueFamilyIndex,                         // dstQueueFamilyIndex
        fInfo.fImage,                                // image
        { aspectFlags, 0, fInfo.fLevelCount, 0, 1 }  // subresourceRange
    };
    SkASSERT(srcAccessMask == imageMemoryBarrier.srcAccessMask);
    gpu->addImageMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                               &imageMemoryBarrier);

    this->updateImageLayout(newLayout);
    this->setQueueFamilyIndex(newQueueFamilyIndex);
}

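// Creates a VkImage matching the descriptor, allocates and binds its memory, and fills out
// 'info'. Returns false, after destroying anything partially created, if any step fails.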
bool GrVkImage::InitImageInfo(GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
    if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
        return false;
    }
    if ((imageDesc.fIsProtected == GrProtected::kYes) && !gpu->vkCaps().supportsProtectedMemory()) {
        return false;
    }

    bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create the image
    VkSampleCountFlagBits vkSamples;
    if (!skgpu::SampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
        return false;
    }

    SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
             VK_SAMPLE_COUNT_1_BIT == vkSamples);

    VkImageCreateFlags createflags = 0;
    if (imageDesc.fIsProtected == GrProtected::kYes || gpu->protectedContext()) {
        createflags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }
    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        nullptr,                                     // pNext
        createflags,                                 // VkImageCreateFlags
        imageDesc.fImageType,                        // VkImageType
        imageDesc.fFormat,                           // VkFormat
        { imageDesc.fWidth, imageDesc.fHeight, 1 },  // VkExtent3D
        imageDesc.fLevels,                           // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageDesc.fImageTiling,                      // VkImageTiling
        imageDesc.fUsageFlags,                       // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyIndexCount
        nullptr,                                     // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    VkImage image = VK_NULL_HANDLE;
    VkResult result;
    GR_VK_CALL_RESULT(gpu, result, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
    if (result != VK_SUCCESS) {
        return false;
    }

    skgpu::Protected isProtected = gpu->protectedContext() ? skgpu::Protected::kYes
                                                           : skgpu::Protected::kNo;
    bool forceDedicatedMemory = gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory();
    bool useLazyAllocation =
            SkToBool(imageDesc.fUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);

    auto checkResult = [gpu, isProtected, forceDedicatedMemory, useLazyAllocation](
                               VkResult result) {
        GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::AllocImageMemory"
                                 " (isProtected:%d, forceDedicatedMemory:%d, useLazyAllocation:%d)",
                                 (int)isProtected, (int)forceDedicatedMemory,
                                 (int)useLazyAllocation);
        return gpu->checkVkResult(result);
    };
    auto allocator = gpu->memoryAllocator();
    skgpu::VulkanAlloc alloc;
    if (!skgpu::VulkanMemory::AllocImageMemory(allocator,
                                               image,
                                               isProtected,
                                               forceDedicatedMemory,
                                               useLazyAllocation,
                                               checkResult,
                                               &alloc) ||
        (useLazyAllocation &&
         !SkToBool(alloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag))) {
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    // Bind the image memory
    GR_VK_CALL_RESULT(gpu, result, BindImageMemory(gpu->device(),
                                                   image,
                                                   alloc.fMemory,
                                                   alloc.fOffset));
    if (result) {
        skgpu::VulkanMemory::FreeImageMemory(allocator, alloc);
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageDesc.fImageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = imageDesc.fFormat;
    info->fImageUsageFlags = imageDesc.fUsageFlags;
    info->fSampleCount = imageDesc.fSamples;
    info->fLevelCount = imageDesc.fLevels;
    info->fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
    info->fProtected =
            (createflags & VK_IMAGE_CREATE_PROTECTED_BIT) ? GrProtected::kYes : GrProtected::kNo;
    info->fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    return true;
}

void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    VK_CALL(gpu, DestroyImage(gpu->device(), info->fImage, nullptr));
    skgpu::VulkanMemory::FreeImageMemory(gpu->memoryAllocator(), info->fAlloc);
}

GrVkImage::~GrVkImage() {
    // should have been released first
    SkASSERT(!fResource);
    SkASSERT(!fFramebufferView);
    SkASSERT(!fTextureView);
}

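// Returns the image to the queue family it was originally wrapped with and, unless that family
// is an external or foreign one, transitions it to the present layout when swapchains are
// supported.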
void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
    VkImageLayout layout = this->currentLayout();
    if (fInitialQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInitialQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (gpu->vkCaps().supportsSwapchain()) {
            layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
        }
    }
    this->setImageLayoutAndQueueIndex(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::prepareForExternal(GrVkGpu* gpu) {
    this->setImageLayoutAndQueueIndex(gpu, this->currentLayout(), 0,
                                      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::releaseImage() {
    if (fResource) {
        fResource->unref();
        fResource = nullptr;
    }
    fFramebufferView.reset();
    fTextureView.reset();
    fCachedBlendingInputDescSet.reset();
    fCachedMSAALoadInputDescSet.reset();
}

void GrVkImage::onRelease() {
    this->releaseImage();
    GrAttachment::onRelease();
}

void GrVkImage::onAbandon() {
    this->releaseImage();
    GrAttachment::onAbandon();
}

void GrVkImage::setResourceRelease(sk_sp<RefCntedReleaseProc> releaseHelper) {
    SkASSERT(fResource);
    // Forward the release proc on to GrVkImage::Resource
    fResource->setRelease(std::move(releaseHelper));
}

void GrVkImage::Resource::freeGPUData() const {
    this->invokeReleaseProc();
    VK_CALL(fGpu, DestroyImage(fGpu->device(), fImage, nullptr));
    skgpu::VulkanMemory::FreeImageMemory(fGpu->memoryAllocator(), fAlloc);
}

void GrVkImage::BorrowedResource::freeGPUData() const {
    this->invokeReleaseProc();
}

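// Writes an input attachment descriptor for the given image view into the descriptor set at
// the uniform handler's input binding.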
static void write_input_desc_set(GrVkGpu* gpu,
                                 VkImageView view,
                                 VkImageLayout layout,
                                 VkDescriptorSet descSet) {
    VkDescriptorImageInfo imageInfo;
    memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
    imageInfo.sampler = VK_NULL_HANDLE;
    imageInfo.imageView = view;
    imageInfo.imageLayout = layout;

    VkWriteDescriptorSet writeInfo;
    memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
    writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writeInfo.pNext = nullptr;
    writeInfo.dstSet = descSet;
    writeInfo.dstBinding = GrVkUniformHandler::kInputBinding;
    writeInfo.dstArrayElement = 0;
    writeInfo.descriptorCount = 1;
    writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    writeInfo.pImageInfo = &imageInfo;
    writeInfo.pBufferInfo = nullptr;
    writeInfo.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
}

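// The next two functions lazily create and cache input-attachment descriptor sets: one used to
// read the destination during blending (general layout) and one used by the MSAA load path
// (shader-read-only layout).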
gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForBlending(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedBlendingInputDescSet) {
        return fCachedBlendingInputDescSet;
    }

    fCachedBlendingInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedBlendingInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_GENERAL,
                         *fCachedBlendingInputDescSet->descriptorSet());

    return fCachedBlendingInputDescSet;
}

gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForMSAALoad(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedMSAALoadInputDescSet) {
        return fCachedMSAALoadInputDescSet;
    }

    fCachedMSAALoadInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedMSAALoadInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                         *fCachedMSAALoadInputDescSet->descriptorSet());

    return fCachedMSAALoadInputDescSet;
}

GrVkGpu* GrVkImage::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}

#if GR_TEST_UTILS
void GrVkImage::setCurrentQueueFamilyToGraphicsQueue(GrVkGpu* gpu) {
    fMutableState->setQueueFamilyIndex(gpu->queueIndex());
}
#endif