/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkImage.h"

#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
#include "include/core/SkLog.h"
#endif
#include "include/core/SkSize.h"
#include "include/gpu/vk/VulkanMutableTextureState.h"
#include "src/gpu/ganesh/vk/GrVkCaps.h"
#include "src/gpu/ganesh/vk/GrVkDescriptorSet.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkImageView.h"
#include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
#include "src/gpu/ganesh/vk/GrVkUniformHandler.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
#include "src/gpu/ganesh/vk/GrVulkanTracker.h"
#endif
#include "src/gpu/vk/VulkanMemory.h"
#include "src/gpu/vk/VulkanMutableTextureStatePriv.h"
#include "src/gpu/vk/VulkanUtilsPriv.h"

#include <string.h>
#include <functional>
#include <utility>

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

sk_sp<GrVkImage> GrVkImage::MakeStencil(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        VkFormat format) {
    VkImageUsageFlags vkUsageFlags =
            VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kStencilAttachment,
                           sampleCnt,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           GrProtected::kNo,
                           GrMemoryless::kNo,
                           skgpu::Budgeted::kYes);
}

sk_sp<GrVkImage> GrVkImage::MakeMSAA(GrVkGpu* gpu,
                                     SkISize dimensions,
                                     int numSamples,
                                     VkFormat format,
                                     GrProtected isProtected,
                                     GrMemoryless memoryless) {
    SkASSERT(numSamples > 1);

    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    if (memoryless == GrMemoryless::kYes) {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
    } else {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    }
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kColorAttachment,
                           numSamples,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           isProtected,
                           memoryless,
                           skgpu::Budgeted::kYes);
}

sk_sp<GrVkImage> GrVkImage::MakeTexture(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        VkFormat format,
                                        uint32_t mipLevels,
                                        GrRenderable renderable,
                                        int numSamples,
                                        skgpu::Budgeted budgeted,
                                        GrProtected isProtected) {
    UsageFlags usageFlags = UsageFlags::kTexture;
    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                     VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (renderable == GrRenderable::kYes) {
        usageFlags |= UsageFlags::kColorAttachment;
        vkUsageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        // We always make our render targets support being used as input attachments
        vkUsageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    }

    return GrVkImage::Make(gpu,
                           dimensions,
                           usageFlags,
                           numSamples,
                           format,
                           mipLevels,
                           vkUsageFlags,
                           isProtected,
                           GrMemoryless::kNo,
                           budgeted);
}
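
// A minimal usage sketch for the factories above (illustrative only; the GrVkGpu*
// and the chosen format, size, and level count are assumptions, not values taken
// from a real caller):
//
//     // A mipmapped, renderable RGBA8 texture with 5 mip levels:
//     sk_sp<GrVkImage> tex = GrVkImage::MakeTexture(gpu,
//                                                   SkISize::Make(256, 256),
//                                                   VK_FORMAT_R8G8B8A8_UNORM,
//                                                   /*mipLevels=*/5,
//                                                   GrRenderable::kYes,
//                                                   /*numSamples=*/1,
//                                                   skgpu::Budgeted::kYes,
//                                                   GrProtected::kNo);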

static bool make_views(GrVkGpu* gpu,
                       const GrVkImageInfo& info,
                       GrAttachment::UsageFlags attachmentUsages,
                       sk_sp<const GrVkImageView>* framebufferView,
                       sk_sp<const GrVkImageView>* textureView) {
    GrVkImageView::Type viewType;
    if (attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) {
        // If we have stencil usage then we shouldn't have any other usages
        SkASSERT(attachmentUsages == GrAttachment::UsageFlags::kStencilAttachment);
        viewType = GrVkImageView::kStencil_Type;
    } else {
        viewType = GrVkImageView::kColor_Type;
    }

    if (SkToBool(attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) ||
        SkToBool(attachmentUsages & GrAttachment::UsageFlags::kColorAttachment)) {
        // Attachments can only have a mip level of 1
        *framebufferView = GrVkImageView::Make(
                gpu, info.fImage, info.fFormat, viewType, 1, info.fYcbcrConversionInfo);
        if (!*framebufferView) {
            return false;
        }
    }

    if (attachmentUsages & GrAttachment::UsageFlags::kTexture) {
        *textureView = GrVkImageView::Make(gpu,
                                           info.fImage,
                                           info.fFormat,
                                           viewType,
                                           info.fLevelCount,
                                           info.fYcbcrConversionInfo);
        if (!*textureView) {
            return false;
        }
    }
    return true;
}

sk_sp<GrVkImage> GrVkImage::Make(GrVkGpu* gpu,
                                 SkISize dimensions,
                                 UsageFlags attachmentUsages,
                                 int sampleCnt,
                                 VkFormat format,
                                 uint32_t mipLevels,
                                 VkImageUsageFlags vkUsageFlags,
                                 GrProtected isProtected,
                                 GrMemoryless memoryless,
                                 skgpu::Budgeted budgeted) {
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = format;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = mipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = vkUsageFlags;
    imageDesc.fIsProtected = isProtected;

    GrVkImageInfo info;
    if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
        return nullptr;
    }

    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
        GrVkImage::DestroyImageInfo(gpu, &info);
        return nullptr;
    }

    auto mutableState = sk_make_sp<skgpu::MutableTextureState>(
            skgpu::MutableTextureStates::MakeVulkan(info.fImageLayout, info.fCurrentQueueFamily));
    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          budgeted,
                                          /*label=*/"MakeVkImage"));
}

sk_sp<GrVkImage> GrVkImage::MakeWrapped(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        const GrVkImageInfo& info,
                                        sk_sp<skgpu::MutableTextureState> mutableState,
                                        UsageFlags attachmentUsages,
                                        GrWrapOwnership ownership,
                                        GrWrapCacheable cacheable,
                                        std::string_view label,
                                        bool forSecondaryCB) {
    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!forSecondaryCB) {
        if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
            return nullptr;
        }
    }

    GrBackendObjectOwnership backendOwnership = kBorrow_GrWrapOwnership == ownership
            ? GrBackendObjectOwnership::kBorrowed
            : GrBackendObjectOwnership::kOwned;

    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          backendOwnership,
                                          cacheable,
                                          forSecondaryCB,
                                          label));
}

GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<skgpu::MutableTextureState> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     skgpu::Budgeted budgeted,
                     std::string_view label)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo,
                       info.fProtected,
                       label,
                       info.fAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag
                               ? GrMemoryless::kYes
                               : GrMemoryless::kNo)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
#ifdef SKIA_OHOS
        , fBudgeted(budgeted)
#endif
        , fIsBorrowed(false) {
    this->init(gpu, false);
    this->setRealAlloc(true); // OH ISSUE: set real alloc flag
    this->registerWithCache(budgeted);
}

GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<skgpu::MutableTextureState> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     GrBackendObjectOwnership ownership,
                     GrWrapCacheable cacheable,
                     bool forSecondaryCB,
                     std::string_view label)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo,
                       info.fProtected,
                       label)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(GrBackendObjectOwnership::kBorrowed == ownership) {
    this->init(gpu, forSecondaryCB);
    this->registerWithCacheWrapped(cacheable);
}

void GrVkImage::init(GrVkGpu* gpu, bool forSecondaryCB) {
    SkASSERT(skgpu::MutableTextureStates::GetVkImageLayout(fMutableState.get()) ==
             fInfo.fImageLayout);
    SkASSERT(skgpu::MutableTextureStates::GetVkQueueFamilyIndex(fMutableState.get()) ==
             fInfo.fCurrentQueueFamily);
#ifdef SK_DEBUG
    if (fInfo.fImageUsageFlags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
        SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT));
    } else {
        if (fInfo.fAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag) {
            SkASSERT(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     !SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        } else {
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT));
            SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        }
    }
    // We can't transfer from the non-graphics queue to the graphics queue since we can't
    // release the image from the original queue without having that queue. This limits the
    // types of queue indices we can handle.
    if (fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (fInfo.fCurrentQueueFamily != gpu->queueIndex()) {
                SkASSERT(false);
            }
        } else {
            SkASSERT(false);
        }
    }
#endif
    if (forSecondaryCB) {
        fResource = nullptr;
    } else if (fIsBorrowed) {
        fResource = new BorrowedResource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    } else {
        SkASSERT(VK_NULL_HANDLE != fInfo.fAlloc.fMemory);
        fResource = new Resource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    }
}

VkPipelineStageFlags GrVkImage::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't include
    // color attachment or depth/stencil writes). So we will ignore VK_ACCESS_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in preinitialized or general layout,
    // and the image is linear.
    // TODO: Add a check for linear here so we are not always adding host to general, and we should
    // only be in preinitialized if we are linear.
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        // There are no writes that need to be made available
        flags = 0;
    }
    return flags;
}
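
// A minimal sketch of how the two Layout* helpers above pair up to form the source
// half of a barrier (illustrative only; `layout` stands for whatever layout the
// image was last known to be in):
//
//     VkAccessFlags srcAccess = GrVkImage::LayoutToSrcAccessMask(layout);
//     VkPipelineStageFlags srcStage = GrVkImage::LayoutToPipelineSrcStageFlags(layout);
//     // e.g. layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL yields
//     // VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT / VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
//     // which setImageLayoutAndQueueIndex() below plugs into a VkImageMemoryBarrier.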

VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT: // fallthrough
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

void GrVkImage::setImageLayoutAndQueueIndex(const GrVkGpu* gpu,
                                            VkImageLayout newLayout,
                                            VkAccessFlags dstAccessMask,
                                            VkPipelineStageFlags dstStageMask,
                                            bool byRegion,
                                            uint32_t newQueueFamilyIndex) {
    // Enable the following block to test new devices to confirm their lazy images stay at 0
    // memory use.
#if 0
    if (fInfo.fAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag) {
        VkDeviceSize size;
        VK_CALL(gpu, GetDeviceMemoryCommitment(gpu->device(), fInfo.fAlloc.fMemory, &size));

        SkDebugf("Lazy Image. This: %p, image: %d, size: %d\n", this, fInfo.fImage, size);
    }
#endif
    SkASSERT(!gpu->isDeviceLost());
    SkASSERT(newLayout == this->currentLayout() ||
             (VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
              VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout));
    VkImageLayout currentLayout = this->currentLayout();
    uint32_t currentQueueIndex = this->currentQueueFamilyIndex();

#ifdef SK_DEBUG
    if (fInfo.fSharingMode == VK_SHARING_MODE_CONCURRENT) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
        } else {
            SkASSERT(newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED);
        }
    } else {
        SkASSERT(fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE);
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED ||
            currentQueueIndex == gpu->queueIndex()) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT ||
                     currentQueueIndex == gpu->queueIndex());
        } else if (newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                   newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == gpu->queueIndex());
        }
    }
#endif

    if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            newQueueFamilyIndex = gpu->queueIndex();
        }
        if (currentQueueIndex == VK_QUEUE_FAMILY_IGNORED) {
            currentQueueIndex = gpu->queueIndex();
        }
    }

    // If the old and new layout are the same and the layout is a read-only layout, there is no
    // need to put in a barrier unless we also need to switch queues.
    if (newLayout == currentLayout && currentQueueIndex == newQueueFamilyIndex &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);

    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
            nullptr,                                   // pNext
            srcAccessMask,                             // srcAccessMask
            dstAccessMask,                             // dstAccessMask
            currentLayout,                             // oldLayout
            newLayout,                                 // newLayout
            currentQueueIndex,                         // srcQueueFamilyIndex
            newQueueFamilyIndex,                       // dstQueueFamilyIndex
            fInfo.fImage,                              // image
            {aspectFlags, 0, fInfo.fLevelCount, 0, 1}  // subresourceRange
    };
    SkASSERT(srcAccessMask == imageMemoryBarrier.srcAccessMask);
    gpu->addImageMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                               &imageMemoryBarrier);

    this->updateImageLayout(newLayout);
    this->setQueueFamilyIndex(newQueueFamilyIndex);
}
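
// A minimal caller-side sketch (illustrative only; not taken from a real call site):
// transitioning an image so a fragment shader can sample it on the gpu's own
// graphics queue would look roughly like
//
//     image->setImageLayoutAndQueueIndex(gpu,
//                                        VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
//                                        VK_ACCESS_SHADER_READ_BIT,
//                                        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
//                                        /*byRegion=*/false,
//                                        VK_QUEUE_FAMILY_IGNORED);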

bool GrVkImage::InitImageInfo(GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
    if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
        return false;
    }
    if ((imageDesc.fIsProtected == GrProtected::kYes) &&
        !gpu->vkCaps().supportsProtectedContent()) {
        return false;
    }

    bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!skgpu::SampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
        return false;
    }

    SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
             VK_SAMPLE_COUNT_1_BIT == vkSamples);

    VkImageCreateFlags createflags = 0;
    if (imageDesc.fIsProtected == GrProtected::kYes || gpu->protectedContext()) {
        createflags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }
    const VkImageCreateInfo imageCreateInfo = {
            VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
            nullptr,                                     // pNext
            createflags,                                 // VkImageCreateFlags
            imageDesc.fImageType,                        // VkImageType
            imageDesc.fFormat,                           // VkFormat
            {imageDesc.fWidth, imageDesc.fHeight, 1},    // VkExtent3D
            imageDesc.fLevels,                           // mipLevels
            1,                                           // arrayLayers
            vkSamples,                                   // samples
            imageDesc.fImageTiling,                      // VkImageTiling
            imageDesc.fUsageFlags,                       // VkImageUsageFlags
            VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
            0,                                           // queueFamilyIndexCount
            nullptr,                                     // pQueueFamilyIndices
            initialLayout                                // initialLayout
    };

    VkImage image = VK_NULL_HANDLE;
    VkResult result;
    GR_VK_CALL_RESULT(gpu, result, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
    if (result != VK_SUCCESS) {
        return false;
    }

    skgpu::Protected isProtected = gpu->protectedContext() ? skgpu::Protected::kYes
                                                           : skgpu::Protected::kNo;
    bool forceDedicatedMemory = gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory();
    bool useLazyAllocation =
            SkToBool(imageDesc.fUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);

    auto checkResult = [gpu, isProtected, forceDedicatedMemory, useLazyAllocation](
                               VkResult result) {
        GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::AllocImageMemory"
                                 " (isProtected:%d, forceDedicatedMemory:%d, useLazyAllocation:%d)",
                                 (int)isProtected, (int)forceDedicatedMemory,
                                 (int)useLazyAllocation);
        return gpu->checkVkResult(result);
    };
    auto allocator = gpu->memoryAllocator();
    auto allocatorCacheImage = gpu->memoryAllocatorCacheImage();
    skgpu::VulkanAlloc alloc;
    if (!skgpu::VulkanMemory::AllocImageMemory(allocator,
                                               allocatorCacheImage,
                                               image,
                                               isProtected,
                                               forceDedicatedMemory,
                                               useLazyAllocation,
                                               checkResult,
                                               &alloc,
                                               imageDesc.fWidth * imageDesc.fHeight * 4) ||
        (useLazyAllocation &&
         !SkToBool(alloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag))) {
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    // Bind image memory
    GR_VK_CALL_RESULT(gpu, result, BindImageMemory(gpu->device(),
                                                   image,
                                                   alloc.fMemory,
                                                   alloc.fOffset));
    if (result != VK_SUCCESS) {
        skgpu::VulkanMemory::FreeImageMemory(allocator, alloc);
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

#ifdef SKIA_DFX_FOR_OHOS
    alloc.fBytes = alloc.fSize;
    gpu->addAllocImageBytes(alloc.fSize);
#endif

    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageDesc.fImageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = imageDesc.fFormat;
    info->fImageUsageFlags = imageDesc.fUsageFlags;
    info->fSampleCount = imageDesc.fSamples;
    info->fLevelCount = imageDesc.fLevels;
    info->fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
    info->fProtected =
            (createflags & VK_IMAGE_CREATE_PROTECTED_BIT) ? GrProtected::kYes : GrProtected::kNo;
    info->fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    return true;
}
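
// A minimal create/teardown sketch for InitImageInfo()/DestroyImageInfo()
// (illustrative only; the format, size, and usage flags are assumptions, not
// values from a real caller). On success the caller owns info.fImage and
// info.fAlloc until they are handed to a GrVkImage or released via
// DestroyImageInfo():
//
//     GrVkImage::ImageDesc desc;
//     desc.fImageType = VK_IMAGE_TYPE_2D;
//     desc.fFormat = VK_FORMAT_R8G8B8A8_UNORM;
//     desc.fWidth = 256;
//     desc.fHeight = 256;
//     desc.fLevels = 1;
//     desc.fSamples = 1;
//     desc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
//     desc.fUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
//                        VK_IMAGE_USAGE_TRANSFER_DST_BIT;
//     desc.fIsProtected = GrProtected::kNo;
//
//     GrVkImageInfo info;
//     if (GrVkImage::InitImageInfo(gpu, desc, &info)) {
//         // ... use info.fImage ...
//         GrVkImage::DestroyImageInfo(gpu, &info);
//     }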

void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    VK_CALL(gpu, DestroyImage(gpu->device(), info->fImage, nullptr));
#ifdef SKIA_DFX_FOR_OHOS
    const_cast<GrVkGpu*>(gpu)->removeAllocImageBytes(info->fAlloc.fBytes);
#endif
    skgpu::VulkanMemory::FreeImageMemory(gpu->memoryAllocator(), info->fAlloc);
}

GrVkImage::~GrVkImage() {
    // should have been released first
    SkASSERT(!fResource);
    SkASSERT(!fFramebufferView);
    SkASSERT(!fTextureView);
}

void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
    VkImageLayout layout = this->currentLayout();
    if (fInitialQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInitialQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (gpu->vkCaps().supportsSwapchain()) {
            layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
        }
    }
    this->setImageLayoutAndQueueIndex(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::prepareForExternal(GrVkGpu* gpu) {
    this->setImageLayoutAndQueueIndex(gpu, this->currentLayout(), 0,
                                      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::releaseImage() {
    if (fResource) {
        fResource->unref();
        fResource = nullptr;
    }
    fFramebufferView.reset();
    fTextureView.reset();
    fCachedBlendingInputDescSet.reset();
    fCachedMSAALoadInputDescSet.reset();
}

void GrVkImage::onRelease() {
    this->releaseImage();
    GrAttachment::onRelease();
}

void GrVkImage::onAbandon() {
    this->releaseImage();
    GrAttachment::onAbandon();
}

void GrVkImage::setResourceRelease(sk_sp<RefCntedReleaseProc> releaseHelper) {
    SkASSERT(fResource);
    // Forward the release proc on to GrVkImage::Resource
    fResource->setRelease(std::move(releaseHelper));
}

#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
void GrVkImage::dumpVkImageInfo(std::stringstream& dump) const {
    auto vkGpu = getVkGpu();
    if (vkGpu == nullptr) {
        SK_LOGE("GrVkImage::dumpVkImageInfo vkGpu nullptr");
        return;
    }
    VkMemoryRequirements memRequirements;
    VK_CALL(vkGpu, GetImageMemoryRequirements(vkGpu->device(), image(), &memRequirements));
    VkDeviceSize imageSize = memRequirements.size;

    if (fResource == nullptr) {
        SK_LOGE("GrVkImage::dumpVkImageInfo fResource nullptr");
        return;
    }
    fResource->dumpVkImageResource(dump);
    dump << "Borrowed: " << isBorrowed() << ", " << "ImageSize: " << imageSize << ", ";
    if (fResource->fCaller == nullptr) {
        SK_LOGE("GrVkImage::dumpVkImageInfo fCaller nullptr");
    } else {
        fResource->fCaller->Dump(dump);
    }
    dump << "\n";
}

void GrVkImage::Resource::dumpVkImageResource(std::stringstream& dump) {
    dump << "VkImage: " << fImage << ", "
         << "Memory: " << fAlloc.fMemory << ", "
         << "Offset: " << fAlloc.fOffset << ", "
         << "Size: " << fAlloc.fSize << ", ";
}

void GrVkImage::Resource::RecordFreeVkImage(bool isBorrowed) const {
    static const bool isInRenderService = IsRenderService();
    if (isInRenderService) {
        ParallelDebug::VkImageDestroyRecord::Record(fImage, isBorrowed, fCaller, fAlloc.fMemory);
    }
}

void GrVkImage::updateNodeId(uint64_t nodeId) {
    if (fResource && fResource->fCaller) {
        fResource->fCaller->nodeId_ = nodeId;
    }
}
#endif

GrVkImage::Resource::~Resource() {
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
    ParallelDebug::DestroyVkImageInvokeRecord(fCaller);
#endif
}

void GrVkImage::Resource::freeGPUData() const {
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
    // This Resource owns its image (the borrowed case is handled by BorrowedResource
    // below), so record the free as not borrowed.
    RecordFreeVkImage(/*isBorrowed=*/false);
#endif
    this->invokeReleaseProc();
    VK_CALL(fGpu, DestroyImage(fGpu->device(), fImage, nullptr));
#ifdef SKIA_DFX_FOR_OHOS
    const_cast<GrVkGpu*>(fGpu)->removeAllocImageBytes(fAlloc.fBytes);
#endif
    skgpu::VulkanMemory::FreeImageMemory(fGpu->memoryAllocator(), fAlloc);
}

void GrVkImage::BorrowedResource::freeGPUData() const {
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
    RecordFreeVkImage(/*isBorrowed=*/true);
#endif
    this->invokeReleaseProc();
}

static void write_input_desc_set(GrVkGpu* gpu,
                                 VkImageView view,
                                 VkImageLayout layout,
                                 VkDescriptorSet descSet) {
    VkDescriptorImageInfo imageInfo;
    memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
    imageInfo.sampler = VK_NULL_HANDLE;
    imageInfo.imageView = view;
    imageInfo.imageLayout = layout;

    VkWriteDescriptorSet writeInfo;
    memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
    writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writeInfo.pNext = nullptr;
    writeInfo.dstSet = descSet;
    writeInfo.dstBinding = GrVkUniformHandler::kInputBinding;
    writeInfo.dstArrayElement = 0;
    writeInfo.descriptorCount = 1;
    writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    writeInfo.pImageInfo = &imageInfo;
    writeInfo.pBufferInfo = nullptr;
    writeInfo.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
}

gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForBlending(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedBlendingInputDescSet) {
        return fCachedBlendingInputDescSet;
    }

    fCachedBlendingInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedBlendingInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_GENERAL,
                         *fCachedBlendingInputDescSet->descriptorSet());

    return fCachedBlendingInputDescSet;
}

gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForMSAALoad(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedMSAALoadInputDescSet) {
        return fCachedMSAALoadInputDescSet;
    }

    fCachedMSAALoadInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedMSAALoadInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                         *fCachedMSAALoadInputDescSet->descriptorSet());

    return fCachedMSAALoadInputDescSet;
}

GrVkGpu* GrVkImage::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}

size_t GrVkImage::onGpuMemorySize() const {
    if (this->supportedUsages() & UsageFlags::kTexture) {
        return GrSurface::ComputeSize(this->backendFormat(), this->dimensions(), 1,
                                      this->mipmapped());
    } else {
        return GrAttachment::onGpuMemorySize();
    }
}

#if defined(GPU_TEST_UTILS)
void GrVkImage::setCurrentQueueFamilyToGraphicsQueue(GrVkGpu* gpu) {
    skgpu::MutableTextureStates::SetVkQueueFamilyIndex(fMutableState.get(), gpu->queueIndex());
}
#endif