• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2015 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
9 #include "include/gpu/vk/GrVulkanTracker.h"
10 #endif
11 #include "src/gpu/vk/GrVkImage.h"
12 
13 #include "src/gpu/vk/GrVkGpu.h"
14 #include "src/gpu/vk/GrVkImageView.h"
15 #include "src/gpu/vk/GrVkMemory.h"
16 #include "src/gpu/vk/GrVkTexture.h"
17 #include "src/gpu/vk/GrVkUtil.h"
18 
19 #define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
20 constexpr uint32_t VKIMAGE_LIMIT_SIZE = 10000 * 10000; // Vk-Image Size need less than 10000*10000
21 
MakeStencil(GrVkGpu * gpu,SkISize dimensions,int sampleCnt,VkFormat format)22 sk_sp<GrVkImage> GrVkImage::MakeStencil(GrVkGpu* gpu,
23                                         SkISize dimensions,
24                                         int sampleCnt,
25                                         VkFormat format) {
26     VkImageUsageFlags vkUsageFlags =
27             VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
28     return GrVkImage::Make(gpu,
29                            dimensions,
30                            UsageFlags::kStencilAttachment,
31                            sampleCnt,
32                            format,
33                            /*mipLevels=*/1,
34                            vkUsageFlags,
35                            GrProtected::kNo,
36                            GrMemoryless::kNo,
37                            SkBudgeted::kYes);
38 }
39 
MakeMSAA(GrVkGpu * gpu,SkISize dimensions,int numSamples,VkFormat format,GrProtected isProtected,GrMemoryless memoryless)40 sk_sp<GrVkImage> GrVkImage::MakeMSAA(GrVkGpu* gpu,
41                                      SkISize dimensions,
42                                      int numSamples,
43                                      VkFormat format,
44                                      GrProtected isProtected,
45                                      GrMemoryless memoryless) {
46     SkASSERT(numSamples > 1);
47 
48     VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
49     if (memoryless == GrMemoryless::kYes) {
50         vkUsageFlags |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
51     } else {
52         vkUsageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
53     }
54     return GrVkImage::Make(gpu,
55                            dimensions,
56                            UsageFlags::kColorAttachment,
57                            numSamples,
58                            format,
59                            /*mipLevels=*/1,
60                            vkUsageFlags,
61                            isProtected,
62                            memoryless,
63                            SkBudgeted::kYes);
64 }
65 
MakeTexture(GrVkGpu * gpu,SkISize dimensions,VkFormat format,uint32_t mipLevels,GrRenderable renderable,int numSamples,SkBudgeted budgeted,GrProtected isProtected)66 sk_sp<GrVkImage> GrVkImage::MakeTexture(GrVkGpu* gpu,
67                                         SkISize dimensions,
68                                         VkFormat format,
69                                         uint32_t mipLevels,
70                                         GrRenderable renderable,
71                                         int numSamples,
72                                         SkBudgeted budgeted,
73                                         GrProtected isProtected) {
74     UsageFlags usageFlags = UsageFlags::kTexture;
75     VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
76                                      VK_IMAGE_USAGE_TRANSFER_DST_BIT;
77     if (renderable == GrRenderable::kYes) {
78         usageFlags |= UsageFlags::kColorAttachment;
79         vkUsageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
80         // We always make our render targets support being used as input attachments
81         vkUsageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
82     }
83 
84     return GrVkImage::Make(gpu,
85                            dimensions,
86                            usageFlags,
87                            numSamples,
88                            format,
89                            mipLevels,
90                            vkUsageFlags,
91                            isProtected,
92                            GrMemoryless::kNo,
93                            budgeted);
94 }
95 
make_views(GrVkGpu * gpu,const GrVkImageInfo & info,GrAttachment::UsageFlags attachmentUsages,sk_sp<const GrVkImageView> * framebufferView,sk_sp<const GrVkImageView> * textureView)96 static bool make_views(GrVkGpu* gpu,
97                        const GrVkImageInfo& info,
98                        GrAttachment::UsageFlags attachmentUsages,
99                        sk_sp<const GrVkImageView>* framebufferView,
100                        sk_sp<const GrVkImageView>* textureView) {
101     GrVkImageView::Type viewType;
102     if (attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) {
103         // If we have stencil usage then we shouldn't have any other usages
104         SkASSERT(attachmentUsages == GrAttachment::UsageFlags::kStencilAttachment);
105         viewType = GrVkImageView::kStencil_Type;
106     } else {
107         viewType = GrVkImageView::kColor_Type;
108     }
109 
110     if (SkToBool(attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) ||
111         SkToBool(attachmentUsages & GrAttachment::UsageFlags::kColorAttachment)) {
112         // Attachments can only have a mip level of 1
113         *framebufferView = GrVkImageView::Make(
114                 gpu, info.fImage, info.fFormat, viewType, 1, info.fYcbcrConversionInfo);
115         if (!*framebufferView) {
116             return false;
117         }
118     }
119 
120     if (attachmentUsages & GrAttachment::UsageFlags::kTexture) {
121         *textureView = GrVkImageView::Make(gpu,
122                                            info.fImage,
123                                            info.fFormat,
124                                            viewType,
125                                            info.fLevelCount,
126                                            info.fYcbcrConversionInfo);
127         if (!*textureView) {
128             return false;
129         }
130     }
131     return true;
132 }
133 
// Creates a brand new VkImage (plus its memory and views) and wraps it in a GrVkImage.
// Returns nullptr on failure; any partially created Vulkan objects are destroyed.
sk_sp<GrVkImage> GrVkImage::Make(GrVkGpu* gpu,
                                 SkISize dimensions,
                                 UsageFlags attachmentUsages,
                                 int sampleCnt,
                                 VkFormat format,
                                 uint32_t mipLevels,
                                 VkImageUsageFlags vkUsageFlags,
                                 GrProtected isProtected,
                                 GrMemoryless memoryless,
                                 SkBudgeted budgeted) {
    // Describe a 2D, optimally tiled image for InitImageInfo.
    // NOTE(review): `memoryless` is not consulted here; callers appear to encode it via
    // VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT in vkUsageFlags — confirm this is intended.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = format;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = mipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = vkUsageFlags;
    imageDesc.fIsProtected = isProtected;

    GrVkImageInfo info;
    if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
        return nullptr;
    }

    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
        // View creation failed: release the image and its memory before bailing out.
        GrVkImage::DestroyImageInfo(gpu, &info);
        return nullptr;
    }

    // Track layout/queue-family state mutably, starting from the freshly created state.
    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState(
            new GrBackendSurfaceMutableStateImpl(info.fImageLayout, info.fCurrentQueueFamily));
    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          budgeted));
}
178 
MakeWrapped(GrVkGpu * gpu,SkISize dimensions,const GrVkImageInfo & info,sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,UsageFlags attachmentUsages,GrWrapOwnership ownership,GrWrapCacheable cacheable,bool forSecondaryCB)179 sk_sp<GrVkImage> GrVkImage::MakeWrapped(GrVkGpu* gpu,
180                                         SkISize dimensions,
181                                         const GrVkImageInfo& info,
182                                         sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
183                                         UsageFlags attachmentUsages,
184                                         GrWrapOwnership ownership,
185                                         GrWrapCacheable cacheable,
186                                         bool forSecondaryCB) {
187     sk_sp<const GrVkImageView> framebufferView;
188     sk_sp<const GrVkImageView> textureView;
189     if (!forSecondaryCB) {
190         if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
191             return nullptr;
192         }
193     }
194 
195     GrBackendObjectOwnership backendOwnership = kBorrow_GrWrapOwnership == ownership
196                                                         ? GrBackendObjectOwnership::kBorrowed
197                                                         : GrBackendObjectOwnership::kOwned;
198 
199     return sk_sp<GrVkImage>(new GrVkImage(gpu,
200                                           dimensions,
201                                           attachmentUsages,
202                                           info,
203                                           std::move(mutableState),
204                                           std::move(framebufferView),
205                                           std::move(textureView),
206                                           backendOwnership,
207                                           cacheable,
208                                           forSecondaryCB));
209 }
210 
// OH ISSUE: Integrate Destroy and Free
// Destroys the VkImage handle and then frees its backing device memory allocation.
void GrVkImage::DestroyAndFreeImageMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc, const VkImage& image)
{
    VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
    GrVkMemory::FreeImageMemory(gpu, alloc);
}
217 
// Constructor for images created and owned by Ganesh (never borrowed).
GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     SkBudgeted budgeted)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo,
                       info.fProtected,
                       // A lazily allocated backing store means the attachment is memoryless.
                       info.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag ? GrMemoryless::kYes
                                                                             : GrMemoryless::kNo)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
#ifdef SKIA_OHOS
        , fBudgeted(budgeted)
#endif
        , fIsBorrowed(false) {
    this->init(gpu, false);
    this->setRealAlloc(true); // OH ISSUE: set real alloc flag
    // OH ISSUE: set real alloc size (4 bytes-per-pixel estimate — presumably RGBA8; confirm)
    this->setRealAllocSize(dimensions.height() * dimensions.width() * 4);
    this->registerWithCache(budgeted);
}
248 
// Constructor for wrapped (externally created) images; ownership depends on how the
// image was wrapped, and secondary-CB wrappers skip resource tracking (see init()).
GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     GrBackendObjectOwnership ownership,
                     GrWrapCacheable cacheable,
                     bool forSecondaryCB)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo,
                       info.fProtected)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(GrBackendObjectOwnership::kBorrowed == ownership) {
    this->init(gpu, forSecondaryCB);
#ifdef SKIA_DFX_FOR_OHOS
    if (RealAllocConfig::GetRealAllocStatus()) {
        // OH ISSUE: set real alloc flag
        this->setRealAlloc(true);
        // OH ISSUE: set real alloc size (4 bytes-per-pixel estimate)
        this->setRealAllocSize(dimensions.height() * dimensions.width() * 4);
    }
#endif
    this->registerWithCacheWrapped(cacheable);
}
282 
// Shared post-construction setup: validates fInfo against debug-only invariants and
// creates (or, for secondary command buffers, skips) the managed resource wrapper.
void GrVkImage::init(GrVkGpu* gpu, bool forSecondaryCB) {
    SkASSERT(fMutableState->getImageLayout() == fInfo.fImageLayout);
    SkASSERT(fMutableState->getQueueFamilyIndex() == fInfo.fCurrentQueueFamily);
#ifdef SK_DEBUG
    if (fInfo.fImageUsageFlags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
        SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT));
    } else {
        if (fInfo.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag) {
            // Lazily allocated images must be transient-only: no transfers in or out.
            SkASSERT(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     !SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        } else {
            // Fully backed images must support both transfer directions and not be transient.
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT));
            SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        }
    }
    // We can't transfer from the non graphics queue to the graphics queue since we can't
    // release the image from the original queue without having that queue. This limits us in terms
    // of the types of queue indices we can handle.
    if (fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (fInfo.fCurrentQueueFamily != gpu->queueIndex()) {
                SkASSERT(false);
            }
        } else {
            SkASSERT(false);
        }
    }
#endif
    if (forSecondaryCB) {
        // Secondary-command-buffer wrappers neither own nor track the underlying image.
        fResource = nullptr;
    } else if (fIsBorrowed) {
        fResource = new BorrowedResource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    } else {
        SkASSERT(VK_NULL_HANDLE != fInfo.fAlloc.fMemory);
        fResource = new Resource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    }
}
324 
LayoutToPipelineSrcStageFlags(const VkImageLayout layout)325 VkPipelineStageFlags GrVkImage::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
326     if (VK_IMAGE_LAYOUT_GENERAL == layout) {
327         return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
328     } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
329                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
330         return VK_PIPELINE_STAGE_TRANSFER_BIT;
331     } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
332         return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
333     } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
334                VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
335         return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
336     } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
337         return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
338     } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
339         return VK_PIPELINE_STAGE_HOST_BIT;
340     } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
341         return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
342     }
343 
344     SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
345     return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
346 }
347 
LayoutToSrcAccessMask(const VkImageLayout layout)348 VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
349     // Currently we assume we will never being doing any explict shader writes (this doesn't include
350     // color attachment or depth/stencil writes). So we will ignore the
351     // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.
352 
353     // We can only directly access the host memory if we are in preinitialized or general layout,
354     // and the image is linear.
355     // TODO: Add check for linear here so we are not always adding host to general, and we should
356     //       only be in preinitialized if we are linear
357     VkAccessFlags flags = 0;
358     if (VK_IMAGE_LAYOUT_GENERAL == layout) {
359         flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
360                 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
361                 VK_ACCESS_TRANSFER_WRITE_BIT |
362                 VK_ACCESS_HOST_WRITE_BIT;
363     } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
364         flags = VK_ACCESS_HOST_WRITE_BIT;
365     } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
366         flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
367     } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
368         flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
369     } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
370         flags = VK_ACCESS_TRANSFER_WRITE_BIT;
371     } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
372                VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
373                VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
374         // There are no writes that need to be made available
375         flags = 0;
376     }
377     return flags;
378 }
379 
vk_format_to_aspect_flags(VkFormat format)380 VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
381     switch (format) {
382         case VK_FORMAT_S8_UINT:
383             return VK_IMAGE_ASPECT_STENCIL_BIT;
384         case VK_FORMAT_D24_UNORM_S8_UINT: // fallthrough
385         case VK_FORMAT_D32_SFLOAT_S8_UINT:
386             return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
387         default:
388             return VK_IMAGE_ASPECT_COLOR_BIT;
389     }
390 }
391 
// Records an image memory barrier that transitions this image to newLayout and/or
// transfers it to newQueueFamilyIndex, then updates the tracked mutable state.
void GrVkImage::setImageLayoutAndQueueIndex(const GrVkGpu* gpu,
                                            VkImageLayout newLayout,
                                            VkAccessFlags dstAccessMask,
                                            VkPipelineStageFlags dstStageMask,
                                            bool byRegion,
                                            uint32_t newQueueFamilyIndex) {
// Enable the following block to test new devices to confirm their lazy images stay at 0 memory use.
#if 0
    if (fInfo.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag) {
        VkDeviceSize size;
        VK_CALL(gpu, GetDeviceMemoryCommitment(gpu->device(), fInfo.fAlloc.fMemory, &size));

        SkDebugf("Lazy Image. This: %p, image: %d, size: %d\n", this, fInfo.fImage, size);
    }
#endif
    SkASSERT(!gpu->isDeviceLost());
    // Transitioning *to* undefined/preinitialized is illegal unless it is a no-op.
    SkASSERT(newLayout == this->currentLayout() ||
             (VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
              VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout));
    VkImageLayout currentLayout = this->currentLayout();
    uint32_t currentQueueIndex = this->currentQueueFamilyIndex();

#ifdef SK_DEBUG
    // Validate that the requested queue-family transition is one we can legally express
    // given the image's sharing mode.
    if (fInfo.fSharingMode == VK_SHARING_MODE_CONCURRENT) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
        } else {
            SkASSERT(newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED);
        }
    } else {
        SkASSERT(fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE);
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED ||
            currentQueueIndex == gpu->queueIndex()) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT ||
                     currentQueueIndex == gpu->queueIndex());
        } else if (newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                   newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == gpu->queueIndex());
        }
    }
#endif

    // For exclusive images, "ignored" means our own graphics queue.
    if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            newQueueFamilyIndex = gpu->queueIndex();
        }
        if (currentQueueIndex == VK_QUEUE_FAMILY_IGNORED) {
            currentQueueIndex = gpu->queueIndex();
        }
    }

    // If the old and new layout are the same and the layout is a read only layout, there is no need
    // to put in a barrier unless we also need to switch queues.
    if (newLayout == currentLayout && currentQueueIndex == newQueueFamilyIndex &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);

    // Barrier covers every mip level of the single array layer.
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,          // sType
        nullptr,                                         // pNext
        srcAccessMask,                                   // srcAccessMask
        dstAccessMask,                                   // dstAccessMask
        currentLayout,                                   // oldLayout
        newLayout,                                       // newLayout
        currentQueueIndex,                               // srcQueueFamilyIndex
        newQueueFamilyIndex,                             // dstQueueFamilyIndex
        fInfo.fImage,                                    // image
        { aspectFlags, 0, fInfo.fLevelCount, 0, 1 }      // subresourceRange
    };
    SkASSERT(srcAccessMask == imageMemoryBarrier.srcAccessMask);
    gpu->addImageMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                               &imageMemoryBarrier);

    // Keep the tracked mutable state in sync with what the barrier will produce.
    this->updateImageLayout(newLayout);
    this->setQueueFamilyIndex(newQueueFamilyIndex);
}
483 
InitImageInfo(GrVkGpu * gpu,const ImageDesc & imageDesc,GrVkImageInfo * info)484 bool GrVkImage::InitImageInfo(GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
485     if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
486         return false;
487     }
488     if ((imageDesc.fIsProtected == GrProtected::kYes) && !gpu->vkCaps().supportsProtectedMemory()) {
489         return false;
490     }
491 
492     bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
493     VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
494                                            : VK_IMAGE_LAYOUT_UNDEFINED;
495 
496     // Create Image
497     VkSampleCountFlagBits vkSamples;
498     if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
499         return false;
500     }
501 
502     SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
503              VK_SAMPLE_COUNT_1_BIT == vkSamples);
504 
505     VkImageCreateFlags createflags = 0;
506     if (imageDesc.fIsProtected == GrProtected::kYes || gpu->protectedContext()) {
507         createflags |= VK_IMAGE_CREATE_PROTECTED_BIT;
508     }
509     const VkImageCreateInfo imageCreateInfo = {
510         VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
511         nullptr,                                     // pNext
512         createflags,                                 // VkImageCreateFlags
513         imageDesc.fImageType,                        // VkImageType
514         imageDesc.fFormat,                           // VkFormat
515         { imageDesc.fWidth, imageDesc.fHeight, 1 },  // VkExtent3D
516         imageDesc.fLevels,                           // mipLevels
517         1,                                           // arrayLayers
518         vkSamples,                                   // samples
519         imageDesc.fImageTiling,                      // VkImageTiling
520         imageDesc.fUsageFlags,                       // VkImageUsageFlags
521         VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
522         0,                                           // queueFamilyCount
523         nullptr,                                     // pQueueFamilyIndices
524         initialLayout                                // initialLayout
525     };
526 
527     VkImage image = VK_NULL_HANDLE;
528     VkResult result;
529     if (imageDesc.fWidth * imageDesc.fHeight > VKIMAGE_LIMIT_SIZE) {
530         SkDebugf("GrVkImage::InitImageInfoInner failed, image is too large, width:%u, height::%u",
531             imageDesc.fWidth, imageDesc.fHeight);
532         return false;
533     }
534     GR_VK_CALL_RESULT(gpu, result, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
535     if (result != VK_SUCCESS) {
536         return false;
537     }
538 
539     GrMemoryless memoryless = imageDesc.fUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT
540                                       ? GrMemoryless::kYes
541                                       : GrMemoryless::kNo;
542     GrVkAlloc alloc;
543     if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, memoryless, &alloc,
544         imageDesc.fWidth * imageDesc.fHeight * 4) ||
545         (memoryless == GrMemoryless::kYes &&
546          !SkToBool(alloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag))) {
547         VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
548         return false;
549     }
550 
551     info->fImage = image;
552     info->fAlloc = alloc;
553     info->fImageTiling = imageDesc.fImageTiling;
554     info->fImageLayout = initialLayout;
555     info->fFormat = imageDesc.fFormat;
556     info->fImageUsageFlags = imageDesc.fUsageFlags;
557     info->fSampleCount = imageDesc.fSamples;
558     info->fLevelCount = imageDesc.fLevels;
559     info->fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
560     info->fProtected =
561             (createflags & VK_IMAGE_CREATE_PROTECTED_BIT) ? GrProtected::kYes : GrProtected::kNo;
562     info->fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
563     return true;
564 }
565 
// Releases the VkImage and memory previously created by InitImageInfo.
void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    DestroyAndFreeImageMemory(gpu, info->fAlloc, info->fImage);
}
569 
// Destruction must be preceded by onRelease()/onAbandon() (via releaseImage()),
// which drop the resource and views; here we only verify that happened.
GrVkImage::~GrVkImage() {
    // should have been released first
    SkASSERT(!fResource);
    SkASSERT(!fFramebufferView);
    SkASSERT(!fTextureView);
}
576 
prepareForPresent(GrVkGpu * gpu)577 void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
578     VkImageLayout layout = this->currentLayout();
579     if (fInitialQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
580         fInitialQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
581         if (gpu->vkCaps().supportsSwapchain()) {
582             layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
583         }
584     }
585     this->setImageLayoutAndQueueIndex(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
586                                       fInitialQueueFamily);
587 }
588 
// Returns the image to its initial queue family (layout unchanged) so an external
// user can take it over.
void GrVkImage::prepareForExternal(GrVkGpu* gpu) {
    this->setImageLayoutAndQueueIndex(gpu, this->currentLayout(), 0,
                                      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                     fInitialQueueFamily);
}
594 
// Drops this image's reference to its managed resource and resets all cached views
// and descriptor sets. Safe to call when fResource is already null (secondary-CB case).
void GrVkImage::releaseImage() {
    if (fResource) {
        fResource->unref();
        fResource = nullptr;
    }
    fFramebufferView.reset();
    fTextureView.reset();
    fCachedBlendingInputDescSet.reset();
    fCachedMSAALoadInputDescSet.reset();
}
605 
// Releases GPU objects, then forwards to the base-class release handling.
void GrVkImage::onRelease() {
    this->releaseImage();
    GrAttachment::onRelease();
}
610 
// Abandon path mirrors onRelease: drop GPU objects, then defer to the base class.
void GrVkImage::onAbandon() {
    this->releaseImage();
    GrAttachment::onAbandon();
}
615 
// Installs a callback to be invoked when the underlying GPU resource is freed.
// Must be called while fResource is still alive.
void GrVkImage::setResourceRelease(sk_sp<GrRefCntedCallback> releaseHelper) {
    SkASSERT(fResource);
    // Forward the release proc on to GrVkImage::Resource
    fResource->setRelease(std::move(releaseHelper));
}
621 
622 #ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
// DFX helper: appends a one-line description of this image (handles, memory
// binding, real allocation size, borrowed flag, and caller info) to 'dump'.
void GrVkImage::dumpVkImageInfo(std::stringstream& dump) const {
    // Ask the driver for the actual memory requirements so the reported size
    // reflects the real allocation, not just width*height*bpp.
    VkMemoryRequirements memRequirements;
    VK_CALL(getVkGpu(), GetImageMemoryRequirements(getVkGpu()->device(), image(), &memRequirements));
    VkDeviceSize imageSize = memRequirements.size;

    fResource->dumpVkImageResource(dump);
    dump << "Borrowed: " << isBorrowed() << ", " << "ImageSize: " << imageSize << ", ";
    // fCaller records where this image was created (see GrVulkanTracker).
    fResource->fCaller->Dump(dump);
    dump << "\n";
}
633 
dumpVkImageResource(std::stringstream & dump)634 void GrVkImage::Resource::dumpVkImageResource(std::stringstream& dump) {
635     dump << "VkImage: " << fResource->fImage << ", "
636         << "Memory: " << fResource->fAlloc.fMemory << ", "
637         << "Offset: " << fResource->fAlloc.fOffset << ", "
638         << "Size: " << fResource->fAlloc.fSize << ", ";
639 }
640 
RecordFreeVkImage(bool isBorrowed) const641 void GrVkimage::Resource::RecordFreeVkImage(bool isBorrowed) const {
642     static const bool isInRenderSevice = IsRenderService();
643     if (isInRenderSevice) {
644         ParallelDebug::VkImageDestroyRecord::Record(fImage, isBorrowed, fCaller, fAlloc.fMemory);
645     }
646 }
647 #endif
648 
GrVkImage::Resource::~Resource() {
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
    // Presumably tears down the per-caller tracking record created for this
    // resource — confirm against GrVulkanTracker.
    ParallelDebug::DestroyVkImageInvokeRecord(fCaller);
#endif
}
654 
freeGPUData() const655 void GrVkImage::Resource::freeGPUData() const {
656 #ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
657     RecordFreeVkImage(true);
658 #endif
659     this->invokeReleaseProc();
660 
661     // OH ISSUE: asyn memory reclaimer
662     auto reclaimer = fGpu->memoryReclaimer();
663     if (reclaimer && reclaimer->addMemoryToWaitQueue(fGpu, fAlloc, fImage)) {
664         return;
665     }
666 
667     DestroyAndFreeImageMemory(fGpu, fAlloc, fImage);
668 }
669 
void GrVkImage::BorrowedResource::freeGPUData() const {
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
    RecordFreeVkImage(false);
#endif
    // Borrowed images are owned externally: invoke the release callback but do
    // not destroy the VkImage or free its memory here.
    this->invokeReleaseProc();
}
676 
write_input_desc_set(GrVkGpu * gpu,VkImageView view,VkImageLayout layout,VkDescriptorSet descSet)677 static void write_input_desc_set(GrVkGpu* gpu,
678                                  VkImageView view,
679                                  VkImageLayout layout,
680                                  VkDescriptorSet descSet) {
681     VkDescriptorImageInfo imageInfo;
682     memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
683     imageInfo.sampler = VK_NULL_HANDLE;
684     imageInfo.imageView = view;
685     imageInfo.imageLayout = layout;
686 
687     VkWriteDescriptorSet writeInfo;
688     memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
689     writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
690     writeInfo.pNext = nullptr;
691     writeInfo.dstSet = descSet;
692     writeInfo.dstBinding = GrVkUniformHandler::kInputBinding;
693     writeInfo.dstArrayElement = 0;
694     writeInfo.descriptorCount = 1;
695     writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
696     writeInfo.pImageInfo = &imageInfo;
697     writeInfo.pBufferInfo = nullptr;
698     writeInfo.pTexelBufferView = nullptr;
699 
700     GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
701 }
702 
inputDescSetForBlending(GrVkGpu * gpu)703 gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForBlending(GrVkGpu* gpu) {
704     if (!this->supportsInputAttachmentUsage()) {
705         return nullptr;
706     }
707     if (fCachedBlendingInputDescSet) {
708         return fCachedBlendingInputDescSet;
709     }
710 
711     fCachedBlendingInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
712     if (!fCachedBlendingInputDescSet) {
713         return nullptr;
714     }
715 
716     write_input_desc_set(gpu,
717                          this->framebufferView()->imageView(),
718                          VK_IMAGE_LAYOUT_GENERAL,
719                          *fCachedBlendingInputDescSet->descriptorSet());
720 
721     return fCachedBlendingInputDescSet;
722 }
723 
inputDescSetForMSAALoad(GrVkGpu * gpu)724 gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForMSAALoad(GrVkGpu* gpu) {
725     if (!this->supportsInputAttachmentUsage()) {
726         return nullptr;
727     }
728     if (fCachedMSAALoadInputDescSet) {
729         return fCachedMSAALoadInputDescSet;
730     }
731 
732     fCachedMSAALoadInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
733     if (!fCachedMSAALoadInputDescSet) {
734         return nullptr;
735     }
736 
737     write_input_desc_set(gpu,
738                          this->framebufferView()->imageView(),
739                          VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
740                          *fCachedMSAALoadInputDescSet->descriptorSet());
741 
742     return fCachedMSAALoadInputDescSet;
743 }
744 
// Returns the owning GrGpu downcast to GrVkGpu. Only valid while the resource
// has not been destroyed; a GrVkImage is always backed by a Vulkan GrGpu, so
// the static_cast is safe.
GrVkGpu* GrVkImage::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}
749 
onGpuMemorySize() const750 size_t GrVkImage::onGpuMemorySize() const
751 {
752     if (supportedUsages() & UsageFlags::kTexture) {
753         return GrSurface::ComputeSize(this->backendFormat(), this->dimensions(), 1, this->mipmapped());
754     } else {
755         return GrAttachment::onGpuMemorySize();
756     }
757 }
758 
759 #if GR_TEST_UTILS
// Test-only helper: marks the gpu's graphics queue family as the current
// owner of this image in the mutable state.
void GrVkImage::setCurrentQueueFamilyToGraphicsQueue(GrVkGpu* gpu) {
    fMutableState->setQueueFamilyIndex(gpu->queueIndex());
}
763 #endif
764 
765