/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
#include "include/core/SkLog.h"
#include "include/gpu/vk/GrVulkanTracker.h"
#endif
#include "src/gpu/vk/GrVkImage.h"

#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImageView.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
constexpr uint32_t VKIMAGE_LIMIT_SIZE = 10000 * 10000; // A VkImage must cover fewer than 10000 * 10000 pixels

sk_sp<GrVkImage> GrVkImage::MakeStencil(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        VkFormat format) {
    VkImageUsageFlags vkUsageFlags =
            VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kStencilAttachment,
                           sampleCnt,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           GrProtected::kNo,
                           GrMemoryless::kNo,
                           SkBudgeted::kYes);
}

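// Creates a multisampled color attachment. When the caller asks for GrMemoryless::kYes the
// image is flagged as a transient attachment so the allocator can back it with lazily
// allocated (e.g. tile-local) memory; otherwise it is made copyable via the transfer usage bits.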
sk_sp<GrVkImage> GrVkImage::MakeMSAA(GrVkGpu* gpu,
                                     SkISize dimensions,
                                     int numSamples,
                                     VkFormat format,
                                     GrProtected isProtected,
                                     GrMemoryless memoryless) {
    SkASSERT(numSamples > 1);

    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    if (memoryless == GrMemoryless::kYes) {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
    } else {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    }
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kColorAttachment,
                           numSamples,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           isProtected,
                           memoryless,
                           SkBudgeted::kYes);
}

sk_sp<GrVkImage> GrVkImage::MakeTexture(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        VkFormat format,
                                        uint32_t mipLevels,
                                        GrRenderable renderable,
                                        int numSamples,
                                        SkBudgeted budgeted,
                                        GrProtected isProtected) {
    UsageFlags usageFlags = UsageFlags::kTexture;
    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                     VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (renderable == GrRenderable::kYes) {
        usageFlags |= UsageFlags::kColorAttachment;
        vkUsageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        // We always make our render targets support being used as input attachments
        vkUsageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    }

    return GrVkImage::Make(gpu,
                           dimensions,
                           usageFlags,
                           numSamples,
                           format,
                           mipLevels,
                           vkUsageFlags,
                           isProtected,
                           GrMemoryless::kNo,
                           budgeted);
}
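
// Usage sketch (illustrative only; assumes a valid GrVkGpu* named "gpu" exists elsewhere):
// creating a budgeted, renderable 256x256 RGBA8 texture image with a single mip level.
//
//   sk_sp<GrVkImage> image = GrVkImage::MakeTexture(
//           gpu, SkISize::Make(256, 256), VK_FORMAT_R8G8B8A8_UNORM, /*mipLevels=*/1,
//           GrRenderable::kYes, /*numSamples=*/1, SkBudgeted::kYes, GrProtected::kNo);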

static bool make_views(GrVkGpu* gpu,
                       const GrVkImageInfo& info,
                       GrAttachment::UsageFlags attachmentUsages,
                       sk_sp<const GrVkImageView>* framebufferView,
                       sk_sp<const GrVkImageView>* textureView) {
    GrVkImageView::Type viewType;
    if (attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) {
        // If we have stencil usage then we shouldn't have any other usages
        SkASSERT(attachmentUsages == GrAttachment::UsageFlags::kStencilAttachment);
        viewType = GrVkImageView::kStencil_Type;
    } else {
        viewType = GrVkImageView::kColor_Type;
    }

    if (SkToBool(attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) ||
        SkToBool(attachmentUsages & GrAttachment::UsageFlags::kColorAttachment)) {
        // Attachments can only have a mip level of 1
        *framebufferView = GrVkImageView::Make(
                gpu, info.fImage, info.fFormat, viewType, 1, info.fYcbcrConversionInfo);
        if (!*framebufferView) {
            return false;
        }
    }

    if (attachmentUsages & GrAttachment::UsageFlags::kTexture) {
        *textureView = GrVkImageView::Make(gpu,
                                           info.fImage,
                                           info.fFormat,
                                           viewType,
                                           info.fLevelCount,
                                           info.fYcbcrConversionInfo);
        if (!*textureView) {
            return false;
        }
    }
    return true;
}

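// Creates a new, device-owned image: builds an ImageDesc from the arguments, allocates the
// VkImage and its memory via InitImageInfo, creates the views the requested usages need, and
// wraps everything in a GrVkImage. On any failure the partially created resources are released
// and nullptr is returned.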
sk_sp<GrVkImage> GrVkImage::Make(GrVkGpu* gpu,
                                 SkISize dimensions,
                                 UsageFlags attachmentUsages,
                                 int sampleCnt,
                                 VkFormat format,
                                 uint32_t mipLevels,
                                 VkImageUsageFlags vkUsageFlags,
                                 GrProtected isProtected,
                                 GrMemoryless memoryless,
                                 SkBudgeted budgeted) {
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = format;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = mipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = vkUsageFlags;
    imageDesc.fIsProtected = isProtected;

    GrVkImageInfo info;
    if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
        return nullptr;
    }

    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
        GrVkImage::DestroyImageInfo(gpu, &info);
        return nullptr;
    }

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState(
            new GrBackendSurfaceMutableStateImpl(info.fImageLayout, info.fCurrentQueueFamily));
    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          budgeted));
}

sk_sp<GrVkImage> GrVkImage::MakeWrapped(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        const GrVkImageInfo& info,
                                        sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                                        UsageFlags attachmentUsages,
                                        GrWrapOwnership ownership,
                                        GrWrapCacheable cacheable,
                                        bool forSecondaryCB) {
    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!forSecondaryCB) {
        if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
            return nullptr;
        }
    }

    GrBackendObjectOwnership backendOwnership = kBorrow_GrWrapOwnership == ownership
            ? GrBackendObjectOwnership::kBorrowed
            : GrBackendObjectOwnership::kOwned;

    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          backendOwnership,
                                          cacheable,
                                          forSecondaryCB));
}

// OH ISSUE: Integrate Destroy and Free
void GrVkImage::DestroyAndFreeImageMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc,
                                          const VkImage& image) {
    VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
    GrVkMemory::FreeImageMemory(gpu, alloc);
}

GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     SkBudgeted budgeted)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo,
                       info.fProtected,
                       info.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag ? GrMemoryless::kYes
                                                                             : GrMemoryless::kNo)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
#ifdef SKIA_OHOS
        , fBudgeted(budgeted)
#endif
        , fIsBorrowed(false) {
    this->init(gpu, false);
    this->setRealAlloc(true); // OH ISSUE: set real alloc flag
    this->registerWithCache(budgeted);
}

GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     GrBackendObjectOwnership ownership,
                     GrWrapCacheable cacheable,
                     bool forSecondaryCB)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo,
                       info.fProtected)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(GrBackendObjectOwnership::kBorrowed == ownership) {
    this->init(gpu, forSecondaryCB);
#ifdef SKIA_DFX_FOR_OHOS
    if (RealAllocConfig::GetRealAllocStatus()) {
        // OH ISSUE: set real alloc flag
        this->setRealAlloc(true);
    }
#endif
    this->registerWithCacheWrapped(cacheable);
}

void GrVkImage::init(GrVkGpu* gpu, bool forSecondaryCB) {
    SkASSERT(fMutableState->getImageLayout() == fInfo.fImageLayout);
    SkASSERT(fMutableState->getQueueFamilyIndex() == fInfo.fCurrentQueueFamily);
#ifdef SK_DEBUG
    if (fInfo.fImageUsageFlags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
        SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT));
    } else {
        if (fInfo.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag) {
            SkASSERT(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     !SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        } else {
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT));
            SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        }
    }
    // We can't transfer from a non-graphics queue to the graphics queue since we can't
    // release the image from the original queue without having that queue. This limits the
    // types of queue indices we can handle.
    if (fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (fInfo.fCurrentQueueFamily != gpu->queueIndex()) {
                SkASSERT(false);
            }
        } else {
            SkASSERT(false);
        }
    }
#endif
    if (forSecondaryCB) {
        fResource = nullptr;
    } else if (fIsBorrowed) {
        fResource = new BorrowedResource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    } else {
        SkASSERT(VK_NULL_HANDLE != fInfo.fAlloc.fMemory);
        fResource = new Resource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    }
}

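// Maps an image layout to the pipeline stage whose work must complete before the image can
// leave that layout, for use as the srcStageMask of a barrier out of the layout.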
VkPipelineStageFlags GrVkImage::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't
    // include color attachment or depth/stencil writes). So we will ignore the
    // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in preinitialized or general layout,
    // and the image is linear.
    // TODO: Add check for linear here so we are not always adding host to general, and we should
    // only be in preinitialized if we are linear
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        // There are no writes that need to be made available
        flags = 0;
    }
    return flags;
}

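// Derives the aspect mask used for barriers from the image format: stencil-only, combined
// depth/stencil, or (for every other format) color.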
VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT: // fallthrough
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

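// Records a VkImageMemoryBarrier that transitions the image to newLayout and, if requested,
// transfers ownership to newQueueFamilyIndex, then updates the tracked layout/queue state.
// Redundant transitions within read-only layouts are skipped.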
void GrVkImage::setImageLayoutAndQueueIndex(const GrVkGpu* gpu,
                                            VkImageLayout newLayout,
                                            VkAccessFlags dstAccessMask,
                                            VkPipelineStageFlags dstStageMask,
                                            bool byRegion,
                                            uint32_t newQueueFamilyIndex) {
// Enable the following block to test new devices to confirm their lazy images stay at 0
// memory use.
#if 0
    if (fInfo.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag) {
        VkDeviceSize size;
        VK_CALL(gpu, GetDeviceMemoryCommitment(gpu->device(), fInfo.fAlloc.fMemory, &size));

        SkDebugf("Lazy Image. This: %p, image: %d, size: %d\n", this, fInfo.fImage, size);
    }
#endif
    SkASSERT(!gpu->isDeviceLost());
    SkASSERT(newLayout == this->currentLayout() ||
             (VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
              VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout));
    VkImageLayout currentLayout = this->currentLayout();
    uint32_t currentQueueIndex = this->currentQueueFamilyIndex();

#ifdef SK_DEBUG
    if (fInfo.fSharingMode == VK_SHARING_MODE_CONCURRENT) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
        } else {
            SkASSERT(newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED);
        }
    } else {
        SkASSERT(fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE);
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED ||
            currentQueueIndex == gpu->queueIndex()) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT ||
                     currentQueueIndex == gpu->queueIndex());
        } else if (newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                   newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == gpu->queueIndex());
        }
    }
#endif

    if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            newQueueFamilyIndex = gpu->queueIndex();
        }
        if (currentQueueIndex == VK_QUEUE_FAMILY_IGNORED) {
            currentQueueIndex = gpu->queueIndex();
        }
    }

    // If the old and new layout are the same and the layout is a read-only layout, there is no
    // need to put in a barrier unless we also need to switch queues.
    if (newLayout == currentLayout && currentQueueIndex == newQueueFamilyIndex &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,      // sType
        nullptr,                                     // pNext
        srcAccessMask,                               // srcAccessMask
        dstAccessMask,                               // dstAccessMask
        currentLayout,                               // oldLayout
        newLayout,                                   // newLayout
        currentQueueIndex,                           // srcQueueFamilyIndex
        newQueueFamilyIndex,                         // dstQueueFamilyIndex
        fInfo.fImage,                                // image
        { aspectFlags, 0, fInfo.fLevelCount, 0, 1 }  // subresourceRange
    };
    SkASSERT(srcAccessMask == imageMemoryBarrier.srcAccessMask);
    gpu->addImageMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                               &imageMemoryBarrier);

    this->updateImageLayout(newLayout);
    this->setQueueFamilyIndex(newQueueFamilyIndex);
}
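
// Usage sketch (illustrative only; assumes a valid GrVkGpu* "gpu" and that this image will next
// be written by a transfer command): transition into TRANSFER_DST before a copy.
//
//   image->setImageLayoutAndQueueIndex(gpu, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
//                                      VK_ACCESS_TRANSFER_WRITE_BIT,
//                                      VK_PIPELINE_STAGE_TRANSFER_BIT,
//                                      /*byRegion=*/false, VK_QUEUE_FAMILY_IGNORED);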

bool GrVkImage::InitImageInfo(GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
    if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
        return false;
    }
    if ((imageDesc.fIsProtected == GrProtected::kYes) && !gpu->vkCaps().supportsProtectedMemory()) {
        return false;
    }

    bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
        return false;
    }

    SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
             VK_SAMPLE_COUNT_1_BIT == vkSamples);

    VkImageCreateFlags createflags = 0;
    if (imageDesc.fIsProtected == GrProtected::kYes || gpu->protectedContext()) {
        createflags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }
    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        nullptr,                                     // pNext
        createflags,                                 // VkImageCreateFlags
        imageDesc.fImageType,                        // VkImageType
        imageDesc.fFormat,                           // VkFormat
        { imageDesc.fWidth, imageDesc.fHeight, 1 },  // VkExtent3D
        imageDesc.fLevels,                           // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageDesc.fImageTiling,                      // VkImageTiling
        imageDesc.fUsageFlags,                       // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyIndexCount
        nullptr,                                     // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    VkImage image = VK_NULL_HANDLE;
    VkResult result;
    // Widen before multiplying so a huge width * height can't wrap around the limit check.
    if (static_cast<uint64_t>(imageDesc.fWidth) * imageDesc.fHeight > VKIMAGE_LIMIT_SIZE) {
        SkDebugf("GrVkImage::InitImageInfo failed, image is too large, width:%u, height:%u\n",
                 imageDesc.fWidth, imageDesc.fHeight);
        return false;
    }
    GR_VK_CALL_RESULT(gpu, result, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
    if (result != VK_SUCCESS) {
        return false;
    }

    GrMemoryless memoryless = imageDesc.fUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT
            ? GrMemoryless::kYes
            : GrMemoryless::kNo;
    GrVkAlloc alloc;
    if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, memoryless, &alloc,
                                             imageDesc.fWidth * imageDesc.fHeight * 4) ||
        (memoryless == GrMemoryless::kYes &&
         !SkToBool(alloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag))) {
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageDesc.fImageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = imageDesc.fFormat;
    info->fImageUsageFlags = imageDesc.fUsageFlags;
    info->fSampleCount = imageDesc.fSamples;
    info->fLevelCount = imageDesc.fLevels;
    info->fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
    info->fProtected =
            (createflags & VK_IMAGE_CREATE_PROTECTED_BIT) ? GrProtected::kYes : GrProtected::kNo;
    info->fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    return true;
}

void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    DestroyAndFreeImageMemory(gpu, info->fAlloc, info->fImage);
}

GrVkImage::~GrVkImage() {
    // should have been released first
    SkASSERT(!fResource);
    SkASSERT(!fFramebufferView);
    SkASSERT(!fTextureView);
}

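// Transitions the image for presentation: if the image did not originate on an external or
// foreign queue and the swapchain extension is supported, the layout becomes PRESENT_SRC_KHR,
// and ownership is returned to the queue family the image started on.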
void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
    VkImageLayout layout = this->currentLayout();
    if (fInitialQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInitialQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (gpu->vkCaps().supportsSwapchain()) {
            layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
        }
    }
    this->setImageLayoutAndQueueIndex(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::prepareForExternal(GrVkGpu* gpu) {
    this->setImageLayoutAndQueueIndex(gpu, this->currentLayout(), 0,
                                      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::releaseImage() {
    if (fResource) {
        fResource->unref();
        fResource = nullptr;
    }
    fFramebufferView.reset();
    fTextureView.reset();
    fCachedBlendingInputDescSet.reset();
    fCachedMSAALoadInputDescSet.reset();
}

void GrVkImage::onRelease() {
    this->releaseImage();
    GrAttachment::onRelease();
}

void GrVkImage::onAbandon() {
    this->releaseImage();
    GrAttachment::onAbandon();
}

void GrVkImage::setResourceRelease(sk_sp<GrRefCntedCallback> releaseHelper) {
    SkASSERT(fResource);
    // Forward the release proc on to GrVkImage::Resource
    fResource->setRelease(std::move(releaseHelper));
}

#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
void GrVkImage::dumpVkImageInfo(std::stringstream& dump) const {
    auto vkGpu = getVkGpu();
    if (vkGpu == nullptr) {
        SK_LOGE("GrVkImage::dumpVkImageInfo vkGpu nullptr");
        return;
    }
    VkMemoryRequirements memRequirements;
    VK_CALL(vkGpu, GetImageMemoryRequirements(vkGpu->device(), image(), &memRequirements));
    VkDeviceSize imageSize = memRequirements.size;

    if (fResource == nullptr) {
        SK_LOGE("GrVkImage::dumpVkImageInfo fResource nullptr");
        return;
    }
    fResource->dumpVkImageResource(dump);
    dump << "Borrowed: " << isBorrowed() << ", " << "ImageSize: " << imageSize << ", ";
    if (fResource->fCaller == nullptr) {
        SK_LOGE("GrVkImage::dumpVkImageInfo fCaller nullptr");
    } else {
        fResource->fCaller->Dump(dump);
    }
    dump << "\n";
}

void GrVkImage::Resource::dumpVkImageResource(std::stringstream& dump) {
    // This is a Resource member function, so the handle and allocation are direct members.
    dump << "VkImage: " << fImage << ", "
         << "Memory: " << fAlloc.fMemory << ", "
         << "Offset: " << fAlloc.fOffset << ", "
         << "Size: " << fAlloc.fSize << ", ";
}

void GrVkImage::Resource::RecordFreeVkImage(bool isBorrowed) const {
    static const bool isInRenderService = IsRenderService();
    if (isInRenderService) {
        ParallelDebug::VkImageDestroyRecord::Record(fImage, isBorrowed, fCaller, fAlloc.fMemory);
    }
}

void GrVkImage::updateNodeId(uint64_t nodeId) {
    if (fResource && fResource->fCaller) {
        fResource->fCaller->nodeId_ = nodeId;
    }
}
#endif

GrVkImage::Resource::~Resource() {
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
    ParallelDebug::DestroyVkImageInvokeRecord(fCaller);
#endif
}

void GrVkImage::Resource::freeGPUData() const {
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
    RecordFreeVkImage(true);
#endif
    this->invokeReleaseProc();

    // OH ISSUE: async memory reclaimer
    auto reclaimer = fGpu->memoryReclaimer();
    if (reclaimer && reclaimer->addMemoryToWaitQueue(fGpu, fAlloc, fImage)) {
        return;
    }

    DestroyAndFreeImageMemory(fGpu, fAlloc, fImage);
}

void GrVkImage::BorrowedResource::freeGPUData() const {
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
    RecordFreeVkImage(false);
#endif
    this->invokeReleaseProc();
}

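// Writes a single input-attachment descriptor (at binding kInputBinding) pointing at the given
// image view and layout into descSet.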
static void write_input_desc_set(GrVkGpu* gpu,
                                 VkImageView view,
                                 VkImageLayout layout,
                                 VkDescriptorSet descSet) {
    VkDescriptorImageInfo imageInfo;
    memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
    imageInfo.sampler = VK_NULL_HANDLE;
    imageInfo.imageView = view;
    imageInfo.imageLayout = layout;

    VkWriteDescriptorSet writeInfo;
    memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
    writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writeInfo.pNext = nullptr;
    writeInfo.dstSet = descSet;
    writeInfo.dstBinding = GrVkUniformHandler::kInputBinding;
    writeInfo.dstArrayElement = 0;
    writeInfo.descriptorCount = 1;
    writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    writeInfo.pImageInfo = &imageInfo;
    writeInfo.pBufferInfo = nullptr;
    writeInfo.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
}

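// Lazily creates and caches the input-attachment descriptor set used when this image is read
// back as an input attachment during blending. The set is built once against the framebuffer
// view in VK_IMAGE_LAYOUT_GENERAL and reused until releaseImage() drops the cache; the
// MSAA-load variant below works the same way but expects SHADER_READ_ONLY_OPTIMAL.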
gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForBlending(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedBlendingInputDescSet) {
        return fCachedBlendingInputDescSet;
    }

    fCachedBlendingInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedBlendingInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_GENERAL,
                         *fCachedBlendingInputDescSet->descriptorSet());

    return fCachedBlendingInputDescSet;
}

gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForMSAALoad(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedMSAALoadInputDescSet) {
        return fCachedMSAALoadInputDescSet;
    }

    fCachedMSAALoadInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedMSAALoadInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                         *fCachedMSAALoadInputDescSet->descriptorSet());

    return fCachedMSAALoadInputDescSet;
}

GrVkGpu* GrVkImage::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}

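// Images with texture usage are sized like surfaces (one color sample, full mip chain); bare
// attachments defer to GrAttachment's accounting, which factors in the sample count.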
size_t GrVkImage::onGpuMemorySize() const {
    if (supportedUsages() & UsageFlags::kTexture) {
        return GrSurface::ComputeSize(this->backendFormat(), this->dimensions(), 1,
                                      this->mipmapped());
    } else {
        return GrAttachment::onGpuMemorySize();
    }
}

#if GR_TEST_UTILS
void GrVkImage::setCurrentQueueFamilyToGraphicsQueue(GrVkGpu* gpu) {
    fMutableState->setQueueFamilyIndex(gpu->queueIndex());
}
#endif