/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkImage.h"

#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImageView.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
constexpr uint32_t VKIMAGE_LIMIT_SIZE = 10000 * 10000; // a VkImage's pixel count must stay below 10000 * 10000
constexpr uint32_t PIXEL_SIZE = 4; // 4 bytes per pixel (RGBA)

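// Creates a stencil attachment. Stencil attachments always use a single mip level, support
// transfer-dst for clears, and are never protected or memoryless.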
sk_sp<GrVkImage> GrVkImage::MakeStencil(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        VkFormat format) {
    VkImageUsageFlags vkUsageFlags =
            VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kStencilAttachment,
                           sampleCnt,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           GrProtected::kNo,
                           GrMemoryless::kNo,
                           SkBudgeted::kYes);
}

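// Creates a multisampled color attachment. A memoryless attachment is marked transient so the
// driver may back it with lazily allocated memory; otherwise the image also supports transfers
// in both directions.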
sk_sp<GrVkImage> GrVkImage::MakeMSAA(GrVkGpu* gpu,
                                     SkISize dimensions,
                                     int numSamples,
                                     VkFormat format,
                                     GrProtected isProtected,
                                     GrMemoryless memoryless) {
    SkASSERT(numSamples > 1);

    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    if (memoryless == GrMemoryless::kYes) {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
    } else {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    }
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kColorAttachment,
                           numSamples,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           isProtected,
                           memoryless,
                           SkBudgeted::kYes);
}

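// Creates a sampled texture that can also be used as a transfer source and destination.
// Renderable textures additionally get color attachment and input attachment usage.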
sk_sp<GrVkImage> GrVkImage::MakeTexture(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        VkFormat format,
                                        uint32_t mipLevels,
                                        GrRenderable renderable,
                                        int numSamples,
                                        SkBudgeted budgeted,
                                        GrProtected isProtected) {
    UsageFlags usageFlags = UsageFlags::kTexture;
    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                     VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (renderable == GrRenderable::kYes) {
        usageFlags |= UsageFlags::kColorAttachment;
        vkUsageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        // We always make our render targets support being used as input attachments
        vkUsageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    }

    return GrVkImage::Make(gpu,
                           dimensions,
                           usageFlags,
                           numSamples,
                           format,
                           mipLevels,
                           vkUsageFlags,
                           isProtected,
                           GrMemoryless::kNo,
                           budgeted);
}

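// Creates the image views required by the requested usages: a single-mip framebuffer view for
// color/stencil attachment use and a full-mip view for texture sampling. Returns false if
// creating any required view fails.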
static bool make_views(GrVkGpu* gpu,
                       const GrVkImageInfo& info,
                       GrAttachment::UsageFlags attachmentUsages,
                       sk_sp<const GrVkImageView>* framebufferView,
                       sk_sp<const GrVkImageView>* textureView) {
    GrVkImageView::Type viewType;
    if (attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) {
        // If we have stencil usage then we shouldn't have any other usages
        SkASSERT(attachmentUsages == GrAttachment::UsageFlags::kStencilAttachment);
        viewType = GrVkImageView::kStencil_Type;
    } else {
        viewType = GrVkImageView::kColor_Type;
    }

    if (SkToBool(attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) ||
        SkToBool(attachmentUsages & GrAttachment::UsageFlags::kColorAttachment)) {
        // Attachments can only have a mip level of 1
        *framebufferView = GrVkImageView::Make(
                gpu, info.fImage, info.fFormat, viewType, 1, info.fYcbcrConversionInfo);
        if (!*framebufferView) {
            return false;
        }
    }

    if (attachmentUsages & GrAttachment::UsageFlags::kTexture) {
        *textureView = GrVkImageView::Make(gpu,
                                           info.fImage,
                                           info.fFormat,
                                           viewType,
                                           info.fLevelCount,
                                           info.fYcbcrConversionInfo);
        if (!*textureView) {
            return false;
        }
    }
    return true;
}

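// Creates a new VkImage with bound memory and the views needed for attachmentUsages, and wraps
// everything in a GrVkImage that owns those resources.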
sk_sp<GrVkImage> GrVkImage::Make(GrVkGpu* gpu,
                                 SkISize dimensions,
                                 UsageFlags attachmentUsages,
                                 int sampleCnt,
                                 VkFormat format,
                                 uint32_t mipLevels,
                                 VkImageUsageFlags vkUsageFlags,
                                 GrProtected isProtected,
                                 GrMemoryless memoryless,
                                 SkBudgeted budgeted) {
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = format;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = mipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = vkUsageFlags;
    imageDesc.fIsProtected = isProtected;

    GrVkImageInfo info;
    if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
        return nullptr;
    }

    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
        GrVkImage::DestroyImageInfo(gpu, &info);
        return nullptr;
    }

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState(
            new GrBackendSurfaceMutableStateImpl(info.fImageLayout, info.fCurrentQueueFamily));
    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          budgeted));
}

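// Wraps an externally created VkImage. No views are created when the image is only used to
// wrap a secondary command buffer target.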
sk_sp<GrVkImage> GrVkImage::MakeWrapped(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        const GrVkImageInfo& info,
                                        sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                                        UsageFlags attachmentUsages,
                                        GrWrapOwnership ownership,
                                        GrWrapCacheable cacheable,
                                        bool forSecondaryCB) {
    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!forSecondaryCB) {
        if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
            return nullptr;
        }
    }

    GrBackendObjectOwnership backendOwnership = kBorrow_GrWrapOwnership == ownership
                                                        ? GrBackendObjectOwnership::kBorrowed
                                                        : GrBackendObjectOwnership::kOwned;

    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          backendOwnership,
                                          cacheable,
                                          forSecondaryCB));
}

// OH ISSUE: Integrate Destroy and Free
void GrVkImage::DestroyAndFreeImageMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc,
                                          const VkImage& image) {
    VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
    GrVkMemory::FreeImageMemory(gpu, alloc);
}

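// Constructor for images created and owned by Ganesh.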
GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     SkBudgeted budgeted)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo,
                       info.fProtected,
                       info.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag ? GrMemoryless::kYes
                                                                             : GrMemoryless::kNo)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(false) {
    this->init(gpu, false);
    this->setRealAlloc(true); // OH ISSUE: set real alloc flag
    this->setRealAllocSize(dimensions.height() * dimensions.width() * PIXEL_SIZE); // OH ISSUE: set real alloc size
    this->registerWithCache(budgeted);
}

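// Constructor for wrapped (borrowed or adopted) images.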
GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     GrBackendObjectOwnership ownership,
                     GrWrapCacheable cacheable,
                     bool forSecondaryCB)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo,
                       info.fProtected)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(GrBackendObjectOwnership::kBorrowed == ownership) {
    this->init(gpu, forSecondaryCB);
#ifdef SKIA_DFX_FOR_OHOS
    if (RealAllocConfig::GetRealAllocStatus()) {
        // OH ISSUE: set real alloc flag
        this->setRealAlloc(true);
        // OH ISSUE: set real alloc size
        this->setRealAllocSize(dimensions.height() * dimensions.width() * PIXEL_SIZE);
    }
#endif
    this->registerWithCacheWrapped(cacheable);
}

void GrVkImage::init(GrVkGpu* gpu, bool forSecondaryCB) {
    SkASSERT(fMutableState->getImageLayout() == fInfo.fImageLayout);
    SkASSERT(fMutableState->getQueueFamilyIndex() == fInfo.fCurrentQueueFamily);
#ifdef SK_DEBUG
    if (fInfo.fImageUsageFlags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
        SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT));
    } else {
        if (fInfo.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag) {
            SkASSERT(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     !SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        } else {
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT));
            SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        }
    }
    // We can't transfer from the non-graphics queue to the graphics queue since we can't
    // release the image from the original queue without having that queue. This limits us in
    // terms of the types of queue indices we can handle.
    if (fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (fInfo.fCurrentQueueFamily != gpu->queueIndex()) {
                SkASSERT(false);
            }
        } else {
            SkASSERT(false);
        }
    }
#endif
    if (forSecondaryCB) {
        fResource = nullptr;
    } else if (fIsBorrowed) {
        fResource = new BorrowedResource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    } else {
        SkASSERT(VK_NULL_HANDLE != fInfo.fAlloc.fMemory);
        fResource = new Resource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    }
}

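// Maps an image layout to the pipeline stage whose work must complete before it is safe to
// transition out of that layout.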
VkPipelineStageFlags GrVkImage::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

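// Maps an image layout to the write accesses that must be made available before transitioning
// out of that layout.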
VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't
    // include color attachment or depth/stencil writes). So we will ignore the
    // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in preinitialized or general layout,
    // and the image is linear.
    // TODO: Add a check for linear here so we are not always adding host to general, and we
    // should only be in preinitialized if we are linear.
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        // There are no writes that need to be made available
        flags = 0;
    }
    return flags;
}

VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT:  // fallthrough
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

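// Records an image memory barrier transitioning this image to newLayout and, if needed,
// transferring ownership to newQueueFamilyIndex. Matching read-only-to-read-only transitions
// on the same queue are skipped as no-ops.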
void GrVkImage::setImageLayoutAndQueueIndex(const GrVkGpu* gpu,
                                            VkImageLayout newLayout,
                                            VkAccessFlags dstAccessMask,
                                            VkPipelineStageFlags dstStageMask,
                                            bool byRegion,
                                            uint32_t newQueueFamilyIndex) {
// Enable the following block to test new devices to confirm their lazy images stay at 0 memory use.
#if 0
    if (fInfo.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag) {
        VkDeviceSize size;
        VK_CALL(gpu, GetDeviceMemoryCommitment(gpu->device(), fInfo.fAlloc.fMemory, &size));

        SkDebugf("Lazy Image. This: %p, image: %d, size: %d\n", this, fInfo.fImage, size);
    }
#endif
    SkASSERT(!gpu->isDeviceLost());
    SkASSERT(newLayout == this->currentLayout() ||
             (VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
              VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout));
    VkImageLayout currentLayout = this->currentLayout();
    uint32_t currentQueueIndex = this->currentQueueFamilyIndex();

#ifdef SK_DEBUG
    if (fInfo.fSharingMode == VK_SHARING_MODE_CONCURRENT) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
        } else {
            SkASSERT(newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED);
        }
    } else {
        SkASSERT(fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE);
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED ||
            currentQueueIndex == gpu->queueIndex()) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT ||
                     currentQueueIndex == gpu->queueIndex());
        } else if (newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                   newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == gpu->queueIndex());
        }
    }
#endif

    if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            newQueueFamilyIndex = gpu->queueIndex();
        }
        if (currentQueueIndex == VK_QUEUE_FAMILY_IGNORED) {
            currentQueueIndex = gpu->queueIndex();
        }
    }

    // If the old and new layout are the same and the layout is a read-only layout, there is no
    // need to put in a barrier unless we also need to switch queues.
    if (newLayout == currentLayout && currentQueueIndex == newQueueFamilyIndex &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,      // sType
        nullptr,                                     // pNext
        srcAccessMask,                               // srcAccessMask
        dstAccessMask,                               // dstAccessMask
        currentLayout,                               // oldLayout
        newLayout,                                   // newLayout
        currentQueueIndex,                           // srcQueueFamilyIndex
        newQueueFamilyIndex,                         // dstQueueFamilyIndex
        fInfo.fImage,                                // image
        { aspectFlags, 0, fInfo.fLevelCount, 0, 1 }  // subresourceRange
    };
    SkASSERT(srcAccessMask == imageMemoryBarrier.srcAccessMask);
    gpu->addImageMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                               &imageMemoryBarrier);

    this->updateImageLayout(newLayout);
    this->setQueueFamilyIndex(newQueueFamilyIndex);
}

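// Creates the VkImage described by imageDesc, allocates and binds its memory, and fills out
// info. On failure the partially created image is destroyed and false is returned.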
bool GrVkImage::InitImageInfo(GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
    if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
        return false;
    }
    if ((imageDesc.fIsProtected == GrProtected::kYes) && !gpu->vkCaps().supportsProtectedMemory()) {
        return false;
    }

    bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
        return false;
    }

    SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
             VK_SAMPLE_COUNT_1_BIT == vkSamples);

    VkImageCreateFlags createflags = 0;
    if (imageDesc.fIsProtected == GrProtected::kYes || gpu->protectedContext()) {
        createflags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }
    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        nullptr,                                     // pNext
        createflags,                                 // VkImageCreateFlags
        imageDesc.fImageType,                        // VkImageType
        imageDesc.fFormat,                           // VkFormat
        { imageDesc.fWidth, imageDesc.fHeight, 1 },  // VkExtent3D
        imageDesc.fLevels,                           // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageDesc.fImageTiling,                      // VkImageTiling
        imageDesc.fUsageFlags,                       // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyCount
        nullptr,                                     // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    VkImage image = VK_NULL_HANDLE;
    VkResult result;
    // Use a 64-bit product so very large dimensions can't overflow the limit check.
    if (static_cast<uint64_t>(imageDesc.fWidth) * imageDesc.fHeight > VKIMAGE_LIMIT_SIZE) {
        SkDebugf("GrVkImage::InitImageInfo failed, image is too large, width:%u, height:%u\n",
                 imageDesc.fWidth, imageDesc.fHeight);
        return false;
    }
    GR_VK_CALL_RESULT(gpu, result, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
    if (result != VK_SUCCESS) {
        return false;
    }

    GrMemoryless memoryless = imageDesc.fUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT
                                      ? GrMemoryless::kYes
                                      : GrMemoryless::kNo;
    GrVkAlloc alloc;
    if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, memoryless, &alloc,
                                             imageDesc.fWidth * imageDesc.fHeight * PIXEL_SIZE) ||
        (memoryless == GrMemoryless::kYes &&
         !SkToBool(alloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag))) {
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageDesc.fImageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = imageDesc.fFormat;
    info->fImageUsageFlags = imageDesc.fUsageFlags;
    info->fSampleCount = imageDesc.fSamples;
    info->fLevelCount = imageDesc.fLevels;
    info->fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
    info->fProtected =
            (createflags & VK_IMAGE_CREATE_PROTECTED_BIT) ? GrProtected::kYes : GrProtected::kNo;
    info->fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    return true;
}

void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    DestroyAndFreeImageMemory(gpu, info->fAlloc, info->fImage);
}

GrVkImage::~GrVkImage() {
    // should have been released first
    SkASSERT(!fResource);
    SkASSERT(!fFramebufferView);
    SkASSERT(!fTextureView);
}

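// Returns the image to the queue family it was wrapped with for presentation, moving it to
// VK_IMAGE_LAYOUT_PRESENT_SRC_KHR when the device supports swapchains and the image is not
// externally owned.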
void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
    VkImageLayout layout = this->currentLayout();
    if (fInitialQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInitialQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (gpu->vkCaps().supportsSwapchain()) {
            layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
        }
    }
    this->setImageLayoutAndQueueIndex(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::prepareForExternal(GrVkGpu* gpu) {
    this->setImageLayoutAndQueueIndex(gpu, this->currentLayout(), 0,
                                      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::releaseImage() {
    if (fResource) {
        fResource->unref();
        fResource = nullptr;
    }
    fFramebufferView.reset();
    fTextureView.reset();
    fCachedBlendingInputDescSet.reset();
    fCachedMSAALoadInputDescSet.reset();
}

void GrVkImage::onRelease() {
    this->releaseImage();
    GrAttachment::onRelease();
}

void GrVkImage::onAbandon() {
    this->releaseImage();
    GrAttachment::onAbandon();
}

void GrVkImage::setResourceRelease(sk_sp<GrRefCntedCallback> releaseHelper) {
    SkASSERT(fResource);
    // Forward the release proc on to GrVkImage::Resource
    fResource->setRelease(std::move(releaseHelper));
}

void GrVkImage::Resource::freeGPUData() const {
    this->invokeReleaseProc();

    // OH ISSUE: async memory reclaimer
    auto reclaimer = fGpu->memoryReclaimer();
    if (reclaimer && reclaimer->addMemoryToWaitQueue(fGpu, fAlloc, fImage)) {
        return;
    }

    DestroyAndFreeImageMemory(fGpu, fAlloc, fImage);
}

void GrVkImage::BorrowedResource::freeGPUData() const {
    this->invokeReleaseProc();
}

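// Points the input attachment binding of descSet at the given image view/layout with a single
// UpdateDescriptorSets call.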
static void write_input_desc_set(GrVkGpu* gpu,
                                 VkImageView view,
                                 VkImageLayout layout,
                                 VkDescriptorSet descSet) {
    VkDescriptorImageInfo imageInfo;
    memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
    imageInfo.sampler = VK_NULL_HANDLE;
    imageInfo.imageView = view;
    imageInfo.imageLayout = layout;

    VkWriteDescriptorSet writeInfo;
    memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
    writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writeInfo.pNext = nullptr;
    writeInfo.dstSet = descSet;
    writeInfo.dstBinding = GrVkUniformHandler::kInputBinding;
    writeInfo.dstArrayElement = 0;
    writeInfo.descriptorCount = 1;
    writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    writeInfo.pImageInfo = &imageInfo;
    writeInfo.pBufferInfo = nullptr;
    writeInfo.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
}

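// Lazily creates and caches the descriptor set that binds this image as an input attachment in
// GENERAL layout, used to read the destination when blending in a shader.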
gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForBlending(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedBlendingInputDescSet) {
        return fCachedBlendingInputDescSet;
    }

    fCachedBlendingInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedBlendingInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_GENERAL,
                         *fCachedBlendingInputDescSet->descriptorSet());

    return fCachedBlendingInputDescSet;
}

gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForMSAALoad(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedMSAALoadInputDescSet) {
        return fCachedMSAALoadInputDescSet;
    }

    fCachedMSAALoadInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedMSAALoadInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                         *fCachedMSAALoadInputDescSet->descriptorSet());

    return fCachedMSAALoadInputDescSet;
}

GrVkGpu* GrVkImage::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}

// Textures report the full surface size (mips included); non-texture attachments defer to
// GrAttachment's computation.
size_t GrVkImage::onGpuMemorySize() const {
    if (supportedUsages() & UsageFlags::kTexture) {
        return GrSurface::ComputeSize(this->backendFormat(), this->dimensions(), 1,
                                      this->mipmapped());
    } else {
        return GrAttachment::onGpuMemorySize();
    }
}

#if GR_TEST_UTILS
void GrVkImage::setCurrentQueueFamilyToGraphicsQueue(GrVkGpu* gpu) {
    fMutableState->setQueueFamilyIndex(gpu->queueIndex());
}
#endif