/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkImage.h"

#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImageView.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

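// Illustrative use of the factory below (a sketch, not an actual Skia call
// site; the dimensions and format are assumptions):
//
//   sk_sp<GrVkImage> stencil = GrVkImage::MakeStencil(
//           gpu, /*dimensions=*/{256, 256}, /*sampleCnt=*/1, VK_FORMAT_S8_UINT);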
sk_sp<GrVkImage> GrVkImage::MakeStencil(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        VkFormat format) {
    VkImageUsageFlags vkUsageFlags =
            VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kStencilAttachment,
                           sampleCnt,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           GrProtected::kNo,
                           GrMemoryless::kNo,
                           SkBudgeted::kYes);
}

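// Illustrative use of MakeMSAA() (a sketch; the sample count and format are
// assumptions). Passing GrMemoryless::kYes marks the image as a transient
// attachment so the driver may back it with lazily allocated memory:
//
//   sk_sp<GrVkImage> msaa = GrVkImage::MakeMSAA(
//           gpu, /*dimensions=*/{256, 256}, /*numSamples=*/4,
//           VK_FORMAT_R8G8B8A8_UNORM, GrProtected::kNo, GrMemoryless::kYes);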
sk_sp<GrVkImage> GrVkImage::MakeMSAA(GrVkGpu* gpu,
                                     SkISize dimensions,
                                     int numSamples,
                                     VkFormat format,
                                     GrProtected isProtected,
                                     GrMemoryless memoryless) {
    SkASSERT(numSamples > 1);

    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    if (memoryless == GrMemoryless::kYes) {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
    } else {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    }
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kColorAttachment,
                           numSamples,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           isProtected,
                           memoryless,
                           SkBudgeted::kYes);
}

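// Illustrative use of MakeTexture() (a sketch; the values are assumptions):
//
//   sk_sp<GrVkImage> tex = GrVkImage::MakeTexture(
//           gpu, /*dimensions=*/{256, 256}, VK_FORMAT_R8G8B8A8_UNORM,
//           /*mipLevels=*/1, GrRenderable::kYes, /*numSamples=*/1,
//           SkBudgeted::kYes, GrProtected::kNo);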
sk_sp<GrVkImage> GrVkImage::MakeTexture(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        VkFormat format,
                                        uint32_t mipLevels,
                                        GrRenderable renderable,
                                        int numSamples,
                                        SkBudgeted budgeted,
                                        GrProtected isProtected) {
    UsageFlags usageFlags = UsageFlags::kTexture;
    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                     VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (renderable == GrRenderable::kYes) {
        usageFlags |= UsageFlags::kColorAttachment;
        vkUsageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        // We always make our render targets usable as input attachments.
        vkUsageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    }

    return GrVkImage::Make(gpu,
                           dimensions,
                           usageFlags,
                           numSamples,
                           format,
                           mipLevels,
                           vkUsageFlags,
                           isProtected,
                           GrMemoryless::kNo,
                           budgeted);
}

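// Creates the image views a GrVkImage needs for its usages: a single-mip
// framebuffer view when the image is used as a color or stencil attachment,
// and a view covering all mip levels when the image is sampled as a texture.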
static bool make_views(GrVkGpu* gpu,
                       const GrVkImageInfo& info,
                       GrAttachment::UsageFlags attachmentUsages,
                       sk_sp<const GrVkImageView>* framebufferView,
                       sk_sp<const GrVkImageView>* textureView) {
    GrVkImageView::Type viewType;
    if (attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) {
        // If we have stencil usage, it must be the only usage.
        SkASSERT(attachmentUsages == GrAttachment::UsageFlags::kStencilAttachment);
        viewType = GrVkImageView::kStencil_Type;
    } else {
        viewType = GrVkImageView::kColor_Type;
    }

    if (SkToBool(attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) ||
        SkToBool(attachmentUsages & GrAttachment::UsageFlags::kColorAttachment)) {
        // Attachment views only ever cover a single mip level.
        *framebufferView = GrVkImageView::Make(
                gpu, info.fImage, info.fFormat, viewType, 1, info.fYcbcrConversionInfo);
        if (!*framebufferView) {
            return false;
        }
    }

    if (attachmentUsages & GrAttachment::UsageFlags::kTexture) {
        *textureView = GrVkImageView::Make(gpu,
                                           info.fImage,
                                           info.fFormat,
                                           viewType,
                                           info.fLevelCount,
                                           info.fYcbcrConversionInfo);
        if (!*textureView) {
            return false;
        }
    }
    return true;
}

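// Make() is the common path for the factories above: it fills out an
// ImageDesc, creates and binds the VkImage via InitImageInfo(), builds the
// required views, and wraps everything in a GrVkImage that owns the resources.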
sk_sp<GrVkImage> GrVkImage::Make(GrVkGpu* gpu,
                                 SkISize dimensions,
                                 UsageFlags attachmentUsages,
                                 int sampleCnt,
                                 VkFormat format,
                                 uint32_t mipLevels,
                                 VkImageUsageFlags vkUsageFlags,
                                 GrProtected isProtected,
                                 GrMemoryless memoryless,
                                 SkBudgeted budgeted) {
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = format;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = mipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = vkUsageFlags;
    imageDesc.fIsProtected = isProtected;

    GrVkImageInfo info;
    if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
        return nullptr;
    }

    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
        GrVkImage::DestroyImageInfo(gpu, &info);
        return nullptr;
    }

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState(
            new GrBackendSurfaceMutableStateImpl(info.fImageLayout, info.fCurrentQueueFamily));
    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          budgeted));
}

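// Wraps an externally created VkImage. Ownership of the underlying image
// follows GrWrapOwnership: borrowed images are not destroyed when the
// GrVkImage is freed. Images wrapped only for use with an external secondary
// command buffer skip view creation entirely.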
sk_sp<GrVkImage> GrVkImage::MakeWrapped(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        const GrVkImageInfo& info,
                                        sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                                        UsageFlags attachmentUsages,
                                        GrWrapOwnership ownership,
                                        GrWrapCacheable cacheable,
                                        bool forSecondaryCB) {
    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!forSecondaryCB) {
        if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
            return nullptr;
        }
    }

    GrBackendObjectOwnership backendOwnership = kBorrow_GrWrapOwnership == ownership
            ? GrBackendObjectOwnership::kBorrowed
            : GrBackendObjectOwnership::kOwned;

    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          backendOwnership,
                                          cacheable,
                                          forSecondaryCB));
}

GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     SkBudgeted budgeted)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo,
                       info.fProtected,
                       info.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag
                               ? GrMemoryless::kYes
                               : GrMemoryless::kNo)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(false) {
    this->init(gpu, false);
    this->registerWithCache(budgeted);
}

GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     GrBackendObjectOwnership ownership,
                     GrWrapCacheable cacheable,
                     bool forSecondaryCB)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo,
                       info.fProtected)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(GrBackendObjectOwnership::kBorrowed == ownership) {
    this->init(gpu, forSecondaryCB);
    this->registerWithCacheWrapped(cacheable);
}

void GrVkImage::init(GrVkGpu* gpu, bool forSecondaryCB) {
    SkASSERT(fMutableState->getImageLayout() == fInfo.fImageLayout);
    SkASSERT(fMutableState->getQueueFamilyIndex() == fInfo.fCurrentQueueFamily);
#ifdef SK_DEBUG
    if (fInfo.fImageUsageFlags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
        SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT));
    } else {
        if (fInfo.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag) {
            SkASSERT(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     !SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        } else {
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT));
            SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        }
    }
    // We can't transfer an image from a non-graphics queue to the graphics queue, since we can't
    // release the image from the original queue without access to that queue. This limits the
    // kinds of queue family indices we can handle.
    if (fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (fInfo.fCurrentQueueFamily != gpu->queueIndex()) {
                SkASSERT(false);
            }
        } else {
            SkASSERT(false);
        }
    }
#endif
    if (forSecondaryCB) {
        fResource = nullptr;
    } else if (fIsBorrowed) {
        fResource = new BorrowedResource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    } else {
        SkASSERT(VK_NULL_HANDLE != fInfo.fAlloc.fMemory);
        fResource = new Resource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    }
}

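// Maps an image's current layout to the pipeline stages that could have last
// written to it; the result is used as the srcStageMask of a barrier when
// transitioning out of that layout.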
VkPipelineStageFlags GrVkImage::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't include
    // color attachment or depth/stencil writes). So we ignore VK_ACCESS_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in preinitialized or general layout,
    // and the image is linear.
    // TODO: Add a check for linear here so we are not always adding host to general, and we
    // should only be in preinitialized if we are linear.
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        // There are no writes that need to be made available.
        flags = 0;
    }
    return flags;
}

static VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT:  // fallthrough
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

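// Illustrative call (a sketch; the exact masks depend on the operation being
// recorded) that transitions an image for use as a transfer destination:
//
//   image->setImageLayoutAndQueueIndex(gpu,
//                                      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
//                                      VK_ACCESS_TRANSFER_WRITE_BIT,
//                                      VK_PIPELINE_STAGE_TRANSFER_BIT,
//                                      /*byRegion=*/false,
//                                      VK_QUEUE_FAMILY_IGNORED);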
void GrVkImage::setImageLayoutAndQueueIndex(const GrVkGpu* gpu,
                                            VkImageLayout newLayout,
                                            VkAccessFlags dstAccessMask,
                                            VkPipelineStageFlags dstStageMask,
                                            bool byRegion,
                                            uint32_t newQueueFamilyIndex) {
// Enable the following block to test new devices and confirm their lazy images stay at zero
// memory use.
#if 0
    if (fInfo.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag) {
        VkDeviceSize size;
        VK_CALL(gpu, GetDeviceMemoryCommitment(gpu->device(), fInfo.fAlloc.fMemory, &size));

        SkDebugf("Lazy Image. This: %p, image: %llu, size: %llu\n", this,
                 (unsigned long long)fInfo.fImage, (unsigned long long)size);
    }
#endif
    SkASSERT(!gpu->isDeviceLost());
    SkASSERT(newLayout == this->currentLayout() ||
             (VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
              VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout));
    VkImageLayout currentLayout = this->currentLayout();
    uint32_t currentQueueIndex = this->currentQueueFamilyIndex();

#ifdef SK_DEBUG
    if (fInfo.fSharingMode == VK_SHARING_MODE_CONCURRENT) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
        } else {
            SkASSERT(newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED);
        }
    } else {
        SkASSERT(fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE);
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED ||
            currentQueueIndex == gpu->queueIndex()) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT ||
                     currentQueueIndex == gpu->queueIndex());
        } else if (newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                   newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == gpu->queueIndex());
        }
    }
#endif

    if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            newQueueFamilyIndex = gpu->queueIndex();
        }
        if (currentQueueIndex == VK_QUEUE_FAMILY_IGNORED) {
            currentQueueIndex = gpu->queueIndex();
        }
    }

    // If the old and new layouts are the same and the layout is read-only, there is no need for a
    // barrier unless we also need to switch queues.
    if (newLayout == currentLayout && currentQueueIndex == newQueueFamilyIndex &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,      // sType
        nullptr,                                     // pNext
        srcAccessMask,                               // srcAccessMask
        dstAccessMask,                               // dstAccessMask
        currentLayout,                               // oldLayout
        newLayout,                                   // newLayout
        currentQueueIndex,                           // srcQueueFamilyIndex
        newQueueFamilyIndex,                         // dstQueueFamilyIndex
        fInfo.fImage,                                // image
        { aspectFlags, 0, fInfo.fLevelCount, 0, 1 }  // subresourceRange
    };
    SkASSERT(srcAccessMask == imageMemoryBarrier.srcAccessMask);
    gpu->addImageMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                               &imageMemoryBarrier);

    this->updateImageLayout(newLayout);
    this->setQueueFamilyIndex(newQueueFamilyIndex);
}

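// Creates the VkImage described by imageDesc and binds memory for it. On
// success, fills out *info; the caller owns the result and must release it
// with DestroyImageInfo(). Returns false without leaking on any failure.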
bool GrVkImage::InitImageInfo(GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
    if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
        return false;
    }
    if ((imageDesc.fIsProtected == GrProtected::kYes) &&
        !gpu->vkCaps().supportsProtectedMemory()) {
        return false;
    }

    bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
        return false;
    }

    SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
             VK_SAMPLE_COUNT_1_BIT == vkSamples);

    VkImageCreateFlags createflags = 0;
    if (imageDesc.fIsProtected == GrProtected::kYes || gpu->protectedContext()) {
        createflags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }
    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        nullptr,                                     // pNext
        createflags,                                 // VkImageCreateFlags
        imageDesc.fImageType,                        // VkImageType
        imageDesc.fFormat,                           // VkFormat
        { imageDesc.fWidth, imageDesc.fHeight, 1 },  // VkExtent3D
        imageDesc.fLevels,                           // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageDesc.fImageTiling,                      // VkImageTiling
        imageDesc.fUsageFlags,                       // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyCount
        nullptr,                                     // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    VkImage image = VK_NULL_HANDLE;
    VkResult result;
    GR_VK_CALL_RESULT(gpu, result, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
    if (result != VK_SUCCESS) {
        return false;
    }

    GrMemoryless memoryless = imageDesc.fUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT
            ? GrMemoryless::kYes
            : GrMemoryless::kNo;
    GrVkAlloc alloc;
    if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, memoryless, &alloc) ||
        (memoryless == GrMemoryless::kYes &&
         !SkToBool(alloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag))) {
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageDesc.fImageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = imageDesc.fFormat;
    info->fImageUsageFlags = imageDesc.fUsageFlags;
    info->fSampleCount = imageDesc.fSamples;
    info->fLevelCount = imageDesc.fLevels;
    info->fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
    info->fProtected =
            (createflags & VK_IMAGE_CREATE_PROTECTED_BIT) ? GrProtected::kYes : GrProtected::kNo;
    info->fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    return true;
}

void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    VK_CALL(gpu, DestroyImage(gpu->device(), info->fImage, nullptr));
    GrVkMemory::FreeImageMemory(gpu, info->fAlloc);
}

GrVkImage::~GrVkImage() {
    // Everything should have been released first.
    SkASSERT(!fResource);
    SkASSERT(!fFramebufferView);
    SkASSERT(!fTextureView);
}

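// Transitions the image for presentation: switches to PRESENT_SRC_KHR when the
// device supports swapchains and returns the image to the queue family it was
// created or wrapped with (external/foreign images keep their current layout).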
void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
    VkImageLayout layout = this->currentLayout();
    if (fInitialQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInitialQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (gpu->vkCaps().supportsSwapchain()) {
            layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
        }
    }
    this->setImageLayoutAndQueueIndex(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::prepareForExternal(GrVkGpu* gpu) {
    this->setImageLayoutAndQueueIndex(gpu, this->currentLayout(), 0,
                                      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::releaseImage() {
    if (fResource) {
        fResource->unref();
        fResource = nullptr;
    }
    fFramebufferView.reset();
    fTextureView.reset();
    fCachedBlendingInputDescSet.reset();
    fCachedMSAALoadInputDescSet.reset();
}

void GrVkImage::onRelease() {
    this->releaseImage();
    GrAttachment::onRelease();
}

void GrVkImage::onAbandon() {
    this->releaseImage();
    GrAttachment::onAbandon();
}

void GrVkImage::setResourceRelease(sk_sp<GrRefCntedCallback> releaseHelper) {
    SkASSERT(fResource);
    // Forward the release proc on to GrVkImage::Resource.
    fResource->setRelease(std::move(releaseHelper));
}

void GrVkImage::Resource::freeGPUData() const {
    this->invokeReleaseProc();
    VK_CALL(fGpu, DestroyImage(fGpu->device(), fImage, nullptr));
    GrVkMemory::FreeImageMemory(fGpu, fAlloc);
}

void GrVkImage::BorrowedResource::freeGPUData() const {
    this->invokeReleaseProc();
}

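// Points a descriptor set's input-attachment binding at the given image view.
// Input attachments are read in the fragment shader without a sampler, so only
// the view and its layout are filled out in the VkDescriptorImageInfo.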
static void write_input_desc_set(GrVkGpu* gpu,
                                 VkImageView view,
                                 VkImageLayout layout,
                                 VkDescriptorSet descSet) {
    VkDescriptorImageInfo imageInfo;
    memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
    imageInfo.sampler = VK_NULL_HANDLE;
    imageInfo.imageView = view;
    imageInfo.imageLayout = layout;

    VkWriteDescriptorSet writeInfo;
    memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
    writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writeInfo.pNext = nullptr;
    writeInfo.dstSet = descSet;
    writeInfo.dstBinding = GrVkUniformHandler::kInputBinding;
    writeInfo.dstArrayElement = 0;
    writeInfo.descriptorCount = 1;
    writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    writeInfo.pImageInfo = &imageInfo;
    writeInfo.pBufferInfo = nullptr;
    writeInfo.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
}

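// The two getters below lazily create and cache input-attachment descriptor
// sets for this image. Blending reads the image while it is also bound as the
// render target, so that set uses VK_IMAGE_LAYOUT_GENERAL; the MSAA-load path
// reads a dedicated input image, so it uses
// VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.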
gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForBlending(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedBlendingInputDescSet) {
        return fCachedBlendingInputDescSet;
    }

    fCachedBlendingInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedBlendingInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_GENERAL,
                         *fCachedBlendingInputDescSet->descriptorSet());

    return fCachedBlendingInputDescSet;
}

gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForMSAALoad(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedMSAALoadInputDescSet) {
        return fCachedMSAALoadInputDescSet;
    }

    fCachedMSAALoadInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedMSAALoadInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                         *fCachedMSAALoadInputDescSet->descriptorSet());

    return fCachedMSAALoadInputDescSet;
}

GrVkGpu* GrVkImage::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}

size_t GrVkImage::onGpuMemorySize() const {
    if (this->supportedUsages() & UsageFlags::kTexture) {
        return GrSurface::ComputeSize(this->backendFormat(), this->dimensions(), 1,
                                      this->mipmapped());
    } else {
        return GrAttachment::onGpuMemorySize();
    }
}

#if GR_TEST_UTILS
void GrVkImage::setCurrentQueueFamilyToGraphicsQueue(GrVkGpu* gpu) {
    fMutableState->setQueueFamilyIndex(gpu->queueIndex());
}
#endif