/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkImage.h"

#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImageView.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
constexpr uint32_t VKIMAGE_LIMIT_SIZE = 10000 * 10000; // a VkImage must stay below 10000 * 10000 pixels
constexpr uint32_t PIXEL_SIZE = 4; // 4 bytes per pixel (RGBA)

sk_sp<GrVkImage> GrVkImage::MakeStencil(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        VkFormat format) {
    VkImageUsageFlags vkUsageFlags =
            VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kStencilAttachment,
                           sampleCnt,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           GrProtected::kNo,
                           GrMemoryless::kNo,
                           SkBudgeted::kYes);
}

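// MSAA color buffers can be memoryless: marking them TRANSIENT lets the
// driver back them with lazily allocated (e.g. tile-local) memory, but
// transient attachments may not be used as transfer sources or destinations,
// hence the either/or usage flags below.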
sk_sp<GrVkImage> GrVkImage::MakeMSAA(GrVkGpu* gpu,
                                     SkISize dimensions,
                                     int numSamples,
                                     VkFormat format,
                                     GrProtected isProtected,
                                     GrMemoryless memoryless) {
    SkASSERT(numSamples > 1);

    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    if (memoryless == GrMemoryless::kYes) {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
    } else {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    }
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kColorAttachment,
                           numSamples,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           isProtected,
                           memoryless,
                           SkBudgeted::kYes);
}

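// A minimal usage sketch (hypothetical values; the chosen format must be
// supported for sampling on the device):
//   sk_sp<GrVkImage> tex = GrVkImage::MakeTexture(
//           gpu, SkISize::Make(256, 256), VK_FORMAT_R8G8B8A8_UNORM,
//           /*mipLevels=*/1, GrRenderable::kNo, /*numSamples=*/1,
//           SkBudgeted::kYes, GrProtected::kNo);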
sk_sp<GrVkImage> GrVkImage::MakeTexture(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        VkFormat format,
                                        uint32_t mipLevels,
                                        GrRenderable renderable,
                                        int numSamples,
                                        SkBudgeted budgeted,
                                        GrProtected isProtected) {
    UsageFlags usageFlags = UsageFlags::kTexture;
    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                     VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (renderable == GrRenderable::kYes) {
        usageFlags |= UsageFlags::kColorAttachment;
        vkUsageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        // We always make our render targets support being used as input attachments
        vkUsageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    }

    return GrVkImage::Make(gpu,
                           dimensions,
                           usageFlags,
                           numSamples,
                           format,
                           mipLevels,
                           vkUsageFlags,
                           isProtected,
                           GrMemoryless::kNo,
                           budgeted);
}

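// Builds the views an attachment needs: a framebuffer view (always a single
// mip level) for color/stencil attachment usage, and a texture view covering
// every mip level for sampled usage.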
static bool make_views(GrVkGpu* gpu,
                       const GrVkImageInfo& info,
                       GrAttachment::UsageFlags attachmentUsages,
                       sk_sp<const GrVkImageView>* framebufferView,
                       sk_sp<const GrVkImageView>* textureView) {
    GrVkImageView::Type viewType;
    if (attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) {
        // If we have stencil usage then we shouldn't have any other usages
        SkASSERT(attachmentUsages == GrAttachment::UsageFlags::kStencilAttachment);
        viewType = GrVkImageView::kStencil_Type;
    } else {
        viewType = GrVkImageView::kColor_Type;
    }

    if (SkToBool(attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) ||
        SkToBool(attachmentUsages & GrAttachment::UsageFlags::kColorAttachment)) {
        // Attachments can only have a mip level of 1
        *framebufferView = GrVkImageView::Make(
                gpu, info.fImage, info.fFormat, viewType, 1, info.fYcbcrConversionInfo);
        if (!*framebufferView) {
            return false;
        }
    }

    if (attachmentUsages & GrAttachment::UsageFlags::kTexture) {
        *textureView = GrVkImageView::Make(gpu,
                                           info.fImage,
                                           info.fFormat,
                                           viewType,
                                           info.fLevelCount,
                                           info.fYcbcrConversionInfo);
        if (!*textureView) {
            return false;
        }
    }
    return true;
}

sk_sp<GrVkImage> GrVkImage::Make(GrVkGpu* gpu,
                                 SkISize dimensions,
                                 UsageFlags attachmentUsages,
                                 int sampleCnt,
                                 VkFormat format,
                                 uint32_t mipLevels,
                                 VkImageUsageFlags vkUsageFlags,
                                 GrProtected isProtected,
                                 GrMemoryless memoryless,
                                 SkBudgeted budgeted) {
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = format;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = mipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = vkUsageFlags;
    imageDesc.fIsProtected = isProtected;

    GrVkImageInfo info;
    if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
        return nullptr;
    }

    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
        GrVkImage::DestroyImageInfo(gpu, &info);
        return nullptr;
    }

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState(
            new GrBackendSurfaceMutableStateImpl(info.fImageLayout, info.fCurrentQueueFamily));
    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          budgeted));
}

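// Wraps an externally created VkImage. When the image will only be accessed
// via a secondary command buffer we skip view creation entirely.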
sk_sp<GrVkImage> GrVkImage::MakeWrapped(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        const GrVkImageInfo& info,
                                        sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                                        UsageFlags attachmentUsages,
                                        GrWrapOwnership ownership,
                                        GrWrapCacheable cacheable,
                                        bool forSecondaryCB) {
    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!forSecondaryCB) {
        if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
            return nullptr;
        }
    }

    GrBackendObjectOwnership backendOwnership = kBorrow_GrWrapOwnership == ownership
            ? GrBackendObjectOwnership::kBorrowed
            : GrBackendObjectOwnership::kOwned;

    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          backendOwnership,
                                          cacheable,
                                          forSecondaryCB));
}

// OH ISSUE: Integrate Destroy and Free
void GrVkImage::DestroyAndFreeImageMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc, const VkImage& image) {
    VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
    GrVkMemory::FreeImageMemory(gpu, alloc);
}

GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     SkBudgeted budgeted)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo,
                       info.fProtected,
                       info.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag ? GrMemoryless::kYes
                                                                             : GrMemoryless::kNo)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(false) {
    this->init(gpu, false);
    this->setRealAlloc(true); // OH ISSUE: set real alloc flag
    this->setRealAllocSize(dimensions.height() * dimensions.width() * PIXEL_SIZE); // OH ISSUE: set real alloc size
    this->registerWithCache(budgeted);
}

GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     GrBackendObjectOwnership ownership,
                     GrWrapCacheable cacheable,
                     bool forSecondaryCB)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo,
                       info.fProtected)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(GrBackendObjectOwnership::kBorrowed == ownership) {
    this->init(gpu, forSecondaryCB);
    this->registerWithCacheWrapped(cacheable);
}

void GrVkImage::init(GrVkGpu* gpu, bool forSecondaryCB) {
    SkASSERT(fMutableState->getImageLayout() == fInfo.fImageLayout);
    SkASSERT(fMutableState->getQueueFamilyIndex() == fInfo.fCurrentQueueFamily);
#ifdef SK_DEBUG
    if (fInfo.fImageUsageFlags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
        SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT));
    } else {
        if (fInfo.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag) {
            SkASSERT(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     !SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        } else {
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT));
            SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        }
    }
    // We can't transfer from a non-graphics queue to the graphics queue since we can't
    // release the image from the original queue without having that queue. This limits the
    // types of queue indices we can handle.
    if (fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (fInfo.fCurrentQueueFamily != gpu->queueIndex()) {
                SkASSERT(false);
            }
        } else {
            SkASSERT(false);
        }
    }
#endif
    if (forSecondaryCB) {
        fResource = nullptr;
    } else if (fIsBorrowed) {
        fResource = new BorrowedResource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    } else {
        SkASSERT(VK_NULL_HANDLE != fInfo.fAlloc.fMemory);
        fResource = new Resource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    }
}

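// Maps an image layout to the pipeline stages whose writes in that layout
// must complete before a transition out of it, i.e. the srcStageMask for the
// barrier.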
VkPipelineStageFlags GrVkImage::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't include
    // color attachment or depth/stencil writes), so we ignore VK_ACCESS_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in preinitialized or general layout,
    // and the image is linear.
    // TODO: Add a check for linear here so we are not always adding host to general, and we should
    // only be in preinitialized if we are linear.
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        // There are no writes that need to be made available
        flags = 0;
    }
    return flags;
}

VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT: // fallthrough
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

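// Records a single VkImageMemoryBarrier that transitions this image to
// newLayout and, if requested, transfers queue family ownership, then updates
// the tracked mutable state to match.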
void GrVkImage::setImageLayoutAndQueueIndex(const GrVkGpu* gpu,
                                            VkImageLayout newLayout,
                                            VkAccessFlags dstAccessMask,
                                            VkPipelineStageFlags dstStageMask,
                                            bool byRegion,
                                            uint32_t newQueueFamilyIndex) {
// Enable the following block to test new devices to confirm their lazy images stay at 0 memory use.
#if 0
    if (fInfo.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag) {
        VkDeviceSize size;
        VK_CALL(gpu, GetDeviceMemoryCommitment(gpu->device(), fInfo.fAlloc.fMemory, &size));

        SkDebugf("Lazy Image. This: %p, image: %d, size: %d\n", this, fInfo.fImage, size);
    }
#endif
    SkASSERT(!gpu->isDeviceLost());
    SkASSERT(newLayout == this->currentLayout() ||
             (VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
              VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout));
    VkImageLayout currentLayout = this->currentLayout();
    uint32_t currentQueueIndex = this->currentQueueFamilyIndex();

#ifdef SK_DEBUG
    if (fInfo.fSharingMode == VK_SHARING_MODE_CONCURRENT) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
        } else {
            SkASSERT(newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED);
        }
    } else {
        SkASSERT(fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE);
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED ||
            currentQueueIndex == gpu->queueIndex()) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT ||
                     currentQueueIndex == gpu->queueIndex());
        } else if (newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                   newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == gpu->queueIndex());
        }
    }
#endif

    if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            newQueueFamilyIndex = gpu->queueIndex();
        }
        if (currentQueueIndex == VK_QUEUE_FAMILY_IGNORED) {
            currentQueueIndex = gpu->queueIndex();
        }
    }

    // If the old and new layout are the same and the layout is a read-only layout, there is no
    // need to put in a barrier unless we also need to switch queues.
    if (newLayout == currentLayout && currentQueueIndex == newQueueFamilyIndex &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);

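    // One whole-image barrier: all mip levels of the single array layer,
    // covering both the layout transition and any queue-ownership transfer.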
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,     // sType
        nullptr,                                    // pNext
        srcAccessMask,                              // srcAccessMask
        dstAccessMask,                              // dstAccessMask
        currentLayout,                              // oldLayout
        newLayout,                                  // newLayout
        currentQueueIndex,                          // srcQueueFamilyIndex
        newQueueFamilyIndex,                        // dstQueueFamilyIndex
        fInfo.fImage,                               // image
        { aspectFlags, 0, fInfo.fLevelCount, 0, 1 } // subresourceRange
    };
    SkASSERT(srcAccessMask == imageMemoryBarrier.srcAccessMask);
    gpu->addImageMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                               &imageMemoryBarrier);

    this->updateImageLayout(newLayout);
    this->setQueueFamilyIndex(newQueueFamilyIndex);
}

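// Creates the VkImage described by imageDesc, allocates and binds its memory,
// and fills out *info. Fails (returning false) on zero-sized or oversized
// images, unsupported protected-memory requests, or allocation failure.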
bool GrVkImage::InitImageInfo(GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
    if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
        return false;
    }
    if ((imageDesc.fIsProtected == GrProtected::kYes) && !gpu->vkCaps().supportsProtectedMemory()) {
        return false;
    }

    bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
        return false;
    }

    SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
             VK_SAMPLE_COUNT_1_BIT == vkSamples);

    VkImageCreateFlags createflags = 0;
    if (imageDesc.fIsProtected == GrProtected::kYes || gpu->protectedContext()) {
        createflags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }
    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        nullptr,                                     // pNext
        createflags,                                 // VkImageCreateFlags
        imageDesc.fImageType,                        // VkImageType
        imageDesc.fFormat,                           // VkFormat
        { imageDesc.fWidth, imageDesc.fHeight, 1 },  // VkExtent3D
        imageDesc.fLevels,                           // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageDesc.fImageTiling,                      // VkImageTiling
        imageDesc.fUsageFlags,                       // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyCount
        nullptr,                                     // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    VkImage image = VK_NULL_HANDLE;
    VkResult result;
    // Widen before multiplying so a large width * height can't overflow 32 bits.
    if (static_cast<uint64_t>(imageDesc.fWidth) * imageDesc.fHeight > VKIMAGE_LIMIT_SIZE) {
        SkDebugf("GrVkImage::InitImageInfo failed, image is too large, width:%u, height:%u\n",
                 imageDesc.fWidth, imageDesc.fHeight);
        return false;
    }
    GR_VK_CALL_RESULT(gpu, result, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
    if (result != VK_SUCCESS) {
        return false;
    }

    GrMemoryless memoryless = imageDesc.fUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT
            ? GrMemoryless::kYes
            : GrMemoryless::kNo;
    GrVkAlloc alloc;
    if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, memoryless, &alloc,
                                             imageDesc.fWidth * imageDesc.fHeight * PIXEL_SIZE) ||
        (memoryless == GrMemoryless::kYes &&
         !SkToBool(alloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag))) {
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageDesc.fImageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = imageDesc.fFormat;
    info->fImageUsageFlags = imageDesc.fUsageFlags;
    info->fSampleCount = imageDesc.fSamples;
    info->fLevelCount = imageDesc.fLevels;
    info->fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
    info->fProtected =
            (createflags & VK_IMAGE_CREATE_PROTECTED_BIT) ? GrProtected::kYes : GrProtected::kNo;
    info->fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    return true;
}

void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    DestroyAndFreeImageMemory(gpu, info->fAlloc, info->fImage);
}

GrVkImage::~GrVkImage() {
    // The image, its views, and its memory should have been released first
    SkASSERT(!fResource);
    SkASSERT(!fFramebufferView);
    SkASSERT(!fTextureView);
}

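// Transitions the image to PRESENT_SRC (when a swapchain is supported) and
// returns it to the queue family it was created or imported with so the
// presentation engine can consume it.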
void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
    VkImageLayout layout = this->currentLayout();
    if (fInitialQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInitialQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (gpu->vkCaps().supportsSwapchain()) {
            layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
        }
    }
    this->setImageLayoutAndQueueIndex(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::prepareForExternal(GrVkGpu* gpu) {
    this->setImageLayoutAndQueueIndex(gpu, this->currentLayout(), 0,
                                      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::releaseImage() {
    if (fResource) {
        fResource->unref();
        fResource = nullptr;
    }
    fFramebufferView.reset();
    fTextureView.reset();
    fCachedBlendingInputDescSet.reset();
    fCachedMSAALoadInputDescSet.reset();
}

void GrVkImage::onRelease() {
    this->releaseImage();
    GrAttachment::onRelease();
}

void GrVkImage::onAbandon() {
    this->releaseImage();
    GrAttachment::onAbandon();
}

void GrVkImage::setResourceRelease(sk_sp<GrRefCntedCallback> releaseHelper) {
    SkASSERT(fResource);
    // Forward the release proc on to GrVkImage::Resource
    fResource->setRelease(std::move(releaseHelper));
}

void GrVkImage::Resource::freeGPUData() const {
    this->invokeReleaseProc();

    // OH ISSUE: async memory reclaimer
    auto reclaimer = fGpu->memoryReclaimer();
    if (reclaimer && reclaimer->addMemoryToWaitQueue(fGpu, fAlloc, fImage)) {
        return;
    }

    DestroyAndFreeImageMemory(fGpu, fAlloc, fImage);
}

void GrVkImage::BorrowedResource::freeGPUData() const {
    this->invokeReleaseProc();
}

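// Points descSet at the given view as an input attachment at the binding
// GrVkUniformHandler::kInputBinding expects.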
static void write_input_desc_set(GrVkGpu* gpu,
                                 VkImageView view,
                                 VkImageLayout layout,
                                 VkDescriptorSet descSet) {
    VkDescriptorImageInfo imageInfo;
    memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
    imageInfo.sampler = VK_NULL_HANDLE;
    imageInfo.imageView = view;
    imageInfo.imageLayout = layout;

    VkWriteDescriptorSet writeInfo;
    memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
    writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writeInfo.pNext = nullptr;
    writeInfo.dstSet = descSet;
    writeInfo.dstBinding = GrVkUniformHandler::kInputBinding;
    writeInfo.dstArrayElement = 0;
    writeInfo.descriptorCount = 1;
    writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    writeInfo.pImageInfo = &imageInfo;
    writeInfo.pBufferInfo = nullptr;
    writeInfo.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
}

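// Lazily creates (and caches) the input-attachment descriptor set used to
// read this image as the blend dst; VK_IMAGE_LAYOUT_GENERAL is required
// because the image is simultaneously being rendered to.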
gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForBlending(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedBlendingInputDescSet) {
        return fCachedBlendingInputDescSet;
    }

    fCachedBlendingInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedBlendingInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_GENERAL,
                         *fCachedBlendingInputDescSet->descriptorSet());

    return fCachedBlendingInputDescSet;
}

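// Same idea for loading this image into an MSAA attachment: the source is
// only read here, so SHADER_READ_ONLY_OPTIMAL suffices.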
gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForMSAALoad(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedMSAALoadInputDescSet) {
        return fCachedMSAALoadInputDescSet;
    }

    fCachedMSAALoadInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedMSAALoadInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                         *fCachedMSAALoadInputDescSet->descriptorSet());

    return fCachedMSAALoadInputDescSet;
}

GrVkGpu* GrVkImage::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}

size_t GrVkImage::onGpuMemorySize() const {
    if (supportedUsages() & UsageFlags::kTexture) {
        return GrSurface::ComputeSize(this->backendFormat(), this->dimensions(), 1, this->mipmapped());
    } else {
        return GrAttachment::onGpuMemorySize();
    }
}

#if GR_TEST_UTILS
void GrVkImage::setCurrentQueueFamilyToGraphicsQueue(GrVkGpu* gpu) {
    fMutableState->setQueueFamilyIndex(gpu->queueIndex());
}
#endif