/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

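// Maps an image layout to the pipeline stage whose work must complete before
// we can transition *away* from that layout, i.e. the stage used as the
// srcStageMask of the barrier.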
VkPipelineStageFlags GrVkImage::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

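// Maps an image layout to the set of writes that must be made available (the
// barrier's srcAccessMask) before the layout can change.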
VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't include
    // color attachment or depth/stencil writes). So we will ignore the
    // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in preinitialized or general layout,
    // and the image is linear.
    // TODO: Add a check for linear here so we are not always adding host access to general, and we
    //       should only be in preinitialized if we are linear.
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_TRANSFER_READ_BIT |
                VK_ACCESS_SHADER_READ_BIT |
                VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        // These are read-only layouts; there are no writes that need to be made available.
        flags = 0;
    }
    return flags;
}

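// Derives the image aspect(s) from the format: stencil-only, combined
// depth/stencil, or (for all other supported formats) color.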
VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT: // fallthrough
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            SkASSERT(GrVkFormatIsSupported(format));
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

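// Records a VkImageMemoryBarrier that transitions this image from its current
// layout to newLayout and, when the queue families differ or
// releaseFamilyQueue is set, performs a queue family ownership transfer at the
// same time. A hypothetical caller preparing a texture for sampling might
// look like:
//
//     image->setImageLayout(gpu,
//                           VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
//                           VK_ACCESS_SHADER_READ_BIT,
//                           VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
//                           false /*byRegion*/, false /*releaseFamilyQueue*/);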
void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
                               VkAccessFlags dstAccessMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion, bool releaseFamilyQueue) {
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
             VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout);
    VkImageLayout currentLayout = this->currentLayout();

    if (releaseFamilyQueue && fInfo.fCurrentQueueFamily == fInitialQueueFamily &&
        newLayout == currentLayout) {
        // We never transferred the image to this queue and we are releasing it, so don't do
        // anything.
        return;
    }

    // If the old and new layout are the same and the layout is read-only, there is no need to put
    // in a barrier unless we also need to switch queues.
    if (newLayout == currentLayout && !releaseFamilyQueue &&
        (fInfo.fCurrentQueueFamily == VK_QUEUE_FAMILY_IGNORED ||
         fInfo.fCurrentQueueFamily == gpu->queueIndex()) &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);

    uint32_t srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    uint32_t dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    if (fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        gpu->queueIndex() != fInfo.fCurrentQueueFamily) {
        // The image is still owned by its original queue family and we need to transfer it into
        // ours.
        SkASSERT(!releaseFamilyQueue);
        SkASSERT(fInfo.fCurrentQueueFamily == fInitialQueueFamily);

        srcQueueFamilyIndex = fInfo.fCurrentQueueFamily;
        dstQueueFamilyIndex = gpu->queueIndex();
        fInfo.fCurrentQueueFamily = gpu->queueIndex();
    } else if (releaseFamilyQueue) {
        // We are releasing the image so we must transfer the image back to its original queue
        // family.
        srcQueueFamilyIndex = fInfo.fCurrentQueueFamily;
        dstQueueFamilyIndex = fInitialQueueFamily;
        fInfo.fCurrentQueueFamily = fInitialQueueFamily;
    }

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,          // sType
        nullptr,                                         // pNext
        srcAccessMask,                                   // srcAccessMask
        dstAccessMask,                                   // dstAccessMask
        currentLayout,                                   // oldLayout
        newLayout,                                       // newLayout
        srcQueueFamilyIndex,                             // srcQueueFamilyIndex
        dstQueueFamilyIndex,                             // dstQueueFamilyIndex
        fInfo.fImage,                                    // image
        { aspectFlags, 0, fInfo.fLevelCount, 0, 1 }      // subresourceRange
    };

    gpu->addImageMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                               &imageMemoryBarrier);

    this->updateImageLayout(newLayout);
}

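// Creates a VkImage plus its backing memory as described by imageDesc and, on
// success, fills out *info. Returns false without creating anything if the
// dimensions are zero, protected memory is requested but unsupported, or the
// sample count cannot be mapped to a VkSampleCountFlagBits.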
bool GrVkImage::InitImageInfo(const GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
    if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
        return false;
    }
    if ((imageDesc.fIsProtected == GrProtected::kYes) && !gpu->vkCaps().supportsProtectedMemory()) {
        return false;
    }
    VkImage image = VK_NULL_HANDLE;
    GrVkAlloc alloc;

    bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
        return false;
    }

    SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
             VK_SAMPLE_COUNT_1_BIT == vkSamples);

    VkImageCreateFlags createflags = 0;
    if (imageDesc.fIsProtected == GrProtected::kYes || gpu->protectedContext()) {
        createflags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }
    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        nullptr,                                     // pNext
        createflags,                                 // VkImageCreateFlags
        imageDesc.fImageType,                        // VkImageType
        imageDesc.fFormat,                           // VkFormat
        { imageDesc.fWidth, imageDesc.fHeight, 1 },  // VkExtent3D
        imageDesc.fLevels,                           // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageDesc.fImageTiling,                      // VkImageTiling
        imageDesc.fUsageFlags,                       // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyCount
        0,                                           // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateImage(gpu->device(), &imageCreateInfo, nullptr,
                                                        &image));

    if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, isLinear, &alloc)) {
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageDesc.fImageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = imageDesc.fFormat;
    info->fLevelCount = imageDesc.fLevels;
    info->fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
    info->fProtected =
            (createflags & VK_IMAGE_CREATE_PROTECTED_BIT) ? GrProtected::kYes : GrProtected::kNo;
    return true;
}

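// A hypothetical sketch of calling InitImageInfo() above for a small mipped
// color texture (field names as used above; fields not shown are assumed to
// keep their ImageDesc defaults):
//
//     GrVkImage::ImageDesc desc;
//     desc.fImageType   = VK_IMAGE_TYPE_2D;
//     desc.fFormat      = VK_FORMAT_R8G8B8A8_UNORM;
//     desc.fWidth       = 256;
//     desc.fHeight      = 256;
//     desc.fLevels      = 9;   // full mip chain for 256x256
//     desc.fSamples     = 1;
//     desc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
//     desc.fUsageFlags  = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
//     GrVkImageInfo info;
//     if (GrVkImage::InitImageInfo(gpu, desc, &info)) { /* use info */ }
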
void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    VK_CALL(gpu, DestroyImage(gpu->device(), info->fImage, nullptr));
    bool isLinear = VK_IMAGE_TILING_LINEAR == info->fImageTiling;
    GrVkMemory::FreeImageMemory(gpu, isLinear, info->fAlloc);
}

GrVkImage::~GrVkImage() {
    // Should have been released or abandoned first.
    SkASSERT(!fResource);
}

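// Transitions the image for presentation. Unless the image came from an
// external/foreign queue family, and provided the device supports swapchains,
// it also moves to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; either way, ownership is
// released back to the initial queue family.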
void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
    VkImageLayout layout = this->currentLayout();
    if (fInitialQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInitialQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (gpu->vkCaps().supportsSwapchain()) {
            layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
        }
    }
    this->setImageLayout(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false, true);
}

void GrVkImage::prepareForExternal(GrVkGpu* gpu) {
    this->setImageLayout(gpu, this->currentLayout(), 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                         true);
}

void GrVkImage::releaseImage(GrVkGpu* gpu) {
    if (fInfo.fCurrentQueueFamily != fInitialQueueFamily) {
        // The Vulkan spec is vague on what to put for the dstStageMask here. The spec for image
        // memory barriers says the dstStageMask must not be zero. However, where the spec talks
        // about queue family ownership transfers it says the dstStageMask is ignored and should
        // be set to zero. Assuming it really is ignored, we set it to
        // VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT here since that keeps the Vulkan validation layers
        // happy.
        this->setImageLayout(gpu, this->currentLayout(), 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                             false, true);
    }
    if (fResource) {
        fResource->removeOwningTexture();
        fResource->unref(gpu);
        fResource = nullptr;
    }
}

void GrVkImage::abandonImage() {
    if (fResource) {
        fResource->removeOwningTexture();
        fResource->unrefAndAbandon();
        fResource = nullptr;
    }
}

void GrVkImage::setResourceRelease(sk_sp<GrRefCntedCallback> releaseHelper) {
    SkASSERT(fResource);
    // Forward the release proc on to GrVkImage::Resource.
    fResource->setRelease(std::move(releaseHelper));
}

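// For an owned Resource, freeing runs any registered release proc and then
// destroys the VkImage and frees its backing memory. Contrast this with
// BorrowedResource::freeGPUData below, which leaves the image untouched.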
void GrVkImage::Resource::freeGPUData(GrVkGpu* gpu) const {
    this->invokeReleaseProc();
    VK_CALL(gpu, DestroyImage(gpu->device(), fImage, nullptr));
    bool isLinear = (VK_IMAGE_TILING_LINEAR == fImageTiling);
    GrVkMemory::FreeImageMemory(gpu, isLinear, fAlloc);
}

void GrVkImage::Resource::addIdleProc(GrVkTexture* owningTexture,
                                      sk_sp<GrRefCntedCallback> idleProc) const {
    SkASSERT(!fOwningTexture || fOwningTexture == owningTexture);
    fOwningTexture = owningTexture;
    fIdleProcs.push_back(std::move(idleProc));
}

int GrVkImage::Resource::idleProcCnt() const { return fIdleProcs.count(); }

sk_sp<GrRefCntedCallback> GrVkImage::Resource::idleProc(int i) const { return fIdleProcs[i]; }

void GrVkImage::Resource::resetIdleProcs() const { fIdleProcs.reset(); }

void GrVkImage::Resource::removeOwningTexture() const { fOwningTexture = nullptr; }

void GrVkImage::Resource::notifyAddedToCommandBuffer() const { ++fNumCommandBufferOwners; }

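// Called when a command buffer that referenced this resource retires. Idle
// procs can only fire once no command buffer owns the resource: if a texture
// still owns us we defer to it (it may still have refs or pending IO),
// otherwise we release our references to the idle procs here.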
void GrVkImage::Resource::notifyRemovedFromCommandBuffer() const {
    SkASSERT(fNumCommandBufferOwners);
    if (--fNumCommandBufferOwners || !fIdleProcs.count()) {
        return;
    }
    if (fOwningTexture) {
        if (fOwningTexture->resourcePriv().hasRefOrPendingIO()) {
            // Wait for the texture to become idle in the cache to call the procs.
            return;
        }
        fOwningTexture->callIdleProcsOnBehalfOfResource();
    } else {
        fIdleProcs.reset();
    }
}

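// A BorrowedResource wraps a VkImage that Skia does not own, so freeing or
// abandoning it must not destroy the image or its memory; only the release
// proc is invoked.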
void GrVkImage::BorrowedResource::freeGPUData(GrVkGpu* gpu) const {
    this->invokeReleaseProc();
}

void GrVkImage::BorrowedResource::abandonGPUData() const {
    this->invokeReleaseProc();
}

#if GR_TEST_UTILS
void GrVkImage::setCurrentQueueFamilyToGraphicsQueue(GrVkGpu* gpu) {
    fInfo.fCurrentQueueFamily = gpu->queueIndex();
}
#endif