/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkImage.h"
#include "GrGpuResourcePriv.h"
#include "GrVkGpu.h"
#include "GrVkMemory.h"
#include "GrVkTexture.h"
#include "GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

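// Maps an image layout to the pipeline stage(s) that may have last accessed the image while it
// was in that layout. The result is used as the source stage mask when recording a barrier.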
VkPipelineStageFlags GrVkImage::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

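// Maps an image layout to the access types that may have been used while the image was in that
// layout. The result is used as the source access mask when recording a barrier.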
VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't include
    // color attachment or depth/stencil writes). So we will ignore the
    // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in preinitialized or general layout,
    // and the image is linear.
    // TODO: Add a check for linear here so we are not always adding host access to general, and we
    //       should only be in preinitialized if we are linear.
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_TRANSFER_READ_BIT |
                VK_ACCESS_SHADER_READ_BIT |
                VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        flags = VK_ACCESS_SHADER_READ_BIT;
    }
    return flags;
}

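// Picks which aspects of the image (color vs. depth/stencil) a barrier's subresource range should
// cover, based on the image's format.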
VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT: // fallthrough
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            SkASSERT(GrVkFormatIsSupported(format));
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

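// Records an image memory barrier that transitions the image to newLayout and, when needed,
// performs a queue family ownership transfer. If releaseFamilyQueue is true the image is handed
// back to its original queue family (fInitialQueueFamily).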
void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
                               VkAccessFlags dstAccessMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion, bool releaseFamilyQueue) {
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
             VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout);
    VkImageLayout currentLayout = this->currentLayout();

    if (releaseFamilyQueue && fInfo.fCurrentQueueFamily == fInitialQueueFamily) {
        // We never transferred the image to this queue and we are releasing it, so don't do
        // anything.
        return;
    }

    // If the old and new layout are the same and the layout is a read-only layout, there is no
    // need to put in a barrier.
    if (newLayout == currentLayout &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);

    uint32_t srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    uint32_t dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    if (fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        gpu->queueIndex() != fInfo.fCurrentQueueFamily) {
        // The image is still owned by its original queue family and we need to transfer it into
        // ours.
        SkASSERT(!releaseFamilyQueue);
        SkASSERT(fInfo.fCurrentQueueFamily == fInitialQueueFamily);

        srcQueueFamilyIndex = fInfo.fCurrentQueueFamily;
        dstQueueFamilyIndex = gpu->queueIndex();
        fInfo.fCurrentQueueFamily = gpu->queueIndex();
    } else if (releaseFamilyQueue) {
        // We are releasing the image so we must transfer the image back to its original queue
        // family.
        SkASSERT(fInfo.fCurrentQueueFamily == gpu->queueIndex());
        srcQueueFamilyIndex = fInfo.fCurrentQueueFamily;
        dstQueueFamilyIndex = fInitialQueueFamily;
        fInfo.fCurrentQueueFamily = fInitialQueueFamily;
    }

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,          // sType
        nullptr,                                         // pNext
        srcAccessMask,                                   // srcAccessMask
        dstAccessMask,                                   // dstAccessMask
        currentLayout,                                   // oldLayout
        newLayout,                                       // newLayout
        srcQueueFamilyIndex,                             // srcQueueFamilyIndex
        dstQueueFamilyIndex,                             // dstQueueFamilyIndex
        fInfo.fImage,                                    // image
        { aspectFlags, 0, fInfo.fLevelCount, 0, 1 }      // subresourceRange
    };

    gpu->addImageMemoryBarrier(srcStageMask, dstStageMask, byRegion, &imageMemoryBarrier);

    this->updateImageLayout(newLayout);
}

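// A sketch of typical usage (not taken from this file; the exact access and stage flags are the
// caller's choice). Before uploading pixel data with a buffer-to-image copy, a caller would
// typically transition the image like so:
//
//     image->setImageLayout(gpu,
//                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
//                           VK_ACCESS_TRANSFER_WRITE_BIT,
//                           VK_PIPELINE_STAGE_TRANSFER_BIT,
//                           false /*byRegion*/,
//                           false /*releaseFamilyQueue*/);
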
bool GrVkImage::InitImageInfo(const GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
    if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
        return false;
    }
    VkImage image = 0;
    GrVkAlloc alloc;

    bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
        return false;
    }

    SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
             VK_SAMPLE_COUNT_1_BIT == vkSamples);

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        nullptr,                                     // pNext
        0,                                           // VkImageCreateFlags
        imageDesc.fImageType,                        // VkImageType
        imageDesc.fFormat,                           // VkFormat
        { imageDesc.fWidth, imageDesc.fHeight, 1 },  // VkExtent3D
        imageDesc.fLevels,                           // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageDesc.fImageTiling,                      // VkImageTiling
        imageDesc.fUsageFlags,                       // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyIndexCount
        0,                                           // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateImage(gpu->device(), &imageCreateInfo, nullptr,
                                                        &image));

    if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, isLinear, &alloc)) {
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageDesc.fImageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = imageDesc.fFormat;
    info->fLevelCount = imageDesc.fLevels;
    info->fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
    return true;
}

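// Destroys the VkImage and frees the device memory that InitImageInfo allocated for it.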
void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    VK_CALL(gpu, DestroyImage(gpu->device(), info->fImage, nullptr));
    bool isLinear = VK_IMAGE_TILING_LINEAR == info->fImageTiling;
    GrVkMemory::FreeImageMemory(gpu, isLinear, info->fAlloc);
}

GrVkImage::~GrVkImage() {
    // should have been released or abandoned first
    SkASSERT(!fResource);
}

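// Returns the image to its original queue family (if it was ever transferred to ours) and then
// drops this GrVkImage's reference to the underlying Resource.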
void GrVkImage::releaseImage(GrVkGpu* gpu) {
    if (fInfo.fCurrentQueueFamily != fInitialQueueFamily) {
        this->setImageLayout(gpu, this->currentLayout(), 0, 0, false, true);
    }
    if (fResource) {
        fResource->removeOwningTexture();
        fResource->unref(gpu);
        fResource = nullptr;
    }
}

void GrVkImage::abandonImage() {
    if (fResource) {
        fResource->removeOwningTexture();
        fResource->unrefAndAbandon();
        fResource = nullptr;
    }
}

void GrVkImage::setResourceRelease(sk_sp<GrReleaseProcHelper> releaseHelper) {
    SkASSERT(fResource);
    // Forward the release proc on to GrVkImage::Resource
    fResource->setRelease(std::move(releaseHelper));
}

void GrVkImage::Resource::freeGPUData(GrVkGpu* gpu) const {
    SkASSERT(!fReleaseHelper);
    VK_CALL(gpu, DestroyImage(gpu->device(), fImage, nullptr));
    bool isLinear = (VK_IMAGE_TILING_LINEAR == fImageTiling);
    GrVkMemory::FreeImageMemory(gpu, isLinear, fAlloc);
}

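// The idle proc is a callback that fires once the resource is no longer held by any command
// buffer and its owning texture (if any) has no refs or pending IO; see
// notifyRemovedFromCommandBuffer() below.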
void GrVkImage::Resource::setIdleProc(GrVkTexture* owner, GrTexture::IdleProc proc,
                                      void* context) const {
    fOwningTexture = owner;
    fIdleProc = proc;
    fIdleProcContext = context;
}

void GrVkImage::Resource::removeOwningTexture() const { fOwningTexture = nullptr; }

void GrVkImage::Resource::notifyAddedToCommandBuffer() const { ++fNumCommandBufferOwners; }

void GrVkImage::Resource::notifyRemovedFromCommandBuffer() const {
    SkASSERT(fNumCommandBufferOwners);
    if (--fNumCommandBufferOwners || !fIdleProc) {
        return;
    }
    if (fOwningTexture && fOwningTexture->resourcePriv().hasRefOrPendingIO()) {
        return;
    }
    fIdleProc(fIdleProcContext);
    if (fOwningTexture) {
        fOwningTexture->setIdleProc(nullptr, nullptr);
        // Changing the texture's proc should change ours.
        SkASSERT(!fIdleProc);
    } else {
        fIdleProc = nullptr;
        fIdleProcContext = nullptr;
    }
}

void GrVkImage::BorrowedResource::freeGPUData(GrVkGpu* gpu) const {
    this->invokeReleaseProc();
}

void GrVkImage::BorrowedResource::abandonGPUData() const {
    this->invokeReleaseProc();
}