/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

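// Wraps an already-created VkImage described by 'info'. Ownership of the underlying VkImage is
// controlled by 'ownership': a borrowed image is never destroyed by Skia (only its release proc
// runs), while an owned image is destroyed and its memory freed when the last reference to the
// internal Resource is dropped. When 'forSecondaryCB' is true no resource wrapper is created at
// all.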
GrVkImage::GrVkImage(const GrVkGpu* gpu,
                     const GrVkImageInfo& info,
                     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                     GrBackendObjectOwnership ownership,
                     bool forSecondaryCB)
        : fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fIsBorrowed(GrBackendObjectOwnership::kBorrowed == ownership) {
    SkASSERT(fMutableState->getImageLayout() == fInfo.fImageLayout);
    SkASSERT(fMutableState->getQueueFamilyIndex() == fInfo.fCurrentQueueFamily);
#ifdef SK_DEBUG
    if (info.fImageUsageFlags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
        SkASSERT(SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT));
    } else {
        SkASSERT(SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                 SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
    }
    // We can't transfer an image from a non-graphics queue to the graphics queue, since we can't
    // release the image from the original queue without access to that queue. This limits the
    // kinds of queue family indices we can handle.
    if (info.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        info.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        info.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (info.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (info.fCurrentQueueFamily != gpu->queueIndex()) {
                SkASSERT(false);
            }
        } else {
            SkASSERT(false);
        }
    }
#endif
    if (forSecondaryCB) {
        fResource = nullptr;
    } else if (fIsBorrowed) {
        fResource = new BorrowedResource(gpu, info.fImage, info.fAlloc, info.fImageTiling);
    } else {
        SkASSERT(VK_NULL_HANDLE != info.fAlloc.fMemory);
        fResource = new Resource(gpu, info.fImage, info.fAlloc, info.fImageTiling);
    }
}

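// Maps an image layout to the pipeline stage that must have completed before the image can
// leave that layout, i.e. the srcStageMask to use in a barrier transitioning out of 'layout'.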
VkPipelineStageFlags GrVkImage::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

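// Returns the write accesses that must be made available (the srcAccessMask of a barrier) when
// transitioning out of 'layout'. Read-only layouts have no pending writes and yield 0.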
VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't
    // include color attachment or depth/stencil writes), so we ignore
    // VK_ACCESS_SHADER_WRITE_BIT.

    // We can only directly access host memory if we are in the preinitialized or general layout
    // and the image is linear.
    // TODO: Add a check for linear tiling here so we are not always adding host access to the
    // general layout; we should also only be in preinitialized if the image is linear.
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        // There are no writes that need to be made available.
        flags = 0;
    }
    return flags;
}

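// Maps a VkFormat to the image aspect(s) it implies: stencil only, combined depth/stencil, or
// color for all other formats.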
VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT: // fallthrough
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

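// Records an image memory barrier that moves the image to 'newLayout' and, if needed, transfers
// ownership to 'newQueueFamilyIndex'. The source stage and access masks are derived from the
// image's current layout; the destination masks come from the caller. The tracked layout and
// queue family index are updated once the barrier has been recorded.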
void GrVkImage::setImageLayoutAndQueueIndex(const GrVkGpu* gpu,
                                            VkImageLayout newLayout,
                                            VkAccessFlags dstAccessMask,
                                            VkPipelineStageFlags dstStageMask,
                                            bool byRegion,
                                            uint32_t newQueueFamilyIndex) {
    SkASSERT(!gpu->isDeviceLost());
    SkASSERT(newLayout == this->currentLayout() ||
             (VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
              VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout));
    VkImageLayout currentLayout = this->currentLayout();
    uint32_t currentQueueIndex = this->currentQueueFamilyIndex();

#ifdef SK_DEBUG
    if (fInfo.fSharingMode == VK_SHARING_MODE_CONCURRENT) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
        } else {
            SkASSERT(newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED);
        }
    } else {
        SkASSERT(fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE);
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED ||
            currentQueueIndex == gpu->queueIndex()) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT ||
                     currentQueueIndex == gpu->queueIndex());
        } else if (newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                   newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == gpu->queueIndex());
        }
    }
#endif

    if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            newQueueFamilyIndex = gpu->queueIndex();
        }
        if (currentQueueIndex == VK_QUEUE_FAMILY_IGNORED) {
            currentQueueIndex = gpu->queueIndex();
        }
    }

    // If the old and new layouts are the same and the layout is read-only, there is no need to
    // emit a barrier unless we also need to switch queues.
    if (newLayout == currentLayout && currentQueueIndex == newQueueFamilyIndex &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,      // sType
        nullptr,                                     // pNext
        srcAccessMask,                               // srcAccessMask
        dstAccessMask,                               // dstAccessMask
        currentLayout,                               // oldLayout
        newLayout,                                   // newLayout
        currentQueueIndex,                           // srcQueueFamilyIndex
        newQueueFamilyIndex,                         // dstQueueFamilyIndex
        fInfo.fImage,                                // image
        { aspectFlags, 0, fInfo.fLevelCount, 0, 1 }  // subresourceRange
    };
    SkASSERT(srcAccessMask == imageMemoryBarrier.srcAccessMask);
    gpu->addImageMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                               &imageMemoryBarrier);

    this->updateImageLayout(newLayout);
    this->setQueueFamilyIndex(newQueueFamilyIndex);
}

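// A minimal usage sketch (illustrative only; 'gpu' and 'image' are assumed to be a valid
// GrVkGpu* and GrVkImage* owned by the caller). Before copying data into the image a caller
// would typically transition it like this and then record the copy:
//
//     image->setImageLayoutAndQueueIndex(gpu,
//                                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
//                                        VK_ACCESS_TRANSFER_WRITE_BIT,
//                                        VK_PIPELINE_STAGE_TRANSFER_BIT,
//                                        /*byRegion=*/false,
//                                        VK_QUEUE_FAMILY_IGNORED);

// Creates a VkImage from 'imageDesc', allocates and binds its memory, and fills out 'info'.
// Returns false (leaving nothing allocated) if the description is invalid, protected memory is
// requested but unsupported, or image creation/allocation fails.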
bool GrVkImage::InitImageInfo(GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
    if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
        return false;
    }
    if ((imageDesc.fIsProtected == GrProtected::kYes) && !gpu->vkCaps().supportsProtectedMemory()) {
        return false;
    }
    VkImage image = VK_NULL_HANDLE;
    GrVkAlloc alloc;

    bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
        return false;
    }

    SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
             VK_SAMPLE_COUNT_1_BIT == vkSamples);

    VkImageCreateFlags createflags = 0;
    if (imageDesc.fIsProtected == GrProtected::kYes || gpu->protectedContext()) {
        createflags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }
    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        nullptr,                                     // pNext
        createflags,                                 // VkImageCreateFlags
        imageDesc.fImageType,                        // VkImageType
        imageDesc.fFormat,                           // VkFormat
        { imageDesc.fWidth, imageDesc.fHeight, 1 },  // VkExtent3D
        imageDesc.fLevels,                           // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageDesc.fImageTiling,                      // VkImageTiling
        imageDesc.fUsageFlags,                       // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyCount
        nullptr,                                     // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    VkResult result;
    GR_VK_CALL_RESULT(gpu, result, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
    if (result != VK_SUCCESS) {
        return false;
    }

    if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, isLinear, &alloc)) {
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageDesc.fImageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = imageDesc.fFormat;
    info->fImageUsageFlags = imageDesc.fUsageFlags;
    info->fSampleCount = imageDesc.fSamples;
    info->fLevelCount = imageDesc.fLevels;
    info->fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
    info->fProtected =
            (createflags & VK_IMAGE_CREATE_PROTECTED_BIT) ? GrProtected::kYes : GrProtected::kNo;
    info->fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    return true;
}

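// Destroys the VkImage created by InitImageInfo and frees its backing memory. A hypothetical
// pairing sketch (the names 'gpu' and 'desc' are illustrative, not part of this file):
//
//     GrVkImageInfo info;
//     if (GrVkImage::InitImageInfo(gpu, desc, &info)) {
//         // ... use info.fImage ...
//         GrVkImage::DestroyImageInfo(gpu, &info);
//     }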
void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    VK_CALL(gpu, DestroyImage(gpu->device(), info->fImage, nullptr));
    bool isLinear = VK_IMAGE_TILING_LINEAR == info->fImageTiling;
    GrVkMemory::FreeImageMemory(gpu, isLinear, info->fAlloc);
}

GrVkImage::~GrVkImage() {
    // The Resource should have been released (via releaseImage) before the destructor runs.
    SkASSERT(!fResource);
}

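// Transitions the image for presentation: if the image did not come from an external or foreign
// queue and the swapchain extension is supported, the layout becomes PRESENT_SRC_KHR; otherwise
// the current layout is kept. Ownership is returned to the image's initial queue family.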
void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
    VkImageLayout layout = this->currentLayout();
    if (fInitialQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInitialQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (gpu->vkCaps().supportsSwapchain()) {
            layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
        }
    }
    this->setImageLayoutAndQueueIndex(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

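// Returns the image to its initial queue family without changing its layout so that an external
// client can take ownership of it.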
void GrVkImage::prepareForExternal(GrVkGpu* gpu) {
    this->setImageLayoutAndQueueIndex(gpu, this->currentLayout(), 0,
                                      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

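// Drops this GrVkImage's reference to its underlying Resource. For owned images this is what
// ultimately destroys the VkImage and frees its memory once all outstanding references are gone.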
void GrVkImage::releaseImage() {
    if (fResource) {
        fResource->unref();
        fResource = nullptr;
    }
}

void GrVkImage::setResourceRelease(sk_sp<GrRefCntedCallback> releaseHelper) {
    SkASSERT(fResource);
    // Forward the release proc on to GrVkImage::Resource
    fResource->setRelease(std::move(releaseHelper));
}

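// Called when the last reference to an owned Resource is dropped: runs any registered release
// proc, destroys the VkImage, and frees its memory.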
void GrVkImage::Resource::freeGPUData() const {
    this->invokeReleaseProc();
    VK_CALL(fGpu, DestroyImage(fGpu->device(), fImage, nullptr));
    bool isLinear = (VK_IMAGE_TILING_LINEAR == fImageTiling);
    GrVkMemory::FreeImageMemory(fGpu, isLinear, fAlloc);
}

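// Borrowed images are not owned by Skia, so only the release proc runs; the VkImage and its
// memory are left intact.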
void GrVkImage::BorrowedResource::freeGPUData() const {
    this->invokeReleaseProc();
}

#if GR_TEST_UTILS
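// Test-only helper that forces the tracked queue family index back to the graphics queue.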
void GrVkImage::setCurrentQueueFamilyToGraphicsQueue(GrVkGpu* gpu) {
    fMutableState->setQueueFamilyIndex(gpu->queueIndex());
}
#endif