/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/vk/VulkanTexture.h"

#include "include/gpu/MutableTextureState.h"
#include "include/gpu/graphite/vk/VulkanGraphiteTypes.h"
#include "include/gpu/vk/VulkanMutableTextureState.h"
#include "src/core/SkMipmap.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/Sampler.h"
#include "src/gpu/graphite/vk/VulkanCaps.h"
#include "src/gpu/graphite/vk/VulkanCommandBuffer.h"
#include "src/gpu/graphite/vk/VulkanDescriptorSet.h"
#include "src/gpu/graphite/vk/VulkanFramebuffer.h"
#include "src/gpu/graphite/vk/VulkanGraphiteUtils.h"
#include "src/gpu/graphite/vk/VulkanResourceProvider.h"
#include "src/gpu/graphite/vk/VulkanSharedContext.h"
#include "src/gpu/vk/VulkanMemory.h"
#include "src/gpu/vk/VulkanMutableTextureStatePriv.h"

namespace skgpu::graphite {
bool VulkanTexture::MakeVkImage(const VulkanSharedContext* sharedContext,
                                SkISize dimensions,
                                const TextureInfo& info,
                                CreatedImageInfo* outInfo) {
    SkASSERT(outInfo);
    const VulkanCaps& caps = sharedContext->vulkanCaps();

    if (dimensions.isEmpty()) {
        SKGPU_LOG_E("Tried to create VkImage with empty dimensions.");
        return false;
    }
    if (dimensions.width() > caps.maxTextureSize() ||
        dimensions.height() > caps.maxTextureSize()) {
        SKGPU_LOG_E("Tried to create VkImage with too large a size.");
        return false;
    }

    if ((info.isProtected() == Protected::kYes) != caps.protectedSupport()) {
        SKGPU_LOG_E("Tried to create %s VkImage in %s Context.",
                    info.isProtected() == Protected::kYes ? "protected" : "unprotected",
                    caps.protectedSupport() ? "protected" : "unprotected");
        return false;
    }

    const auto& vkInfo = TextureInfoPriv::Get<VulkanTextureInfo>(info);

    bool isLinear = vkInfo.fImageTiling == VK_IMAGE_TILING_LINEAR;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;
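    // Per the Vulkan spec, VK_IMAGE_LAYOUT_PREINITIALIZED preserves host-written contents
    // across the first layout transition, which is only useful for linear-tiled images the
    // CPU can write directly. Optimal-tiled images start as VK_IMAGE_LAYOUT_UNDEFINED,
    // which lets the driver discard any initial contents.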

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!SampleCountToVkSampleCount(vkInfo.fSampleCount, &vkSamples)) {
        SKGPU_LOG_E("Failed creating VkImage because we could not convert the number of samples: "
                    "%u to a VkSampleCountFlagBits.", info.numSamples());
        return false;
    }

    SkASSERT(!isLinear || vkSamples == VK_SAMPLE_COUNT_1_BIT);

    VkImageCreateFlags createflags = 0;
    if (info.isProtected() == Protected::kYes && caps.protectedSupport()) {
        createflags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }

    uint32_t numMipLevels = 1;
    if (vkInfo.fMipmapped == Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }
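    // SkMipmap::ComputeLevelCount returns the number of levels below the base, so we add
    // one for the base level itself. E.g. for a 4096x2048 image it returns
    // floor(log2(4096)) = 12, giving 13 levels total (4096x2048 down to 1x1).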

    uint32_t width = static_cast<uint32_t>(dimensions.fWidth);
    uint32_t height = static_cast<uint32_t>(dimensions.fHeight);

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,     // sType
        nullptr,                                 // pNext
        createflags,                             // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                        // VkImageType
        vkInfo.fFormat,                          // VkFormat
        { width, height, 1 },                    // VkExtent3D
        numMipLevels,                            // mipLevels
        1,                                       // arrayLayers
        vkSamples,                               // samples
        vkInfo.fImageTiling,                     // VkImageTiling
        vkInfo.fImageUsageFlags,                 // VkImageUsageFlags
        vkInfo.fSharingMode,                     // VkSharingMode
        0,                                       // queueFamilyIndexCount
        nullptr,                                 // pQueueFamilyIndices
        initialLayout                            // initialLayout
    };

    auto device = sharedContext->device();

    VkImage image = VK_NULL_HANDLE;
    VkResult result;
    VULKAN_CALL_RESULT(
            sharedContext, result, CreateImage(device, &imageCreateInfo, nullptr, &image));
    if (result != VK_SUCCESS) {
        SKGPU_LOG_E("Failed call to vkCreateImage with error: %d", result);
        return false;
    }

    auto allocator = sharedContext->memoryAllocator();
    bool forceDedicatedMemory = caps.shouldAlwaysUseDedicatedImageMemory();
    bool useLazyAllocation =
            SkToBool(vkInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);

    auto checkResult = [sharedContext](VkResult result) {
        return sharedContext->checkVkResult(result);
    };
    if (!skgpu::VulkanMemory::AllocImageMemory(allocator,
                                               image,
                                               info.isProtected(),
                                               forceDedicatedMemory,
                                               useLazyAllocation,
                                               checkResult,
                                               &outInfo->fMemoryAlloc)) {
        VULKAN_CALL(sharedContext->interface(), DestroyImage(device, image, nullptr));
        return false;
    }

    if (useLazyAllocation &&
        !SkToBool(outInfo->fMemoryAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag)) {
        SKGPU_LOG_E("Failed to allocate lazy Vulkan memory when requested");
        skgpu::VulkanMemory::FreeImageMemory(allocator, outInfo->fMemoryAlloc);
        return false;
    }
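    // Transient attachments are paired with VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT memory
    // so that tile-based GPUs can keep their contents entirely on-chip and never back them
    // with physical memory; the check above verifies we actually got such an allocation
    // rather than silently paying for real memory.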

    VULKAN_CALL_RESULT(
            sharedContext,
            result,
            BindImageMemory(
                    device, image, outInfo->fMemoryAlloc.fMemory, outInfo->fMemoryAlloc.fOffset));
    if (result != VK_SUCCESS) {
        skgpu::VulkanMemory::FreeImageMemory(allocator, outInfo->fMemoryAlloc);
        VULKAN_CALL(sharedContext->interface(), DestroyImage(device, image, nullptr));
        return false;
    }

    outInfo->fImage = image;
    outInfo->fMutableState = sk_make_sp<MutableTextureState>(
            skgpu::MutableTextureStates::MakeVulkan(initialLayout, VK_QUEUE_FAMILY_IGNORED));
    return true;
}

sk_sp<Texture> VulkanTexture::Make(const VulkanSharedContext* sharedContext,
                                   SkISize dimensions,
                                   const TextureInfo& info,
                                   sk_sp<VulkanYcbcrConversion> ycbcrConversion) {
    CreatedImageInfo imageInfo;
    if (!MakeVkImage(sharedContext, dimensions, info, &imageInfo)) {
        return nullptr;
    }

    return sk_sp<Texture>(new VulkanTexture(sharedContext,
                                            dimensions,
                                            info,
                                            std::move(imageInfo.fMutableState),
                                            imageInfo.fImage,
                                            imageInfo.fMemoryAlloc,
                                            Ownership::kOwned,
                                            std::move(ycbcrConversion)));
}

sk_sp<Texture> VulkanTexture::MakeWrapped(const VulkanSharedContext* sharedContext,
                                          SkISize dimensions,
                                          const TextureInfo& info,
                                          sk_sp<MutableTextureState> mutableState,
                                          VkImage image,
                                          const VulkanAlloc& alloc,
                                          sk_sp<VulkanYcbcrConversion> ycbcrConversion) {
    return sk_sp<Texture>(new VulkanTexture(sharedContext,
                                            dimensions,
                                            info,
                                            std::move(mutableState),
                                            image,
                                            alloc,
                                            Ownership::kWrapped,
                                            std::move(ycbcrConversion)));
}

VulkanTexture::~VulkanTexture() {}

VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D16_UNORM:
            [[fallthrough]];
        case VK_FORMAT_D32_SFLOAT:
            return VK_IMAGE_ASPECT_DEPTH_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT:
            [[fallthrough]];
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

void VulkanTexture::setImageLayoutAndQueueIndex(VulkanCommandBuffer* cmdBuffer,
                                                VkImageLayout newLayout,
                                                VkAccessFlags dstAccessMask,
                                                VkPipelineStageFlags dstStageMask,
                                                bool byRegion,
                                                uint32_t newQueueFamilyIndex) const {

    SkASSERT(newLayout == this->currentLayout() ||
             (VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
              VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout));
    VkImageLayout currentLayout = this->currentLayout();
    uint32_t currentQueueIndex = this->currentQueueFamilyIndex();

    const auto& textureInfo = this->vulkanTextureInfo();
    auto sharedContext = static_cast<const VulkanSharedContext*>(this->sharedContext());

    // Enable the following block on new devices to test that their lazy images stay at
    // 0 memory use
#if 0
    auto device = sharedContext->device();
    if (fAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag) {
        VkDeviceSize size;
        VULKAN_CALL(sharedContext->interface(),
                    GetDeviceMemoryCommitment(device, fAlloc.fMemory, &size));

        SkDebugf("Lazy Image. This: %p, image: %d, size: %d\n", this, fImage, size);
    }
#endif
#ifdef SK_DEBUG
    if (textureInfo.fSharingMode == VK_SHARING_MODE_CONCURRENT) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
        } else {
            SkASSERT(newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED);
        }
    } else {
        SkASSERT(textureInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE);
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED ||
            currentQueueIndex == sharedContext->queueIndex()) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT ||
                     currentQueueIndex == sharedContext->queueIndex());
        } else if (newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                   newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == sharedContext->queueIndex());
        }
    }
#endif

    if (textureInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            newQueueFamilyIndex = sharedContext->queueIndex();
        }
        if (currentQueueIndex == VK_QUEUE_FAMILY_IGNORED) {
            currentQueueIndex = sharedContext->queueIndex();
        }
    }

    // If the old and new layout are the same and the layout is a read-only layout, there is no
    // need to put in a barrier unless we also need to switch queues.
    if (newLayout == currentLayout && currentQueueIndex == newQueueFamilyIndex &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = VulkanTexture::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = VulkanTexture::LayoutToPipelineSrcStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(textureInfo.fFormat);
    uint32_t numMipLevels = 1;
    SkISize dimensions = this->dimensions();
    if (this->mipmapped() == Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,          // sType
        nullptr,                                         // pNext
        srcAccessMask,                                   // srcAccessMask
        dstAccessMask,                                   // dstAccessMask
        currentLayout,                                   // oldLayout
        newLayout,                                       // newLayout
        currentQueueIndex,                               // srcQueueFamilyIndex
        newQueueFamilyIndex,                             // dstQueueFamilyIndex
        fImage,                                          // image
        { aspectFlags, 0, numMipLevels, 0, 1 }           // subresourceRange (all mips, one layer)
    };
    SkASSERT(srcAccessMask == imageMemoryBarrier.srcAccessMask);
    cmdBuffer->addImageMemoryBarrier(this, srcStageMask, dstStageMask, byRegion,
                                     &imageMemoryBarrier);

    skgpu::MutableTextureStates::SetVkImageLayout(this->mutableState(), newLayout);
    skgpu::MutableTextureStates::SetVkQueueFamilyIndex(this->mutableState(), newQueueFamilyIndex);
}
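
// A minimal usage sketch (assuming a valid cmdBuffer and texture): transitioning a
// texture so it can receive a buffer-to-image copy might look like
//
//     texture->setImageLayoutAndQueueIndex(cmdBuffer,
//                                          VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
//                                          VK_ACCESS_TRANSFER_WRITE_BIT,
//                                          VK_PIPELINE_STAGE_TRANSFER_BIT,
//                                          /*byRegion=*/false,
//                                          VK_QUEUE_FAMILY_IGNORED);
//
// The source access/stage masks are derived from the tracked current layout by the
// Layout* helpers below.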

static bool has_transient_usage(const TextureInfo& info) {
    const auto& vkInfo = TextureInfoPriv::Get<VulkanTextureInfo>(info);
    return vkInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
}

VulkanTexture::VulkanTexture(const VulkanSharedContext* sharedContext,
                             SkISize dimensions,
                             const TextureInfo& info,
                             sk_sp<MutableTextureState> mutableState,
                             VkImage image,
                             const VulkanAlloc& alloc,
                             Ownership ownership,
                             sk_sp<VulkanYcbcrConversion> ycbcrConversion)
        : Texture(sharedContext,
                  dimensions,
                  info,
                  has_transient_usage(info),
                  std::move(mutableState),
                  ownership)
        , fImage(image)
        , fMemoryAlloc(alloc)
        , fYcbcrConversion(std::move(ycbcrConversion)) {}

void VulkanTexture::freeGpuData() {
    // Need to delete any ImageViews first
    fImageViews.clear();

    // If the texture is wrapped we don't own this data
    if (this->ownership() != Ownership::kWrapped) {
        auto sharedContext = static_cast<const VulkanSharedContext*>(this->sharedContext());
        VULKAN_CALL(sharedContext->interface(),
                    DestroyImage(sharedContext->device(), fImage, nullptr));
        skgpu::VulkanMemory::FreeImageMemory(sharedContext->memoryAllocator(), fMemoryAlloc);
    }
}

void VulkanTexture::updateImageLayout(VkImageLayout newLayout) {
    skgpu::MutableTextureStates::SetVkImageLayout(this->mutableState(), newLayout);
}

VkImageLayout VulkanTexture::currentLayout() const {
    return skgpu::MutableTextureStates::GetVkImageLayout(this->mutableState());
}

uint32_t VulkanTexture::currentQueueFamilyIndex() const {
    return skgpu::MutableTextureStates::GetVkQueueFamilyIndex(this->mutableState());
}

VkPipelineStageFlags VulkanTexture::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

VkAccessFlags VulkanTexture::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't include
    // color attachment or depth/stencil writes). So we will ignore
    // VK_ACCESS_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in preinitialized or general layout,
    // and the image is linear.
    // TODO: Add a check for linear here so we are not always adding host to general, and we should
    // only be in preinitialized if we are linear
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        // There are no writes that need to be made available
        flags = 0;
    }
    return flags;
}
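
// Worked example of the two helpers above: for a render target currently in
// VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL that is about to be sampled, the source half
// of the barrier comes out as
//     srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
//     srcStageMask  = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
// i.e. the barrier waits for attachment writes to finish and makes them available before
// the destination stage/access (e.g. fragment-shader reads) can proceed.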

const VulkanImageView* VulkanTexture::getImageView(VulkanImageView::Usage usage) const {
    for (int i = 0; i < fImageViews.size(); ++i) {
        if (fImageViews[i]->usage() == usage) {
            return fImageViews[i].get();
        }
    }

    auto sharedContext = static_cast<const VulkanSharedContext*>(this->sharedContext());
    const auto& vkTexInfo = this->vulkanTextureInfo();
    int miplevels = vkTexInfo.fMipmapped == Mipmapped::kYes
                            ? SkMipmap::ComputeLevelCount(this->dimensions().width(),
                                                          this->dimensions().height()) + 1
                            : 1;
    auto imageView = VulkanImageView::Make(sharedContext,
                                           fImage,
                                           vkTexInfo.fFormat,
                                           usage,
                                           miplevels,
                                           fYcbcrConversion);
    return fImageViews.push_back(std::move(imageView)).get();
}

bool VulkanTexture::supportsInputAttachmentUsage() const {
    return (this->vulkanTextureInfo().fImageUsageFlags & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
}

size_t VulkanTexture::onUpdateGpuMemorySize() {
    if (!has_transient_usage(this->textureInfo())) {
        // We don't expect non-transient textures to change their size over time.
        return this->gpuMemorySize();
    }

    auto sharedContext = static_cast<const VulkanSharedContext*>(this->sharedContext());
    VkDeviceSize committedMemory;
    VULKAN_CALL(sharedContext->interface(),
                GetDeviceMemoryCommitment(sharedContext->device(),
                                          fMemoryAlloc.fMemory,
                                          &committedMemory));
    return committedMemory;
}
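
// vkGetDeviceMemoryCommitment reports how many bytes the driver has actually committed
// for a VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT allocation, so a transient attachment
// that stays on-chip can report a size of zero here even though its VkImage was created
// with full-size dimensions.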

sk_sp<VulkanDescriptorSet> VulkanTexture::getCachedSingleTextureDescriptorSet(
        const Sampler* sampler) const {
    SkASSERT(sampler);
    for (auto& cachedSet : fCachedSingleTextureDescSets) {
        if (cachedSet.first->uniqueID() == sampler->uniqueID()) {
            return cachedSet.second;
        }
    }
    return nullptr;
}

void VulkanTexture::addCachedSingleTextureDescriptorSet(sk_sp<VulkanDescriptorSet> set,
                                                        sk_sp<const Sampler> sampler) const {
    SkASSERT(set);
    SkASSERT(sampler);
    fCachedSingleTextureDescSets.push_back(std::make_pair(std::move(sampler), std::move(set)));
}

sk_sp<VulkanFramebuffer> VulkanTexture::getCachedFramebuffer(
        const RenderPassDesc& renderPassDesc,
        const VulkanTexture* msaaTexture,
        const VulkanTexture* depthStencilTexture) const {
    for (auto& cachedFB : fCachedFramebuffers) {
        if (cachedFB->compatible(renderPassDesc, msaaTexture, depthStencilTexture)) {
            return cachedFB;
        }
    }
    return nullptr;
}

void VulkanTexture::addCachedFramebuffer(sk_sp<VulkanFramebuffer> fb) {
    SkASSERT(fb);
    fCachedFramebuffers.push_back(std::move(fb));
}

} // namespace skgpu::graphite