/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/vk/VulkanBuffer.h"

#include "include/gpu/vk/VulkanMemoryAllocator.h"
#include "src/gpu/graphite/vk/VulkanGraphiteUtilsPriv.h"
#include "src/gpu/vk/VulkanMemory.h"

namespace skgpu::graphite {

sk_sp<Buffer> VulkanBuffer::Make(const VulkanSharedContext* sharedContext,
                                 size_t size,
                                 BufferType type,
                                 PrioritizeGpuReads prioritizeGpuReads) {
    if (size <= 0) {
        return nullptr;
    }
    VkBuffer buffer;
    skgpu::VulkanAlloc alloc;

    // The only time we don't require mappable buffers is on devices where GPU-only memory has
    // faster GPU reads than memory that is also mappable on the CPU. Protected memory always
    // uses mappable buffers.
    bool requiresMappable = sharedContext->isProtected() == Protected::kYes ||
                            prioritizeGpuReads == PrioritizeGpuReads::kNo ||
                            !sharedContext->vulkanCaps().gpuOnlyBuffersMorePerformant();

    using BufferUsage = skgpu::VulkanMemoryAllocator::BufferUsage;

    // The default usage captures use cases besides transfer buffers. GPU-only buffers are
    // preferred unless mappability is required.
    BufferUsage allocUsage =
            requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;

    // Create the buffer object
    VkBufferCreateInfo bufInfo;
    memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = size;

    // To support SkMesh buffer updates we make vertex and index buffers capable of being
    // transfer dsts.
    switch (type) {
        case BufferType::kVertex:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
        case BufferType::kIndex:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
        case BufferType::kStorage:
            bufInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
            break;
        case BufferType::kIndirect:
            bufInfo.usage =
                    VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
            break;
        case BufferType::kVertexStorage:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
            break;
        case BufferType::kIndexStorage:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
            break;
        case BufferType::kUniform:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
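            // Uniform data is written by the CPU, so keep uniform buffers CPU-mappable even when
            // GPU-only allocations are otherwise preferred.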
            allocUsage = BufferUsage::kCpuWritesGpuReads;
            break;
        case BufferType::kXferCpuToGpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
            allocUsage = BufferUsage::kTransfersFromCpuToGpu;
            break;
        case BufferType::kXferGpuToCpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            allocUsage = BufferUsage::kTransfersFromGpuToCpu;
            break;
    }

    // We may not always get a mappable buffer for non-dynamic access buffers. Thus we set the
    // transfer dst usage bit in case we need to do a copy to write data. It doesn't really hurt
    // to set this extra usage flag, but we could narrow the set of buffers it applies to beyond
    // just the non-dynamic ones.
    if (!requiresMappable) {
        bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }

    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

    VkResult result;
    VULKAN_CALL_RESULT(sharedContext->interface(), result, CreateBuffer(sharedContext->device(),
                                                           &bufInfo,
                                                           nullptr, /*const VkAllocationCallbacks*/
                                                           &buffer));
    if (result != VK_SUCCESS) {
        return nullptr;
    }

    auto allocator = sharedContext->memoryAllocator();
    bool shouldPersistentlyMapCpuToGpu =
        sharedContext->vulkanCaps().shouldPersistentlyMapCpuToGpuBuffers();
    // AllocBufferMemory: at creation time we simply require VK_SUCCESS; on any other result the
    // buffer is destroyed and Make returns nullptr below.
    auto checkResult = [](VkResult result) {
        return result == VK_SUCCESS;
    };
    if (!skgpu::VulkanMemory::AllocBufferMemory(allocator,
                                                buffer,
                                                allocUsage,
                                                shouldPersistentlyMapCpuToGpu,
                                                checkResult,
                                                &alloc)) {
        VULKAN_CALL(sharedContext->interface(), DestroyBuffer(sharedContext->device(),
                buffer,
                /*const VkAllocationCallbacks*=*/nullptr));
        return nullptr;
    }

    // Bind buffer
    VULKAN_CALL_RESULT(sharedContext->interface(), result, BindBufferMemory(sharedContext->device(),
                                                                            buffer,
                                                                            alloc.fMemory,
                                                                            alloc.fOffset));
    if (result != VK_SUCCESS) {
        skgpu::VulkanMemory::FreeBufferMemory(allocator, alloc);
        VULKAN_CALL(sharedContext->interface(), DestroyBuffer(sharedContext->device(),
                buffer,
                /*const VkAllocationCallbacks*=*/nullptr));
        return nullptr;
    }

    // TODO: If this is a uniform buffer, we must set up a descriptor set.
    // const GrVkDescriptorSet* uniformDescSet = nullptr;
    // if (type == BufferType::kUniform) {
    //     uniformDescSet = make_uniform_desc_set(gpu, buffer, size);
    //     if (!uniformDescSet) {
    //         VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
    //         skgpu::VulkanMemory::FreeBufferMemory(allocator, alloc);
    //         return nullptr;
    //     }
    // }

    return sk_sp<Buffer>(new VulkanBuffer(sharedContext,
                                          size,
                                          type,
                                          prioritizeGpuReads,
                                          std::move(buffer),
                                          alloc));
}

VulkanBuffer::VulkanBuffer(const VulkanSharedContext* sharedContext,
                           size_t size,
                           BufferType type,
                           PrioritizeGpuReads prioritizeGpuReads,
                           VkBuffer buffer,
                           const skgpu::VulkanAlloc& alloc)
        : Buffer(sharedContext, size)
        , fBuffer(std::move(buffer))
        , fAlloc(alloc)
        // We assume a buffer is used for CPU reads only in the case of GPU->CPU transfer buffers.
        , fBufferUsedForCPURead(type == BufferType::kXferGpuToCpu) {}

void VulkanBuffer::freeGpuData() {
    if (fMapPtr) {
        this->internalUnmap(0, this->size());
        fMapPtr = nullptr;
    }

    // TODO: If this is a uniform buffer, we must clean up the descriptor set.
    // if (fUniformDescriptorSet) {
    //     fUniformDescriptorSet->recycle();
    //     fUniformDescriptorSet = nullptr;
    // }

    const VulkanSharedContext* sharedContext =
            static_cast<const VulkanSharedContext*>(this->sharedContext());
    SkASSERT(fBuffer);
    SkASSERT(fAlloc.fMemory && fAlloc.fBackendMemory);
    VULKAN_CALL(sharedContext->interface(),
                DestroyBuffer(sharedContext->device(), fBuffer, nullptr));
    fBuffer = VK_NULL_HANDLE;

    skgpu::VulkanMemory::FreeBufferMemory(sharedContext->memoryAllocator(), fAlloc);
    fAlloc.fMemory = VK_NULL_HANDLE;
    fAlloc.fBackendMemory = 0;
}

void VulkanBuffer::internalMap(size_t readOffset, size_t readSize) {
    SkASSERT(!fMapPtr);
    if (this->isMappable()) {
        // Not every buffer will use command buffer usage refs. Instead, the command buffer just
        // holds normal refs. Systems higher up in Graphite should be making sure not to reuse a
        // buffer that currently has a ref held by something else. However, we do need to make sure
        // there isn't a buffer with just a command buffer usage that is trying to be mapped.
#ifdef SK_DEBUG
        SkASSERT(!this->debugHasCommandBufferRef());
#endif
        SkASSERT(fAlloc.fSize > 0);
        SkASSERT(fAlloc.fSize >= readOffset + readSize);

        const VulkanSharedContext* sharedContext = this->vulkanSharedContext();

        auto allocator = sharedContext->memoryAllocator();
        auto checkResult = [sharedContext](VkResult result) {
            return sharedContext->checkVkResult(result);
        };
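        // Unlike buffer creation, which requires a bare VK_SUCCESS, result checking here is
        // deferred to the shared context's checkVkResult.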
        fMapPtr = skgpu::VulkanMemory::MapAlloc(allocator, fAlloc, checkResult);
        if (fMapPtr && readSize != 0) {
            // "Invalidate" here means make device writes visible to the host. That is, it makes
            // sure any GPU writes are finished in the range we might read from.
            skgpu::VulkanMemory::InvalidateMappedAlloc(allocator,
                                                       fAlloc,
                                                       readOffset,
                                                       readSize,
                                                       nullptr);
        }
    }
}

void VulkanBuffer::internalUnmap(size_t flushOffset, size_t flushSize) {
    SkASSERT(fMapPtr && this->isMappable());

    SkASSERT(fAlloc.fSize > 0);
    SkASSERT(fAlloc.fSize >= flushOffset + flushSize);

    auto allocator = this->vulkanSharedContext()->memoryAllocator();
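    // "Flush" is the mirror of the invalidate in internalMap: it makes host writes in the given
    // range visible to the device.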
    skgpu::VulkanMemory::FlushMappedAlloc(allocator, fAlloc, flushOffset, flushSize, nullptr);
    skgpu::VulkanMemory::UnmapAlloc(allocator, fAlloc);
}

void VulkanBuffer::onMap() {
    SkASSERT(fBuffer);
    SkASSERT(!this->isMapped());

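    // Only GPU->CPU transfer buffers are read back on the CPU, so only they need their full range
    // invalidated on map; every other buffer maps with an empty read range.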
    this->internalMap(0, fBufferUsedForCPURead ? this->size() : 0);
}

void VulkanBuffer::onUnmap() {
    SkASSERT(fBuffer);
    SkASSERT(this->isMapped());
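    // The inverse of onMap: buffers the CPU writes flush their full range so the GPU sees the new
    // data, while GPU->CPU read-back buffers have nothing to flush.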
    this->internalUnmap(0, fBufferUsedForCPURead ? 0 : this->size());
}

} // namespace skgpu::graphite