/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkBuffer.h"

#include "include/gpu/GrDirectContext.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/vk/GrVkDescriptorSet.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkMemoryReclaimer.h"
#include "src/gpu/vk/GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

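// The constructor takes ownership of an already-created VkBuffer and its bound allocation;
// GrVkBuffer::Make and GrVkBuffer::MakeFromOHNativeBuffer below are the factory entry points
// that create those resources.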
GrVkBuffer::GrVkBuffer(GrVkGpu* gpu,
                       size_t sizeInBytes,
                       GrGpuBufferType bufferType,
                       GrAccessPattern accessPattern,
                       VkBuffer buffer,
                       const GrVkAlloc& alloc,
                       const GrVkDescriptorSet* uniformDescriptorSet)
        : GrGpuBuffer(gpu, sizeInBytes, bufferType, accessPattern)
        , fBuffer(buffer)
        , fAlloc(alloc)
        , fUniformDescriptorSet(uniformDescriptorSet) {
    // We always require dynamic buffers to be mappable
    SkASSERT(accessPattern != kDynamic_GrAccessPattern || this->isVkMappable());
    SkASSERT(bufferType != GrGpuBufferType::kUniform || uniformDescriptorSet);
    this->setRealAlloc(true); // OH ISSUE: set real alloc flag
    this->registerWithCache(SkBudgeted::kYes);
}

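// Grabs a uniform descriptor set from the resource provider and points its single
// uniform-buffer binding at the entire buffer.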
static const GrVkDescriptorSet* make_uniform_desc_set(GrVkGpu* gpu, VkBuffer buffer, size_t size) {
    const GrVkDescriptorSet* descriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
    if (!descriptorSet) {
        return nullptr;
    }

    VkDescriptorBufferInfo bufferInfo;
    memset(&bufferInfo, 0, sizeof(VkDescriptorBufferInfo));
    bufferInfo.buffer = buffer;
    bufferInfo.offset = 0;
    bufferInfo.range = size;

    VkWriteDescriptorSet descriptorWrite;
    memset(&descriptorWrite, 0, sizeof(VkWriteDescriptorSet));
    descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptorWrite.pNext = nullptr;
    descriptorWrite.dstSet = *descriptorSet->descriptorSet();
    descriptorWrite.dstBinding = GrVkUniformHandler::kUniformBinding;
    descriptorWrite.dstArrayElement = 0;
    descriptorWrite.descriptorCount = 1;
    descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptorWrite.pImageInfo = nullptr;
    descriptorWrite.pBufferInfo = &bufferInfo;
    descriptorWrite.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(),
               UpdateDescriptorSets(gpu->device(), 1, &descriptorWrite, 0, nullptr));
    return descriptorSet;
}

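// Creates the VkBuffer with usage flags derived from the buffer type, allocates and binds its
// backing memory, and, for uniform buffers, attaches a descriptor set. Returns nullptr if any
// step fails.
//
// Minimal usage sketch (illustrative only; callers normally go through GrResourceProvider
// rather than calling this directly, and the names below are placeholders):
//
//   sk_sp<GrVkBuffer> vbo = GrVkBuffer::Make(gpu, vertexDataSize,
//                                            GrGpuBufferType::kVertex,
//                                            kStatic_GrAccessPattern);
//   if (vbo) {
//       vbo->updateData(vertexData, vertexDataSize);
//   }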
sk_sp<GrVkBuffer> GrVkBuffer::Make(GrVkGpu* gpu,
                                   size_t size,
                                   GrGpuBufferType bufferType,
                                   GrAccessPattern accessPattern) {
    VkBuffer buffer;
    GrVkAlloc alloc;

    // The only time we don't require mappable buffers is when we have a static access pattern and
    // we're on a device where gpu only memory has faster reads on the gpu than memory that is also
    // mappable on the cpu. Protected memory always uses mappable buffers.
    bool requiresMappable = gpu->protectedContext() ||
                            accessPattern == kDynamic_GrAccessPattern ||
                            accessPattern == kStream_GrAccessPattern ||
                            !gpu->vkCaps().gpuOnlyBuffersMorePerformant();

    using BufferUsage = GrVkMemoryAllocator::BufferUsage;
    BufferUsage allocUsage;

    // create the buffer object
    VkBufferCreateInfo bufInfo;
    memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = size;
    switch (bufferType) {
        case GrGpuBufferType::kVertex:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
            allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
            break;
        case GrGpuBufferType::kIndex:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
            allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
            break;
        case GrGpuBufferType::kDrawIndirect:
            bufInfo.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
            allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
            break;
        case GrGpuBufferType::kUniform:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
            allocUsage = BufferUsage::kCpuWritesGpuReads;
            break;
        case GrGpuBufferType::kXferCpuToGpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
            allocUsage = BufferUsage::kTransfersFromCpuToGpu;
            break;
        case GrGpuBufferType::kXferGpuToCpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            allocUsage = BufferUsage::kTransfersFromGpuToCpu;
            break;
    }
    // We may not always get a mappable buffer for non-dynamic access buffers. Thus we set the
    // transfer dst usage bit in case we need to do a copy to write data.
    // TODO: It doesn't really hurt setting this extra usage flag, but maybe we can narrow the set
    // of buffers that get it beyond just the non-dynamic ones.
    if (!requiresMappable) {
        bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }

    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

    VkResult err;
    err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
    if (err) {
        return nullptr;
    }

#ifdef SKIA_DFX_FOR_OHOS
    if (!GrVkMemory::AllocAndBindBufferMemory(gpu, buffer, allocUsage, &alloc, size)) {
#else
    if (!GrVkMemory::AllocAndBindBufferMemory(gpu, buffer, allocUsage, &alloc)) {
#endif
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        return nullptr;
    }

    // If this is a uniform buffer we must set up a descriptor set
    const GrVkDescriptorSet* uniformDescSet = nullptr;
    if (bufferType == GrGpuBufferType::kUniform) {
        uniformDescSet = make_uniform_desc_set(gpu, buffer, size);
        if (!uniformDescSet) {
            DestroyAndFreeBufferMemory(gpu, alloc, buffer);
            return nullptr;
        }
    }

    return sk_sp<GrVkBuffer>(new GrVkBuffer(gpu, size, bufferType, accessPattern, buffer, alloc,
                                            uniformDescSet));
}

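// OH ISSUE: factory that wraps an existing OH_NativeBuffer. The buffer's memory is imported
// and bound rather than freshly allocated, and no uniform descriptor set is created here.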
sk_sp<GrVkBuffer> GrVkBuffer::MakeFromOHNativeBuffer(GrVkGpu* gpu,
                                                     OH_NativeBuffer* nativeBuffer,
                                                     size_t bufferSize,
                                                     GrGpuBufferType bufferType,
                                                     GrAccessPattern accessPattern) {
    SkASSERT(gpu);
    SkASSERT(nativeBuffer);

    VkBuffer buffer;
    GrVkAlloc alloc;

    // create the buffer object
    VkBufferCreateInfo bufInfo{};
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = bufferSize;
    switch (bufferType) {
        case GrGpuBufferType::kVertex:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
            break;
        case GrGpuBufferType::kIndex:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
            break;
        case GrGpuBufferType::kDrawIndirect:
            bufInfo.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
            break;
        case GrGpuBufferType::kUniform:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
            break;
        case GrGpuBufferType::kXferCpuToGpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
            break;
        case GrGpuBufferType::kXferGpuToCpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
    }

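    // Mirror Make(): if the imported buffer may not be host-mappable, also request
    // transfer-dst usage so its contents can still be written through a copy.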
    bool requiresMappable = gpu->protectedContext() ||
                            accessPattern == kDynamic_GrAccessPattern ||
                            accessPattern == kStream_GrAccessPattern ||
                            !gpu->vkCaps().gpuOnlyBuffersMorePerformant();
    if (!requiresMappable) {
        bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }

    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

    VkResult err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
    if (err) {
        return nullptr;
    }

    if (!GrVkMemory::ImportAndBindBufferMemory(gpu, nativeBuffer, buffer, &alloc)) {
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        return nullptr;
    }

    return sk_sp<GrVkBuffer>(new GrVkBuffer(gpu, bufferSize, bufferType, accessPattern, buffer,
                                            alloc, nullptr));
}

// OH ISSUE: Integrate Destroy and Free
void GrVkBuffer::DestroyAndFreeBufferMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc,
                                            const VkBuffer& buffer) {
    VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
    GrVkMemory::FreeBufferMemory(gpu, alloc);
}

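// Maps the backing allocation into CPU-visible address space. For GPU-to-CPU transfer buffers
// the mapped range is invalidated so the CPU observes the GPU's latest writes.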
void GrVkBuffer::vkMap(size_t size) {
    SkASSERT(!fMapPtr);
    if (this->isVkMappable()) {
        // Not every buffer will use command buffer usage refs and instead the command buffer just
        // holds normal refs. Systems higher up in Ganesh should be making sure not to reuse a
        // buffer that currently has a ref held by something else. However, we do need to make sure
        // there isn't a buffer with just a command buffer usage that is trying to be mapped.
        SkASSERT(this->internalHasNoCommandBufferUsages());
        SkASSERT(fAlloc.fSize > 0);
        SkASSERT(fAlloc.fSize >= size);
        fMapPtr = GrVkMemory::MapAlloc(this->getVkGpu(), fAlloc);
        if (fMapPtr && this->intendedType() == GrGpuBufferType::kXferGpuToCpu) {
            GrVkMemory::InvalidateMappedAlloc(this->getVkGpu(), fAlloc, 0, size);
        }
    }
}

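// Flushes the written range back to the device and unmaps the allocation.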
void GrVkBuffer::vkUnmap(size_t size) {
    SkASSERT(fMapPtr && this->isVkMappable());

    SkASSERT(fAlloc.fSize > 0);
    SkASSERT(fAlloc.fSize >= size);

    GrVkGpu* gpu = this->getVkGpu();
    GrVkMemory::FlushMappedAlloc(gpu, fAlloc, 0, size);
    GrVkMemory::UnmapAlloc(gpu, fAlloc);
}

static VkAccessFlags buffer_type_to_access_flags(GrGpuBufferType type) {
    switch (type) {
        case GrGpuBufferType::kIndex:
            return VK_ACCESS_INDEX_READ_BIT;
        case GrGpuBufferType::kVertex:
            return VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
        default:
            // This helper is only called for static buffers so we should only ever see index or
            // vertex buffer types.
            SkUNREACHABLE;
    }
}

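// Uploads CPU data into a buffer that is not host-mappable: small 4-byte-aligned updates go
// through vkCmdUpdateBuffer, larger ones through a staging transfer buffer, and a barrier is
// inserted afterwards so subsequent index/vertex reads see the new contents.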
void GrVkBuffer::copyCpuDataToGpuBuffer(const void* src, size_t size) {
    SkASSERT(src);

    GrVkGpu* gpu = this->getVkGpu();

    // We should never call this method in protected contexts.
    SkASSERT(!gpu->protectedContext());

    // The vulkan api restricts the use of vkCmdUpdateBuffer to updates that are less than or equal
    // to 65536 bytes and a size that is 4-byte aligned.
    if ((size <= 65536) && (0 == (size & 0x3)) && !gpu->vkCaps().avoidUpdateBuffers()) {
        gpu->updateBuffer(sk_ref_sp(this), src, /*offset=*/0, size);
    } else {
        GrResourceProvider* resourceProvider = gpu->getContext()->priv().resourceProvider();
        sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
                size, GrGpuBufferType::kXferCpuToGpu, kDynamic_GrAccessPattern, src);
        if (!transferBuffer) {
            return;
        }

        gpu->copyBuffer(std::move(transferBuffer), sk_ref_sp(this), /*srcOffset=*/0,
                        /*dstOffset=*/0, size);
    }

    this->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
                           buffer_type_to_access_flags(this->intendedType()),
                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           /*byRegion=*/false);
}

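// Records a VkBufferMemoryBarrier covering the whole buffer on the GPU's command buffer.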
void GrVkBuffer::addMemoryBarrier(VkAccessFlags srcAccessMask,
                                  VkAccessFlags dstAccessMask,
                                  VkPipelineStageFlags srcStageMask,
                                  VkPipelineStageFlags dstStageMask,
                                  bool byRegion) const {
    VkBufferMemoryBarrier bufferMemoryBarrier = {
            VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // sType
            nullptr,                                  // pNext
            srcAccessMask,                            // srcAccessMask
            dstAccessMask,                            // dstAccessMask
            VK_QUEUE_FAMILY_IGNORED,                  // srcQueueFamilyIndex
            VK_QUEUE_FAMILY_IGNORED,                  // dstQueueFamilyIndex
            fBuffer,                                  // buffer
            0,                                        // offset
            this->size(),                             // size
    };

    // TODO: restrict to area of buffer we're interested in
    this->getVkGpu()->addBufferMemoryBarrier(srcStageMask, dstStageMask, byRegion,
                                             &bufferMemoryBarrier);
}

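// Releases the Vulkan resources owned by this buffer: unmaps it if mapped, recycles the
// uniform descriptor set, and either queues the buffer and memory on the async reclaimer
// (OH ISSUE) or destroys and frees them immediately.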
void GrVkBuffer::vkRelease() {
    if (this->wasDestroyed()) {
        return;
    }

    if (fMapPtr) {
        this->vkUnmap(this->size());
        fMapPtr = nullptr;
    }

    if (fUniformDescriptorSet) {
        fUniformDescriptorSet->recycle();
        fUniformDescriptorSet = nullptr;
    }

    SkASSERT(fBuffer);
    SkASSERT(fAlloc.fMemory && fAlloc.fBackendMemory);

    // OH ISSUE: async memory reclaimer
    auto reclaimer = this->getVkGpu()->memoryReclaimer();
    if (!reclaimer || !reclaimer->addMemoryToWaitQueue(this->getVkGpu(), fAlloc, fBuffer)) {
        DestroyAndFreeBufferMemory(this->getVkGpu(), fAlloc, fBuffer);
    }

    fBuffer = VK_NULL_HANDLE;
    fAlloc.fMemory = VK_NULL_HANDLE;
    fAlloc.fBackendMemory = 0;
}

void GrVkBuffer::onRelease() {
    this->vkRelease();
    this->GrGpuBuffer::onRelease();
}

void GrVkBuffer::onAbandon() {
    this->vkRelease();
    this->GrGpuBuffer::onAbandon();
}

void GrVkBuffer::onMap() {
    if (!this->wasDestroyed()) {
        this->vkMap(this->size());
    }
}

void GrVkBuffer::onUnmap() {
    if (!this->wasDestroyed()) {
        this->vkUnmap(this->size());
    }
}

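// Writes srcSizeInBytes of data into the buffer: map/memcpy/unmap for host-mappable buffers,
// otherwise a GPU-side upload via copyCpuDataToGpuBuffer.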
bool GrVkBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
    if (this->wasDestroyed()) {
        return false;
    }

    if (srcSizeInBytes > this->size()) {
        return false;
    }

    if (this->isVkMappable()) {
        this->vkMap(srcSizeInBytes);
        if (!fMapPtr) {
            return false;
        }
        memcpy(fMapPtr, src, srcSizeInBytes);
        this->vkUnmap(srcSizeInBytes);
        fMapPtr = nullptr;
    } else {
        this->copyCpuDataToGpuBuffer(src, srcSizeInBytes);
    }
    return true;
}

GrVkGpu* GrVkBuffer::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}

const VkDescriptorSet* GrVkBuffer::uniformDescriptorSet() const {
    SkASSERT(fUniformDescriptorSet);
    return fUniformDescriptorSet->descriptorSet();
}