/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkBuffer.h"

#include "include/gpu/GrDirectContext.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/vk/GrVkDescriptorSet.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkMemoryReclaimer.h"
#include "src/gpu/vk/GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

GrVkBuffer::GrVkBuffer(GrVkGpu* gpu,
                       size_t sizeInBytes,
                       GrGpuBufferType bufferType,
                       GrAccessPattern accessPattern,
                       VkBuffer buffer,
                       const GrVkAlloc& alloc,
                       const GrVkDescriptorSet* uniformDescriptorSet)
        : GrGpuBuffer(gpu, sizeInBytes, bufferType, accessPattern)
        , fBuffer(buffer)
        , fAlloc(alloc)
        , fUniformDescriptorSet(uniformDescriptorSet) {
    // We always require dynamic buffers to be mappable
    SkASSERT(accessPattern != kDynamic_GrAccessPattern || this->isVkMappable());
    SkASSERT(bufferType != GrGpuBufferType::kUniform || uniformDescriptorSet);
    this->setRealAlloc(true); // OH ISSUE: set real alloc flag
    this->setRealAllocSize(sizeInBytes); // OH ISSUE: set real alloc size
    this->registerWithCache(SkBudgeted::kYes);
}

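// Allocates a uniform descriptor set from the gpu's resource provider and points its single
// uniform-buffer binding at the full range of the given buffer.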
static const GrVkDescriptorSet* make_uniform_desc_set(GrVkGpu* gpu, VkBuffer buffer, size_t size) {
    const GrVkDescriptorSet* descriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
    if (!descriptorSet) {
        return nullptr;
    }

    VkDescriptorBufferInfo bufferInfo;
    memset(&bufferInfo, 0, sizeof(VkDescriptorBufferInfo));
    bufferInfo.buffer = buffer;
    bufferInfo.offset = 0;
    bufferInfo.range = size;

    VkWriteDescriptorSet descriptorWrite;
    memset(&descriptorWrite, 0, sizeof(VkWriteDescriptorSet));
    descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptorWrite.pNext = nullptr;
    descriptorWrite.dstSet = *descriptorSet->descriptorSet();
    descriptorWrite.dstBinding = GrVkUniformHandler::kUniformBinding;
    descriptorWrite.dstArrayElement = 0;
    descriptorWrite.descriptorCount = 1;
    descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptorWrite.pImageInfo = nullptr;
    descriptorWrite.pBufferInfo = &bufferInfo;
    descriptorWrite.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(),
               UpdateDescriptorSets(gpu->device(), 1, &descriptorWrite, 0, nullptr));
    return descriptorSet;
}

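// Creates a VkBuffer and backing device memory for a buffer of the given size. The usage flags
// and memory type are derived from the buffer type and access pattern.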
sk_sp<GrVkBuffer> GrVkBuffer::Make(GrVkGpu* gpu,
                                   size_t size,
                                   GrGpuBufferType bufferType,
                                   GrAccessPattern accessPattern) {
    VkBuffer buffer;
    GrVkAlloc alloc;

    // The only time we don't require mappable buffers is when we have a static access pattern and
    // we're on a device where GPU-only memory has faster reads on the GPU than memory that is also
    // mappable on the CPU. Protected memory always uses mappable buffers.
    bool requiresMappable = gpu->protectedContext() ||
                            accessPattern == kDynamic_GrAccessPattern ||
                            accessPattern == kStream_GrAccessPattern ||
                            !gpu->vkCaps().gpuOnlyBuffersMorePerformant();

    using BufferUsage = GrVkMemoryAllocator::BufferUsage;
    BufferUsage allocUsage;

    // create the buffer object
    VkBufferCreateInfo bufInfo;
    memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = size;
    switch (bufferType) {
        case GrGpuBufferType::kVertex:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
            allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
            break;
        case GrGpuBufferType::kIndex:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
            allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
            break;
        case GrGpuBufferType::kDrawIndirect:
            bufInfo.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
            allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
            break;
        case GrGpuBufferType::kUniform:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
            allocUsage = BufferUsage::kCpuWritesGpuReads;
            break;
        case GrGpuBufferType::kXferCpuToGpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
            allocUsage = BufferUsage::kTransfersFromCpuToGpu;
            break;
        case GrGpuBufferType::kXferGpuToCpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            allocUsage = BufferUsage::kTransfersFromGpuToCpu;
            break;
    }
    // We may not always get a mappable buffer for non-dynamic access buffers. Thus we set the
    // transfer dst usage bit in case we need to do a copy to write data.
    // TODO: It doesn't really hurt setting this extra usage flag, but maybe we can narrow the set
    // of buffers we apply it to beyond just the non-dynamic ones.
    if (!requiresMappable) {
        bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }

    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

    VkResult err;
    err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
    if (err) {
        return nullptr;
    }

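    // With SKIA_DFX_FOR_OHOS defined, the requested size is also passed to the allocator,
    // presumably so OHOS memory accounting can track the real allocation size (see
    // setRealAllocSize in the constructor).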
#ifdef SKIA_DFX_FOR_OHOS
    if (!GrVkMemory::AllocAndBindBufferMemory(gpu, buffer, allocUsage, &alloc, size)) {
#else
    if (!GrVkMemory::AllocAndBindBufferMemory(gpu, buffer, allocUsage, &alloc)) {
#endif
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        return nullptr;
    }

    // If this is a uniform buffer we must set up a descriptor set
    const GrVkDescriptorSet* uniformDescSet = nullptr;
    if (bufferType == GrGpuBufferType::kUniform) {
        uniformDescSet = make_uniform_desc_set(gpu, buffer, size);
        if (!uniformDescSet) {
            DestroyAndFreeBufferMemory(gpu, alloc, buffer);
            return nullptr;
        }
    }

    return sk_sp<GrVkBuffer>(new GrVkBuffer(gpu, size, bufferType, accessPattern, buffer, alloc,
                                            uniformDescSet));
}

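// OH ISSUE: wraps an externally allocated OH_NativeBuffer in a GrVkBuffer. Rather than
// allocating new device memory, the native buffer's memory is imported and bound to the newly
// created VkBuffer.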
sk_sp<GrVkBuffer> GrVkBuffer::MakeFromOHNativeBuffer(GrVkGpu* gpu,
                                                     OH_NativeBuffer* nativeBuffer,
                                                     size_t bufferSize,
                                                     GrGpuBufferType bufferType,
                                                     GrAccessPattern accessPattern) {
    SkASSERT(gpu);
    SkASSERT(nativeBuffer);

    VkBuffer buffer;
    GrVkAlloc alloc;

    // create the buffer object
    VkBufferCreateInfo bufInfo{};
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = bufferSize;
    switch (bufferType) {
        case GrGpuBufferType::kVertex:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
            break;
        case GrGpuBufferType::kIndex:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
            break;
        case GrGpuBufferType::kDrawIndirect:
            bufInfo.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
            break;
        case GrGpuBufferType::kUniform:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
            break;
        case GrGpuBufferType::kXferCpuToGpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
            break;
        case GrGpuBufferType::kXferGpuToCpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
    }

    // As in Make(), non-mappable buffers may need to be written via a copy, so add the transfer
    // dst usage bit.
    bool requiresMappable = gpu->protectedContext() ||
                            accessPattern == kDynamic_GrAccessPattern ||
                            accessPattern == kStream_GrAccessPattern ||
                            !gpu->vkCaps().gpuOnlyBuffersMorePerformant();
    if (!requiresMappable) {
        bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }

    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

    VkResult err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
    if (err) {
        return nullptr;
    }

    if (!GrVkMemory::ImportAndBindBufferMemory(gpu, nativeBuffer, buffer, &alloc)) {
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        return nullptr;
    }

    return sk_sp<GrVkBuffer>(new GrVkBuffer(gpu, bufferSize, bufferType, accessPattern, buffer,
                                            alloc, nullptr));
}

// OH ISSUE: destroy the VkBuffer and free its backing memory in one step.
void GrVkBuffer::DestroyAndFreeBufferMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc,
                                            const VkBuffer& buffer) {
    VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
    GrVkMemory::FreeBufferMemory(gpu, alloc);
}

void GrVkBuffer::vkMap(size_t size) {
    SkASSERT(!fMapPtr);
    if (this->isVkMappable()) {
        // Not every buffer will use command buffer usage refs and instead the command buffer just
        // holds normal refs. Systems higher up in Ganesh should be making sure not to reuse a
        // buffer that currently has a ref held by something else. However, we do need to make sure
        // there isn't a buffer with just a command buffer usage that is trying to be mapped.
        SkASSERT(this->internalHasNoCommandBufferUsages());
        SkASSERT(fAlloc.fSize > 0);
        SkASSERT(fAlloc.fSize >= size);
        fMapPtr = GrVkMemory::MapAlloc(this->getVkGpu(), fAlloc);
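        // For GPU-to-CPU transfer buffers, invalidate the mapped range so CPU reads observe the
        // GPU's writes even when the memory type is not host-coherent.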
        if (fMapPtr && this->intendedType() == GrGpuBufferType::kXferGpuToCpu) {
            GrVkMemory::InvalidateMappedAlloc(this->getVkGpu(), fAlloc, 0, size);
        }
    }
}

void GrVkBuffer::vkUnmap(size_t size) {
    SkASSERT(fMapPtr && this->isVkMappable());

    SkASSERT(fAlloc.fSize > 0);
    SkASSERT(fAlloc.fSize >= size);

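    // Flush the written range before unmapping so the GPU sees the CPU's writes even when the
    // memory type is not host-coherent.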
    GrVkGpu* gpu = this->getVkGpu();
    GrVkMemory::FlushMappedAlloc(gpu, fAlloc, 0, size);
    GrVkMemory::UnmapAlloc(gpu, fAlloc);
}

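// Returns the access mask for the first GPU read of a buffer after a transfer write into it.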
static VkAccessFlags buffer_type_to_access_flags(GrGpuBufferType type) {
    switch (type) {
        case GrGpuBufferType::kIndex:
            return VK_ACCESS_INDEX_READ_BIT;
        case GrGpuBufferType::kVertex:
            return VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
        default:
            // This helper is only called for static buffers, so we should only ever see index or
            // vertex buffer types.
            SkUNREACHABLE;
    }
}

void GrVkBuffer::copyCpuDataToGpuBuffer(const void* src, size_t size) {
    SkASSERT(src);

    GrVkGpu* gpu = this->getVkGpu();

    // We should never call this method in protected contexts.
    SkASSERT(!gpu->protectedContext());

    // The Vulkan API restricts vkCmdUpdateBuffer to updates that are at most 65536 bytes and
    // whose size is 4-byte aligned.
    if ((size <= 65536) && (0 == (size & 0x3)) && !gpu->vkCaps().avoidUpdateBuffers()) {
        gpu->updateBuffer(sk_ref_sp(this), src, /*offset=*/0, size);
    } else {
        GrResourceProvider* resourceProvider = gpu->getContext()->priv().resourceProvider();
        sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
                size, GrGpuBufferType::kXferCpuToGpu, kDynamic_GrAccessPattern, src);
        if (!transferBuffer) {
            return;
        }

        gpu->copyBuffer(std::move(transferBuffer), sk_ref_sp(this), /*srcOffset=*/0,
                        /*dstOffset=*/0, size);
    }

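    // Make the transfer write visible to the subsequent vertex/index read at vertex input.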
    this->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
                           buffer_type_to_access_flags(this->intendedType()),
                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           /*byRegion=*/false);
}

void GrVkBuffer::addMemoryBarrier(VkAccessFlags srcAccessMask,
                                  VkAccessFlags dstAccessMask,
                                  VkPipelineStageFlags srcStageMask,
                                  VkPipelineStageFlags dstStageMask,
                                  bool byRegion) const {
    VkBufferMemoryBarrier bufferMemoryBarrier = {
            VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // sType
            nullptr,                                  // pNext
            srcAccessMask,                            // srcAccessMask
            dstAccessMask,                            // dstAccessMask
            VK_QUEUE_FAMILY_IGNORED,                  // srcQueueFamilyIndex
            VK_QUEUE_FAMILY_IGNORED,                  // dstQueueFamilyIndex
            fBuffer,                                  // buffer
            0,                                        // offset
            this->size(),                             // size
    };

    // TODO: restrict to the area of the buffer we're interested in
    this->getVkGpu()->addBufferMemoryBarrier(srcStageMask, dstStageMask, byRegion,
                                             &bufferMemoryBarrier);
}

void GrVkBuffer::vkRelease() {
    if (this->wasDestroyed()) {
        return;
    }

    if (fMapPtr) {
        this->vkUnmap(this->size());
        fMapPtr = nullptr;
    }

    if (fUniformDescriptorSet) {
        fUniformDescriptorSet->recycle();
        fUniformDescriptorSet = nullptr;
    }

    SkASSERT(fBuffer);
    SkASSERT(fAlloc.fMemory && fAlloc.fBackendMemory);

    // OH ISSUE: async memory reclaimer. Hand the buffer and its memory to the reclaimer's wait
    // queue; if the reclaimer is unavailable or rejects the request, destroy and free immediately.
    auto reclaimer = this->getVkGpu()->memoryReclaimer();
    if (!reclaimer || !reclaimer->addMemoryToWaitQueue(this->getVkGpu(), fAlloc, fBuffer)) {
        DestroyAndFreeBufferMemory(this->getVkGpu(), fAlloc, fBuffer);
    }

    fBuffer = VK_NULL_HANDLE;
    fAlloc.fMemory = VK_NULL_HANDLE;
    fAlloc.fBackendMemory = 0;
}

void GrVkBuffer::onRelease() {
    this->vkRelease();
    this->GrGpuBuffer::onRelease();
}

void GrVkBuffer::onAbandon() {
    this->vkRelease();
    this->GrGpuBuffer::onAbandon();
}

void GrVkBuffer::onMap() {
    if (!this->wasDestroyed()) {
        this->vkMap(this->size());
    }
}

void GrVkBuffer::onUnmap() {
    if (!this->wasDestroyed()) {
        this->vkUnmap(this->size());
    }
}

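// Prefers a direct map-and-copy for host-visible buffers; otherwise the data is routed through
// vkCmdUpdateBuffer or a staging transfer buffer (see copyCpuDataToGpuBuffer).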
bool GrVkBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
    if (this->wasDestroyed()) {
        return false;
    }

    if (srcSizeInBytes > this->size()) {
        return false;
    }

    if (this->isVkMappable()) {
        this->vkMap(srcSizeInBytes);
        if (!fMapPtr) {
            return false;
        }
        memcpy(fMapPtr, src, srcSizeInBytes);
        this->vkUnmap(srcSizeInBytes);
        fMapPtr = nullptr;
    } else {
        this->copyCpuDataToGpuBuffer(src, srcSizeInBytes);
    }
    return true;
}

GrVkGpu* GrVkBuffer::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}

const VkDescriptorSet* GrVkBuffer::uniformDescriptorSet() const {
    SkASSERT(fUniformDescriptorSet);
    return fUniformDescriptorSet->descriptorSet();
}