/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkBuffer.h"
#include "GrVkGpu.h"
#include "GrVkMemory.h"
#include "GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif

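// Allocates a VkBuffer with usage flags derived from the buffer type, binds device memory to it,
// and wraps both in a ref-counted Resource. Returns nullptr if buffer creation or the memory
// allocation/bind fails.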
const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) {
    VkBuffer       buffer;
    GrVkAlloc      alloc;

    // create the buffer object
    VkBufferCreateInfo bufInfo;
    memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = desc.fSizeInBytes;
    switch (desc.fType) {
        case kVertex_Type:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
            break;
        case kIndex_Type:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
            break;
        case kUniform_Type:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
            break;
        case kCopyRead_Type:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
            break;
        case kCopyWrite_Type:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
        case kTexel_Type:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
            break;
    }
    if (!desc.fDynamic) {
        // static buffers are updated via transfer commands, so they must also be transfer dsts
        bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }

    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

    VkResult err;
    err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
    if (err) {
        return nullptr;
    }

    if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
                                              buffer,
                                              desc.fType,
                                              desc.fDynamic,
                                              &alloc)) {
        // don't leak the VkBuffer if we fail to back it with memory
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        return nullptr;
    }

    const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc, desc.fType);
    if (!resource) {
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        GrVkMemory::FreeBufferMemory(gpu, desc.fType, alloc);
        return nullptr;
    }

    return resource;
}

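// Records a VkBufferMemoryBarrier covering the entire buffer so that accesses under
// srcAccessMask/srcStageMask are made available to accesses under dstAccessMask/dstStageMask.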
void GrVkBuffer::addMemoryBarrier(const GrVkGpu* gpu,
                                  VkAccessFlags srcAccessMask,
                                  VkAccessFlags dstAccessMask,
                                  VkPipelineStageFlags srcStageMask,
                                  VkPipelineStageFlags dstStageMask,
                                  bool byRegion) const {
    VkBufferMemoryBarrier bufferMemoryBarrier = {
        VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
        NULL,                                    // pNext
        srcAccessMask,                           // srcAccessMask
        dstAccessMask,                           // dstAccessMask
        VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
        VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
        this->buffer(),                          // buffer
        0,                                       // offset
        fDesc.fSizeInBytes,                      // size
    };

    // TODO: restrict to area of buffer we're interested in
    gpu->addBufferMemoryBarrier(srcStageMask, dstStageMask, byRegion, &bufferMemoryBarrier);
}

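// Called when the last reference to the Resource goes away: destroys the VkBuffer and returns
// its memory allocation to GrVkMemory.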
void GrVkBuffer::Resource::freeGPUData(const GrVkGpu* gpu) const {
    SkASSERT(fBuffer);
    SkASSERT(fAlloc.fMemory);
    VK_CALL(gpu, DestroyBuffer(gpu->device(), fBuffer, nullptr));
    GrVkMemory::FreeBufferMemory(gpu, fType, fAlloc);
}

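// Releases this buffer's reference to its Resource via recycle() and frees the CPU-side staging
// copy that non-dynamic buffers keep in fMapPtr.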
void GrVkBuffer::vkRelease(const GrVkGpu* gpu) {
    VALIDATE();
    fResource->recycle(const_cast<GrVkGpu*>(gpu));
    fResource = nullptr;
    if (!fDesc.fDynamic) {
        delete[] (unsigned char*)fMapPtr;
    }
    fMapPtr = nullptr;
    VALIDATE();
}

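// Like vkRelease, but for an abandoned context: the Resource is unreffed without freeing the
// (already lost) GPU objects.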
void GrVkBuffer::vkAbandon() {
    fResource->unrefAndAbandon();
    fResource = nullptr;
    if (!fDesc.fDynamic) {
        delete[] (unsigned char*)fMapPtr;
    }
    fMapPtr = nullptr;
    VALIDATE();
}

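// Maps a buffer type to the access flags its consumer uses when reading it on the GPU.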
VkAccessFlags buffer_type_to_access_flags(GrVkBuffer::Type type) {
    switch (type) {
        case GrVkBuffer::kIndex_Type:
            return VK_ACCESS_INDEX_READ_BIT;
        case GrVkBuffer::kVertex_Type:
            return VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
        default:
            // This helper is only called for static buffers, so we should only ever see index or
            // vertex buffer types.
            SkASSERT(false);
            return 0;
    }
}

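// Prepares fMapPtr for writing up to 'size' bytes. Dynamic buffers map the Vulkan memory directly,
// switching to a fresh Resource if the current one is still in use by a command buffer; static
// buffers write into a CPU-side scratch allocation that is uploaded on unmap.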
void GrVkBuffer::internalMap(GrVkGpu* gpu, size_t size, bool* createdNewBuffer) {
    VALIDATE();
    SkASSERT(!this->vkIsMapped());

    if (!fResource->unique()) {
        if (fDesc.fDynamic) {
            // in use by the command buffer, so we need to create a new one
            fResource->recycle(gpu);
            fResource = this->createResource(gpu, fDesc);
            if (createdNewBuffer) {
                *createdNewBuffer = true;
            }
        } else {
            SkASSERT(fMapPtr);
            this->addMemoryBarrier(gpu,
                                   buffer_type_to_access_flags(fDesc.fType),
                                   VK_ACCESS_TRANSFER_WRITE_BIT,
                                   VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                                   VK_PIPELINE_STAGE_TRANSFER_BIT,
                                   false);
        }
    }

    if (fDesc.fDynamic) {
        const GrVkAlloc& alloc = this->alloc();
        VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc.fMemory,
                                              alloc.fOffset + fOffset,
                                              size, 0, &fMapPtr));
        if (err) {
            fMapPtr = nullptr;
        }
    } else {
        if (!fMapPtr) {
            fMapPtr = new unsigned char[this->size()];
        }
    }

    VALIDATE();
}

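// Finishes a map: dynamic buffers flush and unmap the Vulkan memory; static buffers upload the
// CPU scratch copy with a transfer and add a barrier before the data is consumed by vertex input.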
void GrVkBuffer::internalUnmap(GrVkGpu* gpu, size_t size) {
    VALIDATE();
    SkASSERT(this->vkIsMapped());

    if (fDesc.fDynamic) {
        GrVkMemory::FlushMappedAlloc(gpu, this->alloc());
        VK_CALL(gpu, UnmapMemory(gpu->device(), this->alloc().fMemory));
        fMapPtr = nullptr;
    } else {
        gpu->updateBuffer(this, fMapPtr, this->offset(), size);
        this->addMemoryBarrier(gpu,
                               VK_ACCESS_TRANSFER_WRITE_BIT,
                               buffer_type_to_access_flags(fDesc.fType),
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);
    }
}

bool GrVkBuffer::vkIsMapped() const {
    VALIDATE();
    return SkToBool(fMapPtr);
}

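// Copies srcSizeInBytes bytes from src into the buffer via a map/memcpy/unmap sequence. Fails if
// the data does not fit or if mapping fails; for dynamic buffers this may replace the backing
// Resource, reported through createdNewBuffer.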
bool GrVkBuffer::vkUpdateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
                              bool* createdNewBuffer) {
    if (srcSizeInBytes > fDesc.fSizeInBytes) {
        return false;
    }

    this->internalMap(gpu, srcSizeInBytes, createdNewBuffer);
    if (!fMapPtr) {
        return false;
    }

    memcpy(fMapPtr, src, srcSizeInBytes);

    this->internalUnmap(gpu, srcSizeInBytes);

    return true;
}

void GrVkBuffer::validate() const {
    SkASSERT(!fResource || kVertex_Type == fDesc.fType || kIndex_Type == fDesc.fType
             || kTexel_Type == fDesc.fType || kCopyRead_Type == fDesc.fType
             || kCopyWrite_Type == fDesc.fType || kUniform_Type == fDesc.fType);
}