/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkBuffer.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkTransferBuffer.h"
#include "src/gpu/vk/GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif

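// Creates the VkBuffer object with usage flags appropriate to desc.fType, then allocates and
// binds device memory for it. Returns a ref-counted Resource wrapping both, or nullptr if
// either step fails.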
const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) {
    SkASSERT(!gpu->protectedContext() || (gpu->protectedContext() == desc.fDynamic));
    VkBuffer buffer;
    GrVkAlloc alloc;

    // create the buffer object
    VkBufferCreateInfo bufInfo;
    memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = desc.fSizeInBytes;
    switch (desc.fType) {
        case kVertex_Type:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
            break;
        case kIndex_Type:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
            break;
        case kUniform_Type:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
            break;
        case kCopyRead_Type:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
            break;
        case kCopyWrite_Type:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
        case kTexel_Type:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
            break;
    }
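    // Static (non-dynamic) buffers are updated through transfer commands, so they also need to
    // be usable as a transfer destination.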
    if (!desc.fDynamic) {
        bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }

    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

    VkResult err;
    err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
    if (err) {
        return nullptr;
    }

    if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
                                              buffer,
                                              desc.fType,
                                              desc.fDynamic,
                                              &alloc)) {
        // Don't leak the VkBuffer if we failed to bind memory to it.
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        return nullptr;
    }

    const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc, desc.fType);
    if (!resource) {
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        GrVkMemory::FreeBufferMemory(gpu, desc.fType, alloc);
        return nullptr;
    }

    return resource;
}

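// Records a buffer memory barrier covering the entire buffer. srcAccessMask/srcStageMask
// describe the prior accesses to wait on; dstAccessMask/dstStageMask describe the accesses
// that must wait for them.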
void GrVkBuffer::addMemoryBarrier(const GrVkGpu* gpu,
                                  VkAccessFlags srcAccessMask,
                                  VkAccessFlags dstAccessMask,
                                  VkPipelineStageFlags srcStageMask,
                                  VkPipelineStageFlags dstStageMask,
                                  bool byRegion) const {
    VkBufferMemoryBarrier bufferMemoryBarrier = {
        VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // sType
        nullptr,                                  // pNext
        srcAccessMask,                            // srcAccessMask
        dstAccessMask,                            // dstAccessMask
        VK_QUEUE_FAMILY_IGNORED,                  // srcQueueFamilyIndex
        VK_QUEUE_FAMILY_IGNORED,                  // dstQueueFamilyIndex
        this->buffer(),                           // buffer
        0,                                        // offset
        fDesc.fSizeInBytes,                       // size
    };

    // TODO: restrict to area of buffer we're interested in
    gpu->addBufferMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                                &bufferMemoryBarrier);
}

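// Destroys the VkBuffer and frees its backing memory. Called once the last reference to the
// Resource goes away and the GPU is done with the buffer.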
void GrVkBuffer::Resource::freeGPUData(GrVkGpu* gpu) const {
    SkASSERT(fBuffer);
    SkASSERT(fAlloc.fMemory);
    VK_CALL(gpu, DestroyBuffer(gpu->device(), fBuffer, nullptr));
    GrVkMemory::FreeBufferMemory(gpu, fType, fAlloc);
}

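// Drops this buffer's hold on its Resource, allowing it to be recycled or destroyed, and frees
// any CPU-side copy kept for static buffers.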
void GrVkBuffer::vkRelease(const GrVkGpu* gpu) {
    VALIDATE();
    fResource->recycle(const_cast<GrVkGpu*>(gpu));
    fResource = nullptr;
    if (!fDesc.fDynamic) {
        delete[] (unsigned char*)fMapPtr;
    }
    fMapPtr = nullptr;
    VALIDATE();
}

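// Abandons the Resource without destroying the underlying Vulkan objects (used when the
// context is lost) and frees any CPU-side copy kept for static buffers.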
void GrVkBuffer::vkAbandon() {
    fResource->unrefAndAbandon();
    fResource = nullptr;
    if (!fDesc.fDynamic) {
        delete[] (unsigned char*)fMapPtr;
    }
    fMapPtr = nullptr;
    VALIDATE();
}

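// Maps a static buffer's type to the access flags its consumers use, for building barrier
// masks around transfer writes.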
VkAccessFlags buffer_type_to_access_flags(GrVkBuffer::Type type) {
    switch (type) {
        case GrVkBuffer::kIndex_Type:
            return VK_ACCESS_INDEX_READ_BIT;
        case GrVkBuffer::kVertex_Type:
            return VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
        default:
            // This helper is only called for static buffers so we should only ever see index or
            // vertex buffer types.
            SkASSERT(false);
            return 0;
    }
}

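// Prepares the buffer to receive data: dynamic buffers are mapped directly; static buffers get
// a CPU-side scratch allocation that is uploaded on unmap. If a dynamic buffer's resource is
// still in use by pending work, a fresh Resource is created instead (reported through
// createdNewBuffer).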
void GrVkBuffer::internalMap(GrVkGpu* gpu, size_t size, bool* createdNewBuffer) {
    VALIDATE();
    SkASSERT(!this->vkIsMapped());

    if (!fResource->unique()) {
        if (fDesc.fDynamic) {
            // in use by the command buffer, so we need to create a new one
            fResource->recycle(gpu);
            fResource = this->createResource(gpu, fDesc);
            if (createdNewBuffer) {
                *createdNewBuffer = true;
            }
        } else {
            SkASSERT(fMapPtr);
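            // The static buffer may still be read by in-flight work, so make the upcoming
            // transfer write wait for those vertex/index reads to finish.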
            this->addMemoryBarrier(gpu,
                                   buffer_type_to_access_flags(fDesc.fType),
                                   VK_ACCESS_TRANSFER_WRITE_BIT,
                                   VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                                   VK_PIPELINE_STAGE_TRANSFER_BIT,
                                   false);
        }
    }

    if (fDesc.fDynamic) {
        const GrVkAlloc& alloc = this->alloc();
        SkASSERT(alloc.fSize > 0);
        SkASSERT(alloc.fSize >= size);
        SkASSERT(0 == fOffset);

        fMapPtr = GrVkMemory::MapAlloc(gpu, alloc);
    } else {
        if (!fMapPtr) {
            fMapPtr = new unsigned char[this->size()];
        }
    }

    VALIDATE();
}

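// Uploads CPU data to a static buffer: small, 4-byte-aligned updates go through
// vkCmdUpdateBuffer, everything else through a staging (transfer) buffer copy. A barrier is
// then inserted so subsequent reads see the new contents.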
void GrVkBuffer::copyCpuDataToGpuBuffer(GrVkGpu* gpu, const void* src, size_t size) {
    SkASSERT(src);
    // We should never call this method in protected contexts.
    SkASSERT(!gpu->protectedContext());
    // The Vulkan API restricts the use of vkCmdUpdateBuffer to updates that are no larger than
    // 65536 bytes and whose size is 4-byte aligned.
    if ((size <= 65536) && (0 == (size & 0x3)) && !gpu->vkCaps().avoidUpdateBuffers()) {
        gpu->updateBuffer(this, src, this->offset(), size);
    } else {
        sk_sp<GrVkTransferBuffer> transferBuffer =
                GrVkTransferBuffer::Make(gpu, size, GrVkBuffer::kCopyRead_Type);
        if (!transferBuffer) {
            return;
        }

        char* buffer = (char*) transferBuffer->map();
        memcpy(buffer, src, size);
        transferBuffer->unmap();

        gpu->copyBuffer(transferBuffer.get(), this, 0, this->offset(), size);
    }
    this->addMemoryBarrier(gpu,
                           VK_ACCESS_TRANSFER_WRITE_BIT,
                           buffer_type_to_access_flags(fDesc.fType),
                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           false);
}

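// Finishes a map: flushes and unmaps a dynamic buffer's memory, or uploads the CPU-side
// scratch data to a static buffer.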
void GrVkBuffer::internalUnmap(GrVkGpu* gpu, size_t size) {
    VALIDATE();
    SkASSERT(this->vkIsMapped());

    if (fDesc.fDynamic) {
        const GrVkAlloc& alloc = this->alloc();
        SkASSERT(alloc.fSize > 0);
        SkASSERT(alloc.fSize >= size);
        // We currently don't use fOffset
        SkASSERT(0 == fOffset);

        GrVkMemory::FlushMappedAlloc(gpu, alloc, 0, size);
        GrVkMemory::UnmapAlloc(gpu, alloc);
        fMapPtr = nullptr;
    } else {
        SkASSERT(fMapPtr);
        this->copyCpuDataToGpuBuffer(gpu, fMapPtr, size);
    }
}

bool GrVkBuffer::vkIsMapped() const {
    VALIDATE();
    return SkToBool(fMapPtr);
}

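// Copies srcSizeInBytes of data into the buffer, using the map/unmap path for dynamic buffers
// and the transfer path for static ones. Fails if the data does not fit.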
bool GrVkBuffer::vkUpdateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
                              bool* createdNewBuffer) {
    if (srcSizeInBytes > fDesc.fSizeInBytes) {
        return false;
    }

    if (fDesc.fDynamic) {
        this->internalMap(gpu, srcSizeInBytes, createdNewBuffer);
        if (!fMapPtr) {
            return false;
        }

        memcpy(fMapPtr, src, srcSizeInBytes);
        this->internalUnmap(gpu, srcSizeInBytes);
    } else {
        this->copyCpuDataToGpuBuffer(gpu, src, srcSizeInBytes);
    }

    return true;
}

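// Debug-only sanity check: a buffer with a live Resource must have one of the known types.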
void GrVkBuffer::validate() const {
    SkASSERT(!fResource || kVertex_Type == fDesc.fType || kIndex_Type == fDesc.fType
             || kTexel_Type == fDesc.fType || kCopyRead_Type == fDesc.fType
             || kCopyWrite_Type == fDesc.fType || kUniform_Type == fDesc.fType);
}