//
// Copyright 2016 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// BufferVk.cpp:
//    Implements the class methods for BufferVk.
//

#include "libANGLE/renderer/vulkan/BufferVk.h"

#include "common/debug.h"
#include "common/mathutil.h"
#include "common/utilities.h"
#include "libANGLE/Context.h"
#include "libANGLE/renderer/vulkan/ContextVk.h"
#include "libANGLE/renderer/vulkan/RendererVk.h"
#include "libANGLE/trace.h"

namespace rx
{

namespace
{
// Vertex attribute buffers are used as storage buffers for conversion in compute, where access to
// the buffer is made in 4-byte chunks.  Assume the size of the buffer is 4k+n where n is in
// [1, 3].  On some hardware, reading 4 bytes from address 4k returns 0, making it impossible to
// read the last n bytes.  By rounding up the buffer sizes to a multiple of 4, the problem is
// alleviated.
constexpr size_t kBufferSizeGranularity = 4;
static_assert(gl::isPow2(kBufferSizeGranularity), "use as alignment, must be power of two");

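// For illustration: with kBufferSizeGranularity == 4, a requested size of 1001, 1002 or 1003
// bytes is padded by roundUpPow2(size, kBufferSizeGranularity) to 1004, so a 4-byte read covering
// the final bytes stays within the allocation; sizes already a multiple of 4 are unchanged.
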
// Start with a fairly small buffer size. We can increase this dynamically as we convert more data.
constexpr size_t kConvertedArrayBufferInitialSize = 1024 * 8;

// Base size for all staging buffers.
constexpr size_t kStagingBufferBaseSize = 1024;
// Fix the staging buffer size multiplier for unpack buffers, for now.
constexpr size_t kUnpackBufferStagingBufferMultiplier = 1024;

size_t CalculateStagingBufferSize(gl::BufferBinding target, size_t size, size_t alignment)
{
    size_t alignedSize = rx::roundUp(size, alignment);
    int multiplier     = std::max(gl::log2(alignedSize), 1);

    switch (target)
    {
        case gl::BufferBinding::Array:
        case gl::BufferBinding::DrawIndirect:
        case gl::BufferBinding::ElementArray:
        case gl::BufferBinding::Uniform:
            return kStagingBufferBaseSize * multiplier;

        case gl::BufferBinding::PixelUnpack:
            return std::max(alignedSize,
                            (kStagingBufferBaseSize * kUnpackBufferStagingBufferMultiplier));

        default:
            return kStagingBufferBaseSize;
    }
}

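// For illustration: with target == gl::BufferBinding::Array, size == 1000 and alignment == 64,
// alignedSize is rounded up to 1024, gl::log2(1024) is 10, and the staging buffer is sized
// kStagingBufferBaseSize * 10 == 10240 bytes.  For PixelUnpack buffers the result is at least
// kStagingBufferBaseSize * kUnpackBufferStagingBufferMultiplier == 1 MiB, so large pixel uploads
// can be staged in a single allocation.
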
// Buffers that have a static usage pattern will be allocated in
// device-local memory to speed up access to and from the GPU.
// Buffers with a dynamic usage pattern, or that are frequently mapped,
// will instead request host-cached memory to speed up access from the CPU.
ANGLE_INLINE VkMemoryPropertyFlags GetPreferredMemoryType(gl::BufferBinding target,
                                                          gl::BufferUsage usage)
{
    constexpr VkMemoryPropertyFlags kDeviceLocalFlags =
        (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
         VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    constexpr VkMemoryPropertyFlags kHostCachedFlags =
        (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
         VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
    constexpr VkMemoryPropertyFlags kHostUncachedFlags =
        (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);

    if (target == gl::BufferBinding::PixelUnpack)
    {
        return kHostCachedFlags;
    }

    switch (usage)
    {
        case gl::BufferUsage::StaticCopy:
        case gl::BufferUsage::StaticDraw:
        case gl::BufferUsage::StaticRead:
            // For static usage, request device-local memory.
            return kDeviceLocalFlags;
        case gl::BufferUsage::DynamicDraw:
        case gl::BufferUsage::StreamDraw:
            // For non-static usage where the CPU performs a write-only access, request
            // host-uncached memory.
            return kHostUncachedFlags;
        case gl::BufferUsage::DynamicCopy:
        case gl::BufferUsage::DynamicRead:
        case gl::BufferUsage::StreamCopy:
        case gl::BufferUsage::StreamRead:
            // For all other types of usage, request host-cached memory.
            return kHostCachedFlags;
        default:
            UNREACHABLE();
            return kHostCachedFlags;
    }
}
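
// In summary, the preferred memory types are:
//   PixelUnpack (any usage)               -> host-visible | coherent | host-cached
//   Static{Draw,Read,Copy}                -> host-visible | coherent | device-local
//   DynamicDraw, StreamDraw               -> host-visible | coherent (uncached)
//   Dynamic{Read,Copy}, Stream{Read,Copy} -> host-visible | coherent | host-cached
// Every request includes HOST_VISIBLE, so the buffer can be mapped directly when needed.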
}  // namespace

// ConversionBuffer implementation.
ConversionBuffer::ConversionBuffer(RendererVk *renderer,
                                   VkBufferUsageFlags usageFlags,
                                   size_t initialSize,
                                   size_t alignment,
                                   bool hostVisible)
    : dirty(true), lastAllocationOffset(0)
{
    data.init(renderer, usageFlags, alignment, initialSize, hostVisible);
}

ConversionBuffer::~ConversionBuffer() = default;

ConversionBuffer::ConversionBuffer(ConversionBuffer &&other) = default;

// BufferVk::VertexConversionBuffer implementation.
BufferVk::VertexConversionBuffer::VertexConversionBuffer(RendererVk *renderer,
                                                         angle::FormatID formatIDIn,
                                                         GLuint strideIn,
                                                         size_t offsetIn,
                                                         bool hostVisible)
    : ConversionBuffer(renderer,
                       vk::kVertexBufferUsageFlags,
                       kConvertedArrayBufferInitialSize,
                       vk::kVertexBufferAlignment,
                       hostVisible),
      formatID(formatIDIn),
      stride(strideIn),
      offset(offsetIn)
{}

BufferVk::VertexConversionBuffer::VertexConversionBuffer(VertexConversionBuffer &&other) = default;

BufferVk::VertexConversionBuffer::~VertexConversionBuffer() = default;

// BufferVk implementation.
BufferVk::BufferVk(const gl::BufferState &state) : BufferImpl(state) {}

BufferVk::~BufferVk() {}

void BufferVk::destroy(const gl::Context *context)
{
    ContextVk *contextVk = vk::GetImpl(context);

    release(contextVk);
}

void BufferVk::release(ContextVk *contextVk)
{
    RendererVk *renderer = contextVk->getRenderer();
    mBuffer.release(renderer);
    mStagingBuffer.release(renderer);
    mShadowBuffer.release();

    for (ConversionBuffer &buffer : mVertexConversionBuffers)
    {
        buffer.data.release(renderer);
    }
}

void BufferVk::initializeStagingBuffer(ContextVk *contextVk, gl::BufferBinding target, size_t size)
{
    RendererVk *rendererVk = contextVk->getRenderer();

    constexpr VkBufferUsageFlags kBufferUsageFlags = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
    size_t alignment =
        static_cast<size_t>(rendererVk->getPhysicalDeviceProperties().limits.minMemoryMapAlignment);
    size_t stagingBufferSize = CalculateStagingBufferSize(target, size, alignment);

    mStagingBuffer.init(rendererVk, kBufferUsageFlags, alignment, stagingBufferSize, true);
}

angle::Result BufferVk::initializeShadowBuffer(ContextVk *contextVk,
                                               gl::BufferBinding target,
                                               size_t size)
{
    // For now, enable shadow buffers only for pixel unpack buffers.
    // If use cases present themselves, we can enable them for other buffer types.
    if (target == gl::BufferBinding::PixelUnpack)
    {
        // Initialize the shadow buffer.
        mShadowBuffer.init(size);

        // Allocate the required memory. If allocation fails, treat it as a non-fatal error,
        // since we do not need the shadow buffer for functionality.
        ANGLE_TRY(mShadowBuffer.allocate(size));
    }

    return angle::Result::Continue;
}

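// Note: the shadow buffer is a CPU-side copy of the buffer contents.  Keeping it up to date
// allows map and read operations on pixel unpack buffers to be serviced from CPU memory,
// without waiting for in-flight GPU work that still references the Vulkan buffer.
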
void BufferVk::updateShadowBuffer(const uint8_t *data, size_t size, size_t offset)
{
    if (mShadowBuffer.valid())
    {
        mShadowBuffer.updateData(data, size, offset);
    }
}

angle::Result BufferVk::setData(const gl::Context *context,
                                gl::BufferBinding target,
                                const void *data,
                                size_t size,
                                gl::BufferUsage usage)
{
    ContextVk *contextVk = vk::GetImpl(context);

    if (size > static_cast<size_t>(mState.getSize()))
    {
        // Release and re-create the memory and buffer.
        release(contextVk);

        // We could potentially use multiple backing buffers for different usages.
        // For now keep a single buffer with all relevant usage flags.
        VkBufferUsageFlags usageFlags =
            VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
            VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
            VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;

        if (contextVk->getFeatures().supportsTransformFeedbackExtension.enabled)
        {
            usageFlags |= VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT;
        }

        VkBufferCreateInfo createInfo    = {};
        createInfo.sType                 = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        createInfo.flags                 = 0;
        createInfo.size                  = roundUpPow2(size, kBufferSizeGranularity);
        createInfo.usage                 = usageFlags;
        createInfo.sharingMode           = VK_SHARING_MODE_EXCLUSIVE;
        createInfo.queueFamilyIndexCount = 0;
        createInfo.pQueueFamilyIndices   = nullptr;

        // Assume host visible/coherent memory is available.
        VkMemoryPropertyFlags memoryPropertyFlags = GetPreferredMemoryType(target, usage);

        ANGLE_TRY(mBuffer.init(contextVk, createInfo, memoryPropertyFlags));

        // Initialize the staging buffer.
        initializeStagingBuffer(contextVk, target, size);

        // Initialize the shadow buffer.
        ANGLE_TRY(initializeShadowBuffer(contextVk, target, size));
    }

    if (data && size > 0)
    {
        ANGLE_TRY(setDataImpl(contextVk, static_cast<const uint8_t *>(data), size, 0));
    }

    return angle::Result::Continue;
}

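// For illustration: a GL call such as
//     glBufferData(GL_ARRAY_BUFFER, 1001, ptr, GL_STATIC_DRAW);
// reaches setData() with target == gl::BufferBinding::Array, size == 1001 and
// usage == gl::BufferUsage::StaticDraw.  The Vulkan buffer is then created with
// createInfo.size == roundUpPow2(1001, 4) == 1004 and device-local, host-visible memory, and
// the 1001 bytes of data are written through setDataImpl().
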
angle::Result BufferVk::setSubData(const gl::Context *context,
                                   gl::BufferBinding target,
                                   const void *data,
                                   size_t size,
                                   size_t offset)
{
    ASSERT(mBuffer.valid());

    ContextVk *contextVk = vk::GetImpl(context);
    ANGLE_TRY(setDataImpl(contextVk, static_cast<const uint8_t *>(data), size, offset));

    return angle::Result::Continue;
}

angle::Result BufferVk::copySubData(const gl::Context *context,
                                    BufferImpl *source,
                                    GLintptr sourceOffset,
                                    GLintptr destOffset,
                                    GLsizeiptr size)
{
    ASSERT(mBuffer.valid());

    ContextVk *contextVk = vk::GetImpl(context);
    auto *sourceBuffer   = GetAs<BufferVk>(source);
    ASSERT(sourceBuffer->getBuffer().valid());

    // If the shadow buffer is enabled for the destination buffer then
    // we need to update that as well. This will require us to complete
    // all recorded and in-flight commands involving the source buffer.
    if (mShadowBuffer.valid())
    {
        ANGLE_TRY(sourceBuffer->getBuffer().waitForIdle(contextVk));

        // Update the shadow buffer.
        uint8_t *srcPtr;
        ANGLE_TRY(sourceBuffer->getBuffer().mapWithOffset(contextVk, &srcPtr, sourceOffset));

        updateShadowBuffer(srcPtr, size, destOffset);

        // Unmap the source buffer.
        sourceBuffer->getBuffer().unmap(contextVk->getRenderer());
    }

    vk::CommandBuffer *commandBuffer = nullptr;

    ANGLE_TRY(contextVk->onBufferRead(VK_ACCESS_TRANSFER_READ_BIT, &sourceBuffer->getBuffer()));
    ANGLE_TRY(contextVk->onBufferWrite(VK_ACCESS_TRANSFER_WRITE_BIT, &mBuffer));
    ANGLE_TRY(contextVk->endRenderPassAndGetCommandBuffer(&commandBuffer));

    // Enqueue a copy command on the GPU.
    const VkBufferCopy copyRegion = {static_cast<VkDeviceSize>(sourceOffset),
                                     static_cast<VkDeviceSize>(destOffset),
                                     static_cast<VkDeviceSize>(size)};

    commandBuffer->copyBuffer(sourceBuffer->getBuffer().getBuffer(), mBuffer.getBuffer(), 1,
                              &copyRegion);

    // The new destination buffer data may require a conversion for the next draw, so mark it
    // dirty.
    onDataChanged();

    return angle::Result::Continue;
}

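// Note: VkBufferCopy's members are, in order, srcOffset, dstOffset and size, so the aggregate
// initializer above maps sourceOffset, destOffset and size directly onto the Vulkan structure.
// This path backs glCopyBufferSubData.
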
angle::Result BufferVk::map(const gl::Context *context, GLenum access, void **mapPtr)
{
    ASSERT(mBuffer.valid());

    return mapImpl(vk::GetImpl(context), mapPtr);
}

angle::Result BufferVk::mapRange(const gl::Context *context,
                                 size_t offset,
                                 size_t length,
                                 GLbitfield access,
                                 void **mapPtr)
{
    return mapRangeImpl(vk::GetImpl(context), offset, length, access, mapPtr);
}

angle::Result BufferVk::mapImpl(ContextVk *contextVk, void **mapPtr)
{
    return mapRangeImpl(contextVk, 0, static_cast<VkDeviceSize>(mState.getSize()), 0, mapPtr);
}

angle::Result BufferVk::mapRangeImpl(ContextVk *contextVk,
                                     VkDeviceSize offset,
                                     VkDeviceSize length,
                                     GLbitfield access,
                                     void **mapPtr)
{
    if (!mShadowBuffer.valid())
    {
        ASSERT(mBuffer.valid());

        if ((access & GL_MAP_UNSYNCHRONIZED_BIT) == 0)
        {
            ANGLE_TRY(mBuffer.waitForIdle(contextVk));
        }

        ANGLE_TRY(mBuffer.mapWithOffset(contextVk, reinterpret_cast<uint8_t **>(mapPtr),
                                        static_cast<size_t>(offset)));
    }
    else
    {
        // If the app requested GL_MAP_UNSYNCHRONIZED_BIT access, the spec states:
        //      No GL error is generated if pending operations which source or modify the
        //      buffer overlap the mapped region, but the result of such previous and any
        //      subsequent operations is undefined.
        // To keep the code simple, irrespective of whether the access was GL_MAP_UNSYNCHRONIZED_BIT
        // or not, just return the shadow buffer.
        mShadowBuffer.map(static_cast<size_t>(offset), mapPtr);
    }

    return angle::Result::Continue;
}

angle::Result BufferVk::unmap(const gl::Context *context, GLboolean *result)
{
    ANGLE_TRY(unmapImpl(vk::GetImpl(context)));

    // This should be false if the contents have been corrupted through external means.  Vulkan
    // doesn't provide such information.
    *result = true;

    return angle::Result::Continue;
}

angle::Result BufferVk::unmapImpl(ContextVk *contextVk)
{
    ASSERT(mBuffer.valid());

    if (!mShadowBuffer.valid())
    {
        mBuffer.unmap(contextVk->getRenderer());
        mBuffer.onExternalWrite(VK_ACCESS_HOST_WRITE_BIT);
    }
    else
    {
        bool writeOperation = ((mState.getAccessFlags() & GL_MAP_WRITE_BIT) != 0);
        size_t offset       = static_cast<size_t>(mState.getMapOffset());
        size_t size         = static_cast<size_t>(mState.getMapLength());

        // If it was a write operation we need to update the GPU buffer.
        if (writeOperation)
        {
            // We do not yet know if the data will ever be used. Perform a staged
            // update which will get flushed if and when necessary.
            const uint8_t *data = getShadowBuffer(offset);
            ANGLE_TRY(stagedUpdate(contextVk, data, size, offset));
        }

        mShadowBuffer.unmap();
    }

    markConversionBuffersDirty();

    return angle::Result::Continue;
}

angle::Result BufferVk::getIndexRange(const gl::Context *context,
                                      gl::DrawElementsType type,
                                      size_t offset,
                                      size_t count,
                                      bool primitiveRestartEnabled,
                                      gl::IndexRange *outRange)
{
    ContextVk *contextVk = vk::GetImpl(context);
    RendererVk *renderer = contextVk->getRenderer();

    // This is a workaround for the mock ICD not implementing buffer memory state.
    // Could be removed if https://github.com/KhronosGroup/Vulkan-Tools/issues/84 is fixed.
    if (renderer->isMockICDEnabled())
    {
        outRange->start = 0;
        outRange->end   = 0;
        return angle::Result::Continue;
    }

    ANGLE_TRACE_EVENT0("gpu.angle", "BufferVk::getIndexRange");

    uint8_t *mapPointer;

    if (!mShadowBuffer.valid())
    {
        // Needed before reading buffer or we could get stale data.
        ANGLE_TRY(mBuffer.finishRunningCommands(contextVk));

        ASSERT(mBuffer.valid());

        ANGLE_TRY(mBuffer.mapWithOffset(contextVk, &mapPointer, offset));
    }
    else
    {
        mapPointer = getShadowBuffer(offset);
    }

    *outRange = gl::ComputeIndexRange(type, mapPointer, count, primitiveRestartEnabled);

    // Only unmap if the GPU buffer was mapped above; the shadow-buffer path reads CPU memory.
    if (!mShadowBuffer.valid())
    {
        mBuffer.unmap(renderer);
    }

    return angle::Result::Continue;
}

angle::Result BufferVk::directUpdate(ContextVk *contextVk,
                                     const uint8_t *data,
                                     size_t size,
                                     size_t offset)
{
    uint8_t *mapPointer = nullptr;

    ANGLE_TRY(mBuffer.mapWithOffset(contextVk, &mapPointer, offset));
    ASSERT(mapPointer);

    memcpy(mapPointer, data, size);

    mBuffer.unmap(contextVk->getRenderer());
    mBuffer.onExternalWrite(VK_ACCESS_HOST_WRITE_BIT);

    return angle::Result::Continue;
}

angle::Result BufferVk::stagedUpdate(ContextVk *contextVk,
                                     const uint8_t *data,
                                     size_t size,
                                     size_t offset)
{
    // Acquire a "new" staging buffer.
    bool needToReleasePreviousBuffers = false;
    uint8_t *mapPointer               = nullptr;
    VkDeviceSize stagingBufferOffset  = 0;

    ANGLE_TRY(mStagingBuffer.allocate(contextVk, size, &mapPointer, nullptr, &stagingBufferOffset,
                                      &needToReleasePreviousBuffers));
    if (needToReleasePreviousBuffers)
    {
        // Release previous staging buffers.
        mStagingBuffer.releaseInFlightBuffers(contextVk);
    }
    ASSERT(mapPointer);

    memcpy(mapPointer, data, size);

    // Enqueue a copy command on the GPU.
    VkBufferCopy copyRegion = {stagingBufferOffset, offset, size};
    ANGLE_TRY(mBuffer.copyFromBuffer(contextVk, mStagingBuffer.getCurrentBuffer(),
                                     VK_ACCESS_HOST_WRITE_BIT, copyRegion));
    mStagingBuffer.getCurrentBuffer()->retain(&contextVk->getResourceUseList());

    return angle::Result::Continue;
}

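// Note: a staged update writes into host-visible staging memory and records a GPU-side
// buffer-to-buffer copy, so it does not block on in-flight GPU work that still references the
// destination buffer.  A direct update writes through a CPU mapping instead, which is only safe
// when the buffer is not currently in use.
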
angle::Result BufferVk::setDataImpl(ContextVk *contextVk,
                                    const uint8_t *data,
                                    size_t size,
                                    size_t offset)
{
    // Update the shadow buffer.
    updateShadowBuffer(data, size, offset);

    // If the buffer is currently in use, stage the update. Otherwise update the buffer directly.
    if (mBuffer.isCurrentlyInUse(contextVk->getLastCompletedQueueSerial()))
    {
        ANGLE_TRY(stagedUpdate(contextVk, data, size, offset));
    }
    else
    {
        ANGLE_TRY(directUpdate(contextVk, data, size, offset));
    }

    // Update conversions.
    markConversionBuffersDirty();

    return angle::Result::Continue;
}

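// Note: isCurrentlyInUse() presumably compares the buffer's last-use serial against the last
// queue serial known to have completed on the GPU; if the buffer's serial is newer, previously
// recorded commands may still reference it, so the update is staged rather than written in
// place.
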
angle::Result BufferVk::copyToBufferImpl(ContextVk *contextVk,
                                         vk::BufferHelper *destBuffer,
                                         uint32_t copyCount,
                                         const VkBufferCopy *copies)
{
    vk::CommandBuffer *commandBuffer;
    ANGLE_TRY(contextVk->onBufferWrite(VK_ACCESS_TRANSFER_WRITE_BIT, destBuffer));
    ANGLE_TRY(contextVk->onBufferRead(VK_ACCESS_TRANSFER_READ_BIT, &mBuffer));
    ANGLE_TRY(contextVk->endRenderPassAndGetCommandBuffer(&commandBuffer));

    commandBuffer->copyBuffer(mBuffer.getBuffer(), destBuffer->getBuffer(), copyCount, copies);

    return angle::Result::Continue;
}

ConversionBuffer *BufferVk::getVertexConversionBuffer(RendererVk *renderer,
                                                      angle::FormatID formatID,
                                                      GLuint stride,
                                                      size_t offset,
                                                      bool hostVisible)
{
    for (VertexConversionBuffer &buffer : mVertexConversionBuffers)
    {
        if (buffer.formatID == formatID && buffer.stride == stride && buffer.offset == offset)
        {
            return &buffer;
        }
    }

    mVertexConversionBuffers.emplace_back(renderer, formatID, stride, offset, hostVisible);
    return &mVertexConversionBuffers.back();
}

void BufferVk::markConversionBuffersDirty()
{
    for (VertexConversionBuffer &buffer : mVertexConversionBuffers)
    {
        buffer.dirty = true;
    }
}

void BufferVk::onDataChanged()
{
    markConversionBuffersDirty();
}

}  // namespace rx