//
// Copyright 2016 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// BufferVk.cpp:
//    Implements the class methods for BufferVk.
//

#include "libANGLE/renderer/vulkan/BufferVk.h"

#include "common/FixedVector.h"
#include "common/debug.h"
#include "common/mathutil.h"
#include "common/utilities.h"
#include "libANGLE/Context.h"
#include "libANGLE/renderer/vulkan/ContextVk.h"
#include "libANGLE/renderer/vulkan/RendererVk.h"
#include "libANGLE/trace.h"

namespace rx
{

namespace
{
// Vertex attribute buffers are used as storage buffers for conversion in compute, where access to
// the buffer is made in 4-byte chunks.  Assume the size of the buffer is 4k+n where n is in
// [1, 3].  On some hardware, reading 4 bytes from address 4k returns 0, making it impossible to
// read the last n bytes.  By rounding the buffer sizes up to a multiple of 4, the problem is
// alleviated.
constexpr size_t kBufferSizeGranularity = 4;
static_assert(gl::isPow2(kBufferSizeGranularity), "use as alignment, must be power of two");

// Start with a fairly small buffer size. We can increase this dynamically as we convert more data.
constexpr size_t kConvertedArrayBufferInitialSize = 1024 * 8;

// Base size for all staging buffers.
constexpr size_t kStagingBufferBaseSize = 1024;
// Fix the staging buffer size multiplier for unpack buffers, for now.
constexpr size_t kUnpackBufferStagingBufferMultiplier = 1024;
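// With the fixed multiplier above, the staging buffer for unpack buffers is at least
// kStagingBufferBaseSize * kUnpackBufferStagingBufferMultiplier = 1024 * 1024 bytes (1 MiB).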

size_t CalculateStagingBufferSize(gl::BufferBinding target, size_t size, size_t alignment)
{
    size_t alignedSize = rx::roundUp(size, alignment);
    int multiplier     = std::max(gl::log2(alignedSize), 1);
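    // The staging buffer grows logarithmically with the data size: for example, an aligned size
    // of 64 KiB gives log2(65536) = 16, so a vertex or uniform buffer gets a
    // 16 * kStagingBufferBaseSize = 16 KiB staging buffer.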

    switch (target)
    {
        case gl::BufferBinding::Array:
        case gl::BufferBinding::DrawIndirect:
        case gl::BufferBinding::ElementArray:
        case gl::BufferBinding::Uniform:
            return kStagingBufferBaseSize * multiplier;

        case gl::BufferBinding::PixelUnpack:
            return std::max(alignedSize,
                            (kStagingBufferBaseSize * kUnpackBufferStagingBufferMultiplier));

        default:
            return kStagingBufferBaseSize;
    }
}

// Buffers that have a static usage pattern will be allocated in
// device-local memory to speed up access to and from the GPU.
// Buffers with a dynamic usage pattern, or that are frequently mapped, request
// host-visible memory instead: host-cached where the CPU also reads the data back,
// host-uncached where CPU access is write-only.
ANGLE_INLINE VkMemoryPropertyFlags GetPreferredMemoryType(gl::BufferBinding target,
                                                          gl::BufferUsage usage)
{
    constexpr VkMemoryPropertyFlags kDeviceLocalFlags =
        (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
         VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    constexpr VkMemoryPropertyFlags kHostCachedFlags =
        (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
         VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
    constexpr VkMemoryPropertyFlags kHostUncachedFlags =
        (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);

    if (target == gl::BufferBinding::PixelUnpack)
    {
        return kHostCachedFlags;
    }

    switch (usage)
    {
        case gl::BufferUsage::StaticCopy:
        case gl::BufferUsage::StaticDraw:
        case gl::BufferUsage::StaticRead:
            // For static usage, request device-local memory.
            return kDeviceLocalFlags;
        case gl::BufferUsage::DynamicDraw:
        case gl::BufferUsage::StreamDraw:
            // For non-static usage where the CPU performs write-only access, request
            // host-uncached memory.
            return kHostUncachedFlags;
        case gl::BufferUsage::DynamicCopy:
        case gl::BufferUsage::DynamicRead:
        case gl::BufferUsage::StreamCopy:
        case gl::BufferUsage::StreamRead:
            // For all other usage, request host-cached memory.
            return kHostCachedFlags;
        default:
            UNREACHABLE();
            return kHostCachedFlags;
    }
}

ANGLE_INLINE bool SubDataSizeMeetsThreshold(size_t subDataSize, size_t bufferSize)
{
    // A sub-data update with size > 50% of the buffer size meets the threshold to acquire a new
    // BufferHelper from the pool.
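    // For example, with a 1024-byte buffer, a 768-byte update (75%) meets the threshold, while a
    // 256-byte update (25%) does not and is staged instead.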
    return subDataSize > (bufferSize / 2);
}
}  // namespace

// ConversionBuffer implementation.
ConversionBuffer::ConversionBuffer(RendererVk *renderer,
                                   VkBufferUsageFlags usageFlags,
                                   size_t initialSize,
                                   size_t alignment,
                                   bool hostVisible)
    : dirty(true), lastAllocationOffset(0)
{
    data.init(renderer, usageFlags, alignment, initialSize, hostVisible);
}

ConversionBuffer::~ConversionBuffer() = default;

ConversionBuffer::ConversionBuffer(ConversionBuffer &&other) = default;

// BufferVk::VertexConversionBuffer implementation.
BufferVk::VertexConversionBuffer::VertexConversionBuffer(RendererVk *renderer,
                                                         angle::FormatID formatIDIn,
                                                         GLuint strideIn,
                                                         size_t offsetIn,
                                                         bool hostVisible)
    : ConversionBuffer(renderer,
                       vk::kVertexBufferUsageFlags,
                       kConvertedArrayBufferInitialSize,
                       vk::kVertexBufferAlignment,
                       hostVisible),
      formatID(formatIDIn),
      stride(strideIn),
      offset(offsetIn)
{}

BufferVk::VertexConversionBuffer::VertexConversionBuffer(VertexConversionBuffer &&other) = default;

BufferVk::VertexConversionBuffer::~VertexConversionBuffer() = default;

// BufferVk implementation.
BufferVk::BufferVk(const gl::BufferState &state) : BufferImpl(state), mBuffer(nullptr) {}

BufferVk::~BufferVk() {}

void BufferVk::destroy(const gl::Context *context)
{
    ContextVk *contextVk = vk::GetImpl(context);

    release(contextVk);
}

void BufferVk::release(ContextVk *contextVk)
{
    RendererVk *renderer = contextVk->getRenderer();
    mStagingBuffer.release(renderer);
    mShadowBuffer.release();
    mBufferPool.release(renderer);
    mBuffer = nullptr;

    for (ConversionBuffer &buffer : mVertexConversionBuffers)
    {
        buffer.data.release(renderer);
    }
}

void BufferVk::initializeStagingBuffer(ContextVk *contextVk, gl::BufferBinding target, size_t size)
{
    RendererVk *rendererVk = contextVk->getRenderer();

    constexpr VkBufferUsageFlags kBufferUsageFlags = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
    size_t alignment =
        static_cast<size_t>(rendererVk->getPhysicalDeviceProperties().limits.minMemoryMapAlignment);
    size_t stagingBufferSize = CalculateStagingBufferSize(target, size, alignment);

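    // The staging buffer is always host visible (last init() argument) so the CPU can write data
    // into it before it is copied into the GPU-side buffer.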
    mStagingBuffer.init(rendererVk, kBufferUsageFlags, alignment, stagingBufferSize, true);
}

angle::Result BufferVk::initializeShadowBuffer(ContextVk *contextVk,
                                               gl::BufferBinding target,
                                               size_t size)
{
    // For now, enable shadow buffers only for pixel unpack buffers.
    // If use cases present themselves, we can enable them for other buffer types.
    if (target == gl::BufferBinding::PixelUnpack)
    {
        // Initialize the shadow buffer.
        mShadowBuffer.init(size);

        // Allocate the required memory. If allocation fails, treat it as a non-fatal error,
        // since we do not need the shadow buffer for functionality.
        ANGLE_TRY(mShadowBuffer.allocate(size));
    }

    return angle::Result::Continue;
}

void BufferVk::updateShadowBuffer(const uint8_t *data, size_t size, size_t offset)
{
    if (mShadowBuffer.valid())
    {
        mShadowBuffer.updateData(data, size, offset);
    }
}

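// setData() implements glBufferData().  As a purely illustrative example, a call such as
//
//     glBindBuffer(GL_ARRAY_BUFFER, buffer);
//     glBufferData(GL_ARRAY_BUFFER, 1024, data, GL_STATIC_DRAW);
//
// reaches this function with target = gl::BufferBinding::Array, size = 1024 and
// usage = gl::BufferUsage::StaticDraw, so GetPreferredMemoryType() above selects device-local
// memory.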
angle::Result BufferVk::setData(const gl::Context *context,
                                gl::BufferBinding target,
                                const void *data,
                                size_t size,
                                gl::BufferUsage usage)
{
    ContextVk *contextVk = vk::GetImpl(context);

    // A BufferData call re-specifies the entire buffer.
    // Release mBuffer and init a new one with the new size.
    if (size > 0 && size != static_cast<size_t>(mState.getSize()))
    {
        // Release and re-create the memory and buffer.
        release(contextVk);

        // We could potentially use multiple backing buffers for different usages.
        // For now keep a single buffer with all relevant usage flags.
        VkBufferUsageFlags usageFlags =
            VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
            VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
            VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;

        if (contextVk->getFeatures().supportsTransformFeedbackExtension.enabled)
        {
            usageFlags |= VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT;
        }

        // Assume host-visible/coherent memory is available.
        VkMemoryPropertyFlags memoryPropertyFlags = GetPreferredMemoryType(target, usage);

        // mBuffer will be allocated through a DynamicBuffer.
        constexpr size_t kBufferHelperAlignment       = 1;
        constexpr size_t kBufferHelperPoolInitialSize = 0;

        mBufferPool.initWithFlags(contextVk->getRenderer(), usageFlags, kBufferHelperAlignment,
                                  kBufferHelperPoolInitialSize, memoryPropertyFlags);

        ANGLE_TRY(acquireBufferHelper(contextVk, size, &mBuffer));

        // Initialize the staging buffer.
        initializeStagingBuffer(contextVk, target, size);

        // Initialize the shadow buffer.
        ANGLE_TRY(initializeShadowBuffer(contextVk, target, size));
    }

    if (data && size > 0)
    {
        ANGLE_TRY(setDataImpl(contextVk, static_cast<const uint8_t *>(data), size, 0));
    }

    return angle::Result::Continue;
}

angle::Result BufferVk::setSubData(const gl::Context *context,
                                   gl::BufferBinding target,
                                   const void *data,
                                   size_t size,
                                   size_t offset)
{
    ASSERT(mBuffer && mBuffer->valid());

    ContextVk *contextVk = vk::GetImpl(context);
    ANGLE_TRY(setDataImpl(contextVk, static_cast<const uint8_t *>(data), size, offset));

    return angle::Result::Continue;
}

angle::Result BufferVk::copySubData(const gl::Context *context,
                                    BufferImpl *source,
                                    GLintptr sourceOffset,
                                    GLintptr destOffset,
                                    GLsizeiptr size)
{
    ASSERT(mBuffer && mBuffer->valid());

    ContextVk *contextVk = vk::GetImpl(context);
    auto *sourceBuffer   = GetAs<BufferVk>(source);
    ASSERT(sourceBuffer->getBuffer().valid());

    // If the shadow buffer is enabled for the destination buffer, then we need to update that as
    // well.  This requires waiting for all recorded and in-flight commands involving the source
    // buffer to complete.
    if (mShadowBuffer.valid())
    {
        ANGLE_TRY(sourceBuffer->getBuffer().waitForIdle(contextVk));

        // Update the shadow buffer.
        uint8_t *srcPtr;
        ANGLE_TRY(sourceBuffer->getBuffer().mapWithOffset(contextVk, &srcPtr, sourceOffset));

        updateShadowBuffer(srcPtr, size, destOffset);

        // Unmap the source buffer.
        sourceBuffer->getBuffer().unmap(contextVk->getRenderer());
    }

    vk::CommandBuffer *commandBuffer = nullptr;

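    // onBufferTransferRead/Write below also record this function's buffer accesses so the
    // required synchronization is in place before the copy executes.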
    ANGLE_TRY(contextVk->onBufferTransferRead(&sourceBuffer->getBuffer()));
    ANGLE_TRY(contextVk->onBufferTransferWrite(mBuffer));
    ANGLE_TRY(contextVk->endRenderPassAndGetCommandBuffer(&commandBuffer));

    // Enqueue a copy command on the GPU.
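    // VkBufferCopy is {srcOffset, dstOffset, size}, all in bytes.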
    const VkBufferCopy copyRegion = {static_cast<VkDeviceSize>(sourceOffset),
                                     static_cast<VkDeviceSize>(destOffset),
                                     static_cast<VkDeviceSize>(size)};

    commandBuffer->copyBuffer(sourceBuffer->getBuffer().getBuffer(), mBuffer->getBuffer(), 1,
                              &copyRegion);

    // The new destination buffer data may require a conversion for the next draw, so mark it
    // dirty.
    onDataChanged();

    return angle::Result::Continue;
}

angle::Result BufferVk::map(const gl::Context *context, GLenum access, void **mapPtr)
{
    ASSERT(mBuffer && mBuffer->valid());

    return mapImpl(vk::GetImpl(context), mapPtr);
}

angle::Result BufferVk::mapRange(const gl::Context *context,
                                 size_t offset,
                                 size_t length,
                                 GLbitfield access,
                                 void **mapPtr)
{
    return mapRangeImpl(vk::GetImpl(context), offset, length, access, mapPtr);
}

angle::Result BufferVk::mapImpl(ContextVk *contextVk, void **mapPtr)
{
    return mapRangeImpl(contextVk, 0, static_cast<VkDeviceSize>(mState.getSize()), 0, mapPtr);
}

angle::Result BufferVk::mapRangeImpl(ContextVk *contextVk,
                                     VkDeviceSize offset,
                                     VkDeviceSize length,
                                     GLbitfield access,
                                     void **mapPtr)
{
    if (!mShadowBuffer.valid())
    {
        ASSERT(mBuffer && mBuffer->valid());

        if ((access & GL_MAP_UNSYNCHRONIZED_BIT) == 0)
        {
            ANGLE_TRY(mBuffer->waitForIdle(contextVk));
        }

        ANGLE_TRY(mBuffer->mapWithOffset(contextVk, reinterpret_cast<uint8_t **>(mapPtr),
                                         static_cast<size_t>(offset)));
    }
    else
    {
        // If the app requested GL_MAP_UNSYNCHRONIZED_BIT access, the spec states:
        //      No GL error is generated if pending operations which source or modify the
        //      buffer overlap the mapped region, but the result of such previous and any
        //      subsequent operations is undefined.
        // To keep the code simple, just return the shadow buffer whether or not
        // GL_MAP_UNSYNCHRONIZED_BIT was requested.
        mShadowBuffer.map(static_cast<size_t>(offset), mapPtr);
    }

    return angle::Result::Continue;
}

angle::Result BufferVk::unmap(const gl::Context *context, GLboolean *result)
{
    ANGLE_TRY(unmapImpl(vk::GetImpl(context)));

    // This should be false if the contents have been corrupted through external means.  Vulkan
    // doesn't provide such information.
    *result = true;

    return angle::Result::Continue;
}

angle::Result BufferVk::unmapImpl(ContextVk *contextVk)
{
    ASSERT(mBuffer && mBuffer->valid());

    if (!mShadowBuffer.valid())
    {
        mBuffer->unmap(contextVk->getRenderer());
        mBuffer->onExternalWrite(VK_ACCESS_HOST_WRITE_BIT);
    }
    else
    {
        bool writeOperation = ((mState.getAccessFlags() & GL_MAP_WRITE_BIT) != 0);
        size_t offset       = static_cast<size_t>(mState.getMapOffset());
        size_t size         = static_cast<size_t>(mState.getMapLength());

        // If it was a write operation we need to update the GPU buffer.
        if (writeOperation)
        {
            // We do not yet know if this data will ever be used.  Perform a staged
            // update which will get flushed if and when necessary.
            const uint8_t *data = getShadowBuffer(offset);
            ANGLE_TRY(stagedUpdate(contextVk, data, size, offset));
        }

        mShadowBuffer.unmap();
    }

    markConversionBuffersDirty();

    return angle::Result::Continue;
}

angle::Result BufferVk::getIndexRange(const gl::Context *context,
                                      gl::DrawElementsType type,
                                      size_t offset,
                                      size_t count,
                                      bool primitiveRestartEnabled,
                                      gl::IndexRange *outRange)
{
    ContextVk *contextVk = vk::GetImpl(context);
    RendererVk *renderer = contextVk->getRenderer();

    // This is a workaround for the mock ICD not implementing buffer memory state.
    // Could be removed if https://github.com/KhronosGroup/Vulkan-Tools/issues/84 is fixed.
    if (renderer->isMockICDEnabled())
    {
        outRange->start = 0;
        outRange->end   = 0;
        return angle::Result::Continue;
    }

    ANGLE_TRACE_EVENT0("gpu.angle", "BufferVk::getIndexRange");

    uint8_t *mapPointer;

    if (!mShadowBuffer.valid())
    {
        ASSERT(mBuffer && mBuffer->valid());

        // Needed before reading the buffer, or we could get stale data.
        ANGLE_TRY(mBuffer->finishRunningCommands(contextVk));

        ANGLE_TRY(mBuffer->mapWithOffset(contextVk, &mapPointer, offset));
    }
    else
    {
        mapPointer = getShadowBuffer(offset);
    }

    *outRange = gl::ComputeIndexRange(type, mapPointer, count, primitiveRestartEnabled);

    mBuffer->unmap(renderer);
    return angle::Result::Continue;
}

angle::Result BufferVk::directUpdate(ContextVk *contextVk,
                                     const uint8_t *data,
                                     size_t size,
                                     size_t offset)
{
    uint8_t *mapPointer = nullptr;

    ANGLE_TRY(mBuffer->mapWithOffset(contextVk, &mapPointer, offset));
    ASSERT(mapPointer);

    memcpy(mapPointer, data, size);
    mBuffer->unmap(contextVk->getRenderer());
    ASSERT(mBuffer->isCoherent());
    mBuffer->onExternalWrite(VK_ACCESS_HOST_WRITE_BIT);

    return angle::Result::Continue;
}

angle::Result BufferVk::stagedUpdate(ContextVk *contextVk,
                                     const uint8_t *data,
                                     size_t size,
                                     size_t offset)
{
    // Acquire a "new" staging buffer.
    bool needToReleasePreviousBuffers = false;
    uint8_t *mapPointer               = nullptr;
    VkDeviceSize stagingBufferOffset  = 0;

    ANGLE_TRY(mStagingBuffer.allocate(contextVk, size, &mapPointer, nullptr, &stagingBufferOffset,
                                      &needToReleasePreviousBuffers));
    if (needToReleasePreviousBuffers)
    {
        // Release the previous staging buffers.
        mStagingBuffer.releaseInFlightBuffers(contextVk);
    }
    ASSERT(mapPointer);

    memcpy(mapPointer, data, size);
    ASSERT(!mStagingBuffer.isCoherent());
    ANGLE_TRY(mStagingBuffer.flush(contextVk));
    mStagingBuffer.getCurrentBuffer()->onExternalWrite(VK_ACCESS_HOST_WRITE_BIT);

    // Enqueue a copy command on the GPU.
    VkBufferCopy copyRegion = {stagingBufferOffset, offset, size};
    ANGLE_TRY(
        mBuffer->copyFromBuffer(contextVk, mStagingBuffer.getCurrentBuffer(), 1, &copyRegion));
    mStagingBuffer.getCurrentBuffer()->retain(&contextVk->getResourceUseList());

    return angle::Result::Continue;
}

angle::Result BufferVk::acquireAndUpdate(ContextVk *contextVk,
                                         const uint8_t *data,
                                         size_t size,
                                         size_t offset)
{
    // Acquire a new BufferHelper and directUpdate() into it.  If the sub-data range covers less
    // than the buffer's full size, additionally enqueue GPU copies of the remaining regions from
    // the old mBuffer to the new one.
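    // Layout of the new buffer after this call, when both surrounding regions exist:
    //
    //     [0, offset)                   copied on the GPU from the old buffer
    //     [offset, offset + size)       written directly with the new data
    //     [offset + size, bufferSize)   copied on the GPU from the old buffer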
    vk::BufferHelper *src          = mBuffer;
    size_t offsetAfterSubdata      = (offset + size);
    bool updateRegionBeforeSubData = (offset > 0);
    bool updateRegionAfterSubData  = (offsetAfterSubdata < static_cast<size_t>(mState.getSize()));

    if (updateRegionBeforeSubData || updateRegionAfterSubData)
    {
        src->retain(&contextVk->getResourceUseList());
    }

    ANGLE_TRY(acquireBufferHelper(contextVk, static_cast<size_t>(mState.getSize()), &mBuffer));
    ANGLE_TRY(directUpdate(contextVk, data, size, offset));

    constexpr int kMaxCopyRegions = 2;
    angle::FixedVector<VkBufferCopy, kMaxCopyRegions> copyRegions;

    if (updateRegionBeforeSubData)
    {
        copyRegions.push_back({0, 0, offset});
    }
    if (updateRegionAfterSubData)
    {
        copyRegions.push_back({offsetAfterSubdata, offsetAfterSubdata,
                               (static_cast<size_t>(mState.getSize()) - offsetAfterSubdata)});
    }

    if (!copyRegions.empty())
    {
        ANGLE_TRY(mBuffer->copyFromBuffer(contextVk, src, static_cast<uint32_t>(copyRegions.size()),
                                          copyRegions.data()));
    }

    return angle::Result::Continue;
}

angle::Result BufferVk::setDataImpl(ContextVk *contextVk,
                                    const uint8_t *data,
                                    size_t size,
                                    size_t offset)
{
    // Update the shadow buffer.
    updateShadowBuffer(data, size, offset);

    // If the buffer is currently in use:
    //     If the sub-data size meets the threshold, acquire a new BufferHelper from the pool,
    //     else stage an update.
    // Otherwise, update the buffer directly.
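    // For example, while the GPU is still using a 1000-byte buffer, a 600-byte update (> 50%)
    // takes the acquire path, a 100-byte update is staged, and once the buffer is idle any
    // update is written directly.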
    if (mBuffer->isCurrentlyInUse(contextVk->getLastCompletedQueueSerial()))
    {
        if (SubDataSizeMeetsThreshold(size, static_cast<size_t>(mState.getSize())))
        {
            ANGLE_TRY(acquireAndUpdate(contextVk, data, size, offset));
        }
        else
        {
            ANGLE_TRY(stagedUpdate(contextVk, data, size, offset));
        }
    }
    else
    {
        ANGLE_TRY(directUpdate(contextVk, data, size, offset));
    }

    // Update conversions.
    markConversionBuffersDirty();

    return angle::Result::Continue;
}

angle::Result BufferVk::copyToBufferImpl(ContextVk *contextVk,
                                         vk::BufferHelper *destBuffer,
                                         uint32_t copyCount,
                                         const VkBufferCopy *copies)
{
    vk::CommandBuffer *commandBuffer;
    ANGLE_TRY(contextVk->onBufferTransferWrite(destBuffer));
    ANGLE_TRY(contextVk->onBufferTransferRead(mBuffer));
    ANGLE_TRY(contextVk->endRenderPassAndGetCommandBuffer(&commandBuffer));

    commandBuffer->copyBuffer(mBuffer->getBuffer(), destBuffer->getBuffer(), copyCount, copies);

    return angle::Result::Continue;
}

ConversionBuffer *BufferVk::getVertexConversionBuffer(RendererVk *renderer,
                                                      angle::FormatID formatID,
                                                      GLuint stride,
                                                      size_t offset,
                                                      bool hostVisible)
{
    for (VertexConversionBuffer &buffer : mVertexConversionBuffers)
    {
        if (buffer.formatID == formatID && buffer.stride == stride && buffer.offset == offset)
        {
            return &buffer;
        }
    }

    mVertexConversionBuffers.emplace_back(renderer, formatID, stride, offset, hostVisible);
    return &mVertexConversionBuffers.back();
}

void BufferVk::markConversionBuffersDirty()
{
    for (VertexConversionBuffer &buffer : mVertexConversionBuffers)
    {
        buffer.dirty = true;
    }
}

void BufferVk::onDataChanged()
{
    markConversionBuffersDirty();
}

angle::Result BufferVk::acquireBufferHelper(ContextVk *contextVk,
                                            size_t sizeInBytes,
                                            vk::BufferHelper **bufferHelperOut)
{
    bool needToReleasePreviousBuffers = false;
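    // kBufferSizeGranularity is asserted to be a power of two above, as roundUpPow2() requires.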
    size_t size = roundUpPow2(sizeInBytes, kBufferSizeGranularity);

    ANGLE_TRY(mBufferPool.allocate(contextVk, size, nullptr, nullptr, nullptr,
                                   &needToReleasePreviousBuffers));

    if (needToReleasePreviousBuffers)
    {
        // Release the previous buffers.
        mBufferPool.releaseInFlightBuffers(contextVk);
    }

    ASSERT(bufferHelperOut);

    *bufferHelperOut = mBufferPool.getCurrentBuffer();
    ASSERT(*bufferHelperOut);

    return angle::Result::Continue;
}

}  // namespace rx