//
// Copyright 2016 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// VertexArrayVk.cpp:
//    Implements the class methods for VertexArrayVk.
//

#include "libANGLE/renderer/vulkan/VertexArrayVk.h"

#include "common/debug.h"
#include "common/utilities.h"
#include "libANGLE/Context.h"
#include "libANGLE/renderer/vulkan/BufferVk.h"
#include "libANGLE/renderer/vulkan/ContextVk.h"
#include "libANGLE/renderer/vulkan/FramebufferVk.h"
#include "libANGLE/renderer/vulkan/RendererVk.h"
#include "libANGLE/renderer/vulkan/ResourceVk.h"
#include "libANGLE/renderer/vulkan/vk_format_utils.h"
#include "libANGLE/trace.h"

namespace rx
{
namespace
{
constexpr int kStreamIndexBufferCachedIndexCount = 6;
constexpr int kMaxCachedStreamIndexBuffers = 4;
constexpr size_t kDefaultValueSize = sizeof(gl::VertexAttribCurrentValueData::Values);

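// Returns whether the binding's offset and stride satisfy the alignment needed to read the given
// format directly (or, for the unaligned-component case below, to convert it on the GPU).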
ANGLE_INLINE bool BindingIsAligned(const gl::VertexBinding &binding,
                                   const angle::Format &angleFormat,
                                   unsigned int attribSize,
                                   GLuint relativeOffset)
{
    GLintptr totalOffset = binding.getOffset() + relativeOffset;
    GLuint mask = angleFormat.componentAlignmentMask;
    if (mask != std::numeric_limits<GLuint>::max())
    {
        return ((totalOffset & mask) == 0 && (binding.getStride() & mask) == 0);
    }
    else
    {
        // For formats with components that aren't byte-aligned (for example, A2BGR10 or RGB10A2),
        // the GPU conversion compute shader loads each element as one whole formatSize-byte unit,
        // so the binding offset and stride have to be aligned to formatSize.
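        // For example, A2BGR10 packs all four components into 4 bytes (pixelBytes == 4), so both
        // the offset and the stride must be multiples of 4.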
        unsigned int formatSize = angleFormat.pixelBytes;
        return (totalOffset % formatSize == 0) && (binding.getStride() % formatSize == 0);
    }
}

void WarnOnVertexFormatConversion(ContextVk *contextVk,
                                  const vk::Format &vertexFormat,
                                  bool compressed,
                                  bool insertEventMarker)
{
    if (!vertexFormat.getVertexLoadRequiresConversion(compressed))
    {
        return;
    }

    ANGLE_VK_PERF_WARNING(
        contextVk, GL_DEBUG_SEVERITY_LOW,
        "The Vulkan driver does not support vertex attribute format 0x%04X, emulating with 0x%04X",
        vertexFormat.getIntendedFormat().glInternalFormat,
        vertexFormat.getActualBufferFormat(compressed).glInternalFormat);
}

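// Copies and converts vertexCount vertices from srcData into the already-mapped destination
// buffer at dstOffset, then flushes the destination buffer.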
angle::Result StreamVertexData(ContextVk *contextVk,
                               vk::BufferHelper *dstBufferHelper,
                               const uint8_t *srcData,
                               size_t bytesToAllocate,
                               size_t dstOffset,
                               size_t vertexCount,
                               size_t srcStride,
                               VertexCopyFunction vertexLoadFunction)
{
    RendererVk *renderer = contextVk->getRenderer();

    uint8_t *dst = dstBufferHelper->getMappedMemory() + dstOffset;

    vertexLoadFunction(srcData, srcStride, vertexCount, dst);

    ANGLE_TRY(dstBufferHelper->flush(renderer));

    return angle::Result::Continue;
}

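// Expands vertex data for emulated instancing: each source vertex is written `divisor` times so
// that the attribute can later be bound with a divisor of 1.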
angle::Result StreamVertexDataWithDivisor(ContextVk *contextVk,
                                          vk::BufferHelper *dstBufferHelper,
                                          const uint8_t *srcData,
                                          size_t bytesToAllocate,
                                          size_t srcStride,
                                          size_t dstStride,
                                          VertexCopyFunction vertexLoadFunction,
                                          uint32_t divisor,
                                          size_t numSrcVertices)
{
    RendererVk *renderer = contextVk->getRenderer();

    uint8_t *dst = dstBufferHelper->getMappedMemory();

    // Each source vertex is used `divisor` times before advancing. Clamp to avoid OOB reads.
    size_t clampedSize = std::min(numSrcVertices * dstStride * divisor, bytesToAllocate);
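    // e.g. 3 source vertices with divisor 2 and dstStride 16 expand to at most 3 * 2 * 16 = 96
    // bytes, but never more than what the caller allocated.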

    ASSERT(clampedSize % dstStride == 0);
    ASSERT(divisor > 0);

    uint32_t srcVertexUseCount = 0;
    for (size_t dataCopied = 0; dataCopied < clampedSize; dataCopied += dstStride)
    {
        vertexLoadFunction(srcData, srcStride, 1, dst);
        srcVertexUseCount++;
        if (srcVertexUseCount == divisor)
        {
            srcData += srcStride;
            srcVertexUseCount = 0;
        }
        dst += dstStride;
    }

    // Satisfy robustness constraints (only if extension enabled)
    if (contextVk->getExtensions().robustnessEXT)
    {
        if (clampedSize < bytesToAllocate)
        {
            memset(dst, 0, bytesToAllocate - clampedSize);
        }
    }

    ANGLE_TRY(dstBufferHelper->flush(renderer));

    return angle::Result::Continue;
}

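// Computes how many whole vertices the bound buffer range can supply, given the binding's offset
// and stride and the size of one vertex element.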
size_t GetVertexCount(BufferVk *srcBuffer, const gl::VertexBinding &binding, uint32_t srcFormatSize)
{
    // Bytes usable for vertex data.
    GLint64 bytes = srcBuffer->getSize() - binding.getOffset();
    if (bytes < srcFormatSize)
        return 0;

    // Count the last vertex. It may occupy less than a full stride.
    // This is also correct if stride happens to be less than srcFormatSize.
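    // For example, 100 usable bytes with srcFormatSize 8 and stride 16 yield
    // 1 + (100 - 8) / 16 = 6 vertices.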
    size_t numVertices = 1;
    bytes -= srcFormatSize;

    // Count how many strides fit remaining space.
    if (bytes > 0)
        numVertices += static_cast<size_t>(bytes) / binding.getStride();

    return numVertices;
}
}  // anonymous namespace

VertexArrayVk::VertexArrayVk(ContextVk *contextVk, const gl::VertexArrayState &state)
    : VertexArrayImpl(state),
      mCurrentArrayBufferHandles{},
      mCurrentArrayBufferOffsets{},
      mCurrentArrayBufferRelativeOffsets{},
      mCurrentArrayBuffers{},
      mCurrentElementArrayBuffer(nullptr),
      mLineLoopHelper(contextVk->getRenderer()),
      mDirtyLineLoopTranslation(true)
{
    vk::BufferHelper &emptyBuffer = contextVk->getEmptyBuffer();

    mCurrentArrayBufferHandles.fill(emptyBuffer.getBuffer().getHandle());
    mCurrentArrayBufferOffsets.fill(0);
    mCurrentArrayBufferRelativeOffsets.fill(0);
    mCurrentArrayBuffers.fill(&emptyBuffer);
}

VertexArrayVk::~VertexArrayVk() {}

void VertexArrayVk::destroy(const gl::Context *context)
{
    ContextVk *contextVk = vk::GetImpl(context);

    RendererVk *renderer = contextVk->getRenderer();

    for (std::unique_ptr<vk::BufferHelper> &buffer : mCachedStreamIndexBuffers)
    {
        buffer->release(renderer);
    }

    mStreamedIndexData.release(renderer);
    mTranslatedByteIndexData.release(renderer);
    mTranslatedByteIndirectData.release(renderer);
    mLineLoopHelper.release(contextVk);
}

angle::Result VertexArrayVk::convertIndexBufferGPU(ContextVk *contextVk,
                                                   BufferVk *bufferVk,
                                                   const void *indices)
{
    intptr_t offsetIntoSrcData = reinterpret_cast<intptr_t>(indices);
    size_t srcDataSize = static_cast<size_t>(bufferVk->getSize()) - offsetIntoSrcData;

    // Allocate buffer for results
    ANGLE_TRY(mTranslatedByteIndexData.allocateForVertexConversion(
        contextVk, sizeof(GLushort) * srcDataSize, vk::MemoryHostVisibility::NonVisible));
    mCurrentElementArrayBuffer = &mTranslatedByteIndexData;

    vk::BufferHelper *dst = &mTranslatedByteIndexData;
    vk::BufferHelper *src = &bufferVk->getBuffer();

    // Copy the relevant section of the source into the destination at the allocated offset. Note
    // that the offset returned by allocate() above is in bytes, as is the indices offset pointer.
    UtilsVk::ConvertIndexParameters params = {};
    params.srcOffset = static_cast<uint32_t>(offsetIntoSrcData);
    params.dstOffset = 0;
    params.maxIndex = static_cast<uint32_t>(bufferVk->getSize());

    return contextVk->getUtils().convertIndexBuffer(contextVk, dst, src, params);
}

angle::Result VertexArrayVk::convertIndexBufferIndirectGPU(ContextVk *contextVk,
                                                           vk::BufferHelper *srcIndirectBuf,
                                                           VkDeviceSize srcIndirectBufOffset,
                                                           vk::BufferHelper **indirectBufferVkOut)
{
    size_t srcDataSize = static_cast<size_t>(mCurrentElementArrayBuffer->getSize());
    ASSERT(mCurrentElementArrayBuffer ==
           &vk::GetImpl(getState().getElementArrayBuffer())->getBuffer());

    vk::BufferHelper *srcIndexBuf = mCurrentElementArrayBuffer;

    // Allocate buffer for results
    ANGLE_TRY(mTranslatedByteIndexData.allocateForVertexConversion(
        contextVk, sizeof(GLushort) * srcDataSize, vk::MemoryHostVisibility::NonVisible));
    vk::BufferHelper *dstIndexBuf = &mTranslatedByteIndexData;

    ANGLE_TRY(mTranslatedByteIndirectData.allocateForVertexConversion(
        contextVk, sizeof(VkDrawIndexedIndirectCommand), vk::MemoryHostVisibility::NonVisible));
    vk::BufferHelper *dstIndirectBuf = &mTranslatedByteIndirectData;

    // Save the new element array buffer
    mCurrentElementArrayBuffer = dstIndexBuf;
    // Tell the caller what the new indirect buffer is
    *indirectBufferVkOut = dstIndirectBuf;

    // Copy the relevant section of the source into the destination at the allocated offset. Note
    // that the offset returned by allocate() above is in bytes, as is the indices offset pointer.
    UtilsVk::ConvertIndexIndirectParameters params = {};
    params.srcIndirectBufOffset = static_cast<uint32_t>(srcIndirectBufOffset);
    params.srcIndexBufOffset = 0;
    params.dstIndexBufOffset = 0;
    params.maxIndex = static_cast<uint32_t>(srcDataSize);
    params.dstIndirectBufOffset = 0;

    return contextVk->getUtils().convertIndexIndirectBuffer(contextVk, srcIndirectBuf, srcIndexBuf,
                                                            dstIndirectBuf, dstIndexBuf, params);
}

angle::Result VertexArrayVk::handleLineLoopIndexIndirect(ContextVk *contextVk,
                                                         gl::DrawElementsType glIndexType,
                                                         vk::BufferHelper *srcIndirectBuf,
                                                         VkDeviceSize indirectBufferOffset,
                                                         vk::BufferHelper **indirectBufferOut)
{
    ANGLE_TRY(mLineLoopHelper.streamIndicesIndirect(
        contextVk, glIndexType, mCurrentElementArrayBuffer, srcIndirectBuf, indirectBufferOffset,
        &mCurrentElementArrayBuffer, indirectBufferOut));

    return angle::Result::Continue;
}

angle::Result VertexArrayVk::handleLineLoopIndirectDraw(const gl::Context *context,
                                                        vk::BufferHelper *indirectBufferVk,
                                                        VkDeviceSize indirectBufferOffset,
                                                        vk::BufferHelper **indirectBufferOut)
{
    size_t maxVertexCount = 0;
    ContextVk *contextVk = vk::GetImpl(context);
    const gl::AttributesMask activeAttribs =
        context->getStateCache().getActiveBufferedAttribsMask();

    const auto &attribs = mState.getVertexAttributes();
    const auto &bindings = mState.getVertexBindings();

    for (size_t attribIndex : activeAttribs)
    {
        const gl::VertexAttribute &attrib = attribs[attribIndex];
        ASSERT(attrib.enabled);
        VkDeviceSize bufSize = getCurrentArrayBuffers()[attribIndex]->getSize();
        const gl::VertexBinding &binding = bindings[attrib.bindingIndex];
        size_t stride = binding.getStride();
        size_t vertexCount = static_cast<size_t>(bufSize / stride);
        if (vertexCount > maxVertexCount)
        {
            maxVertexCount = vertexCount;
        }
    }
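    // The +1 below accounts for the extra index that closes the line loop.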
    ANGLE_TRY(mLineLoopHelper.streamArrayIndirect(contextVk, maxVertexCount + 1, indirectBufferVk,
                                                  indirectBufferOffset, &mCurrentElementArrayBuffer,
                                                  indirectBufferOut));

    return angle::Result::Continue;
}

angle::Result VertexArrayVk::convertIndexBufferCPU(ContextVk *contextVk,
                                                   gl::DrawElementsType indexType,
                                                   size_t indexCount,
                                                   const void *sourcePointer,
                                                   BufferBindingDirty *bindingDirty)
{
    ASSERT(!mState.getElementArrayBuffer() || indexType == gl::DrawElementsType::UnsignedByte);
    RendererVk *renderer = contextVk->getRenderer();
    size_t elementSize = contextVk->getVkIndexTypeSize(indexType);
    const size_t amount = elementSize * indexCount;

    // Applications often draw a quad as two triangles. Try to serve the most commonly used
    // element array buffers from pre-created BufferHelper objects to improve performance.
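    // A quad drawn as two triangles uses kStreamIndexBufferCachedIndexCount (6) indices.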
    if (indexCount == kStreamIndexBufferCachedIndexCount &&
        indexType == gl::DrawElementsType::UnsignedShort)
    {
        for (std::unique_ptr<vk::BufferHelper> &buffer : mCachedStreamIndexBuffers)
        {
            void *ptr = buffer->getMappedMemory();
            if (memcmp(sourcePointer, ptr, amount) == 0)
            {
                // Found a matching cached buffer, use the cached internal index buffer.
                *bindingDirty = mCurrentElementArrayBuffer == buffer.get()
                                    ? BufferBindingDirty::No
                                    : BufferBindingDirty::Yes;
                mCurrentElementArrayBuffer = buffer.get();
                return angle::Result::Continue;
            }
        }

        // If we still have capacity, cache this index buffer for future use.
        if (mCachedStreamIndexBuffers.size() < kMaxCachedStreamIndexBuffers)
        {
            std::unique_ptr<vk::BufferHelper> buffer = std::make_unique<vk::BufferHelper>();
            ANGLE_TRY(buffer->initSuballocation(contextVk,
                                                renderer->getVertexConversionBufferMemoryTypeIndex(
                                                    vk::MemoryHostVisibility::Visible),
                                                amount,
                                                renderer->getVertexConversionBufferAlignment()));
            memcpy(buffer->getMappedMemory(), sourcePointer, amount);
            ANGLE_TRY(buffer->flush(renderer));

            mCachedStreamIndexBuffers.push_back(std::move(buffer));

            *bindingDirty = BufferBindingDirty::Yes;
            mCurrentElementArrayBuffer = mCachedStreamIndexBuffers.back().get();
            return angle::Result::Continue;
        }
    }

    ANGLE_TRY(mStreamedIndexData.allocateForVertexConversion(contextVk, amount,
                                                             vk::MemoryHostVisibility::Visible));
    GLubyte *dst = mStreamedIndexData.getMappedMemory();

    *bindingDirty = BufferBindingDirty::Yes;
    mCurrentElementArrayBuffer = &mStreamedIndexData;

    if (contextVk->shouldConvertUint8VkIndexType(indexType))
    {
        // Unsigned byte indices don't have direct support in Vulkan, so we have to expand each
        // index to a GLushort.
        const GLubyte *in = static_cast<const GLubyte *>(sourcePointer);
        GLushort *expandedDst = reinterpret_cast<GLushort *>(dst);
        bool primitiveRestart = contextVk->getState().isPrimitiveRestartEnabled();

        constexpr GLubyte kUnsignedByteRestartValue = 0xFF;
        constexpr GLushort kUnsignedShortRestartValue = 0xFFFF;

        if (primitiveRestart)
        {
            for (size_t index = 0; index < indexCount; index++)
            {
                GLushort value = static_cast<GLushort>(in[index]);
                if (in[index] == kUnsignedByteRestartValue)
                {
                    // Convert from the 8-bit restart value to the 16-bit restart value.
                    value = kUnsignedShortRestartValue;
                }
                expandedDst[index] = value;
            }
        }
        else
        {
            // Fast path for the common case.
            for (size_t index = 0; index < indexCount; index++)
            {
                expandedDst[index] = static_cast<GLushort>(in[index]);
            }
        }
    }
    else
    {
        // The primitive restart value is the same for OpenGL and Vulkan,
        // so there's no need to perform any conversion.
        memcpy(dst, sourcePointer, amount);
    }
    return mStreamedIndexData.flush(contextVk->getRenderer());
}

// We assume the buffer is completely full of the same kind of data, and convert and/or align it
// as we copy it to a new buffer. The assumption could be wrong, but the alternative of converting
// it piecemeal on each draw would have a lot more overhead.
angle::Result VertexArrayVk::convertVertexBufferGPU(ContextVk *contextVk,
                                                    BufferVk *srcBuffer,
                                                    const gl::VertexBinding &binding,
                                                    size_t attribIndex,
                                                    const vk::Format &vertexFormat,
                                                    ConversionBuffer *conversion,
                                                    GLuint relativeOffset,
                                                    bool compressed)
{
    const angle::Format &srcFormat = vertexFormat.getIntendedFormat();
    const angle::Format &dstFormat = vertexFormat.getActualBufferFormat(compressed);

    ASSERT(binding.getStride() % (srcFormat.pixelBytes / srcFormat.channelCount) == 0);

    unsigned srcFormatSize = srcFormat.pixelBytes;
    unsigned dstFormatSize = dstFormat.pixelBytes;

    size_t numVertices = GetVertexCount(srcBuffer, binding, srcFormatSize);
    if (numVertices == 0)
    {
        return angle::Result::Continue;
    }
    ASSERT(vertexFormat.getVertexInputAlignment(compressed) <= vk::kVertexBufferAlignment);

    // Allocate buffer for results
    vk::BufferHelper *dstBuffer = conversion->data.get();
    ANGLE_TRY(dstBuffer->allocateForVertexConversion(contextVk, numVertices * dstFormatSize,
                                                     vk::MemoryHostVisibility::NonVisible));

    ASSERT(conversion->dirty);
    conversion->dirty = false;

    vk::BufferHelper *srcBufferHelper = &srcBuffer->getBuffer();

    UtilsVk::ConvertVertexParameters params;
    params.vertexCount = numVertices;
    params.srcFormat = &srcFormat;
    params.dstFormat = &dstFormat;
    params.srcStride = binding.getStride();
    params.srcOffset = binding.getOffset() + relativeOffset;
    params.dstOffset = 0;

    ANGLE_TRY(
        contextVk->getUtils().convertVertexBuffer(contextVk, dstBuffer, srcBufferHelper, params));

    return angle::Result::Continue;
}

angle::Result VertexArrayVk::convertVertexBufferCPU(ContextVk *contextVk,
                                                    BufferVk *srcBuffer,
                                                    const gl::VertexBinding &binding,
                                                    size_t attribIndex,
                                                    const vk::Format &vertexFormat,
                                                    ConversionBuffer *conversion,
                                                    GLuint relativeOffset,
                                                    bool compressed)
{
    ANGLE_TRACE_EVENT0("gpu.angle", "VertexArrayVk::convertVertexBufferCpu");

    unsigned srcFormatSize = vertexFormat.getIntendedFormat().pixelBytes;
    unsigned dstFormatSize = vertexFormat.getActualBufferFormat(compressed).pixelBytes;

    size_t numVertices = GetVertexCount(srcBuffer, binding, srcFormatSize);
    if (numVertices == 0)
    {
        return angle::Result::Continue;
    }

    void *src = nullptr;
    ANGLE_TRY(srcBuffer->mapImpl(contextVk, GL_MAP_READ_BIT, &src));
    const uint8_t *srcBytes = reinterpret_cast<const uint8_t *>(src);
    srcBytes += binding.getOffset() + relativeOffset;
    ASSERT(vertexFormat.getVertexInputAlignment(compressed) <= vk::kVertexBufferAlignment);

    vk::BufferHelper *dstBufferHelper = conversion->data.get();
    // Allocate buffer for results
    ANGLE_TRY(dstBufferHelper->allocateForVertexConversion(contextVk, numVertices * dstFormatSize,
                                                           vk::MemoryHostVisibility::Visible));

    ANGLE_TRY(StreamVertexData(contextVk, dstBufferHelper, srcBytes, numVertices * dstFormatSize, 0,
                               numVertices, binding.getStride(),
                               vertexFormat.getVertexLoadFunction(compressed)));
    ANGLE_TRY(srcBuffer->unmapImpl(contextVk));
    mCurrentArrayBuffers[attribIndex] = dstBufferHelper;

    ASSERT(conversion->dirty);
    conversion->dirty = false;

    return angle::Result::Continue;
}

void VertexArrayVk::updateCurrentElementArrayBuffer()
{
    ASSERT(mState.getElementArrayBuffer() != nullptr);
    ASSERT(mState.getElementArrayBuffer()->getSize() > 0);

    BufferVk *bufferVk = vk::GetImpl(mState.getElementArrayBuffer());
    mCurrentElementArrayBuffer = &bufferVk->getBuffer();
}

angle::Result VertexArrayVk::syncState(const gl::Context *context,
                                       const gl::VertexArray::DirtyBits &dirtyBits,
                                       gl::VertexArray::DirtyAttribBitsArray *attribBits,
                                       gl::VertexArray::DirtyBindingBitsArray *bindingBits)
{
    ASSERT(dirtyBits.any());

    ContextVk *contextVk = vk::GetImpl(context);
    contextVk->getPerfCounters().vertexArraySyncStateCalls++;

    const std::vector<gl::VertexAttribute> &attribs = mState.getVertexAttributes();
    const std::vector<gl::VertexBinding> &bindings = mState.getVertexBindings();

    for (size_t dirtyBit : dirtyBits)
    {
        switch (dirtyBit)
        {
            case gl::VertexArray::DIRTY_BIT_ELEMENT_ARRAY_BUFFER:
            case gl::VertexArray::DIRTY_BIT_ELEMENT_ARRAY_BUFFER_DATA:
            {
                gl::Buffer *bufferGL = mState.getElementArrayBuffer();
                if (bufferGL && bufferGL->getSize() > 0)
                {
                    // Note that just updating buffer data may still result in a new
                    // vk::BufferHelper allocation.
                    updateCurrentElementArrayBuffer();
                }
                else
                {
                    mCurrentElementArrayBuffer = nullptr;
                }

                mLineLoopBufferFirstIndex.reset();
                mLineLoopBufferLastIndex.reset();
                ANGLE_TRY(contextVk->onIndexBufferChange(mCurrentElementArrayBuffer));
                mDirtyLineLoopTranslation = true;
                break;
            }

#define ANGLE_VERTEX_DIRTY_ATTRIB_FUNC(INDEX)                                                 \
    case gl::VertexArray::DIRTY_BIT_ATTRIB_0 + INDEX:                                         \
    {                                                                                         \
        const bool bufferOnly =                                                               \
            (*attribBits)[INDEX].to_ulong() ==                                                \
            angle::Bit<unsigned long>(gl::VertexArray::DIRTY_ATTRIB_POINTER_BUFFER);          \
        ANGLE_TRY(syncDirtyAttrib(contextVk, attribs[INDEX],                                  \
                                  bindings[attribs[INDEX].bindingIndex], INDEX, bufferOnly)); \
        (*attribBits)[INDEX].reset();                                                         \
        break;                                                                                \
    }

            ANGLE_VERTEX_INDEX_CASES(ANGLE_VERTEX_DIRTY_ATTRIB_FUNC)

#define ANGLE_VERTEX_DIRTY_BINDING_FUNC(INDEX)                                          \
    case gl::VertexArray::DIRTY_BIT_BINDING_0 + INDEX:                                  \
        for (size_t attribIndex : bindings[INDEX].getBoundAttributesMask())             \
        {                                                                               \
            ANGLE_TRY(syncDirtyAttrib(contextVk, attribs[attribIndex], bindings[INDEX], \
                                      attribIndex, false));                             \
        }                                                                               \
        (*bindingBits)[INDEX].reset();                                                  \
        break;

            ANGLE_VERTEX_INDEX_CASES(ANGLE_VERTEX_DIRTY_BINDING_FUNC)

#define ANGLE_VERTEX_DIRTY_BUFFER_DATA_FUNC(INDEX)                                       \
    case gl::VertexArray::DIRTY_BIT_BUFFER_DATA_0 + INDEX:                               \
        ANGLE_TRY(syncDirtyAttrib(contextVk, attribs[INDEX],                             \
                                  bindings[attribs[INDEX].bindingIndex], INDEX, false)); \
        break;

            ANGLE_VERTEX_INDEX_CASES(ANGLE_VERTEX_DIRTY_BUFFER_DATA_FUNC)

            default:
                UNREACHABLE();
                break;
        }
    }

    return angle::Result::Continue;
}

#undef ANGLE_VERTEX_DIRTY_ATTRIB_FUNC
#undef ANGLE_VERTEX_DIRTY_BINDING_FUNC
#undef ANGLE_VERTEX_DIRTY_BUFFER_DATA_FUNC

ANGLE_INLINE angle::Result VertexArrayVk::setDefaultPackedInput(ContextVk *contextVk,
                                                                size_t attribIndex)
{
    const gl::State &glState = contextVk->getState();
    const gl::VertexAttribCurrentValueData &defaultValue =
        glState.getVertexAttribCurrentValues()[attribIndex];

    angle::FormatID format = GetCurrentValueFormatID(defaultValue.Type);

    return contextVk->onVertexAttributeChange(attribIndex, 0, 0, format, false, 0, nullptr);
}

angle::Result VertexArrayVk::updateActiveAttribInfo(ContextVk *contextVk)
{
    const std::vector<gl::VertexAttribute> &attribs = mState.getVertexAttributes();
    const std::vector<gl::VertexBinding> &bindings = mState.getVertexBindings();

    // Update pipeline cache with current active attribute info
    for (size_t attribIndex : mState.getEnabledAttributesMask())
    {
        const gl::VertexAttribute &attrib = attribs[attribIndex];
        const gl::VertexBinding &binding = bindings[attribs[attribIndex].bindingIndex];

        ANGLE_TRY(contextVk->onVertexAttributeChange(
            attribIndex, mCurrentArrayBufferStrides[attribIndex], binding.getDivisor(),
            attrib.format->id, mCurrentArrayBufferCompressed.test(attribIndex),
            mCurrentArrayBufferRelativeOffsets[attribIndex], mCurrentArrayBuffers[attribIndex]));
    }

    return angle::Result::Continue;
}

angle::Result VertexArrayVk::syncDirtyAttrib(ContextVk *contextVk,
                                             const gl::VertexAttribute &attrib,
                                             const gl::VertexBinding &binding,
                                             size_t attribIndex,
                                             bool bufferOnly)
{
    RendererVk *renderer = contextVk->getRenderer();
    if (attrib.enabled)
    {
        const vk::Format &vertexFormat = renderer->getFormat(attrib.format->id);

        GLuint stride;
        // Init attribute offset to the front-end value
        mCurrentArrayBufferRelativeOffsets[attribIndex] = attrib.relativeOffset;
        gl::Buffer *bufferGL = binding.getBuffer().get();
        // Emulated and/or client-side attribs will be streamed
        bool isStreamingVertexAttrib =
            (binding.getDivisor() > renderer->getMaxVertexAttribDivisor()) || (bufferGL == nullptr);
        mStreamingVertexAttribsMask.set(attribIndex, isStreamingVertexAttrib);
        bool compressed = false;

        if (bufferGL)
        {
            mContentsObservers->disableForBuffer(bufferGL, static_cast<uint32_t>(attribIndex));
        }

        if (!isStreamingVertexAttrib && bufferGL->getSize() > 0)
        {
            BufferVk *bufferVk = vk::GetImpl(bufferGL);
            const angle::Format &intendedFormat = vertexFormat.getIntendedFormat();
            bool bindingIsAligned = BindingIsAligned(
                binding, intendedFormat, intendedFormat.channelCount, attrib.relativeOffset);

            if (renderer->getFeatures().compressVertexData.enabled &&
                gl::IsStaticBufferUsage(bufferGL->getUsage()) &&
                vertexFormat.canCompressBufferData())
            {
                compressed = true;
            }

            bool needsConversion =
                vertexFormat.getVertexLoadRequiresConversion(compressed) || !bindingIsAligned;

            if (needsConversion)
            {
                mContentsObservers->enableForBuffer(bufferGL, static_cast<uint32_t>(attribIndex));

                WarnOnVertexFormatConversion(contextVk, vertexFormat, compressed, true);

                ConversionBuffer *conversion = bufferVk->getVertexConversionBuffer(
                    renderer, intendedFormat.id, binding.getStride(),
                    binding.getOffset() + attrib.relativeOffset, !bindingIsAligned);
                if (conversion->dirty)
                {
                    if (compressed)
                    {
                        INFO() << "Compressing vertex data in buffer " << bufferGL->id().value
                               << " from " << ToUnderlying(vertexFormat.getIntendedFormatID())
                               << " to "
                               << ToUnderlying(vertexFormat.getActualBufferFormat(true).id) << ".";
                    }

                    if (bindingIsAligned)
                    {
                        ANGLE_TRY(convertVertexBufferGPU(contextVk, bufferVk, binding, attribIndex,
                                                         vertexFormat, conversion,
                                                         attrib.relativeOffset, compressed));
                    }
                    else
                    {
                        ANGLE_VK_PERF_WARNING(
                            contextVk, GL_DEBUG_SEVERITY_HIGH,
                            "GPU stall due to vertex format conversion of unaligned data");

                        ANGLE_TRY(convertVertexBufferCPU(contextVk, bufferVk, binding, attribIndex,
                                                         vertexFormat, conversion,
                                                         attrib.relativeOffset, compressed));
                    }

                    // If a conversion happens, the destination buffer stride may change, so a
                    // full attribute change needs to be signaled. This may trigger an unnecessary
                    // Vulkan PSO update when the destination stride does not actually change, but
                    // for simplicity we stay conservative.
                    bufferOnly = false;
                }

                vk::BufferHelper *bufferHelper = conversion->data.get();
                mCurrentArrayBuffers[attribIndex] = bufferHelper;
                VkDeviceSize bufferOffset;
                mCurrentArrayBufferHandles[attribIndex] =
                    bufferHelper
                        ->getBufferForVertexArray(contextVk, bufferHelper->getSize(), &bufferOffset)
                        .getHandle();
                mCurrentArrayBufferOffsets[attribIndex] = bufferOffset;
                // Converted attribs are packed in their own VK buffer so offset is zero
                mCurrentArrayBufferRelativeOffsets[attribIndex] = 0;

                // Converted buffer is tightly packed
                stride = vertexFormat.getActualBufferFormat(compressed).pixelBytes;
            }
            else
            {
                if (bufferVk->getSize() == 0)
                {
                    vk::BufferHelper &emptyBuffer = contextVk->getEmptyBuffer();

                    mCurrentArrayBuffers[attribIndex] = &emptyBuffer;
                    mCurrentArrayBufferHandles[attribIndex] = emptyBuffer.getBuffer().getHandle();
                    mCurrentArrayBufferOffsets[attribIndex] = emptyBuffer.getOffset();
                    stride = 0;
                }
                else
                {
                    vk::BufferHelper &bufferHelper = bufferVk->getBuffer();
                    mCurrentArrayBuffers[attribIndex] = &bufferHelper;
                    VkDeviceSize bufferOffset;
                    mCurrentArrayBufferHandles[attribIndex] =
                        bufferHelper
                            .getBufferForVertexArray(contextVk, bufferVk->getSize(), &bufferOffset)
                            .getHandle();

                    // Vulkan requires the offset to be within the buffer. We rely on robust
                    // access behaviour and reset the offset if it starts outside the buffer.
                    mCurrentArrayBufferOffsets[attribIndex] =
                        binding.getOffset() < static_cast<GLint64>(bufferVk->getSize())
                            ? binding.getOffset() + bufferOffset
                            : bufferOffset;

                    stride = binding.getStride();
                }
            }
        }
        else
        {
            vk::BufferHelper &emptyBuffer = contextVk->getEmptyBuffer();
            mCurrentArrayBuffers[attribIndex] = &emptyBuffer;
            mCurrentArrayBufferHandles[attribIndex] = emptyBuffer.getBuffer().getHandle();
            mCurrentArrayBufferOffsets[attribIndex] = emptyBuffer.getOffset();
            // Client-side data will be transferred to a tightly packed buffer later
            stride = vertexFormat.getActualBufferFormat(compressed).pixelBytes;
        }

        if (bufferOnly)
        {
            ANGLE_TRY(contextVk->onVertexBufferChange(mCurrentArrayBuffers[attribIndex]));
        }
        else
        {
            ANGLE_TRY(contextVk->onVertexAttributeChange(
                attribIndex, stride, binding.getDivisor(), attrib.format->id, compressed,
                mCurrentArrayBufferRelativeOffsets[attribIndex],
                mCurrentArrayBuffers[attribIndex]));
            // Cache the stride of the attribute
            mCurrentArrayBufferStrides[attribIndex] = stride;
            mCurrentArrayBufferCompressed[attribIndex] = compressed;
        }
    }
    else
    {
        contextVk->invalidateDefaultAttribute(attribIndex);

        // These will be filled out by the ContextVk.
        vk::BufferHelper &emptyBuffer = contextVk->getEmptyBuffer();
        mCurrentArrayBuffers[attribIndex] = &emptyBuffer;
        mCurrentArrayBufferHandles[attribIndex] = emptyBuffer.getBuffer().getHandle();
        mCurrentArrayBufferOffsets[attribIndex] = emptyBuffer.getOffset();
        mCurrentArrayBufferStrides[attribIndex] = 0;
        mCurrentArrayBufferCompressed[attribIndex] = false;
        mCurrentArrayBufferRelativeOffsets[attribIndex] = 0;

        ANGLE_TRY(setDefaultPackedInput(contextVk, attribIndex));
    }

    return angle::Result::Continue;
}

// Handle copying client attribs and/or expanding the attrib buffer in cases where the attribute
// divisor value has to be emulated.
angle::Result VertexArrayVk::updateStreamedAttribs(const gl::Context *context,
                                                   GLint firstVertex,
                                                   GLsizei vertexOrIndexCount,
                                                   GLsizei instanceCount,
                                                   gl::DrawElementsType indexTypeOrInvalid,
                                                   const void *indices)
{
    ContextVk *contextVk = vk::GetImpl(context);
    RendererVk *renderer = contextVk->getRenderer();

    const gl::AttributesMask activeAttribs =
        context->getStateCache().getActiveClientAttribsMask() |
        context->getStateCache().getActiveBufferedAttribsMask();
    const gl::AttributesMask activeStreamedAttribs = mStreamingVertexAttribsMask & activeAttribs;

    // Early return for the corner case where no active attribs need to be streamed.
    if (!activeStreamedAttribs.any())
        return angle::Result::Continue;

    GLint startVertex;
    size_t vertexCount;
    ANGLE_TRY(GetVertexRangeInfo(context, firstVertex, vertexOrIndexCount, indexTypeOrInvalid,
                                 indices, 0, &startVertex, &vertexCount));

    const auto &attribs = mState.getVertexAttributes();
    const auto &bindings = mState.getVertexBindings();

    // TODO: When we have a bunch of interleaved attributes, they end up
    // un-interleaved, wasting space and copying time. Consider improving on that.
    for (size_t attribIndex : activeStreamedAttribs)
    {
        const gl::VertexAttribute &attrib = attribs[attribIndex];
        ASSERT(attrib.enabled);
        const gl::VertexBinding &binding = bindings[attrib.bindingIndex];

        const vk::Format &vertexFormat = renderer->getFormat(attrib.format->id);
        GLuint stride = vertexFormat.getActualBufferFormat(false).pixelBytes;

        bool compressed = false;
        WarnOnVertexFormatConversion(contextVk, vertexFormat, compressed, false);

        ASSERT(vertexFormat.getVertexInputAlignment(false) <= vk::kVertexBufferAlignment);

        vk::BufferHelper *vertexDataBuffer;
        const uint8_t *src = static_cast<const uint8_t *>(attrib.pointer);
        const uint32_t divisor = binding.getDivisor();
        if (divisor > 0)
        {
            // Instanced attrib
            if (divisor > renderer->getMaxVertexAttribDivisor())
            {
                // The divisor will be set to 1, so expand the buffer to hold one attribute value
                // per instance.
                size_t bytesToAllocate = instanceCount * stride;
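                // e.g. 20 instances need 20 expanded attribute values here, regardless of the
                // original divisor.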

                // Allocate buffer for results
                ANGLE_TRY(contextVk->allocateStreamedVertexBuffer(attribIndex, bytesToAllocate,
                                                                  &vertexDataBuffer));

                gl::Buffer *bufferGL = binding.getBuffer().get();
                if (bufferGL != nullptr)
                {
                    // Only do the data copy if src buffer is valid.
                    if (bufferGL->getSize() > 0)
                    {
                        // Map buffer to expand attribs for divisor emulation
                        BufferVk *bufferVk = vk::GetImpl(binding.getBuffer().get());
                        void *buffSrc = nullptr;
                        ANGLE_TRY(bufferVk->mapImpl(contextVk, GL_MAP_READ_BIT, &buffSrc));
                        src = reinterpret_cast<const uint8_t *>(buffSrc) + binding.getOffset();

                        uint32_t srcAttributeSize =
                            static_cast<uint32_t>(ComputeVertexAttributeTypeSize(attrib));

                        size_t numVertices = GetVertexCount(bufferVk, binding, srcAttributeSize);

                        ANGLE_TRY(StreamVertexDataWithDivisor(
                            contextVk, vertexDataBuffer, src, bytesToAllocate, binding.getStride(),
                            stride, vertexFormat.getVertexLoadFunction(compressed), divisor,
                            numVertices));

                        ANGLE_TRY(bufferVk->unmapImpl(contextVk));
                    }
                    else if (contextVk->getExtensions().robustnessEXT)
                    {
                        // Satisfy robustness constraints (only if extension enabled)
                        uint8_t *dst = vertexDataBuffer->getMappedMemory();
                        memset(dst, 0, bytesToAllocate);
                    }
                }
                else
                {
                    size_t numVertices = instanceCount;
                    ANGLE_TRY(StreamVertexDataWithDivisor(
                        contextVk, vertexDataBuffer, src, bytesToAllocate, binding.getStride(),
                        stride, vertexFormat.getVertexLoadFunction(compressed), divisor,
                        numVertices));
                }
            }
            else
            {
                ASSERT(binding.getBuffer().get() == nullptr);
                size_t count = UnsignedCeilDivide(instanceCount, divisor);
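                // e.g. 10 instances with divisor 4 need ceil(10 / 4) = 3 attribute values.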
                size_t bytesToAllocate = count * stride;

                // Allocate buffer for results
                ANGLE_TRY(contextVk->allocateStreamedVertexBuffer(attribIndex, bytesToAllocate,
                                                                  &vertexDataBuffer));

                ANGLE_TRY(StreamVertexData(contextVk, vertexDataBuffer, src, bytesToAllocate, 0,
                                           count, binding.getStride(),
                                           vertexFormat.getVertexLoadFunction(compressed)));
            }
        }
        else
        {
            ASSERT(binding.getBuffer().get() == nullptr);
            // Allocate space for startVertex + vertexCount so indexing will work. If we don't
            // start at zero all the indices will be off.
            // Only vertexCount vertices will be used by the upcoming draw so that is all we copy.
            src += startVertex * binding.getStride();
            size_t destOffset = startVertex * stride;
            size_t bytesToAllocate = (startVertex + vertexCount) * stride;

            // Allocate buffer for results
            ANGLE_TRY(contextVk->allocateStreamedVertexBuffer(attribIndex, bytesToAllocate,
                                                              &vertexDataBuffer));

            ANGLE_TRY(StreamVertexData(contextVk, vertexDataBuffer, src, bytesToAllocate,
                                       destOffset, vertexCount, binding.getStride(),
                                       vertexFormat.getVertexLoadFunction(compressed)));
        }

        mCurrentArrayBuffers[attribIndex] = vertexDataBuffer;
        VkDeviceSize bufferOffset;
        mCurrentArrayBufferHandles[attribIndex] =
            vertexDataBuffer
                ->getBufferForVertexArray(contextVk, vertexDataBuffer->getSize(), &bufferOffset)
                .getHandle();
        mCurrentArrayBufferOffsets[attribIndex] = bufferOffset;
    }

    return angle::Result::Continue;
}

angle::Result VertexArrayVk::handleLineLoop(ContextVk *contextVk,
                                            GLint firstVertex,
                                            GLsizei vertexOrIndexCount,
                                            gl::DrawElementsType indexTypeOrInvalid,
                                            const void *indices,
                                            uint32_t *indexCountOut)
{
    if (indexTypeOrInvalid != gl::DrawElementsType::InvalidEnum)
    {
        // Handle GL_LINE_LOOP drawElements.
        if (mDirtyLineLoopTranslation)
        {
            gl::Buffer *elementArrayBuffer = mState.getElementArrayBuffer();

            if (!elementArrayBuffer)
            {
                ANGLE_TRY(
                    mLineLoopHelper.streamIndices(contextVk, indexTypeOrInvalid, vertexOrIndexCount,
                                                  reinterpret_cast<const uint8_t *>(indices),
                                                  &mCurrentElementArrayBuffer, indexCountOut));
            }
            else
            {
                // When using an element array buffer, 'indices' is an offset to the first element.
                intptr_t offset = reinterpret_cast<intptr_t>(indices);
                BufferVk *elementArrayBufferVk = vk::GetImpl(elementArrayBuffer);
                ANGLE_TRY(mLineLoopHelper.getIndexBufferForElementArrayBuffer(
                    contextVk, elementArrayBufferVk, indexTypeOrInvalid, vertexOrIndexCount, offset,
                    &mCurrentElementArrayBuffer, indexCountOut));
            }
        }

        // If we've had a drawArrays call with a line loop before, we want to make sure this is
        // invalidated the next time drawArrays is called since we use the same index buffer for
        // both calls.
        mLineLoopBufferFirstIndex.reset();
        mLineLoopBufferLastIndex.reset();
        return angle::Result::Continue;
    }

    // Note: Vertex indexes can be arbitrarily large.
    uint32_t clampedVertexCount = gl::clampCast<uint32_t>(vertexOrIndexCount);

    // Handle GL_LINE_LOOP drawArrays.
    size_t lastVertex = static_cast<size_t>(firstVertex + clampedVertexCount);
    if (!mLineLoopBufferFirstIndex.valid() || !mLineLoopBufferLastIndex.valid() ||
        mLineLoopBufferFirstIndex != firstVertex || mLineLoopBufferLastIndex != lastVertex)
    {
        ANGLE_TRY(mLineLoopHelper.getIndexBufferForDrawArrays(
            contextVk, clampedVertexCount, firstVertex, &mCurrentElementArrayBuffer));

        mLineLoopBufferFirstIndex = firstVertex;
        mLineLoopBufferLastIndex = lastVertex;
    }
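    // The extra index added below closes the loop back to the first vertex.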
    *indexCountOut = vertexOrIndexCount + 1;

    return angle::Result::Continue;
}

angle::Result VertexArrayVk::updateDefaultAttrib(ContextVk *contextVk, size_t attribIndex)
{
    if (!mState.getEnabledAttributesMask().test(attribIndex))
    {
        vk::BufferHelper *bufferHelper;
        ANGLE_TRY(
            contextVk->allocateStreamedVertexBuffer(attribIndex, kDefaultValueSize, &bufferHelper));

        const gl::VertexAttribCurrentValueData &defaultValue =
            contextVk->getState().getVertexAttribCurrentValues()[attribIndex];
        uint8_t *ptr = bufferHelper->getMappedMemory();
        memcpy(ptr, &defaultValue.Values, kDefaultValueSize);
        ANGLE_TRY(bufferHelper->flush(contextVk->getRenderer()));

        VkDeviceSize bufferOffset;
        mCurrentArrayBufferHandles[attribIndex] =
            bufferHelper->getBufferForVertexArray(contextVk, kDefaultValueSize, &bufferOffset)
                .getHandle();
        mCurrentArrayBufferOffsets[attribIndex] = bufferOffset;
        mCurrentArrayBuffers[attribIndex] = bufferHelper;

        ANGLE_TRY(setDefaultPackedInput(contextVk, attribIndex));
    }

    return angle::Result::Continue;
}
}  // namespace rx