//
// Copyright 2024 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// VertexArrayWgpu.cpp:
//    Implements the class methods for VertexArrayWgpu.
//

#include "libANGLE/renderer/wgpu/VertexArrayWgpu.h"

#include "common/PackedEnums.h"
#include "common/debug.h"
#include "libANGLE/Error.h"
#include "libANGLE/renderer/wgpu/ContextWgpu.h"
#include "libANGLE/renderer/wgpu/wgpu_utils.h"

namespace rx
{

namespace
{
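// Returns true when an attribute's data cannot be sourced directly from the bound buffer and must
// instead be streamed through a staging buffer: the stride is not 4-byte aligned, the stride is
// not a multiple of the component type size, the format requires CPU-side conversion, or no
// non-empty buffer object is bound.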
bool AttributeNeedsStreaming(ContextWgpu *context,
                             const gl::VertexAttribute &attrib,
                             const gl::VertexBinding &binding)
{
    const size_t stride = ComputeVertexAttributeStride(attrib, binding);
    if (stride % 4 != 0)
    {
        return true;
    }

    const size_t typeSize = gl::ComputeVertexAttributeTypeSize(attrib);
    if (stride % typeSize != 0)
    {
        return true;
    }

    const webgpu::Format &vertexFormat = context->getFormat(attrib.format->glInternalFormat);
    if (vertexFormat.vertexLoadRequiresConversion())
    {
        return true;
    }

    gl::Buffer *bufferGl = binding.getBuffer().get();
    if (!bufferGl || bufferGl->getSize() == 0)
    {
        return true;
    }

    return false;
}

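// Copies |count| indices from |sourceData| to |destData|, widening each index when the source and
// destination types differ (e.g. GLubyte -> GLushort). When the types match, a single memcpy is
// used.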
template <typename SourceType, typename DestType>
void CopyIndexData(const uint8_t *sourceData, size_t count, uint8_t *destData)
{
    if constexpr (std::is_same<SourceType, DestType>::value)
    {
        memcpy(destData, sourceData, sizeof(SourceType) * count);
    }
    else
    {
        for (size_t i = 0; i < count; i++)
        {
            DestType *dst         = reinterpret_cast<DestType *>(destData) + i;
            const SourceType *src = reinterpret_cast<const SourceType *>(sourceData) + i;
            *dst                  = static_cast<DestType>(*src);
        }
    }
}

using CopyIndexFunction = void (*)(const uint8_t *sourceData, size_t count, uint8_t *destData);

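// Returns the copy function for a (source, dest) index type pair. Only same-size or widening
// conversions are supported; the table is indexed by the DrawElementsType enum values asserted at
// the top of the function.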
CopyIndexFunction GetCopyIndexFunction(gl::DrawElementsType sourceType,
                                       gl::DrawElementsType destType)
{
    static_assert(static_cast<size_t>(gl::DrawElementsType::UnsignedByte) == 0);
    static_assert(static_cast<size_t>(gl::DrawElementsType::UnsignedShort) == 1);
    static_assert(static_cast<size_t>(gl::DrawElementsType::UnsignedInt) == 2);
    ASSERT(static_cast<size_t>(sourceType) <= 2);
    ASSERT(static_cast<size_t>(destType) <= 2);
    ASSERT(static_cast<size_t>(destType) >=
           static_cast<size_t>(sourceType));  // Can't copy to a smaller type

    constexpr CopyIndexFunction copyFunctions[3][3] = {
        {
            CopyIndexData<GLubyte, GLubyte>,
            CopyIndexData<GLubyte, GLushort>,
            CopyIndexData<GLubyte, GLuint>,
        },
        {
            nullptr,
            CopyIndexData<GLushort, GLushort>,
            CopyIndexData<GLushort, GLuint>,
        },
        {
            nullptr,
            nullptr,
            CopyIndexData<GLuint, GLuint>,
        },
    };

    CopyIndexFunction copyFunction =
        copyFunctions[static_cast<size_t>(sourceType)][static_cast<size_t>(destType)];
    ASSERT(copyFunction != nullptr);
    return copyFunction;
}

}  // namespace

VertexArrayWgpu::VertexArrayWgpu(const gl::VertexArrayState &data) : VertexArrayImpl(data)
{
    // Pre-initialize mCurrentIndexBuffer to a streaming buffer because no index buffer dirty bit
    // is triggered if our first draw call has no buffer bound.
    mCurrentIndexBuffer = &mStreamingIndexBuffer;
}

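// Processes the vertex array dirty bits: the element array buffer binding and each dirty
// attribute/binding are re-synced, and the context is notified so that its cached vertex and
// index buffer state is invalidated.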
angle::Result VertexArrayWgpu::syncState(const gl::Context *context,
                                         const gl::VertexArray::DirtyBits &dirtyBits,
                                         gl::VertexArray::DirtyAttribBitsArray *attribBits,
                                         gl::VertexArray::DirtyBindingBitsArray *bindingBits)
{
    ASSERT(dirtyBits.any());

    ContextWgpu *contextWgpu = GetImplAs<ContextWgpu>(context);

    const std::vector<gl::VertexAttribute> &attribs = mState.getVertexAttributes();
    const std::vector<gl::VertexBinding> &bindings  = mState.getVertexBindings();

    gl::AttributesMask syncedAttributes;

    for (auto iter = dirtyBits.begin(), endIter = dirtyBits.end(); iter != endIter; ++iter)
    {
        size_t dirtyBit = *iter;
        switch (dirtyBit)
        {
            case gl::VertexArray::DIRTY_BIT_LOST_OBSERVATION:
                break;

            case gl::VertexArray::DIRTY_BIT_ELEMENT_ARRAY_BUFFER:
            case gl::VertexArray::DIRTY_BIT_ELEMENT_ARRAY_BUFFER_DATA:
                ANGLE_TRY(syncDirtyElementArrayBuffer(contextWgpu));
                contextWgpu->invalidateIndexBuffer();
                break;

#define ANGLE_VERTEX_DIRTY_ATTRIB_FUNC(INDEX)                                     \
    case gl::VertexArray::DIRTY_BIT_ATTRIB_0 + INDEX:                             \
        ANGLE_TRY(syncDirtyAttrib(contextWgpu, attribs[INDEX],                    \
                                  bindings[attribs[INDEX].bindingIndex], INDEX)); \
        (*attribBits)[INDEX].reset();                                             \
        syncedAttributes.set(INDEX);                                              \
        break;

                ANGLE_VERTEX_INDEX_CASES(ANGLE_VERTEX_DIRTY_ATTRIB_FUNC)

#define ANGLE_VERTEX_DIRTY_BINDING_FUNC(INDEX)                                    \
    case gl::VertexArray::DIRTY_BIT_BINDING_0 + INDEX:                            \
        ANGLE_TRY(syncDirtyAttrib(contextWgpu, attribs[INDEX],                    \
                                  bindings[attribs[INDEX].bindingIndex], INDEX)); \
        (*bindingBits)[INDEX].reset();                                            \
        syncedAttributes.set(INDEX);                                              \
        break;

                ANGLE_VERTEX_INDEX_CASES(ANGLE_VERTEX_DIRTY_BINDING_FUNC)

#define ANGLE_VERTEX_DIRTY_BUFFER_DATA_FUNC(INDEX)                                \
    case gl::VertexArray::DIRTY_BIT_BUFFER_DATA_0 + INDEX:                        \
        ANGLE_TRY(syncDirtyAttrib(contextWgpu, attribs[INDEX],                    \
                                  bindings[attribs[INDEX].bindingIndex], INDEX)); \
        syncedAttributes.set(INDEX);                                              \
        break;

                ANGLE_VERTEX_INDEX_CASES(ANGLE_VERTEX_DIRTY_BUFFER_DATA_FUNC)

            default:
                break;
        }
    }

    for (size_t syncedAttribIndex : syncedAttributes)
    {
        contextWgpu->setVertexAttribute(syncedAttribIndex, mCurrentAttribs[syncedAttribIndex]);
        contextWgpu->invalidateVertexBuffer(syncedAttribIndex);
    }
    return angle::Result::Continue;
}

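// Streams client-side vertex data, and index data when required, into GPU-visible buffers before
// a draw call. Index data is streamed when 8-bit indices must be widened to 16 bits, when line
// loops are emulated, or when an indexed draw has no element array buffer bound. All streamed
// data is written through a single staging buffer and then copied into the destination buffers.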
angle::Result VertexArrayWgpu::syncClientArrays(
    const gl::Context *context,
    const gl::AttributesMask &activeAttributesMask,
    gl::PrimitiveMode mode,
    GLint first,
    GLsizei count,
    GLsizei instanceCount,
    gl::DrawElementsType sourceDrawElementsTypeOrInvalid,
    const void *indices,
    GLint baseVertex,
    bool primitiveRestartEnabled,
    const void **adjustedIndicesPtr,
    uint32_t *indexCountOut)
{
    *adjustedIndicesPtr = indices;

    gl::AttributesMask clientAttributesToSync =
        (mState.getClientMemoryAttribsMask() | mForcedStreamingAttributes) &
        mState.getEnabledAttributesMask() & activeAttributesMask;

    gl::DrawElementsType destDrawElementsTypeOrInvalid = sourceDrawElementsTypeOrInvalid;

    IndexDataNeedsStreaming indexDataNeedsStreaming = IndexDataNeedsStreaming::No;
    if (sourceDrawElementsTypeOrInvalid == gl::DrawElementsType::UnsignedByte)
    {
        // Promote 8-bit indices to 16-bit indices
        indexDataNeedsStreaming       = IndexDataNeedsStreaming::Yes;
        destDrawElementsTypeOrInvalid = gl::DrawElementsType::UnsignedShort;
    }
    else if (mode == gl::PrimitiveMode::LineLoop)
    {
        // Index data will always need streaming for line loop mode regardless of what type of
        // draw call it is.
        if (sourceDrawElementsTypeOrInvalid == gl::DrawElementsType::InvalidEnum)
        {
            // Line loop draw array calls are emulated via indexed draw calls, so an index type
            // must be set.
            if (count >= std::numeric_limits<unsigned short>::max())
            {
                destDrawElementsTypeOrInvalid = gl::DrawElementsType::UnsignedInt;
            }
            else
            {
                destDrawElementsTypeOrInvalid = gl::DrawElementsType::UnsignedShort;
            }
        }
        indexDataNeedsStreaming = IndexDataNeedsStreaming::Yes;
    }
    else if (sourceDrawElementsTypeOrInvalid != gl::DrawElementsType::InvalidEnum &&
             !mState.getElementArrayBuffer())
    {
        // Index data needs to be uploaded to the GPU
        indexDataNeedsStreaming = IndexDataNeedsStreaming::Yes;
    }

    if (!clientAttributesToSync.any() && indexDataNeedsStreaming == IndexDataNeedsStreaming::No)
    {
        return angle::Result::Continue;
    }

    GLsizei adjustedCount = count;
    if (mode == gl::PrimitiveMode::LineLoop)
    {
        adjustedCount++;
    }

    if (indexCountOut)
    {
        *indexCountOut = adjustedCount;
    }

    ContextWgpu *contextWgpu = webgpu::GetImpl(context);
    wgpu::Device device      = webgpu::GetDevice(context);

    // If any attributes need to be streamed, we need to know the index range. We also need to
    // know the index range if there is a draw arrays call and we have to stream the index data
    // for it.
    std::optional<gl::IndexRange> indexRange;
    if (clientAttributesToSync.any())
    {
        GLint startVertex  = 0;
        size_t vertexCount = 0;
        ANGLE_TRY(GetVertexRangeInfo(context, first, count, sourceDrawElementsTypeOrInvalid,
                                     indices, baseVertex, &startVertex, &vertexCount));
        indexRange = gl::IndexRange(startVertex, startVertex + vertexCount - 1, 0);
    }
    else if (indexDataNeedsStreaming == IndexDataNeedsStreaming::Yes &&
             sourceDrawElementsTypeOrInvalid == gl::DrawElementsType::InvalidEnum)
    {
        indexRange = gl::IndexRange(first, first + count - 1, 0);
    }

    // Pre-compute the total size of all streamed vertex and index data so a single staging buffer
    // can be used
    size_t stagingBufferSize = 0;

    std::optional<size_t> destIndexDataSize;
    std::optional<size_t> destIndexUnitSize;
    gl::Buffer *elementArrayBuffer = mState.getElementArrayBuffer();
    if (indexDataNeedsStreaming != IndexDataNeedsStreaming::No)
    {
        destIndexUnitSize =
            static_cast<size_t>(gl::GetDrawElementsTypeSize(destDrawElementsTypeOrInvalid));
        destIndexDataSize = destIndexUnitSize.value() * adjustedCount;

        // Allocating staging buffer space for indices is only needed when there is no source index
        // buffer or index data conversion is needed
        if (!elementArrayBuffer || sourceDrawElementsTypeOrInvalid != destDrawElementsTypeOrInvalid)
        {
            stagingBufferSize +=
                rx::roundUpPow2(destIndexDataSize.value(), webgpu::kBufferCopyToBufferAlignment);
        }
    }

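    // Account for the streamed size of each client attribute, using the element size of the
    // destination (actual buffer) format since the data is converted as it is written into the
    // staging buffer.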
    const std::vector<gl::VertexAttribute> &attribs = mState.getVertexAttributes();
    const std::vector<gl::VertexBinding> &bindings  = mState.getVertexBindings();

    if (clientAttributesToSync.any())
    {
        for (size_t attribIndex : clientAttributesToSync)
        {
            const gl::VertexAttribute &attrib = attribs[attribIndex];
            const gl::VertexBinding &binding  = bindings[attrib.bindingIndex];

            size_t elementCount = gl::ComputeVertexBindingElementCount(
                binding.getDivisor(), indexRange->vertexCount(), instanceCount);

            const webgpu::Format &vertexFormat =
                contextWgpu->getFormat(attrib.format->glInternalFormat);
            size_t destTypeSize = vertexFormat.getActualBufferFormat().pixelBytes;
            ASSERT(destTypeSize > 0);

            size_t attribSize = destTypeSize * elementCount;
            stagingBufferSize += rx::roundUpPow2(attribSize, webgpu::kBufferCopyToBufferAlignment);
        }
    }

    if (stagingBufferSize > contextWgpu->getDisplay()->getLimitsWgpu().maxBufferSize)
    {
        ERR() << "Staging buffer size of " << stagingBufferSize
              << " in sync client arrays is larger than the max buffer size "
              << contextWgpu->getDisplay()->getLimitsWgpu().maxBufferSize;
        return angle::Result::Stop;
    }
    ASSERT(stagingBufferSize % webgpu::kBufferSizeAlignment == 0);

    webgpu::BufferHelper stagingBuffer;
    uint8_t *stagingData              = nullptr;
    size_t currentStagingDataPosition = 0;
    if (stagingBufferSize > 0)
    {
        ASSERT(stagingBufferSize % webgpu::kBufferSizeAlignment == 0);
        ANGLE_TRY(stagingBuffer.initBuffer(device, stagingBufferSize,
                                           wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite,
                                           webgpu::MapAtCreation::Yes));
        stagingData = stagingBuffer.getMapWritePointer(0, stagingBufferSize);
    }

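    // Describes a GPU copy from the staging buffer (or an existing source buffer) into one of the
    // streaming buffers. The copies are recorded below and submitted once the staging data has
    // been written and the staging buffer unmapped.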
    struct BufferCopy
    {
        uint64_t sourceOffset;
        webgpu::BufferHelper *src;
        webgpu::BufferHelper *dest;
        uint64_t destOffset;
        uint64_t size;
    };
    std::vector<BufferCopy> stagingUploads;

    if (indexDataNeedsStreaming == IndexDataNeedsStreaming::Yes)
    {
        // Indices are streamed to the start of the buffer. Tell the draw call command to use 0
        // for firstIndex.
        *adjustedIndicesPtr = 0;
        ASSERT(destIndexDataSize.has_value());
        ASSERT(destIndexUnitSize.has_value());

        size_t destIndexBufferSize =
            rx::roundUpPow2(destIndexDataSize.value(), webgpu::kBufferCopyToBufferAlignment);
        ANGLE_TRY(ensureBufferCreated(context, mStreamingIndexBuffer, destIndexBufferSize, 0,
                                      wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Index,
                                      BufferType::IndexBuffer));

        if (sourceDrawElementsTypeOrInvalid == destDrawElementsTypeOrInvalid && elementArrayBuffer)
        {
            // Use the element array buffer as the source for the new streaming index buffer. This
            // condition is only hit when an indexed draw call has an element array buffer and is
            // trying to draw line loops.

            // When using an element array buffer, 'indices' is an offset to the first element.
            size_t sourceOffset                = reinterpret_cast<size_t>(indices);
            BufferWgpu *elementArrayBufferWgpu = GetImplAs<BufferWgpu>(elementArrayBuffer);
            webgpu::BufferHelper *sourceBuffer = &elementArrayBufferWgpu->getBuffer();

            size_t copySize = rx::roundUpPow2(destIndexUnitSize.value() * count,
                                              webgpu::kBufferCopyToBufferAlignment);
            stagingUploads.push_back(
                {sourceOffset, sourceBuffer, &mStreamingIndexBuffer, 0, copySize});

            if (mode == gl::PrimitiveMode::LineLoop)
            {
                // Emulate line loops with an additional copy of the first index at the end of the
                // buffer
                size_t lastOffset = copySize;
                stagingUploads.push_back({sourceOffset, sourceBuffer, &mStreamingIndexBuffer,
                                          lastOffset,
                                          rx::roundUpPow2(destIndexUnitSize.value(),
                                                          webgpu::kBufferCopyToBufferAlignment)});
            }
        }
        // Handle emulating line loop for draw arrays calls.
        else if (sourceDrawElementsTypeOrInvalid == gl::DrawElementsType::InvalidEnum)
        {
            ASSERT(destDrawElementsTypeOrInvalid != gl::DrawElementsType::InvalidEnum);
            ASSERT(mode == gl::PrimitiveMode::LineLoop);
            uint32_t clampedVertexCount = gl::clampCast<uint32_t>(indexRange->vertexCount());
            uint32_t startVertex        = static_cast<uint32_t>(indexRange->start);

            // Generate sequential indices [startVertex, startVertex + vertexCount) into the
            // staging buffer, then append the first index again to close the loop.
            size_t index = currentStagingDataPosition;
            for (uint32_t i = 0; i < clampedVertexCount; i++)
            {
                uint32_t copyData = startVertex + i;
                memcpy(stagingData + index, &copyData, destIndexUnitSize.value());
                index += destIndexUnitSize.value();
            }
            memcpy(stagingData + currentStagingDataPosition + destIndexUnitSize.value() * count,
                   &startVertex, destIndexUnitSize.value());

            size_t copySize = destIndexBufferSize;
            stagingUploads.push_back(
                {currentStagingDataPosition, &stagingBuffer, &mStreamingIndexBuffer, 0, copySize});
            currentStagingDataPosition += copySize;
        }
        else
        {
            const uint8_t *srcIndexData = static_cast<const uint8_t *>(indices);

            // If the source indices live in a buffer object, read them back so they can be
            // converted on the CPU.
            webgpu::BufferReadback readbackBuffer;
            if (mState.getElementArrayBuffer())
            {
                webgpu::BufferHelper &srcBuffer =
                    webgpu::GetImpl(mState.getElementArrayBuffer())->getBuffer();

                const GLuint srcIndexTypeSize =
                    gl::GetDrawElementsTypeSize(sourceDrawElementsTypeOrInvalid);
                const size_t srcIndexOffset = reinterpret_cast<uintptr_t>(indices);

                ANGLE_TRY(srcBuffer.readDataImmediate(
                    contextWgpu, srcIndexOffset, count * srcIndexTypeSize,
                    webgpu::RenderPassClosureReason::IndexRangeReadback, &readbackBuffer));
                srcIndexData = readbackBuffer.data;
            }

            CopyIndexFunction indexCopyFunction = GetCopyIndexFunction(
                sourceDrawElementsTypeOrInvalid, destDrawElementsTypeOrInvalid);
            ASSERT(stagingData != nullptr);
            indexCopyFunction(srcIndexData, count, stagingData + currentStagingDataPosition);

            if (mode == gl::PrimitiveMode::LineLoop)
            {
                // Close the line loop by appending a copy of the first index; only one extra
                // index slot was allocated for it.
                indexCopyFunction(
                    srcIndexData, 1,
                    stagingData + currentStagingDataPosition + (destIndexUnitSize.value() * count));
            }

            size_t copySize = destIndexBufferSize;
            stagingUploads.push_back(
                {currentStagingDataPosition, &stagingBuffer, &mStreamingIndexBuffer, 0, copySize});
            currentStagingDataPosition += copySize;
        }
        // TODO(anglebug.com/383356846): add support for primitive restarts
    }

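    // Convert and stream each client attribute: the format's vertex load function packs the data
    // tightly into the staging buffer, and a copy into the per-attribute streaming vertex buffer
    // is recorded.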
    for (size_t attribIndex : clientAttributesToSync)
    {
        const gl::VertexAttribute &attrib = attribs[attribIndex];
        const gl::VertexBinding &binding  = bindings[attrib.bindingIndex];

        size_t streamedVertexCount = gl::ComputeVertexBindingElementCount(
            binding.getDivisor(), indexRange->vertexCount(), instanceCount);

        const size_t sourceStride   = ComputeVertexAttributeStride(attrib, binding);
        const size_t sourceTypeSize = gl::ComputeVertexAttributeTypeSize(attrib);

        // Vertices do not apply the 'start' offset when the divisor is non-zero even when doing
        // a non-instanced draw call
        const size_t firstIndex = (binding.getDivisor() == 0) ? indexRange->start : 0;

        // Attributes using client memory ignore the VERTEX_ATTRIB_BINDING state.
        // https://www.opengl.org/registry/specs/ARB/vertex_attrib_binding.txt
        const uint8_t *inputPointer = static_cast<const uint8_t *>(attrib.pointer);

        webgpu::BufferReadback readbackBuffer;
        if (binding.getBuffer().get())
        {
            webgpu::BufferHelper &srcBuffer =
                webgpu::GetImpl(binding.getBuffer().get())->getBuffer();

            size_t sourceVertexDataSize =
                sourceStride * (firstIndex + streamedVertexCount - 1) + sourceTypeSize;

            ANGLE_TRY(srcBuffer.readDataImmediate(
                contextWgpu, 0, reinterpret_cast<uintptr_t>(attrib.pointer) + sourceVertexDataSize,
                webgpu::RenderPassClosureReason::IndexRangeReadback, &readbackBuffer));
            inputPointer = readbackBuffer.data + reinterpret_cast<uintptr_t>(attrib.pointer);
        }

        const webgpu::Format &vertexFormat =
            contextWgpu->getFormat(attrib.format->glInternalFormat);
        size_t destTypeSize = vertexFormat.getActualBufferFormat().pixelBytes;

        VertexCopyFunction copyFunction = vertexFormat.getVertexLoadFunction();
        ASSERT(copyFunction != nullptr);
        ASSERT(stagingData != nullptr);
        copyFunction(inputPointer + (sourceStride * firstIndex), sourceStride, streamedVertexCount,
                     stagingData + currentStagingDataPosition);

        size_t copySize = rx::roundUpPow2(streamedVertexCount * destTypeSize,
                                          webgpu::kBufferCopyToBufferAlignment);

        // Pad the streaming buffer with empty data at the beginning to put the vertex data at the
        // same index location. The stride is tightly packed.
        size_t destCopyOffset = firstIndex * destTypeSize;

        ANGLE_TRY(ensureBufferCreated(
            context, mStreamingArrayBuffers[attribIndex], destCopyOffset + copySize, attribIndex,
            wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Vertex, BufferType::ArrayBuffer));

        stagingUploads.push_back({currentStagingDataPosition, &stagingBuffer,
                                  &mStreamingArrayBuffers[attribIndex], destCopyOffset, copySize});

        currentStagingDataPosition += copySize;
    }

    if (stagingBuffer.valid())
    {
        ANGLE_TRY(stagingBuffer.unmap());
    }
    ANGLE_TRY(contextWgpu->flush(webgpu::RenderPassClosureReason::VertexArrayStreaming));

    contextWgpu->ensureCommandEncoderCreated();
    wgpu::CommandEncoder &commandEncoder = contextWgpu->getCurrentCommandEncoder();

    for (const BufferCopy &copy : stagingUploads)
    {
        commandEncoder.CopyBufferToBuffer(copy.src->getBuffer(), copy.sourceOffset,
                                          copy.dest->getBuffer(), copy.destOffset, copy.size);
    }

    return angle::Result::Continue;
}

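// Updates the packed attribute description and the bound vertex buffer for a single attribute.
// Attributes that need streaming are pointed at the per-attribute streaming buffer with a tightly
// packed stride; otherwise the data is sourced directly from the application's buffer.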
angle::Result VertexArrayWgpu::syncDirtyAttrib(ContextWgpu *contextWgpu,
                                               const gl::VertexAttribute &attrib,
                                               const gl::VertexBinding &binding,
                                               size_t attribIndex)
{
    mForcedStreamingAttributes[attribIndex] = AttributeNeedsStreaming(contextWgpu, attrib, binding);

    if (attrib.enabled)
    {
        SetBitField(mCurrentAttribs[attribIndex].enabled, true);
        const webgpu::Format &webgpuFormat =
            contextWgpu->getFormat(attrib.format->glInternalFormat);
        SetBitField(mCurrentAttribs[attribIndex].format, webgpuFormat.getActualWgpuVertexFormat());
        SetBitField(mCurrentAttribs[attribIndex].shaderLocation, attribIndex);

        if (!mForcedStreamingAttributes[attribIndex])
        {
            // Data is sourced directly from the array buffer.
            SetBitField(mCurrentAttribs[attribIndex].offset, 0);
            SetBitField(mCurrentAttribs[attribIndex].stride, binding.getStride());

            gl::Buffer *bufferGl = binding.getBuffer().get();
            ASSERT(bufferGl);
            BufferWgpu *bufferWgpu                   = webgpu::GetImpl(bufferGl);
            mCurrentArrayBuffers[attribIndex].buffer = &(bufferWgpu->getBuffer());
            mCurrentArrayBuffers[attribIndex].offset = reinterpret_cast<uintptr_t>(attrib.pointer);
        }
        else
        {
            // Data is streamed into a tightly packed buffer just before the draw call.
            SetBitField(mCurrentAttribs[attribIndex].offset, 0);
            SetBitField(mCurrentAttribs[attribIndex].stride,
                        webgpuFormat.getActualBufferFormat().pixelBytes);
            mCurrentArrayBuffers[attribIndex].buffer = &mStreamingArrayBuffers[attribIndex];
            mCurrentArrayBuffers[attribIndex].offset = 0;
        }
    }
    else
    {
        memset(&mCurrentAttribs[attribIndex], 0, sizeof(webgpu::PackedVertexAttribute));
        mCurrentArrayBuffers[attribIndex].buffer = nullptr;
        mCurrentArrayBuffers[attribIndex].offset = 0;
    }

    return angle::Result::Continue;
}

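// Points mCurrentIndexBuffer at the bound element array buffer, or at the streaming index buffer
// when no element array buffer is bound.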
angle::Result VertexArrayWgpu::syncDirtyElementArrayBuffer(ContextWgpu *contextWgpu)
{
    gl::Buffer *bufferGl = mState.getElementArrayBuffer();
    if (bufferGl)
    {
        BufferWgpu *buffer  = webgpu::GetImpl(bufferGl);
        mCurrentIndexBuffer = &buffer->getBuffer();
    }
    else
    {
        mCurrentIndexBuffer = &mStreamingIndexBuffer;
    }

    return angle::Result::Continue;
}

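// Lazily (re)creates a streaming buffer when it is missing, too small, or has the wrong usage
// flags, invalidating any context state that referenced the previous buffer.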
angle::Result VertexArrayWgpu::ensureBufferCreated(const gl::Context *context,
                                                   webgpu::BufferHelper &buffer,
                                                   size_t size,
                                                   size_t attribIndex,
                                                   wgpu::BufferUsage usage,
                                                   BufferType bufferType)
{
    ContextWgpu *contextWgpu = webgpu::GetImpl(context);
    if (!buffer.valid() || buffer.requestedSize() < size || buffer.getBuffer().GetUsage() != usage)
    {
        wgpu::Device device = webgpu::GetDevice(context);
        ANGLE_TRY(buffer.initBuffer(device, size, usage, webgpu::MapAtCreation::No));

        if (bufferType == BufferType::IndexBuffer)
        {
            contextWgpu->invalidateIndexBuffer();
        }
        else
        {
            ASSERT(bufferType == BufferType::ArrayBuffer);
            contextWgpu->invalidateVertexBuffer(attribIndex);
        }
    }

    if (bufferType == BufferType::IndexBuffer)
    {
        mCurrentIndexBuffer = &buffer;
    }
    return angle::Result::Continue;
}

}  // namespace rx