//
// Copyright 2024 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//

#include "libANGLE/renderer/wgpu/wgpu_helpers.h"

#include <algorithm>

#include "libANGLE/formatutils.h"
#include "libANGLE/renderer/wgpu/ContextWgpu.h"
#include "libANGLE/renderer/wgpu/DisplayWgpu.h"
#include "libANGLE/renderer/wgpu/FramebufferWgpu.h"

namespace rx
{
namespace webgpu
{
namespace
{
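// Builds a wgpu::TextureDescriptor that mirrors the properties of an already created texture, so
// that an externally provided texture can be tracked with the same metadata as one allocated here.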
wgpu::TextureDescriptor TextureDescriptorFromTexture(const wgpu::Texture &texture)
{
    wgpu::TextureDescriptor descriptor = {};
    descriptor.usage                   = texture.GetUsage();
    descriptor.dimension               = texture.GetDimension();
    descriptor.size = {texture.GetWidth(), texture.GetHeight(), texture.GetDepthOrArrayLayers()};
    descriptor.format          = texture.GetFormat();
    descriptor.mipLevelCount   = texture.GetMipLevelCount();
    descriptor.sampleCount     = texture.GetSampleCount();
    descriptor.viewFormatCount = 0;
    return descriptor;
}

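// Rounds a requested map offset down to the alignment required for buffer mapping.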
size_t GetSafeBufferMapOffset(size_t offset)
{
    static_assert(gl::isPow2(kBufferMapOffsetAlignment));
    return roundDownPow2(offset, kBufferMapOffsetAlignment);
}

size_t GetSafeBufferMapSize(size_t offset, size_t size)
{
    // The offset is rounded down for alignment and the size is rounded up. The safe size must
    // cover both of these offsets.
    size_t offsetChange = offset % kBufferMapOffsetAlignment;
    static_assert(gl::isPow2(kBufferMapSizeAlignment));
    return roundUpPow2(size + offsetChange, kBufferMapSizeAlignment);
}

uint8_t *AdjustMapPointerForOffset(uint8_t *mapPtr, size_t offset)
{
    // Fix up a map pointer that has been adjusted for alignment
    size_t offsetChange = offset % kBufferMapOffsetAlignment;
    return mapPtr + offsetChange;
}

const uint8_t *AdjustMapPointerForOffset(const uint8_t *mapPtr, size_t offset)
{
    return AdjustMapPointerForOffset(const_cast<uint8_t *>(mapPtr), offset);
}

}  // namespace

ImageHelper::ImageHelper() {}

ImageHelper::~ImageHelper() {}

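// Creates a new wgpu::Texture from the provided descriptor and records the intended/actual
// formats and the first allocated mip level.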
angle::Result ImageHelper::initImage(angle::FormatID intendedFormatID,
                                     angle::FormatID actualFormatID,
                                     wgpu::Device &device,
                                     gl::LevelIndex firstAllocatedLevel,
                                     wgpu::TextureDescriptor textureDescriptor)
{
    mIntendedFormatID    = intendedFormatID;
    mActualFormatID      = actualFormatID;
    mTextureDescriptor   = textureDescriptor;
    mFirstAllocatedLevel = firstAllocatedLevel;
    mTexture             = device.CreateTexture(&mTextureDescriptor);
    mInitialized         = true;

    return angle::Result::Continue;
}

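// Adopts an externally created wgpu::Texture without allocating a new one. The descriptor is
// reconstructed from the texture and level 0 is treated as the first allocated level.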
angle::Result ImageHelper::initExternal(angle::FormatID intendedFormatID,
                                        angle::FormatID actualFormatID,
                                        wgpu::Texture externalTexture)
{
    mIntendedFormatID    = intendedFormatID;
    mActualFormatID      = actualFormatID;
    mTextureDescriptor   = TextureDescriptorFromTexture(externalTexture);
    mFirstAllocatedLevel = gl::LevelIndex(0);
    mTexture             = externalTexture;
    mInitialized         = true;

    return angle::Result::Continue;
}

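// Flushes the staged updates of every allocated mip level, without deferring any clears.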
angle::Result ImageHelper::flushStagedUpdates(ContextWgpu *contextWgpu)
{
    if (mSubresourceQueue.empty())
    {
        return angle::Result::Continue;
    }
    for (gl::LevelIndex currentMipLevel = mFirstAllocatedLevel;
         currentMipLevel < mFirstAllocatedLevel + getLevelCount(); ++currentMipLevel)
    {
        ANGLE_TRY(flushSingleLevelUpdates(contextWgpu, currentMipLevel, nullptr, 0));
    }
    return angle::Result::Continue;
}

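// Flushes the staged updates of a single GL mip level. Buffer-to-texture copies are recorded into
// a one-off command encoder and submitted immediately; clears are either stored in
// |deferredClears| when provided, or attached to the current draw framebuffer.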
angle::Result ImageHelper::flushSingleLevelUpdates(ContextWgpu *contextWgpu,
                                                   gl::LevelIndex levelGL,
                                                   ClearValuesArray *deferredClears,
                                                   uint32_t deferredClearIndex)
{
    std::vector<SubresourceUpdate> *currentLevelQueue = getLevelUpdates(levelGL);
    if (!currentLevelQueue || currentLevelQueue->empty())
    {
        return angle::Result::Continue;
    }
    wgpu::Device device          = contextWgpu->getDevice();
    wgpu::Queue queue            = contextWgpu->getQueue();
    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    wgpu::TexelCopyTextureInfo dst;
    dst.texture = mTexture;
    std::vector<wgpu::RenderPassColorAttachment> colorAttachments;
    wgpu::TextureView textureView;
    ANGLE_TRY(createTextureView(levelGL, 0, textureView));
    bool updateDepth      = false;
    bool updateStencil    = false;
    float depthValue      = 1;
    uint32_t stencilValue = 0;
    for (const SubresourceUpdate &srcUpdate : *currentLevelQueue)
    {
        if (!isTextureLevelInAllocatedImage(srcUpdate.targetLevel))
        {
            continue;
        }
        switch (srcUpdate.updateSource)
        {
            case UpdateSource::Texture:
            {
                dst.mipLevel              = toWgpuLevel(srcUpdate.targetLevel).get();
                wgpu::Extent3D copyExtent = mTextureDescriptor.size;
                // https://www.w3.org/TR/webgpu/#abstract-opdef-logical-miplevel-specific-texture-extent
                copyExtent.width  = std::max(1u, copyExtent.width >> dst.mipLevel);
                copyExtent.height = std::max(1u, copyExtent.height >> dst.mipLevel);
                if (mTextureDescriptor.dimension == wgpu::TextureDimension::e3D)
                {
                    copyExtent.depthOrArrayLayers =
                        std::max(1u, copyExtent.depthOrArrayLayers >> dst.mipLevel);
                }
                encoder.CopyBufferToTexture(&srcUpdate.textureData, &dst, &copyExtent);
            }
            break;

            case UpdateSource::Clear:
                if (deferredClears)
                {
                    if (deferredClearIndex == kUnpackedDepthIndex)
                    {
                        if (srcUpdate.clearData.hasStencil)
                        {
                            deferredClears->store(kUnpackedStencilIndex,
                                                  srcUpdate.clearData.clearValues);
                        }
                        if (!srcUpdate.clearData.hasDepth)
                        {
                            break;
                        }
                    }
                    deferredClears->store(deferredClearIndex, srcUpdate.clearData.clearValues);
                }
                else
                {
                    colorAttachments.push_back(CreateNewClearColorAttachment(
                        srcUpdate.clearData.clearValues.clearColor,
                        srcUpdate.clearData.clearValues.depthSlice, textureView));
                    if (srcUpdate.clearData.hasDepth)
                    {
                        updateDepth = true;
                        depthValue  = srcUpdate.clearData.clearValues.depthValue;
                    }
                    if (srcUpdate.clearData.hasStencil)
                    {
                        updateStencil = true;
                        stencilValue  = srcUpdate.clearData.clearValues.stencilValue;
                    }
                }
                break;
        }
    }
    FramebufferWgpu *frameBuffer =
        GetImplAs<FramebufferWgpu>(contextWgpu->getState().getDrawFramebuffer());

    if (!colorAttachments.empty())
    {
        frameBuffer->addNewColorAttachments(colorAttachments);
    }
    if (updateDepth || updateStencil)
    {
        frameBuffer->updateDepthStencilAttachment(CreateNewDepthStencilAttachment(
            depthValue, stencilValue, textureView, updateDepth, updateStencil));
    }
    wgpu::CommandBuffer commandBuffer = encoder.Finish();
    queue.Submit(1, &commandBuffer);
    encoder = nullptr;
    currentLevelQueue->clear();

    return angle::Result::Continue;
}

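// Convenience helper that fills out a wgpu::TextureDescriptor from individual creation parameters.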
wgpu::TextureDescriptor ImageHelper::createTextureDescriptor(wgpu::TextureUsage usage,
                                                             wgpu::TextureDimension dimension,
                                                             wgpu::Extent3D size,
                                                             wgpu::TextureFormat format,
                                                             std::uint32_t mipLevelCount,
                                                             std::uint32_t sampleCount)
{
    wgpu::TextureDescriptor textureDescriptor = {};
    textureDescriptor.usage                   = usage;
    textureDescriptor.dimension               = dimension;
    textureDescriptor.size                    = size;
    textureDescriptor.format                  = format;
    textureDescriptor.mipLevelCount           = mipLevelCount;
    textureDescriptor.sampleCount             = sampleCount;
    textureDescriptor.viewFormatCount         = 0;
    return textureDescriptor;
}

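// Converts the incoming pixel data into the texture's actual format using the format's load
// function, writes it into a staging buffer, and queues a buffer-to-texture copy for the target
// level.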
angle::Result ImageHelper::stageTextureUpload(ContextWgpu *contextWgpu,
                                              const webgpu::Format &webgpuFormat,
                                              GLenum type,
                                              const gl::Extents &glExtents,
                                              GLuint inputRowPitch,
                                              GLuint inputDepthPitch,
                                              uint32_t outputRowPitch,
                                              uint32_t outputDepthPitch,
                                              uint32_t allocationSize,
                                              const gl::ImageIndex &index,
                                              const uint8_t *pixels)
{
    if (pixels == nullptr)
    {
        return angle::Result::Continue;
    }
    wgpu::Device device = contextWgpu->getDevice();
    wgpu::Queue queue   = contextWgpu->getQueue();
    gl::LevelIndex levelGL(index.getLevelIndex());
    BufferHelper bufferHelper;
    wgpu::BufferUsage usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
    ANGLE_TRY(bufferHelper.initBuffer(device, allocationSize, usage, MapAtCreation::Yes));
    LoadImageFunctionInfo loadFunctionInfo = webgpuFormat.getTextureLoadFunction(type);
    uint8_t *data                          = bufferHelper.getMapWritePointer(0, allocationSize);
    loadFunctionInfo.loadFunction(contextWgpu->getImageLoadContext(), glExtents.width,
                                  glExtents.height, glExtents.depth, pixels, inputRowPitch,
                                  inputDepthPitch, data, outputRowPitch, outputDepthPitch);
    ANGLE_TRY(bufferHelper.unmap());

    wgpu::TexelCopyBufferLayout textureDataLayout = {};
    textureDataLayout.bytesPerRow                 = outputRowPitch;
    textureDataLayout.rowsPerImage                = outputDepthPitch;
    wgpu::TexelCopyBufferInfo imageCopyBuffer;
    imageCopyBuffer.layout = textureDataLayout;
    imageCopyBuffer.buffer = bufferHelper.getBuffer();
    appendSubresourceUpdate(levelGL,
                            SubresourceUpdate(UpdateSource::Texture, levelGL, imageCopyBuffer));
    return angle::Result::Continue;
}

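// Stages a clear of the given level to be applied on the next flush.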
void ImageHelper::stageClear(gl::LevelIndex targetLevel,
                             ClearValues clearValues,
                             bool hasDepth,
                             bool hasStencil)
{
    appendSubresourceUpdate(targetLevel, SubresourceUpdate(UpdateSource::Clear, targetLevel,
                                                           clearValues, hasDepth, hasStencil));
}

void ImageHelper::removeStagedUpdates(gl::LevelIndex levelToRemove)
{
    std::vector<SubresourceUpdate> *updateToClear = getLevelUpdates(levelToRemove);
    if (updateToClear)
    {
        updateToClear->clear();
    }
}

void ImageHelper::resetImage()
{
    mTexture.Destroy();
    mTextureDescriptor   = {};
    mInitialized         = false;
    mFirstAllocatedLevel = gl::LevelIndex(0);
}

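// Computes the PackPixelsParams and skip-bytes offset used when packing read-back pixels into the
// destination, based on the pixel pack state and the requested format/type.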
// static
angle::Result ImageHelper::getReadPixelsParams(rx::ContextWgpu *contextWgpu,
                                               const gl::PixelPackState &packState,
                                               gl::Buffer *packBuffer,
                                               GLenum format,
                                               GLenum type,
                                               const gl::Rectangle &area,
                                               const gl::Rectangle &clippedArea,
                                               rx::PackPixelsParams *paramsOut,
                                               GLuint *skipBytesOut)
{
    const gl::InternalFormat &sizedFormatInfo = gl::GetInternalFormatInfo(format, type);

    GLuint outputPitch = 0;
    ANGLE_CHECK_GL_MATH(contextWgpu,
                        sizedFormatInfo.computeRowPitch(type, area.width, packState.alignment,
                                                        packState.rowLength, &outputPitch));
    ANGLE_CHECK_GL_MATH(contextWgpu, sizedFormatInfo.computeSkipBytes(
                                         type, outputPitch, 0, packState, false, skipBytesOut));

    ANGLE_TRY(GetPackPixelsParams(sizedFormatInfo, outputPitch, packState, packBuffer, area,
                                  clippedArea, paramsOut, skipBytesOut));
    return angle::Result::Continue;
}

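// Copies the requested area of the texture into a readback buffer, maps it, and packs the result
// into the caller-provided pixel storage.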
angle::Result ImageHelper::readPixels(rx::ContextWgpu *contextWgpu,
                                      const gl::Rectangle &area,
                                      const rx::PackPixelsParams &packPixelsParams,
                                      void *pixels)
{
    if (mActualFormatID == angle::FormatID::NONE)
    {
        // Unimplemented texture format
        UNIMPLEMENTED();
        return angle::Result::Stop;
    }

    wgpu::Device device          = contextWgpu->getDisplay()->getDevice();
    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    wgpu::Queue queue            = contextWgpu->getDisplay()->getQueue();

    const angle::Format &actualFormat = angle::Format::Get(mActualFormatID);
    uint32_t textureBytesPerRow =
        roundUp(actualFormat.pixelBytes * area.width, kCopyBufferAlignment);
    wgpu::TexelCopyBufferLayout textureDataLayout;
    textureDataLayout.bytesPerRow  = textureBytesPerRow;
    textureDataLayout.rowsPerImage = area.height;

    size_t allocationSize = textureBytesPerRow * area.height;

    BufferHelper bufferHelper;
    ANGLE_TRY(bufferHelper.initBuffer(device, allocationSize,
                                      wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst,
                                      MapAtCreation::No));
    wgpu::TexelCopyBufferInfo copyBuffer;
    copyBuffer.buffer = bufferHelper.getBuffer();
    copyBuffer.layout = textureDataLayout;

    wgpu::TexelCopyTextureInfo copyTexture;
    wgpu::Origin3D textureOrigin;
    textureOrigin.x      = area.x;
    textureOrigin.y      = area.y;
    copyTexture.origin   = textureOrigin;
    copyTexture.texture  = mTexture;
    copyTexture.mipLevel = toWgpuLevel(mFirstAllocatedLevel).get();

    wgpu::Extent3D copySize;
    copySize.width  = area.width;
    copySize.height = area.height;
    encoder.CopyTextureToBuffer(&copyTexture, &copyBuffer, &copySize);

    wgpu::CommandBuffer commandBuffer = encoder.Finish();
    queue.Submit(1, &commandBuffer);
    encoder = nullptr;

    ANGLE_TRY(bufferHelper.mapImmediate(contextWgpu, wgpu::MapMode::Read, 0, allocationSize));
    const uint8_t *readPixelBuffer = bufferHelper.getMapReadPointer(0, allocationSize);
    PackPixels(packPixelsParams, actualFormat, textureBytesPerRow, readPixelBuffer,
               static_cast<uint8_t *>(pixels));
    return angle::Result::Continue;
}

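// Creates a single-level, single-layer view of the texture whose view dimension matches the
// texture's dimension.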
angle::Result ImageHelper::createTextureView(gl::LevelIndex targetLevel,
                                             uint32_t layerIndex,
                                             wgpu::TextureView &textureViewOut)
{
    if (!isTextureLevelInAllocatedImage(targetLevel))
    {
        return angle::Result::Stop;
    }
    wgpu::TextureViewDescriptor textureViewDesc;
    textureViewDesc.aspect          = wgpu::TextureAspect::All;
    textureViewDesc.baseArrayLayer  = layerIndex;
    textureViewDesc.arrayLayerCount = 1;
    textureViewDesc.baseMipLevel    = toWgpuLevel(targetLevel).get();
    textureViewDesc.mipLevelCount   = 1;
    switch (mTextureDescriptor.dimension)
    {
        case wgpu::TextureDimension::Undefined:
            textureViewDesc.dimension = wgpu::TextureViewDimension::Undefined;
            break;
        case wgpu::TextureDimension::e1D:
            textureViewDesc.dimension = wgpu::TextureViewDimension::e1D;
            break;
        case wgpu::TextureDimension::e2D:
            textureViewDesc.dimension = wgpu::TextureViewDimension::e2D;
            break;
        case wgpu::TextureDimension::e3D:
            textureViewDesc.dimension = wgpu::TextureViewDimension::e3D;
            break;
    }
    textureViewDesc.format = mTextureDescriptor.format;
    textureViewOut         = mTexture.CreateView(&textureViewDesc);
    return angle::Result::Continue;
}

gl::LevelIndex ImageHelper::getLastAllocatedLevel()
{
    return mFirstAllocatedLevel + mTextureDescriptor.mipLevelCount - 1;
}

LevelIndex ImageHelper::toWgpuLevel(gl::LevelIndex levelIndexGl) const
{
    return gl_wgpu::getLevelIndex(levelIndexGl, mFirstAllocatedLevel);
}

gl::LevelIndex ImageHelper::toGlLevel(LevelIndex levelIndexWgpu) const
{
    return wgpu_gl::getLevelIndex(levelIndexWgpu, mFirstAllocatedLevel);
}

bool ImageHelper::isTextureLevelInAllocatedImage(gl::LevelIndex textureLevel)
{
    if (!mInitialized || textureLevel < mFirstAllocatedLevel)
    {
        return false;
    }
    LevelIndex wgpuTextureLevel = toWgpuLevel(textureLevel);
    return wgpuTextureLevel < LevelIndex(mTextureDescriptor.mipLevelCount);
}

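// Appends an update to the per-level queue, growing the queue if this level has not been staged
// before.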
void ImageHelper::appendSubresourceUpdate(gl::LevelIndex level, SubresourceUpdate &&update)
{
    if (mSubresourceQueue.size() <= static_cast<size_t>(level.get()))
    {
        mSubresourceQueue.resize(level.get() + 1);
    }
    mSubresourceQueue[level.get()].emplace_back(std::move(update));
}

std::vector<SubresourceUpdate> *ImageHelper::getLevelUpdates(gl::LevelIndex level)
{
    return static_cast<size_t>(level.get()) < mSubresourceQueue.size()
               ? &mSubresourceQueue[level.get()]
               : nullptr;
}

BufferHelper::BufferHelper() {}

BufferHelper::~BufferHelper() {}

void BufferHelper::reset()
{
    mBuffer = nullptr;
    mMappedState.reset();
}

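// Creates the underlying wgpu::Buffer, rounding the requested size up to the buffer size
// alignment. The buffer may optionally be left mapped at creation.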
angle::Result BufferHelper::initBuffer(wgpu::Device device,
                                       size_t size,
                                       wgpu::BufferUsage usage,
                                       MapAtCreation mappedAtCreation)
{
    size_t safeBufferSize = rx::roundUpPow2(size, kBufferSizeAlignment);
    wgpu::BufferDescriptor descriptor;
    descriptor.size             = safeBufferSize;
    descriptor.usage            = usage;
    descriptor.mappedAtCreation = mappedAtCreation == MapAtCreation::Yes;

    mBuffer = device.CreateBuffer(&descriptor);

    if (mappedAtCreation == MapAtCreation::Yes)
    {
        mMappedState = {wgpu::MapMode::Read | wgpu::MapMode::Write, 0, safeBufferSize};
    }
    else
    {
        mMappedState.reset();
    }

    mRequestedSize = size;

    return angle::Result::Continue;
}

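// Maps the buffer and blocks until the map request completes. The offset and size are expanded to
// the required map alignments before mapping.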
angle::Result BufferHelper::mapImmediate(ContextWgpu *context,
                                         wgpu::MapMode mode,
                                         size_t offset,
                                         size_t size)
{
    ASSERT(!mMappedState.has_value());

    wgpu::MapAsyncStatus mapResult = wgpu::MapAsyncStatus::Error;
    wgpu::BufferMapCallback<wgpu::MapAsyncStatus *> *mapAsyncCallback =
        [](wgpu::MapAsyncStatus status, wgpu::StringView message, wgpu::MapAsyncStatus *pStatus) {
            *pStatus = status;
        };
    wgpu::FutureWaitInfo waitInfo;
    size_t safeBufferMapOffset = GetSafeBufferMapOffset(offset);
    size_t safeBufferMapSize   = GetSafeBufferMapSize(offset, size);
    waitInfo.future =
        mBuffer.MapAsync(mode, safeBufferMapOffset, safeBufferMapSize,
                         wgpu::CallbackMode::WaitAnyOnly, mapAsyncCallback, &mapResult);

    wgpu::Instance instance = context->getDisplay()->getInstance();
    ANGLE_WGPU_TRY(context, instance.WaitAny(1, &waitInfo, -1));
    ANGLE_WGPU_TRY(context, mapResult);

    ASSERT(waitInfo.completed);

    mMappedState = {mode, offset, size};

    return angle::Result::Continue;
}

angle::Result BufferHelper::unmap()
{
    ASSERT(mMappedState.has_value());
    mBuffer.Unmap();
    mMappedState.reset();
    return angle::Result::Continue;
}

uint8_t *BufferHelper::getMapWritePointer(size_t offset, size_t size) const
{
    ASSERT(mBuffer.GetMapState() == wgpu::BufferMapState::Mapped);
    ASSERT(mMappedState.has_value());
    ASSERT(mMappedState->offset <= offset);
    ASSERT(mMappedState->offset + mMappedState->size >= offset + size);

    void *mapPtr =
        mBuffer.GetMappedRange(GetSafeBufferMapOffset(offset), GetSafeBufferMapSize(offset, size));
    ASSERT(mapPtr);

    return AdjustMapPointerForOffset(static_cast<uint8_t *>(mapPtr), offset);
}

const uint8_t *BufferHelper::getMapReadPointer(size_t offset, size_t size) const
{
    ASSERT(mBuffer.GetMapState() == wgpu::BufferMapState::Mapped);
    ASSERT(mMappedState.has_value());
    ASSERT(mMappedState->offset <= offset);
    ASSERT(mMappedState->offset + mMappedState->size >= offset + size);

    // GetConstMappedRange is used for reads whereas GetMappedRange is only used for writes.
    const void *mapPtr = mBuffer.GetConstMappedRange(GetSafeBufferMapOffset(offset),
                                                     GetSafeBufferMapSize(offset, size));
    ASSERT(mapPtr);

    return AdjustMapPointerForOffset(static_cast<const uint8_t *>(mapPtr), offset);
}

const std::optional<BufferMapState> &BufferHelper::getMappedState() const
{
    return mMappedState;
}

bool BufferHelper::canMapForRead() const
{
    return (mMappedState.has_value() && (mMappedState->mode & wgpu::MapMode::Read)) ||
           (mBuffer && (mBuffer.GetUsage() & wgpu::BufferUsage::MapRead));
}

bool BufferHelper::canMapForWrite() const
{
    return (mMappedState.has_value() && (mMappedState->mode & wgpu::MapMode::Write)) ||
           (mBuffer && (mBuffer.GetUsage() & wgpu::BufferUsage::MapWrite));
}

wgpu::Buffer &BufferHelper::getBuffer()
{
    return mBuffer;
}

uint64_t BufferHelper::requestedSize() const
{
    return mRequestedSize;
}

uint64_t BufferHelper::actualSize() const
{
    return mBuffer ? mBuffer.GetSize() : 0;
}

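// Copies a range of this buffer into a temporary staging buffer owned by |result|, flushes the
// commands, and maps the staging buffer for reading. |result->data| points at the requested
// offset within the mapped staging buffer.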
angle::Result BufferHelper::readDataImmediate(ContextWgpu *context,
                                              size_t offset,
                                              size_t size,
                                              webgpu::RenderPassClosureReason reason,
                                              BufferReadback *result)
{
    ASSERT(result);

    if (getMappedState())
    {
        ANGLE_TRY(unmap());
    }

    // Create a staging buffer just big enough for this copy but aligned for both copying and
    // mapping.
    const size_t stagingBufferSize = roundUpPow2(
        size, std::max(webgpu::kBufferCopyToBufferAlignment, webgpu::kBufferMapOffsetAlignment));

    ANGLE_TRY(result->buffer.initBuffer(context->getDisplay()->getDevice(), stagingBufferSize,
                                        wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead,
                                        webgpu::MapAtCreation::No));

    // Copy the source buffer to staging and flush the commands
    context->ensureCommandEncoderCreated();
    wgpu::CommandEncoder &commandEncoder = context->getCurrentCommandEncoder();
    size_t safeCopyOffset   = rx::roundDownPow2(offset, webgpu::kBufferCopyToBufferAlignment);
    size_t offsetAdjustment = offset - safeCopyOffset;
    size_t copySize = roundUpPow2(size + offsetAdjustment, webgpu::kBufferCopyToBufferAlignment);
    commandEncoder.CopyBufferToBuffer(getBuffer(), safeCopyOffset, result->buffer.getBuffer(), 0,
                                      copySize);

    ANGLE_TRY(context->flush(reason));

    // Read back from the staging buffer and compute the index range
    ANGLE_TRY(result->buffer.mapImmediate(context, wgpu::MapMode::Read, offsetAdjustment, size));
    result->data = result->buffer.getMapReadPointer(offsetAdjustment, size);

    return angle::Result::Continue;
}

}  // namespace webgpu
}  // namespace rx