/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDawnBuffer_DEFINED
#define GrDawnBuffer_DEFINED

#include "src/gpu/ganesh/GrGpuBuffer.h"
#include "webgpu/webgpu_cpp.h"

#include <cstddef>
#include <functional>
#include <string_view>

class GrDawnGpu;

// GrDawnBuffer is the GrGpuBuffer implementation for the Dawn backend.
//
// Some notes on the implementation:
//
// LIFETIME AND CREATION
// ---------------------
// When a GrDawnBuffer is constructed, it allocates a GPU buffer. Depending on the requested access
// pattern, the buffer is typically immediately mapped at creation (which happens synchronously and
// relatively fast). If a client requests to create a buffer with data, then it will be immediately
// unmapped after the data is copied into the buffer (see `GrDawnGpu::onCreateBuffer` and
// `GrDawnBuffer::onUpdateData`).
//
// Clients usually create buffers through a GrResourceProvider or a GrStagingBufferManager. These
// buffers are constructed in `GrDawnGpu::onCreateBuffer` and GrDawnGpu is involved in their
// lifetime and mapping. Depending on the requested buffer type, a GrDawnBuffer that is
// initialized as `Mappable::kNot` can itself be backed by another GrDawnBuffer that is owned by a
// GrStagingBufferManager. In this case the CPU mapping happens via the `fStagingBuffer` member
// instead of `fBuffer`. The backing `fStagingBuffer` is initialized in `GrDawnBuffer::onMap` and
// its contents are instructed to be copied into `fBuffer` in `GrDawnBuffer::onUnmap` (which does
// not take effect until the command is submitted to the GPU).
//
// ASYNC MAP/UNMAP
// ---------------
// The Dawn API provides two ways to map the CPU-accessible memory of a wgpu::Buffer:
//   * wgpu::Device::CreateBuffer which can synchronously map the buffer at creation;
//   * wgpu::Buffer::MapAsync which asynchronously maps a buffer at any time.
//
// When a GrDawnBuffer gets created it starts out as mapped (except it gets unmapped immediately if
// initialized with data). A buffer gets unmapped when its owner calls `GrGpuBuffer::unmap()`. A
// buffer that is managed by a GrStagingBufferManager is always unmapped before its ownership is
// passed to the associated GrDawnGpu.
//
// Dawn only provides an asynchronous API for mapping an unmapped buffer and `GrGpuBuffer::map()`
// must work synchronously. However, blocking in a busy-wait that yields to the underlying event
// loop can stall the calling thread in the order of milliseconds. We optimize this specifically
// for staging buffers:
//   1. GrStagingBufferManager first unmaps the buffer and passes its ownership to GrDawnGpu; at
//      this stage no client is expected to access the buffer and it can remain unmapped.
//   2. GrDawnBuffer requests to map the buffer asynchronously and does not return it back to the
//      backing resource provider until the map finishes. Thus, the buffer is never handed back to
//      clients in an unmapped state.
//   3. If a client needs a staging buffer before the map finishes, they will need to allocate a
//      new buffer which can get mapped at creation and avoid an async map.
//
// For all other buffers, a blocking map procedure is provided which allows them to remap a buffer
// if needed. For instance, a write-only non-staging buffer can be safely unmapped and mapped by a
// client.
65 class GrDawnBuffer : public GrGpuBuffer { 66 public: 67 static sk_sp<GrDawnBuffer> Make(GrDawnGpu* gpu, 68 size_t sizeInBytes, 69 GrGpuBufferType type, 70 GrAccessPattern pattern, 71 std::string_view label); 72 ~GrDawnBuffer() override = default; 73 74 void onMap(MapType) override; 75 void onUnmap(MapType) override; 76 bool onClearToZero() override; 77 void onRelease() override; 78 bool onUpdateData(const void* src, size_t offset, size_t size, bool preserve) override; 79 80 GrDawnGpu* getDawnGpu() const; get()81 wgpu::Buffer get() const { return fBuffer; } 82 83 // Map this buffer using the asynchronous map procedure. This function is intended to be used by 84 // the owning GrDawnGpu to manage the lifetime of this buffer and it has the following 85 // restrictions: 86 // - It must not be called while an async map is already in progress. 87 // - It must not be called on a buffer that is already mapped. 88 // - It must not be called on a buffer that is initialized as "unmappable". 89 // 90 // `callback` is called asynchronously with the result of this procedure once it's complete. 91 using MapAsyncCallback = std::function<void(bool success)>; 92 void mapAsync(MapAsyncCallback callback); 93 94 private: 95 enum class Mappable { 96 // Corresponds to Vertex and Index buffers. When a mapping is requested, these buffers are 97 // always backed by a staging buffer. NOTE: Staging buffers that are created by 98 // GrStagingBufferManager themselves are always `Mappable::kWriteOnly`. 99 kNot, 100 101 // Corresponds to `GrGpuBufferType::kXferGpuToCpu`. NOT mapped at creation. Will use a 102 // blocking-map if a mapping is requested. 103 kReadOnly, 104 105 // Corresponds to `GrGpuBufferType::kXferCpuToGpu`. Always mapped at creation. Will use a 106 // blocking-map if a mapping is requested. IF this is a staging buffer, then it will be 107 // asynchronously mapped by GrDawnGpu. 
108 kWriteOnly, 109 }; 110 111 GrDawnBuffer(GrDawnGpu* gpu, 112 size_t sizeInBytes, 113 GrGpuBufferType type, 114 GrAccessPattern pattern, 115 std::string_view label, 116 Mappable mappable, 117 wgpu::Buffer buffer, 118 void* mapPtr); 119 120 void* internalMap(MapType type, size_t offset, size_t size); 121 void internalUnmap(MapType type, size_t offset, size_t size); 122 123 // Called to handle the asynchronous mapAsync callback. 124 void mapAsyncDone(WGPUBufferMapAsyncStatus status); 125 126 // Map a buffer and busy-wait until the asynchronous mapping procedure completes. This function 127 // only needs to be called for a buffer that has been unmapped since buffers start out as mapped 128 // at creation. 129 // 130 // The blocking map incurs a cost in the form of yielding to the underlying event loop until the 131 // map finishes and can block the calling thread in the order of milliseconds. This might be 132 // undesirable for buffers that are mapped and unmapped frequently. 133 // 134 // This procedure is used to cover the case where a buffer that is not managed by a 135 // GrStagingBufferManager (and thus not asynchronously mapped by the owning GrDawnGpu) is 136 // unmapped and needs to get re-mapped for use (e.g. in onUpdateData()). 137 // 138 // Returns nullptr if the buffer fails to map. 139 void* blockingMap(size_t offset, size_t size); 140 141 wgpu::Buffer fBuffer; 142 Mappable fMappable = Mappable::kNot; 143 bool fUnmapped; 144 145 // A callback is only present when a request for MapAsync is pending. The callback is reset once 146 // the procedure is complete. 147 MapAsyncCallback fMapAsyncCallback; 148 149 // Buffers that are of the "not mappable" type are backed by another GrDawnBuffer that is 150 // managed by a GrStagingBufferManager. 151 wgpu::Buffer fStagingBuffer; 152 size_t fStagingOffset = 0; 153 154 using INHERITED = GrGpuBuffer; 155 }; 156 157 #endif 158