/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/dawn/GrDawnBuffer.h"

#include "src/gpu/ganesh/dawn/GrDawnAsyncWait.h"
#include "src/gpu/ganesh/dawn/GrDawnGpu.h"

namespace {
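// Maps a GrGpuBufferType to the Dawn usage flags that buffer needs. Vertex and index buffers
// are also flagged CopyDst so they can be filled by buffer-to-buffer transfers from staging
// memory; CPU<->GPU transfer buffers get the MapWrite/MapRead flag matching their direction.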
wgpu::BufferUsage GrGpuBufferTypeToDawnUsageBit(GrGpuBufferType type) {
    switch (type) {
        case GrGpuBufferType::kVertex:
            return wgpu::BufferUsage::Vertex | wgpu::BufferUsage::CopyDst;
        case GrGpuBufferType::kIndex:
            return wgpu::BufferUsage::Index | wgpu::BufferUsage::CopyDst;
        case GrGpuBufferType::kXferCpuToGpu:
            return wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
        case GrGpuBufferType::kXferGpuToCpu:
            return wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
        default:
            SkASSERT(!"buffer type not supported by Dawn");
            return wgpu::BufferUsage::Vertex;
    }
}
}  // namespace

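// Factory for GrDawnBuffer. Derives the wgpu usage flags from the buffer type, decides whether
// the buffer is CPU-mappable, and creates the wgpu::Buffer. Write-only buffers are created with
// mappedAtCreation=true and their mapped pointer is handed to the constructor; non-mappable
// buffers have their size padded to a multiple of 4 so staging transfers stay aligned (see the
// comment below).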
// static
sk_sp<GrDawnBuffer> GrDawnBuffer::Make(GrDawnGpu* gpu,
                                       size_t sizeInBytes,
                                       GrGpuBufferType type,
                                       GrAccessPattern pattern,
                                       std::string_view label) {
    wgpu::BufferDescriptor bufferDesc;
    bufferDesc.size = sizeInBytes;
    bufferDesc.usage = GrGpuBufferTypeToDawnUsageBit(type);

    Mappable mappable = Mappable::kNot;
    if (bufferDesc.usage & wgpu::BufferUsage::MapRead) {
        SkASSERT(!SkToBool(bufferDesc.usage & wgpu::BufferUsage::MapWrite));
        mappable = Mappable::kReadOnly;
    } else if (bufferDesc.usage & wgpu::BufferUsage::MapWrite) {
        mappable = Mappable::kWriteOnly;
    }

    if (mappable == Mappable::kNot) {
        // onMap can still succeed by using a staging buffer that gets transferred to the real
        // buffer. updateData will use this same mechanism ("map", copy to staging buffer,
        // "unmap"). The transfer must be 4 byte aligned. So ensure the real size of the buffer
        // is 4 byte aligned.
        bufferDesc.size = SkAlign4(bufferDesc.size);
        SkASSERT(gpu->caps()->transferFromBufferToBufferAlignment() == 4);
    }

    wgpu::Buffer buffer;
    void* mapPtr = nullptr;
    if (mappable == Mappable::kNot || mappable == Mappable::kReadOnly) {
        buffer = gpu->device().CreateBuffer(&bufferDesc);
    } else {
        bufferDesc.mappedAtCreation = true;
        buffer = gpu->device().CreateBuffer(&bufferDesc);
        mapPtr = buffer.GetMappedRange();
        if (!mapPtr) {
            SkDebugf("GrDawnBuffer: failed to map buffer at creation\n");
            return nullptr;
        }
    }

    return sk_sp<GrDawnBuffer>(new GrDawnBuffer(
            gpu, sizeInBytes, type, pattern, label, mappable, std::move(buffer), mapPtr));
}

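// The constructor only adopts the wgpu::Buffer created by Make() and records the mapping state;
// see Make() for how the buffer and the optional initial map pointer are produced.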
GrDawnBuffer::GrDawnBuffer(GrDawnGpu* gpu,
                           size_t sizeInBytes,
                           GrGpuBufferType type,
                           GrAccessPattern pattern,
                           std::string_view label,
                           Mappable mappable,
                           wgpu::Buffer buffer,
                           void* mapPtr)
        : INHERITED(gpu, sizeInBytes, type, pattern, label)
        , fBuffer(std::move(buffer))
        , fMappable(mappable) {
    fMapPtr = mapPtr;

    // We want to make the blocking map in `onMap` available initially only for read-only
    // buffers, which are neither mapped at creation nor backed by a staging buffer that gets
    // mapped independently. Note that the blocking map procedure becomes available to both
    // read-only and write-only buffers once they get explicitly unmapped.
    fUnmapped = (mapPtr == nullptr && mappable == Mappable::kReadOnly);
    this->registerWithCache(skgpu::Budgeted::kYes);
}

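// Returns a CPU-accessible pointer covering [offset, offset + size) of this buffer. Three paths
// are possible:
//   - the buffer is currently unmapped: block on MapAsync via blockingMap();
//   - the buffer is not mappable at all: hand out a slice of a staging buffer, which
//     internalUnmap() later copies into this buffer;
//   - otherwise the buffer is already mapped (at creation or via mapAsync()) and we simply
//     offset into fMapPtr.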
void* GrDawnBuffer::internalMap(MapType type, size_t offset, size_t size) {
    if (fUnmapped) {
        SkASSERT(fMappable != Mappable::kNot);
        void* ptr = this->blockingMap(offset, size);
        if (!ptr) {
            SkDebugf("GrDawnBuffer: failed to map buffer\n");
            return nullptr;
        }
        fUnmapped = false;
        return SkTAddOffset<void>(ptr, offset);
    }

    if (fMappable == Mappable::kNot) {
        // Dawn requires that the offset and size be 4 byte aligned. If the offset is not, then
        // we logically align the staging slice with the previous aligned value and adjust the
        // pointer into the slice that we return. We'll do the same adjustment when issuing the
        // transfer in internalUnmap so that the data winds up at the right offset.
        size_t r = offset & 0x3;
        size += r;
        SkASSERT(type == MapType::kWriteDiscard);
        GrStagingBufferManager::Slice slice =
                this->getDawnGpu()->stagingBufferManager()->allocateStagingBufferSlice(
                        size, /*requiredAlignment=*/4);
        fStagingBuffer = static_cast<GrDawnBuffer*>(slice.fBuffer)->get();
        fStagingOffset = slice.fOffset;
        return SkTAddOffset<void>(slice.fOffsetMapPtr, r);
    }

    // We always create these buffers mapped, or, if they've been used on the GPU before, we use
    // the async map callback to know when it is safe to reuse them. Thus by the time we get here
    // the buffer should always be mapped.
    SkASSERT(this->isMapped());
    return SkTAddOffset<void>(fMapPtr, offset);
}

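// Completes a map started by internalMap(). For non-mappable buffers this records a
// buffer-to-buffer copy from the staging slice into this buffer on the GPU's copy encoder;
// for mappable buffers it unmaps the wgpu::Buffer and marks it as needing a re-map (fUnmapped).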
void GrDawnBuffer::internalUnmap(MapType type, size_t offset, size_t size) {
    if (fMappable == Mappable::kNot) {
        SkASSERT(type == MapType::kWriteDiscard);
        // See comment in internalMap() about this adjustment.
        size_t r = offset & 0x3;
        offset -= r;
        size = SkAlign4(size + r);
        this->getDawnGpu()->getCopyEncoder().CopyBufferToBuffer(fStagingBuffer, fStagingOffset,
                                                                fBuffer, offset, size);
    } else {
        fBuffer.Unmap();
        fUnmapped = true;
    }
}

void GrDawnBuffer::onRelease() {
    if (this->wasDestroyed()) {
        return;
    }

    if (fMapPtr && fMappable != Mappable::kNot) {
        fBuffer.Unmap();
        fMapPtr = nullptr;
        fUnmapped = true;
    }

    this->GrGpuBuffer::onRelease();
}

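// Zero-fills the buffer by mapping it (directly or through a staging slice), memset-ing the
// full size, and unmapping again.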
bool GrDawnBuffer::onClearToZero() {
    void* ptr = this->internalMap(MapType::kWriteDiscard, 0, this->size());
    if (!ptr) {
        return false;
    }

    std::memset(ptr, 0, this->size());

    this->internalUnmap(MapType::kWriteDiscard, 0, this->size());

    return true;
}

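// Map/unmap hooks from GrGpuBuffer; both operate on the full size of the buffer.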
void GrDawnBuffer::onMap(MapType type) {
    fMapPtr = this->internalMap(type, 0, this->size());
}

void GrDawnBuffer::onUnmap(MapType type) {
    this->internalUnmap(type, 0, this->size());
}

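// Writes `size` bytes from `src` at `offset` by mapping, memcpy-ing, and unmapping. As noted
// below, the write-discard map used here never actually discards existing contents.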
bool GrDawnBuffer::onUpdateData(const void* src, size_t offset, size_t size, bool /*preserve*/) {
    // Note that this subclass's impl of kWriteDiscard never actually discards.
    void* ptr = this->internalMap(MapType::kWriteDiscard, offset, size);
    if (!ptr) {
        return false;
    }

    memcpy(ptr, src, size);

    this->internalUnmap(MapType::kWriteDiscard, offset, size);

    return true;
}

GrDawnGpu* GrDawnBuffer::getDawnGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrDawnGpu*>(this->getGpu());
}

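// Kicks off an asynchronous map of the whole buffer; `callback` is invoked with a success flag
// from mapAsyncDone() once Dawn completes the request. Only one request may be outstanding at a
// time, and the buffer must not already be mapped.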
void GrDawnBuffer::mapAsync(MapAsyncCallback callback) {
    SkASSERT(fMappable != Mappable::kNot);
    SkASSERT(!fMapAsyncCallback);
    SkASSERT(!this->isMapped());

    fMapAsyncCallback = std::move(callback);
    fBuffer.MapAsync(
            (fMappable == Mappable::kReadOnly) ? wgpu::MapMode::Read : wgpu::MapMode::Write,
            0,
            wgpu::kWholeMapSize,
            [](WGPUBufferMapAsyncStatus status, void* userData) {
                static_cast<GrDawnBuffer*>(userData)->mapAsyncDone(status);
            },
            this);
}

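// Completion handler for mapAsync(). On success, caches the mapped pointer (const-cast for
// read-only maps) before invoking the stored callback. The callback may deallocate this
// GrDawnBuffer, so it is run last and no members are touched afterwards.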
void GrDawnBuffer::mapAsyncDone(WGPUBufferMapAsyncStatus status) {
    SkASSERT(fMapAsyncCallback);
    auto callback = std::move(fMapAsyncCallback);

    if (status != WGPUBufferMapAsyncStatus_Success) {
        SkDebugf("GrDawnBuffer: failed to map buffer (status: %u)\n", status);
        callback(false);
        return;
    }

    if (fMappable == Mappable::kReadOnly) {
        fMapPtr = const_cast<void*>(fBuffer.GetConstMappedRange());
    } else {
        fMapPtr = fBuffer.GetMappedRange();
    }

    if (this->isMapped()) {
        fUnmapped = false;
    }

    // Run the callback as the last step in this function since the callback can deallocate this
    // GrDawnBuffer.
    callback(this->isMapped());
}

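// Synchronous fallback used by internalMap() when the buffer is currently unmapped: issues
// MapAsync() and busy-waits on a GrDawnAsyncWait until the callback signals. Returns the mapped
// pointer (adjusted for the alignment fix-up applied to `offset`), or nullptr if the map failed.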
void* GrDawnBuffer::blockingMap(size_t offset, size_t size) {
    SkASSERT(fMappable != Mappable::kNot);

    struct Context {
        GrDawnBuffer* buffer;
        void* result;
        GrDawnAsyncWait wait;
    };

    Context context{this, nullptr, GrDawnAsyncWait{this->getDawnGpu()->device()}};

    // The offset must be a multiple of 8. If not, back it up to the previous 8 byte multiple
    // and compensate by extending the size. In either case size must be a multiple of 4.
    SkASSERT(SkIsAlign4(offset));
    size_t r = offset & 0x7;
    offset -= r;
    size = SkAlign4(size + r);

    fBuffer.MapAsync(
            (fMappable == Mappable::kReadOnly) ? wgpu::MapMode::Read : wgpu::MapMode::Write,
            offset,
            size,
            [](WGPUBufferMapAsyncStatus status, void* userData) {
                auto* context = static_cast<Context*>(userData);
                if (status != WGPUBufferMapAsyncStatus_Success) {
                    context->result = nullptr;
                    context->wait.signal();
                    return;
                }
                auto* wgpuBuffer = &context->buffer->fBuffer;
                if (context->buffer->fMappable == Mappable::kReadOnly) {
                    context->result = const_cast<void*>(wgpuBuffer->GetConstMappedRange());
                } else {
                    context->result = wgpuBuffer->GetMappedRange();
                }
                if (context->result) {
                    context->buffer->fUnmapped = false;
                }
                context->wait.signal();
            },
            &context);

    context.wait.busyWait();

    return context.result ? SkTAddOffset<void>(context.result, r) : nullptr;
}