// Copyright 2017 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "dawn_native/Buffer.h"

#include "common/Alloc.h"
#include "common/Assert.h"
#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ErrorData.h"
#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/Queue.h"
#include "dawn_native/ValidationUtils_autogen.h"

#include <cstdio>
#include <cstring>
#include <limits>
#include <memory>
#include <utility>

namespace dawn_native {

    namespace {
        struct MapRequestTask : QueueBase::TaskInFlight {
            MapRequestTask(Ref<BufferBase> buffer, MapRequestID id)
                : buffer(std::move(buffer)), id(id) {
            }
            void Finish() override {
                buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_Success);
            }
            void HandleDeviceLoss() override {
                buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_DeviceLost);
            }
            ~MapRequestTask() override = default;

          private:
            Ref<BufferBase> buffer;
            MapRequestID id;
        };
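
        // Hedged sketch (editorial addition, not in the original file): how this task
        // is used. APIMapAsync() below tracks a MapRequestTask on the queue with the
        // pending command serial; once the GPU completes that serial, the queue calls
        // Finish(), which resolves the request:
        //
        //   auto task = std::make_unique<MapRequestTask>(buffer, id);
        //   queue->TrackTask(std::move(task), device->GetPendingCommandSerial());
        //   // On serial completion the queue calls task->Finish(), firing the buffer's
        //   // MapAsync callback with WGPUBufferMapAsyncStatus_Success.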

        class ErrorBuffer final : public BufferBase {
          public:
            ErrorBuffer(DeviceBase* device, const BufferDescriptor* descriptor)
                : BufferBase(device, descriptor, ObjectBase::kError) {
                if (descriptor->mappedAtCreation) {
                    // Check that the size can be used to allocate mFakeMappedData. A malloc(0)
                    // is invalid, and on 32-bit systems we should avoid a narrowing conversion
                    // that would make size = 1 << 32 + 1 allocate one byte.
                    bool isValidSize =
                        descriptor->size != 0 &&
                        descriptor->size < uint64_t(std::numeric_limits<size_t>::max());

                    if (isValidSize) {
                        mFakeMappedData =
                            std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(descriptor->size));
                    }
                    // Since error buffers in this case may allocate memory, we need to track them
                    // for destruction on the device.
                    TrackInDevice();
                }
            }

          private:
            bool IsCPUWritableAtCreation() const override {
                UNREACHABLE();
            }

            MaybeError MapAtCreationImpl() override {
                UNREACHABLE();
            }

            MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
                UNREACHABLE();
            }

            void* GetMappedPointerImpl() override {
                return mFakeMappedData.get();
            }

            void UnmapImpl() override {
                mFakeMappedData.reset();
            }

            std::unique_ptr<uint8_t[]> mFakeMappedData;
        };
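
        // Hedged example (editorial addition, hypothetical application code): an
        // ErrorBuffer keeps the mappedAtCreation contract alive for invalid buffers by
        // handing out scratch memory instead of a real mapping:
        //
        //   wgpu::BufferDescriptor desc = {};
        //   desc.size = 16;
        //   desc.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::Vertex;  // invalid mix
        //   desc.mappedAtCreation = true;
        //   wgpu::Buffer buf = device.CreateBuffer(&desc);  // yields an ErrorBuffer
        //   void* ptr = buf.GetMappedRange(0, 16);  // non-null, backed by mFakeMappedData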

    }  // anonymous namespace

    MaybeError ValidateBufferDescriptor(DeviceBase*, const BufferDescriptor* descriptor) {
        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
        DAWN_TRY(ValidateBufferUsage(descriptor->usage));

        wgpu::BufferUsage usage = descriptor->usage;

        const wgpu::BufferUsage kMapWriteAllowedUsages =
            wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
        DAWN_INVALID_IF(
            usage & wgpu::BufferUsage::MapWrite && !IsSubset(usage, kMapWriteAllowedUsages),
            "Buffer usage (%s) is invalid. If a buffer usage contains %s, the only other allowed "
            "usage is %s.",
            usage, wgpu::BufferUsage::MapWrite, wgpu::BufferUsage::CopySrc);

        const wgpu::BufferUsage kMapReadAllowedUsages =
            wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
        DAWN_INVALID_IF(
            usage & wgpu::BufferUsage::MapRead && !IsSubset(usage, kMapReadAllowedUsages),
            "Buffer usage (%s) is invalid. If a buffer usage contains %s, the only other allowed "
            "usage is %s.",
            usage, wgpu::BufferUsage::MapRead, wgpu::BufferUsage::CopyDst);

        DAWN_INVALID_IF(descriptor->mappedAtCreation && descriptor->size % 4 != 0,
                        "Buffer is mapped at creation but its size (%u) is not a multiple of 4.",
                        descriptor->size);

        return {};
    }
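
    // Hedged examples (editorial addition, hypothetical application code) of the rules
    // enforced above:
    //
    //   wgpu::BufferDescriptor ok = {};
    //   ok.size = 256;
    //   ok.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;  // allowed pair
    //
    //   wgpu::BufferDescriptor bad = {};
    //   bad.size = 6;                    // invalid when mappedAtCreation: not a multiple of 4
    //   bad.mappedAtCreation = true;
    //   bad.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc;  // invalid:
    //                                    // MapRead may only be combined with CopyDst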

    // Buffer

    BufferBase::BufferBase(DeviceBase* device, const BufferDescriptor* descriptor)
        : ApiObjectBase(device, descriptor->label),
          mSize(descriptor->size),
          mUsage(descriptor->usage),
          mState(BufferState::Unmapped) {
        // Add readonly storage usage if the buffer has a storage usage. The validation rules in
        // ValidateSyncScopeResourceUsage will make sure we don't use both at the same time.
        if (mUsage & wgpu::BufferUsage::Storage) {
            mUsage |= kReadOnlyStorageBuffer;
        }

        // A query resolve buffer needs to be used as a storage buffer in the internal compute
        // pipeline that performs timestamp unit conversion for timestamp queries, which requires
        // the buffer to have Storage usage in the bind group. Implicitly add an InternalStorage
        // usage, which is only compatible with the InternalStorageBuffer binding type in the BGL.
        // It shouldn't be compatible with the StorageBuffer binding type, and the query resolve
        // buffer cannot be bound as a storage buffer if it's created without Storage usage.
        if (mUsage & wgpu::BufferUsage::QueryResolve) {
            mUsage |= kInternalStorageBuffer;
        }

        // We also add internal storage usage for Indirect buffers for some transformations before
        // DispatchIndirect calls on the backend (e.g. validation, support of [[num_workgroups]] on
        // D3D12), since these transformations involve binding them as storage buffers for use in a
        // compute pass.
        if (mUsage & wgpu::BufferUsage::Indirect) {
            mUsage |= kInternalStorageBuffer;
        }

        TrackInDevice();
    }
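
    // Hedged illustration (editorial addition) of the implicit usage expansion above:
    //
    //   wgpu::BufferDescriptor desc = {};
    //   desc.size = 8 * sizeof(uint64_t);
    //   desc.usage = wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc;
    //   // Internally, GetUsage() on the resulting BufferBase also reports
    //   // kInternalStorageBuffer (but not wgpu::BufferUsage::Storage), so only the
    //   // internal timestamp-conversion pipeline may bind it for compute use.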

    BufferBase::BufferBase(DeviceBase* device,
                           const BufferDescriptor* descriptor,
                           ObjectBase::ErrorTag tag)
        : ApiObjectBase(device, tag), mSize(descriptor->size), mState(BufferState::Unmapped) {
        if (descriptor->mappedAtCreation) {
            mState = BufferState::MappedAtCreation;
            mMapOffset = 0;
            mMapSize = mSize;
        }
    }

    BufferBase::BufferBase(DeviceBase* device, BufferState state)
        : ApiObjectBase(device, kLabelNotImplemented), mState(state) {
        TrackInDevice();
    }

    BufferBase::~BufferBase() {
        ASSERT(mState == BufferState::Unmapped || mState == BufferState::Destroyed);
    }

    void BufferBase::DestroyImpl() {
        if (mState == BufferState::Mapped) {
            UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
        } else if (mState == BufferState::MappedAtCreation) {
            if (mStagingBuffer != nullptr) {
                mStagingBuffer.reset();
            } else if (mSize != 0) {
                UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
            }
        }
        mState = BufferState::Destroyed;
    }

    // static
    BufferBase* BufferBase::MakeError(DeviceBase* device, const BufferDescriptor* descriptor) {
        return new ErrorBuffer(device, descriptor);
    }

    ObjectType BufferBase::GetType() const {
        return ObjectType::Buffer;
    }

    uint64_t BufferBase::GetSize() const {
        ASSERT(!IsError());
        return mSize;
    }

    uint64_t BufferBase::GetAllocatedSize() const {
        ASSERT(!IsError());
        // The backend must initialize this value.
        ASSERT(mAllocatedSize != 0);
        return mAllocatedSize;
    }

    wgpu::BufferUsage BufferBase::GetUsage() const {
        ASSERT(!IsError());
        return mUsage;
    }

    MaybeError BufferBase::MapAtCreation() {
        DAWN_TRY(MapAtCreationInternal());

        void* ptr;
        size_t size;
        if (mSize == 0) {
            return {};
        } else if (mStagingBuffer) {
            // If there is a staging buffer for initialization, clear its contents directly.
            // It should be exactly as large as the buffer allocation.
            ptr = mStagingBuffer->GetMappedPointer();
            size = mStagingBuffer->GetSize();
            ASSERT(size == GetAllocatedSize());
        } else {
            // Otherwise, the buffer is directly mappable on the CPU.
            ptr = GetMappedPointerImpl();
            size = GetAllocatedSize();
        }

        DeviceBase* device = GetDevice();
        if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
            memset(ptr, uint8_t(0u), size);
            SetIsDataInitialized();
            device->IncrementLazyClearCountForTesting();
        } else if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
            memset(ptr, uint8_t(1u), size);
        }

        return {};
    }

    MaybeError BufferBase::MapAtCreationInternal() {
        ASSERT(!IsError());
        mMapOffset = 0;
        mMapSize = mSize;

        // 0-sized buffers are not supposed to be written to, so any non-null pointer can be
        // returned. Skip handling 0-sized buffers so we don't try to map them in the backend.
        if (mSize != 0) {
            // Mappable buffers don't use a staging buffer and behave as if they were mapped
            // through MapAsync.
            if (IsCPUWritableAtCreation()) {
                DAWN_TRY(MapAtCreationImpl());
            } else {
                // If any of these fail, the buffer will be deleted and replaced with an error
                // buffer. The staging buffer is used to return mappable data to initialize the
                // buffer contents. Allocate one as large as the real buffer size so that every byte
                // is initialized.
                // TODO(crbug.com/dawn/828): Suballocate and reuse memory from a larger staging
                // buffer so we don't create many small buffers.
                DAWN_TRY_ASSIGN(mStagingBuffer,
                                GetDevice()->CreateStagingBuffer(GetAllocatedSize()));
            }
        }

        // Only set the state to mapped at creation if we did not fail at any point in this
        // helper. Otherwise, if we overrode the default unmapped state before successfully
        // creating a staging buffer, we would have issues when we try to destroy the buffer.
        mState = BufferState::MappedAtCreation;
        return {};
    }
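
    // Hedged sketch (editorial addition, hypothetical application code): the two
    // mappedAtCreation paths that MapAtCreationInternal() selects between.
    //
    //   wgpu::BufferDescriptor desc = {};
    //   desc.size = 256;
    //   desc.usage = wgpu::BufferUsage::Vertex;  // not CPU-mappable
    //   desc.mappedAtCreation = true;
    //   wgpu::Buffer buf = device.CreateBuffer(&desc);
    //   // IsCPUWritableAtCreation() is false here, so the pointer below aliases a
    //   // staging buffer; Unmap() later schedules CopyFromStagingBuffer().
    //   memcpy(buf.GetMappedRange(0, 256), vertexData, 256);
    //   buf.Unmap();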

    MaybeError BufferBase::ValidateCanUseOnQueueNow() const {
        ASSERT(!IsError());

        switch (mState) {
            case BufferState::Destroyed:
                return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while destroyed.", this);
            case BufferState::Mapped:
            case BufferState::MappedAtCreation:
                return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while mapped.", this);
            case BufferState::Unmapped:
                return {};
        }
        UNREACHABLE();
    }

    void BufferBase::CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
        ASSERT(!IsError());
        if (mMapCallback != nullptr && mapID == mLastMapID) {
            // Tag the callback as fired before firing it; otherwise it could fire a second time
            // if, for example, buffer.Unmap() is called inside the application-provided callback.
            WGPUBufferMapCallback callback = mMapCallback;
            mMapCallback = nullptr;

            if (GetDevice()->IsLost()) {
                callback(WGPUBufferMapAsyncStatus_DeviceLost, mMapUserdata);
            } else {
                callback(status, mMapUserdata);
            }
        }
    }
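
    // Hedged illustration (editorial addition): why mMapCallback is cleared before the
    // call. A hypothetical application callback that unmaps reentrantly:
    //
    //   void OnMapped(WGPUBufferMapAsyncStatus status, void* userdata) {
    //       wgpuBufferUnmap(static_cast<WGPUBuffer>(userdata));
    //       // Unmap() reaches UnmapInternal() -> CallMapCallback(), but since
    //       // mMapCallback is already nullptr, the callback does not fire a second time.
    //   }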

    void BufferBase::APIMapAsync(wgpu::MapMode mode,
                                 size_t offset,
                                 size_t size,
                                 WGPUBufferMapCallback callback,
                                 void* userdata) {
        // Handle the defaulting of size required by WebGPU, even though in webgpu_cpp.h it is
        // not possible to default the function argument (because the callback comes later in
        // the argument list).
        if ((size == wgpu::kWholeMapSize) && (offset <= mSize)) {
            size = mSize - offset;
        }

        WGPUBufferMapAsyncStatus status;
        if (GetDevice()->ConsumedError(ValidateMapAsync(mode, offset, size, &status),
                                       "calling %s.MapAsync(%s, %u, %u, ...).", this, mode, offset,
                                       size)) {
            if (callback) {
                callback(status, userdata);
            }
            return;
        }
        ASSERT(!IsError());

        mLastMapID++;
        mMapMode = mode;
        mMapOffset = offset;
        mMapSize = size;
        mMapCallback = callback;
        mMapUserdata = userdata;
        mState = BufferState::Mapped;

        if (GetDevice()->ConsumedError(MapAsyncImpl(mode, offset, size))) {
            CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DeviceLost);
            return;
        }
        std::unique_ptr<MapRequestTask> request =
            std::make_unique<MapRequestTask>(this, mLastMapID);
        GetDevice()->GetQueue()->TrackTask(std::move(request),
                                           GetDevice()->GetPendingCommandSerial());
    }
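
    // Hedged usage example (editorial addition, hypothetical application code) for the
    // entry point above, via the C++ wrapper. wgpu::kWholeMapSize defaults the size to
    // the rest of the buffer, as handled at the top of APIMapAsync():
    //
    //   buffer.MapAsync(wgpu::MapMode::Read, 0, wgpu::kWholeMapSize,
    //                   [](WGPUBufferMapAsyncStatus status, void* userdata) {
    //                       // On success, GetConstMappedRange() is now valid.
    //                   },
    //                   nullptr);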

    void* BufferBase::APIGetMappedRange(size_t offset, size_t size) {
        return GetMappedRange(offset, size, true);
    }

    const void* BufferBase::APIGetConstMappedRange(size_t offset, size_t size) {
        return GetMappedRange(offset, size, false);
    }

    void* BufferBase::GetMappedRange(size_t offset, size_t size, bool writable) {
        if (!CanGetMappedRange(writable, offset, size)) {
            return nullptr;
        }

        if (mStagingBuffer != nullptr) {
            return static_cast<uint8_t*>(mStagingBuffer->GetMappedPointer()) + offset;
        }
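        // Editorial note: 0-sized buffers are never dereferenced through this pointer,
        // so any non-null value satisfies the contract; a recognizable dummy address is
        // returned instead of mapping anything in the backend.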
        if (mSize == 0) {
            return reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
        }
        uint8_t* start = static_cast<uint8_t*>(GetMappedPointerImpl());
        return start == nullptr ? nullptr : start + offset;
    }
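
    // Hedged example (editorial addition, hypothetical application code): the offset is
    // relative to the start of the buffer, not to the mapped window, matching the
    // pointer arithmetic above:
    //
    //   buffer.MapAsync(wgpu::MapMode::Read, 8, 16, callback, userdata);  // bytes 8..24
    //   // After the callback reports success:
    //   const void* p = buffer.GetConstMappedRange(8, 16);  // backend base pointer + 8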

    void BufferBase::APIDestroy() {
        Destroy();
    }

    MaybeError BufferBase::CopyFromStagingBuffer() {
        ASSERT(mStagingBuffer);
        if (mSize == 0) {
            // No staging buffer is created for zero-sized buffers.
            ASSERT(mStagingBuffer == nullptr);
            return {};
        }

        DAWN_TRY(GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0,
                                                      GetAllocatedSize()));

        DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
        uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));

        return {};
    }

    void BufferBase::APIUnmap() {
        if (GetDevice()->ConsumedError(ValidateUnmap(), "calling %s.Unmap().", this)) {
            return;
        }
        Unmap();
    }

    void BufferBase::Unmap() {
        UnmapInternal(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback);
    }

    void BufferBase::UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus) {
        if (mState == BufferState::Mapped) {
            // A map request's callback can only fire once, so this will fire only if the
            // request wasn't completed before the Unmap.
            // Callbacks are not fired if there is no callback registered, so this is also
            // correct for mappedAtCreation = true.
            CallMapCallback(mLastMapID, callbackStatus);
            UnmapImpl();

            mMapCallback = nullptr;
            mMapUserdata = 0;
        } else if (mState == BufferState::MappedAtCreation) {
            if (mStagingBuffer != nullptr) {
                GetDevice()->ConsumedError(CopyFromStagingBuffer());
            } else if (mSize != 0) {
                UnmapImpl();
            }
        }

        mState = BufferState::Unmapped;
    }

    MaybeError BufferBase::ValidateMapAsync(wgpu::MapMode mode,
                                            size_t offset,
                                            size_t size,
                                            WGPUBufferMapAsyncStatus* status) const {
        *status = WGPUBufferMapAsyncStatus_DeviceLost;
        DAWN_TRY(GetDevice()->ValidateIsAlive());

        *status = WGPUBufferMapAsyncStatus_Error;
        DAWN_TRY(GetDevice()->ValidateObject(this));

        DAWN_INVALID_IF(uint64_t(offset) > mSize,
                        "Mapping offset (%u) is larger than the size (%u) of %s.", offset, mSize,
                        this);

        DAWN_INVALID_IF(offset % 8 != 0, "Offset (%u) must be a multiple of 8.", offset);
        DAWN_INVALID_IF(size % 4 != 0, "Size (%u) must be a multiple of 4.", size);

        DAWN_INVALID_IF(uint64_t(size) > mSize - uint64_t(offset),
                        "Mapping range (offset: %u, size: %u) doesn't fit in the size (%u) of %s.",
                        offset, size, mSize, this);

        switch (mState) {
            case BufferState::Mapped:
            case BufferState::MappedAtCreation:
                return DAWN_FORMAT_VALIDATION_ERROR("%s is already mapped.", this);
            case BufferState::Destroyed:
                return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
            case BufferState::Unmapped:
                break;
        }

        bool isReadMode = mode & wgpu::MapMode::Read;
        bool isWriteMode = mode & wgpu::MapMode::Write;
        DAWN_INVALID_IF(!(isReadMode ^ isWriteMode), "Map mode (%s) is not one of %s or %s.", mode,
                        wgpu::MapMode::Write, wgpu::MapMode::Read);

        if (mode & wgpu::MapMode::Read) {
            DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapRead),
                            "The buffer usages (%s) do not contain %s.", mUsage,
                            wgpu::BufferUsage::MapRead);
        } else {
            ASSERT(mode & wgpu::MapMode::Write);
            DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapWrite),
                            "The buffer usages (%s) do not contain %s.", mUsage,
                            wgpu::BufferUsage::MapWrite);
        }

        *status = WGPUBufferMapAsyncStatus_Success;
        return {};
    }
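
    // Hedged examples (editorial addition) of the alignment and range rules enforced
    // above, assuming a 256-byte buffer with MapRead | CopyDst usage and a hypothetical
    // callback/userdata pair:
    //
    //   buffer.MapAsync(wgpu::MapMode::Read, 0, 256, callback, userdata);  // ok
    //   buffer.MapAsync(wgpu::MapMode::Read, 8, wgpu::kWholeMapSize,
    //                   callback, userdata);                               // ok: size -> 248
    //   buffer.MapAsync(wgpu::MapMode::Read, 4, 16, callback, userdata);   // offset % 8 != 0
    //   buffer.MapAsync(wgpu::MapMode::Read, 0, 6, callback, userdata);    // size % 4 != 0
    //   buffer.MapAsync(wgpu::MapMode::Write, 0, 256, callback, userdata); // lacks MapWrite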

    bool BufferBase::CanGetMappedRange(bool writable, size_t offset, size_t size) const {
        if (offset % 8 != 0 || size % 4 != 0) {
            return false;
        }

        if (size > mMapSize || offset < mMapOffset) {
            return false;
        }

        size_t offsetInMappedRange = offset - mMapOffset;
        if (offsetInMappedRange > mMapSize - size) {
            return false;
        }

        // Note that:
        //
        //   - We don't check that the device is alive because the application can ask for the
        //     mapped pointer before it knows (and even before Dawn knows) that the device was
        //     lost, and this still needs to work properly.
        //   - We don't check that the object is alive because we need to return mapped pointers
        //     for error buffers too.

        switch (mState) {
            // Writable Buffer::GetMappedRange is always allowed when mapped at creation.
            case BufferState::MappedAtCreation:
                return true;

            case BufferState::Mapped:
                ASSERT(bool(mMapMode & wgpu::MapMode::Read) ^
                       bool(mMapMode & wgpu::MapMode::Write));
                return !writable || (mMapMode & wgpu::MapMode::Write);

            case BufferState::Unmapped:
            case BufferState::Destroyed:
                return false;
        }
        UNREACHABLE();
    }
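
    // Hedged example (editorial addition): range containment against the mapped window.
    // With a buffer mapped at offset 8 and size 16 (bytes 8..24):
    //
    //   CanGetMappedRange(false, 8, 16);   // true: exactly the mapped window
    //   CanGetMappedRange(false, 16, 8);   // true: inside the window
    //   CanGetMappedRange(false, 0, 4);    // false: offset < mMapOffset
    //   CanGetMappedRange(false, 16, 16);  // false: extends past byte 24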

    MaybeError BufferBase::ValidateUnmap() const {
        DAWN_TRY(GetDevice()->ValidateIsAlive());

        switch (mState) {
            case BufferState::Mapped:
            case BufferState::MappedAtCreation:
                // A buffer may be in the Mapped state if it was created with mappedAtCreation
                // even if it did not have a mappable usage.
                return {};
            case BufferState::Unmapped:
                return DAWN_FORMAT_VALIDATION_ERROR("%s is unmapped.", this);
            case BufferState::Destroyed:
                return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
        }
        UNREACHABLE();
    }

    void BufferBase::OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
        CallMapCallback(mapID, status);
    }

    bool BufferBase::NeedsInitialization() const {
        return !mIsDataInitialized &&
               GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse);
    }

    bool BufferBase::IsDataInitialized() const {
        return mIsDataInitialized;
    }

    void BufferBase::SetIsDataInitialized() {
        mIsDataInitialized = true;
    }

    bool BufferBase::IsFullBufferRange(uint64_t offset, uint64_t size) const {
        return offset == 0 && size == GetSize();
    }

}  // namespace dawn_native