• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2017 The Dawn Authors
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //     http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #include "dawn_native/null/DeviceNull.h"
16 
17 #include "dawn_native/BackendConnection.h"
18 #include "dawn_native/Commands.h"
19 #include "dawn_native/ErrorData.h"
20 #include "dawn_native/Instance.h"
21 #include "dawn_native/Surface.h"
22 
23 namespace dawn_native { namespace null {
24 
25     // Implementation of pre-Device objects: the null adapter, null backend connection and Connect()
26 
    // The null Adapter is purely CPU-based and reports itself as such; it exists so the rest
    // of Dawn can be exercised without any real GPU backend.
    Adapter::Adapter(InstanceBase* instance) : AdapterBase(instance, wgpu::BackendType::Null) {
        mPCIInfo.name = "Null backend";
        mAdapterType = wgpu::AdapterType::CPU;
        // Initialization of the null adapter cannot fail, so an ASSERT is sufficient.
        MaybeError err = Initialize();
        ASSERT(err.IsSuccess());
    }

    Adapter::~Adapter() = default;

    // The null backend has no mechanism for importing external images.
    bool Adapter::SupportsExternalImages() const {
        return false;
    }
39 
    // Used for the tests that intend to use an adapter without all features enabled.
    // Overwrites the feature set (normally "everything", see InitializeSupportedFeaturesImpl)
    // with exactly the named features.
    void Adapter::SetSupportedFeatures(const std::vector<const char*>& requiredFeatures) {
        mSupportedFeatures = GetInstance()->FeatureNamesToFeaturesSet(requiredFeatures);
    }
44 
    // Nothing backend-specific to initialize for the null adapter.
    MaybeError Adapter::InitializeImpl() {
        return {};
    }

    MaybeError Adapter::InitializeSupportedFeaturesImpl() {
        // Enable all features by default for the convenience of tests.
        mSupportedFeatures.featuresBitSet.set();
        return {};
    }

    // Report the spec's default limits; the null backend imposes no limits of its own.
    MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
        GetDefaultLimits(&limits->v1);
        return {};
    }

    ResultOrError<DeviceBase*> Adapter::CreateDeviceImpl(const DawnDeviceDescriptor* descriptor) {
        return Device::Create(this, descriptor);
    }
63 
64     class Backend : public BackendConnection {
65       public:
Backend(InstanceBase * instance)66         Backend(InstanceBase* instance) : BackendConnection(instance, wgpu::BackendType::Null) {
67         }
68 
DiscoverDefaultAdapters()69         std::vector<std::unique_ptr<AdapterBase>> DiscoverDefaultAdapters() override {
70             // There is always a single Null adapter because it is purely CPU based and doesn't
71             // depend on the system.
72             std::vector<std::unique_ptr<AdapterBase>> adapters;
73             std::unique_ptr<Adapter> adapter = std::make_unique<Adapter>(GetInstance());
74             adapters.push_back(std::move(adapter));
75             return adapters;
76         }
77     };
78 
    // Entry point used by the instance to create the null backend connection.
    // Ownership of the returned Backend transfers to the caller.
    BackendConnection* Connect(InstanceBase* instance) {
        return new Backend(instance);
    }
82 
83     struct CopyFromStagingToBufferOperation : PendingOperation {
Executedawn_native::null::CopyFromStagingToBufferOperation84         virtual void Execute() {
85             destination->CopyFromStaging(staging, sourceOffset, destinationOffset, size);
86         }
87 
88         StagingBufferBase* staging;
89         Ref<Buffer> destination;
90         uint64_t sourceOffset;
91         uint64_t destinationOffset;
92         uint64_t size;
93     };
94 
95     // Device
96 
97     // static
    // static
    // Two-phase creation: the constructor cannot fail, Initialize() can, so the Ref is only
    // detached (ownership passed to the caller) after initialization succeeds.
    ResultOrError<Device*> Device::Create(Adapter* adapter,
                                          const DawnDeviceDescriptor* descriptor) {
        Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
        DAWN_TRY(device->Initialize());
        return device.Detach();
    }
104 
    Device::~Device() {
        // Tear down device state before members are destroyed; see DestroyImpl for the
        // backend-specific part.
        Destroy();
    }

    MaybeError Device::Initialize() {
        // The null device owns a single null Queue.
        return DeviceBase::Initialize(new Queue(this));
    }
112 
    // Object creation: each factory below simply wraps the corresponding null object type.
    // None of them do real GPU work.
    ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
        const BindGroupDescriptor* descriptor) {
        return AcquireRef(new BindGroup(this, descriptor));
    }
    ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
        const BindGroupLayoutDescriptor* descriptor,
        PipelineCompatibilityToken pipelineCompatibilityToken) {
        return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
    }
    ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
        // Buffers count against the device's simulated memory budget (see
        // IncrementMemoryUsage); the matching decrement happens in Buffer::DestroyImpl.
        DAWN_TRY(IncrementMemoryUsage(descriptor->size));
        return AcquireRef(new Buffer(this, descriptor));
    }
    ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
        CommandEncoder* encoder,
        const CommandBufferDescriptor* descriptor) {
        return AcquireRef(new CommandBuffer(encoder, descriptor));
    }
    Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
        const ComputePipelineDescriptor* descriptor) {
        return AcquireRef(new ComputePipeline(this, descriptor));
    }
    ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
        const PipelineLayoutDescriptor* descriptor) {
        return AcquireRef(new PipelineLayout(this, descriptor));
    }
    ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
        const QuerySetDescriptor* descriptor) {
        return AcquireRef(new QuerySet(this, descriptor));
    }
    Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
        const RenderPipelineDescriptor* descriptor) {
        return AcquireRef(new RenderPipeline(this, descriptor));
    }
    ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
        return AcquireRef(new Sampler(this, descriptor));
    }
    ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
        const ShaderModuleDescriptor* descriptor,
        ShaderModuleParseResult* parseResult) {
        // Shader modules are the one object with a fallible Initialize step (parsing).
        Ref<ShaderModule> module = AcquireRef(new ShaderModule(this, descriptor));
        DAWN_TRY(module->Initialize(parseResult));
        return module;
    }
    // Implementation-based (old) swapchain path.
    ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
        const SwapChainDescriptor* descriptor) {
        return AcquireRef(new OldSwapChain(this, descriptor));
    }
    // Surface-based (new) swapchain path.
    ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
        Surface* surface,
        NewSwapChainBase* previousSwapChain,
        const SwapChainDescriptor* descriptor) {
        return SwapChain::Create(this, surface, previousSwapChain, descriptor);
    }
    ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
        return AcquireRef(new Texture(this, descriptor, TextureBase::TextureState::OwnedInternal));
    }
    ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
        TextureBase* texture,
        const TextureViewDescriptor* descriptor) {
        return AcquireRef(new TextureView(texture, descriptor));
    }
175 
    // Creates a CPU-side staging buffer. Initialize() charges the simulated memory budget
    // and allocates the backing storage.
    ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
        std::unique_ptr<StagingBufferBase> stagingBuffer =
            std::make_unique<StagingBuffer>(size, this);
        DAWN_TRY(stagingBuffer->Initialize());
        return std::move(stagingBuffer);
    }
182 
    // Backend teardown, called once the device has been disconnected.
    void Device::DestroyImpl() {
        ASSERT(GetState() == State::Disconnected);

        // Clear pending operations before checking mMemoryUsage because some operations keep a
        // reference to Buffers.
        mPendingOperations.clear();
        // All buffers and staging buffers must have been destroyed by now, returning their
        // bytes to the budget.
        ASSERT(mMemoryUsage == 0);
    }

    // "Waiting" in the null backend is just dropping the queued work.
    MaybeError Device::WaitForIdleForDestruction() {
        mPendingOperations.clear();
        return {};
    }
196 
CopyFromStagingToBuffer(StagingBufferBase * source,uint64_t sourceOffset,BufferBase * destination,uint64_t destinationOffset,uint64_t size)197     MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
198                                                uint64_t sourceOffset,
199                                                BufferBase* destination,
200                                                uint64_t destinationOffset,
201                                                uint64_t size) {
202         if (IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
203             destination->SetIsDataInitialized();
204         }
205 
206         auto operation = std::make_unique<CopyFromStagingToBufferOperation>();
207         operation->staging = source;
208         operation->destination = ToBackend(destination);
209         operation->sourceOffset = sourceOffset;
210         operation->destinationOffset = destinationOffset;
211         operation->size = size;
212 
213         AddPendingOperation(std::move(operation));
214 
215         return {};
216     }
217 
    // Intentionally a no-op: the null backend does not store texture contents, so there is
    // nothing to copy. All parameters are ignored.
    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
                                                const TextureDataLayout& src,
                                                TextureCopy* dst,
                                                const Extent3D& copySizePixels) {
        return {};
    }
224 
    // Charges `bytes` against the simulated memory budget, failing with OOM when the budget
    // would be exceeded.
    MaybeError Device::IncrementMemoryUsage(uint64_t bytes) {
        static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max(), "");
        // The second comparison is written as `mMemoryUsage > kMaxMemoryUsage - bytes`
        // (rather than `mMemoryUsage + bytes > kMaxMemoryUsage`) to avoid unsigned overflow;
        // the first check guarantees the subtraction cannot underflow.
        if (bytes > kMaxMemoryUsage || mMemoryUsage > kMaxMemoryUsage - bytes) {
            return DAWN_OUT_OF_MEMORY_ERROR("Out of memory.");
        }
        mMemoryUsage += bytes;
        return {};
    }

    // Returns `bytes` to the budget; callers must never return more than they charged.
    void Device::DecrementMemoryUsage(uint64_t bytes) {
        ASSERT(mMemoryUsage >= bytes);
        mMemoryUsage -= bytes;
    }
238 
    // Ticking the null device just flushes whatever work has been queued.
    MaybeError Device::TickImpl() {
        return SubmitPendingOperations();
    }

    // Work "completes" instantly in the null backend, so everything submitted is done.
    ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
        return GetLastSubmittedCommandSerial();
    }

    void Device::AddPendingOperation(std::unique_ptr<PendingOperation> operation) {
        mPendingOperations.emplace_back(std::move(operation));
    }
250 
SubmitPendingOperations()251     MaybeError Device::SubmitPendingOperations() {
252         for (auto& operation : mPendingOperations) {
253             operation->Execute();
254         }
255         mPendingOperations.clear();
256 
257         DAWN_TRY(CheckPassedSerials());
258         IncrementLastSubmittedCommandSerial();
259 
260         return {};
261     }
262 
263     // BindGroupDataHolder
264 
    // Owns the raw allocation that backs a BindGroup's binding data; exists so the
    // allocation outlives (is constructed before / destroyed after) the BindGroupBase part.
    BindGroupDataHolder::BindGroupDataHolder(size_t size)
        : mBindingDataAllocation(malloc(size))  // malloc is guaranteed to return a
                                                // pointer aligned enough for the allocation
    {
    }

    BindGroupDataHolder::~BindGroupDataHolder() {
        // free(nullptr) is a no-op, so a failed malloc is harmless here.
        free(mBindingDataAllocation);
    }
274 
275     // BindGroup
276 
    // NOTE(review): this relies on BindGroupDataHolder being listed before BindGroupBase in
    // the class's base list so mBindingDataAllocation is valid when BindGroupBase's
    // constructor runs — confirm against the header.
    BindGroup::BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor)
        : BindGroupDataHolder(descriptor->layout->GetBindingDataSize()),
          BindGroupBase(device, descriptor, mBindingDataAllocation) {
    }
281 
282     // BindGroupLayout
283 
    // Pure pass-through: the null backend adds nothing to the base bind group layout.
    BindGroupLayout::BindGroupLayout(DeviceBase* device,
                                     const BindGroupLayoutDescriptor* descriptor,
                                     PipelineCompatibilityToken pipelineCompatibilityToken)
        : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken) {
    }
289 
290     // Buffer
291 
    // Buffer contents live in ordinary host memory. The backing store is deliberately left
    // uninitialized (new uint8_t[n], not value-init) — presumably so lazy-clear tests can
    // observe uninitialized data; TODO confirm.
    Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
        : BufferBase(device, descriptor) {
        mBackingData = std::unique_ptr<uint8_t[]>(new uint8_t[GetSize()]);
        mAllocatedSize = GetSize();
    }
297 
    bool Buffer::IsCPUWritableAtCreation() const {
        // Only return true for mappable buffers so we can test cases that need / don't need a
        // staging buffer.
        return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
    }

    // Nothing to do: the host-memory backing store is always "mapped".
    MaybeError Buffer::MapAtCreationImpl() {
        return {};
    }
307 
CopyFromStaging(StagingBufferBase * staging,uint64_t sourceOffset,uint64_t destinationOffset,uint64_t size)308     void Buffer::CopyFromStaging(StagingBufferBase* staging,
309                                  uint64_t sourceOffset,
310                                  uint64_t destinationOffset,
311                                  uint64_t size) {
312         uint8_t* ptr = reinterpret_cast<uint8_t*>(staging->GetMappedPointer());
313         memcpy(mBackingData.get() + destinationOffset, ptr + sourceOffset, size);
314     }
315 
DoWriteBuffer(uint64_t bufferOffset,const void * data,size_t size)316     void Buffer::DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size) {
317         ASSERT(bufferOffset + size <= GetSize());
318         ASSERT(mBackingData);
319         memcpy(mBackingData.get() + bufferOffset, data, size);
320     }
321 
    // Mapping is trivial since the data is always host-visible; mode/offset/size are ignored.
    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
        return {};
    }

    void* Buffer::GetMappedPointerImpl() {
        return mBackingData.get();
    }

    void Buffer::UnmapImpl() {
    }

    void Buffer::DestroyImpl() {
        BufferBase::DestroyImpl();
        // Return this buffer's bytes to the budget charged in Device::CreateBufferImpl.
        ToBackend(GetDevice())->DecrementMemoryUsage(GetSize());
    }
337 
338     // CommandBuffer
339 
    // Null command buffers record nothing beyond what the base class keeps.
    CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
        : CommandBufferBase(encoder, descriptor) {
    }
343 
344     // QuerySet
345 
    // Null query sets hold no storage; queries never produce results in this backend.
    QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
        : QuerySetBase(device, descriptor) {
    }
349 
350     // Queue
351 
Queue(Device * device)352     Queue::Queue(Device* device) : QueueBase(device) {
353     }
354 
~Queue()355     Queue::~Queue() {
356     }
357 
    // Command buffers contain no executable work in the null backend, so submit ignores
    // them and only flushes the device's pending operations.
    MaybeError Queue::SubmitImpl(uint32_t, CommandBufferBase* const*) {
        Device* device = ToBackend(GetDevice());

        // The Vulkan, D3D12 and Metal implementation all tick the device here,
        // for testing purposes we should also tick in the null implementation.
        DAWN_TRY(device->Tick());

        return device->SubmitPendingOperations();
    }
367 
    // Writes happen immediately (no staging / no deferral) since buffers are host memory.
    MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
                                      uint64_t bufferOffset,
                                      const void* data,
                                      size_t size) {
        ToBackend(buffer)->DoWriteBuffer(bufferOffset, data, size);
        return {};
    }
375 
    // ComputePipeline
    // No compilation happens in the null backend, so pipeline initialization always succeeds.
    MaybeError ComputePipeline::Initialize() {
        return {};
    }

    // RenderPipeline
    MaybeError RenderPipeline::Initialize() {
        return {};
    }
385 
386     // SwapChain
387 
    // static
    // Two-phase creation mirroring Device::Create: construct, then run the fallible
    // Initialize step before handing the swapchain out.
    ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
                                                    Surface* surface,
                                                    NewSwapChainBase* previousSwapChain,
                                                    const SwapChainDescriptor* descriptor) {
        Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
        DAWN_TRY(swapchain->Initialize(previousSwapChain));
        return swapchain;
    }
397 
Initialize(NewSwapChainBase * previousSwapChain)398     MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
399         if (previousSwapChain != nullptr) {
400             // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
401             // multiple backends one after the other. It probably needs to block until the backend
402             // and GPU are completely finished with the previous swapchain.
403             if (previousSwapChain->GetBackendType() != wgpu::BackendType::Null) {
404                 return DAWN_VALIDATION_ERROR("null::SwapChain cannot switch between APIs");
405             }
406         }
407 
408         return {};
409     }
410 
411     SwapChain::~SwapChain() = default;
412 
    // "Presenting" just retires the current texture; there is nothing to display.
    MaybeError SwapChain::PresentImpl() {
        mTexture->APIDestroy();
        mTexture = nullptr;
        return {};
    }

    // Lazily creates the texture for the current frame and returns a default view of it.
    ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewImpl() {
        TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
        // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
        mTexture = AcquireRef(
            new Texture(GetDevice(), &textureDesc, TextureBase::TextureState::OwnedInternal));
        // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
        return mTexture->APICreateView();
    }
427 
DetachFromSurfaceImpl()428     void SwapChain::DetachFromSurfaceImpl() {
429         if (mTexture != nullptr) {
430             mTexture->APIDestroy();
431             mTexture = nullptr;
432         }
433     }
434 
435     // ShaderModule
436 
    // Only the frontend parsing/validation is performed; there is no backend compilation.
    MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
        return InitializeBase(parseResult);
    }
440 
441     // OldSwapChain
442 
    // Old (implementation-based) swapchain: forwards Init to the user-provided WSI
    // implementation with a null WSI context.
    OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
        : OldSwapChainBase(device, descriptor) {
        const auto& im = GetImplementation();
        im.Init(im.userData, nullptr);
    }

    OldSwapChain::~OldSwapChain() {
    }
451 
    // Each frame's texture is just an ordinary device-created texture.
    TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
        return GetDevice()->APICreateTexture(descriptor);
    }

    // Nothing to flush or transition before present in the null backend.
    MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
        return {};
    }
459 
460     // NativeSwapChainImpl
461 
    // Stub WSI implementation: every entry point succeeds without doing anything, and a
    // fixed RGBA8Unorm format is reported.
    void NativeSwapChainImpl::Init(WSIContext* context) {
    }

    DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
                                                      WGPUTextureUsage,
                                                      uint32_t width,
                                                      uint32_t height) {
        return DAWN_SWAP_CHAIN_NO_ERROR;
    }

    DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
        return DAWN_SWAP_CHAIN_NO_ERROR;
    }

    DawnSwapChainError NativeSwapChainImpl::Present() {
        return DAWN_SWAP_CHAIN_NO_ERROR;
    }

    wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
        return wgpu::TextureFormat::RGBA8Unorm;
    }
483 
484     // StagingBuffer
485 
    // Host-memory staging buffer that participates in the device's simulated memory budget.
    StagingBuffer::StagingBuffer(size_t size, Device* device)
        : StagingBufferBase(size), mDevice(device) {
    }

    StagingBuffer::~StagingBuffer() {
        // mBuffer is only set once Initialize() has charged the budget, so the check below
        // ensures we never decrement for a buffer that failed to initialize.
        if (mBuffer) {
            mDevice->DecrementMemoryUsage(GetSize());
        }
    }

    MaybeError StagingBuffer::Initialize() {
        DAWN_TRY(mDevice->IncrementMemoryUsage(GetSize()));
        mBuffer = std::make_unique<uint8_t[]>(GetSize());
        mMappedPointer = mBuffer.get();
        return {};
    }
502 
    // The null backend has no hardware constraints, so all alignments are 1 and the
    // timestamp period is a nominal 1 ns.
    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
        return 1;
    }

    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
        return 1;
    }

    float Device::GetTimestampPeriodInNS() const {
        return 1.0f;
    }
514 
515 }}  // namespace dawn_native::null
516